src/broadcom/compiler/vir.c
1 /*
2 * Copyright © 2016-2017 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "broadcom/common/v3d_device_info.h"
25 #include "v3d_compiler.h"
26 #include "util/u_prim.h"
27 #include "compiler/nir/nir_schedule.h"
28
29 int
30 vir_get_nsrc(struct qinst *inst)
31 {
32 switch (inst->qpu.type) {
33 case V3D_QPU_INSTR_TYPE_BRANCH:
34 return 0;
35 case V3D_QPU_INSTR_TYPE_ALU:
36 if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
37 return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
38 else
39 return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
40 }
41
42 return 0;
43 }
44
45 /**
46 * Returns whether the instruction has any side effects that must be
47 * preserved.
48 */
49 bool
50 vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
51 {
52 switch (inst->qpu.type) {
53 case V3D_QPU_INSTR_TYPE_BRANCH:
54 return true;
55 case V3D_QPU_INSTR_TYPE_ALU:
56 switch (inst->qpu.alu.add.op) {
57 case V3D_QPU_A_SETREVF:
58 case V3D_QPU_A_SETMSF:
59 case V3D_QPU_A_VPMSETUP:
60 case V3D_QPU_A_STVPMV:
61 case V3D_QPU_A_STVPMD:
62 case V3D_QPU_A_STVPMP:
63 case V3D_QPU_A_VPMWT:
64 case V3D_QPU_A_TMUWT:
65 return true;
66 default:
67 break;
68 }
69
70 switch (inst->qpu.alu.mul.op) {
71 case V3D_QPU_M_MULTOP:
72 return true;
73 default:
74 break;
75 }
76 }
77
78 if (inst->qpu.sig.ldtmu ||
79 inst->qpu.sig.ldvary ||
80 inst->qpu.sig.ldtlbu ||
81 inst->qpu.sig.ldtlb ||
82 inst->qpu.sig.wrtmuc ||
83 inst->qpu.sig.thrsw) {
84 return true;
85 }
86
87 return false;
88 }
89
90 bool
91 vir_is_raw_mov(struct qinst *inst)
92 {
93 if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
94 (inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
95 inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
96 return false;
97 }
98
99 if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
100 inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
101 return false;
102 }
103
104 if (inst->qpu.alu.add.a_unpack != V3D_QPU_UNPACK_NONE ||
105 inst->qpu.alu.add.b_unpack != V3D_QPU_UNPACK_NONE ||
106 inst->qpu.alu.mul.a_unpack != V3D_QPU_UNPACK_NONE ||
107 inst->qpu.alu.mul.b_unpack != V3D_QPU_UNPACK_NONE) {
108 return false;
109 }
110
111 if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
112 inst->qpu.flags.mc != V3D_QPU_COND_NONE)
113 return false;
114
115 return true;
116 }
117
118 bool
119 vir_is_add(struct qinst *inst)
120 {
121 return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
122 inst->qpu.alu.add.op != V3D_QPU_A_NOP);
123 }
124
125 bool
126 vir_is_mul(struct qinst *inst)
127 {
128 return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
129 inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
130 }
131
132 bool
133 vir_is_tex(struct qinst *inst)
134 {
135 if (inst->dst.file == QFILE_MAGIC)
136 return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);
137
138 if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
139 inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
140 return true;
141 }
142
143 return false;
144 }
145
146 bool
147 vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
148 {
149 for (int i = 0; i < vir_get_nsrc(inst); i++) {
150 switch (inst->src[i].file) {
151 case QFILE_VPM:
152 return true;
153 default:
154 break;
155 }
156 }
157
158 if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
159 inst->qpu.sig.ldtlb ||
160 inst->qpu.sig.ldtlbu ||
161 inst->qpu.sig.ldvpm)) {
162 return true;
163 }
164
165 return false;
166 }
167
168 bool
169 vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
170 {
171 switch (inst->dst.file) {
172 case QFILE_MAGIC:
173 switch (inst->dst.index) {
174 case V3D_QPU_WADDR_RECIP:
175 case V3D_QPU_WADDR_RSQRT:
176 case V3D_QPU_WADDR_EXP:
177 case V3D_QPU_WADDR_LOG:
178 case V3D_QPU_WADDR_SIN:
179 return true;
180 }
181 break;
182 default:
183 break;
184 }
185
186 if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
187 return true;
188
189 return false;
190 }
191
192 void
193 vir_set_unpack(struct qinst *inst, int src,
194 enum v3d_qpu_input_unpack unpack)
195 {
196 assert(src == 0 || src == 1);
197
198 if (vir_is_add(inst)) {
199 if (src == 0)
200 inst->qpu.alu.add.a_unpack = unpack;
201 else
202 inst->qpu.alu.add.b_unpack = unpack;
203 } else {
204 assert(vir_is_mul(inst));
205 if (src == 0)
206 inst->qpu.alu.mul.a_unpack = unpack;
207 else
208 inst->qpu.alu.mul.b_unpack = unpack;
209 }
210 }
211
212 void
213 vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
214 {
215 if (vir_is_add(inst)) {
216 inst->qpu.flags.ac = cond;
217 } else {
218 assert(vir_is_mul(inst));
219 inst->qpu.flags.mc = cond;
220 }
221 }
222
223 void
224 vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
225 {
226 if (vir_is_add(inst)) {
227 inst->qpu.flags.apf = pf;
228 } else {
229 assert(vir_is_mul(inst));
230 inst->qpu.flags.mpf = pf;
231 }
232 }
233
234 void
235 vir_set_uf(struct qinst *inst, enum v3d_qpu_uf uf)
236 {
237 if (vir_is_add(inst)) {
238 inst->qpu.flags.auf = uf;
239 } else {
240 assert(vir_is_mul(inst));
241 inst->qpu.flags.muf = uf;
242 }
243 }
244
245 #if 0
246 uint8_t
247 vir_channels_written(struct qinst *inst)
248 {
249 if (vir_is_mul(inst)) {
250 switch (inst->dst.pack) {
251 case QPU_PACK_MUL_NOP:
252 case QPU_PACK_MUL_8888:
253 return 0xf;
254 case QPU_PACK_MUL_8A:
255 return 0x1;
256 case QPU_PACK_MUL_8B:
257 return 0x2;
258 case QPU_PACK_MUL_8C:
259 return 0x4;
260 case QPU_PACK_MUL_8D:
261 return 0x8;
262 }
263 } else {
264 switch (inst->dst.pack) {
265 case QPU_PACK_A_NOP:
266 case QPU_PACK_A_8888:
267 case QPU_PACK_A_8888_SAT:
268 case QPU_PACK_A_32_SAT:
269 return 0xf;
270 case QPU_PACK_A_8A:
271 case QPU_PACK_A_8A_SAT:
272 return 0x1;
273 case QPU_PACK_A_8B:
274 case QPU_PACK_A_8B_SAT:
275 return 0x2;
276 case QPU_PACK_A_8C:
277 case QPU_PACK_A_8C_SAT:
278 return 0x4;
279 case QPU_PACK_A_8D:
280 case QPU_PACK_A_8D_SAT:
281 return 0x8;
282 case QPU_PACK_A_16A:
283 case QPU_PACK_A_16A_SAT:
284 return 0x3;
285 case QPU_PACK_A_16B:
286 case QPU_PACK_A_16B_SAT:
287 return 0xc;
288 }
289 }
290 unreachable("Bad pack field");
291 }
292 #endif
293
294 struct qreg
295 vir_get_temp(struct v3d_compile *c)
296 {
297 struct qreg reg;
298
299 reg.file = QFILE_TEMP;
300 reg.index = c->num_temps++;
301
302 if (c->num_temps > c->defs_array_size) {
303 uint32_t old_size = c->defs_array_size;
304 c->defs_array_size = MAX2(old_size * 2, 16);
305
306 c->defs = reralloc(c, c->defs, struct qinst *,
307 c->defs_array_size);
308 memset(&c->defs[old_size], 0,
309 sizeof(c->defs[0]) * (c->defs_array_size - old_size));
310
311 c->spillable = reralloc(c, c->spillable,
312 BITSET_WORD,
313 BITSET_WORDS(c->defs_array_size));
314 for (int i = old_size; i < c->defs_array_size; i++)
315 BITSET_SET(c->spillable, i);
316 }
317
318 return reg;
319 }
320
321 struct qinst *
322 vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
323 {
324 struct qinst *inst = calloc(1, sizeof(*inst));
325
326 inst->qpu = v3d_qpu_nop();
327 inst->qpu.alu.add.op = op;
328
329 inst->dst = dst;
330 inst->src[0] = src0;
331 inst->src[1] = src1;
332 inst->uniform = ~0;
333
334 return inst;
335 }
336
337 struct qinst *
338 vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
339 {
340 struct qinst *inst = calloc(1, sizeof(*inst));
341
342 inst->qpu = v3d_qpu_nop();
343 inst->qpu.alu.mul.op = op;
344
345 inst->dst = dst;
346 inst->src[0] = src0;
347 inst->src[1] = src1;
348 inst->uniform = ~0;
349
350 return inst;
351 }
352
353 struct qinst *
354 vir_branch_inst(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)
355 {
356 struct qinst *inst = calloc(1, sizeof(*inst));
357
358 inst->qpu = v3d_qpu_nop();
359 inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
360 inst->qpu.branch.cond = cond;
361 inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
362 inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
363 inst->qpu.branch.ub = true;
364 inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;
365
366 inst->dst = vir_nop_reg();
367 inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, 0);
368
369 return inst;
370 }
371
372 static void
373 vir_emit(struct v3d_compile *c, struct qinst *inst)
374 {
375 switch (c->cursor.mode) {
376 case vir_cursor_add:
377 list_add(&inst->link, c->cursor.link);
378 break;
379 case vir_cursor_addtail:
380 list_addtail(&inst->link, c->cursor.link);
381 break;
382 }
383
384 c->cursor = vir_after_inst(inst);
385 c->live_intervals_valid = false;
386 }
387
388 /* Updates inst to write to a new temporary, emits it, and notes the def. */
389 struct qreg
390 vir_emit_def(struct v3d_compile *c, struct qinst *inst)
391 {
392 assert(inst->dst.file == QFILE_NULL);
393
394 /* If we're emitting an instruction that's a def, it had better be
395 * writing a register.
396 */
397 if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
398 assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||
399 v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));
400 assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||
401 v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));
402 }
403
404 inst->dst = vir_get_temp(c);
405
406 if (inst->dst.file == QFILE_TEMP)
407 c->defs[inst->dst.index] = inst;
408
409 vir_emit(c, inst);
410
411 return inst->dst;
412 }
413
414 struct qinst *
415 vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
416 {
417 if (inst->dst.file == QFILE_TEMP)
418 c->defs[inst->dst.index] = NULL;
419
420 vir_emit(c, inst);
421
422 return inst;
423 }
424
425 struct qblock *
426 vir_new_block(struct v3d_compile *c)
427 {
428 struct qblock *block = rzalloc(c, struct qblock);
429
430 list_inithead(&block->instructions);
431
432 block->predecessors = _mesa_set_create(block,
433 _mesa_hash_pointer,
434 _mesa_key_pointer_equal);
435
436 block->index = c->next_block_index++;
437
438 return block;
439 }
440
441 void
442 vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
443 {
444 c->cur_block = block;
445 c->cursor = vir_after_block(block);
446 list_addtail(&block->link, &c->blocks);
447 }
448
449 struct qblock *
450 vir_entry_block(struct v3d_compile *c)
451 {
452 return list_first_entry(&c->blocks, struct qblock, link);
453 }
454
455 struct qblock *
456 vir_exit_block(struct v3d_compile *c)
457 {
458 return list_last_entry(&c->blocks, struct qblock, link);
459 }
460
461 void
462 vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
463 {
464 _mesa_set_add(successor->predecessors, predecessor);
465 if (predecessor->successors[0]) {
466 assert(!predecessor->successors[1]);
467 predecessor->successors[1] = successor;
468 } else {
469 predecessor->successors[0] = successor;
470 }
471 }
472
473 const struct v3d_compiler *
474 v3d_compiler_init(const struct v3d_device_info *devinfo)
475 {
476 struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
477 if (!compiler)
478 return NULL;
479
480 compiler->devinfo = devinfo;
481
482 if (!vir_init_reg_sets(compiler)) {
483 ralloc_free(compiler);
484 return NULL;
485 }
486
487 return compiler;
488 }
489
490 void
491 v3d_compiler_free(const struct v3d_compiler *compiler)
492 {
493 ralloc_free((void *)compiler);
494 }
495
496 static struct v3d_compile *
497 vir_compile_init(const struct v3d_compiler *compiler,
498 struct v3d_key *key,
499 nir_shader *s,
500 void (*debug_output)(const char *msg,
501 void *debug_output_data),
502 void *debug_output_data,
503 int program_id, int variant_id,
504 bool fallback_scheduler)
505 {
506 struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);
507
508 c->compiler = compiler;
509 c->devinfo = compiler->devinfo;
510 c->key = key;
511 c->program_id = program_id;
512 c->variant_id = variant_id;
513 c->threads = 4;
514 c->debug_output = debug_output;
515 c->debug_output_data = debug_output_data;
516 c->compilation_result = V3D_COMPILATION_SUCCEEDED;
517 c->fallback_scheduler = fallback_scheduler;
518
519 s = nir_shader_clone(c, s);
520 c->s = s;
521
522 list_inithead(&c->blocks);
523 vir_set_emit_block(c, vir_new_block(c));
524
525 c->output_position_index = -1;
526 c->output_sample_mask_index = -1;
527
528 c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
529 _mesa_key_pointer_equal);
530
531 return c;
532 }
533
534 static int
535 type_size_vec4(const struct glsl_type *type, bool bindless)
536 {
537 return glsl_count_attribute_slots(type, false);
538 }
539
540 static void
541 v3d_lower_nir(struct v3d_compile *c)
542 {
543 struct nir_lower_tex_options tex_options = {
544 .lower_txd = true,
545 .lower_tg4_broadcom_swizzle = true,
546
547 .lower_rect = false, /* XXX: Use this on V3D 3.x */
548 .lower_txp = ~0,
549 /* Apply swizzles to all samplers. */
550 .swizzle_result = ~0,
551 };
552
553 /* Lower the format swizzle and (for 32-bit returns)
554 * ARB_texture_swizzle-style swizzle.
555 */
556 for (int i = 0; i < ARRAY_SIZE(c->key->tex); i++) {
557 for (int j = 0; j < 4; j++)
558 tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];
559
560 if (c->key->tex[i].clamp_s)
561 tex_options.saturate_s |= 1 << i;
562 if (c->key->tex[i].clamp_t)
563 tex_options.saturate_t |= 1 << i;
564 if (c->key->tex[i].clamp_r)
565 tex_options.saturate_r |= 1 << i;
566 if (c->key->tex[i].return_size == 16) {
567 tex_options.lower_tex_packing[i] =
568 nir_lower_tex_packing_16;
569 }
570 }
571
572 /* CS textures may not have return_size reflecting the shadow state. */
573 nir_foreach_uniform_variable(var, c->s) {
574 const struct glsl_type *type = glsl_without_array(var->type);
575 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
576
577 if (!glsl_type_is_sampler(type) ||
578 !glsl_sampler_type_is_shadow(type))
579 continue;
580
581 for (int i = 0; i < array_len; i++) {
582 tex_options.lower_tex_packing[var->data.binding + i] =
583 nir_lower_tex_packing_16;
584 }
585 }
586
587 NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
588 NIR_PASS_V(c->s, nir_lower_system_values);
589 NIR_PASS_V(c->s, nir_lower_compute_system_values, NULL);
590
591 NIR_PASS_V(c->s, nir_lower_vars_to_scratch,
592 nir_var_function_temp,
593 0,
594 glsl_get_natural_size_align_bytes);
595 NIR_PASS_V(c->s, v3d_nir_lower_scratch);
596 }
597
598 static void
599 v3d_set_prog_data_uniforms(struct v3d_compile *c,
600 struct v3d_prog_data *prog_data)
601 {
602 int count = c->num_uniforms;
603 struct v3d_uniform_list *ulist = &prog_data->uniforms;
604
605 ulist->count = count;
606 ulist->data = ralloc_array(prog_data, uint32_t, count);
607 memcpy(ulist->data, c->uniform_data,
608 count * sizeof(*ulist->data));
609 ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
610 memcpy(ulist->contents, c->uniform_contents,
611 count * sizeof(*ulist->contents));
612 }
613
614 static void
615 v3d_vs_set_prog_data(struct v3d_compile *c,
616 struct v3d_vs_prog_data *prog_data)
617 {
618 /* The vertex data gets format converted by the VPM so that
619 * each attribute channel takes up a VPM column. Precompute
620 * the sizes for the shader record.
621 */
622 for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
623 prog_data->vattr_sizes[i] = c->vattr_sizes[i];
624 prog_data->vpm_input_size += c->vattr_sizes[i];
625 }
626
627 prog_data->uses_vid = (c->s->info.system_values_read &
628 (1ull << SYSTEM_VALUE_VERTEX_ID));
629 prog_data->uses_iid = (c->s->info.system_values_read &
630 (1ull << SYSTEM_VALUE_INSTANCE_ID));
631
632 if (prog_data->uses_vid)
633 prog_data->vpm_input_size++;
634 if (prog_data->uses_iid)
635 prog_data->vpm_input_size++;
636
637                 /* Input/output segment sizes are in sectors (8 rows of 32 bits per
638 * channel).
639 */
640 prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
641 prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;
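                /* Editor's illustrative note, not part of the original source:
                 * the numbers here are invented. A shader reading, say, 10
                 * attribute channels plus VertexID and InstanceID has
                 * vpm_input_size = 12 rows, which becomes align(12, 8) / 8 = 2
                 * sectors; an output size of 13 rows likewise becomes
                 * align(13, 8) / 8 = 2 sectors.
                 */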
642
643 /* Set us up for shared input/output segments. This is apparently
644 * necessary for our VCM setup to avoid varying corruption.
645 */
646 prog_data->separate_segments = false;
647 prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
648 prog_data->vpm_input_size);
649 prog_data->vpm_input_size = 0;
650
651 /* Compute VCM cache size. We set up our program to take up less than
652 * half of the VPM, so that any set of bin and render programs won't
653 * run out of space. We need space for at least one input segment,
654 * and then allocate the rest to output segments (one for the current
655 * program, the rest to VCM). The valid range of the VCM cache size
656 * field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4
657 * batches.
658 */
659 assert(c->devinfo->vpm_size);
660 int sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;
661 int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
662 int half_vpm = vpm_size_in_sectors / 2;
663 int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
664 int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
665 assert(vpm_output_batches >= 2);
666 prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);
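                /* Editor's illustrative note, not part of the original source:
                 * assuming for illustration V3D_CHANNELS = 16 and a vpm_size of
                 * 16384 bytes, sector_size = 16 * 4 * 8 = 512 bytes,
                 * vpm_size_in_sectors = 32 and half_vpm = 16. With the
                 * shared-segment setup above (vpm_input_size is now 0) and a
                 * vpm_output_size of 2 sectors, vpm_output_batches = 16 / 2 = 8
                 * and vcm_cache_size = CLAMP(8 - 1, 2, 4) = 4.
                 */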
667 }
668
669 static void
670 v3d_gs_set_prog_data(struct v3d_compile *c,
671 struct v3d_gs_prog_data *prog_data)
672 {
673 prog_data->num_inputs = c->num_inputs;
674 memcpy(prog_data->input_slots, c->input_slots,
675 c->num_inputs * sizeof(*c->input_slots));
676
677                 /* gl_PrimitiveIDIn is written by the GBG into the first word of the
678 * VPM output header automatically and the shader will overwrite
679 * it after reading it if necessary, so it doesn't add to the VPM
680 * size requirements.
681 */
682 prog_data->uses_pid = (c->s->info.system_values_read &
683 (1ull << SYSTEM_VALUE_PRIMITIVE_ID));
684
685 /* Output segment size is in sectors (8 rows of 32 bits per channel) */
686 prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;
687
688 /* Compute SIMD dispatch width and update VPM output size accordingly
689 * to ensure we can fit our program in memory. Available widths are
690 * 16, 8, 4, 1.
691 *
692 * Notice that at draw time we will have to consider VPM memory
693 * requirements from other stages and choose a smaller dispatch
694 * width if needed to fit the program in VPM memory.
695 */
696 prog_data->simd_width = 16;
697 while ((prog_data->simd_width > 1 && prog_data->vpm_output_size > 16) ||
698 prog_data->simd_width == 2) {
699 prog_data->simd_width >>= 1;
700 prog_data->vpm_output_size =
701 align(prog_data->vpm_output_size, 2) / 2;
702 }
703 assert(prog_data->vpm_output_size <= 16);
704 assert(prog_data->simd_width != 2);
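                /* Editor's illustrative note, not part of the original source:
                 * with an invented output segment of 40 sectors, the loop above
                 * halves the dispatch from width 16 to 8 (20 sectors) and then
                 * to 4 (10 sectors), which fits the 16-sector limit. The
                 * "simd_width == 2" clause forces one more halving so the final
                 * width is always one of 16, 8, 4 or 1.
                 */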
705
706 prog_data->out_prim_type = c->s->info.gs.output_primitive;
707 prog_data->num_invocations = c->s->info.gs.invocations;
708 }
709
710 static void
711 v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
712 struct v3d_fs_prog_data *prog_data)
713 {
714 prog_data->num_inputs = c->num_inputs;
715 memcpy(prog_data->input_slots, c->input_slots,
716 c->num_inputs * sizeof(*c->input_slots));
717
718 STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
719 (V3D_MAX_FS_INPUTS - 1) / 24);
720 for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
721 if (BITSET_TEST(c->flat_shade_flags, i))
722 prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);
723
724 if (BITSET_TEST(c->noperspective_flags, i))
725 prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);
726
727 if (BITSET_TEST(c->centroid_flags, i))
728 prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
729 }
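                /* Editor's illustrative note, not part of the original source:
                 * the flags are packed 24 per word, so e.g. a flat-shaded input
                 * at slot 30 sets bit 30 % 24 = 6 in flat_shade_flags[30 / 24],
                 * i.e. word 1.
                 */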
730 }
731
732 static void
733 v3d_fs_set_prog_data(struct v3d_compile *c,
734 struct v3d_fs_prog_data *prog_data)
735 {
736 v3d_set_fs_prog_data_inputs(c, prog_data);
737 prog_data->writes_z = c->writes_z;
738 prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;
739 prog_data->uses_center_w = c->uses_center_w;
740 prog_data->uses_implicit_point_line_varyings =
741 c->uses_implicit_point_line_varyings;
742 prog_data->lock_scoreboard_on_first_thrsw =
743 c->lock_scoreboard_on_first_thrsw;
744 }
745
746 static void
747 v3d_cs_set_prog_data(struct v3d_compile *c,
748 struct v3d_compute_prog_data *prog_data)
749 {
750 prog_data->shared_size = c->s->info.cs.shared_size;
751 }
752
753 static void
754 v3d_set_prog_data(struct v3d_compile *c,
755 struct v3d_prog_data *prog_data)
756 {
757 prog_data->threads = c->threads;
758 prog_data->single_seg = !c->last_thrsw;
759 prog_data->spill_size = c->spill_size;
760 prog_data->tmu_dirty_rcl = c->tmu_dirty_rcl;
761
762 v3d_set_prog_data_uniforms(c, prog_data);
763
764 switch (c->s->info.stage) {
765 case MESA_SHADER_VERTEX:
766 v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
767 break;
768 case MESA_SHADER_GEOMETRY:
769 v3d_gs_set_prog_data(c, (struct v3d_gs_prog_data *)prog_data);
770 break;
771 case MESA_SHADER_FRAGMENT:
772 v3d_fs_set_prog_data(c, (struct v3d_fs_prog_data *)prog_data);
773 break;
774 case MESA_SHADER_COMPUTE:
775 v3d_cs_set_prog_data(c, (struct v3d_compute_prog_data *)prog_data);
776 break;
777 default:
778 unreachable("unsupported shader stage");
779 }
780 }
781
782 static uint64_t *
783 v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
784 {
785 *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);
786
787 uint64_t *qpu_insts = malloc(*final_assembly_size);
788 if (!qpu_insts)
789 return NULL;
790
791 memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);
792
793 vir_compile_destroy(c);
794
795 return qpu_insts;
796 }
797
798 static void
799 v3d_nir_lower_vs_early(struct v3d_compile *c)
800 {
801 /* Split our I/O vars and dead code eliminate the unused
802 * components.
803 */
804 NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
805 nir_var_shader_in | nir_var_shader_out);
806 uint64_t used_outputs[4] = {0};
807 for (int i = 0; i < c->vs_key->num_used_outputs; i++) {
808 int slot = v3d_slot_get_slot(c->vs_key->used_outputs[i]);
809 int comp = v3d_slot_get_component(c->vs_key->used_outputs[i]);
810 used_outputs[comp] |= 1ull << slot;
811 }
812 NIR_PASS_V(c->s, nir_remove_unused_io_vars,
813 nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
814 NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
815 v3d_optimize_nir(c->s);
816 NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
817
818 /* This must go before nir_lower_io */
819 if (c->vs_key->per_vertex_point_size)
820 NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
821
822 NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
823 type_size_vec4,
824 (nir_lower_io_options)0);
825 /* clean up nir_lower_io's deref_var remains and do a constant folding pass
826 * on the code it generated.
827 */
828 NIR_PASS_V(c->s, nir_opt_dce);
829 NIR_PASS_V(c->s, nir_opt_constant_folding);
830 }
831
832 static void
833 v3d_nir_lower_gs_early(struct v3d_compile *c)
834 {
835 /* Split our I/O vars and dead code eliminate the unused
836 * components.
837 */
838 NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
839 nir_var_shader_in | nir_var_shader_out);
840 uint64_t used_outputs[4] = {0};
841 for (int i = 0; i < c->gs_key->num_used_outputs; i++) {
842 int slot = v3d_slot_get_slot(c->gs_key->used_outputs[i]);
843 int comp = v3d_slot_get_component(c->gs_key->used_outputs[i]);
844 used_outputs[comp] |= 1ull << slot;
845 }
846 NIR_PASS_V(c->s, nir_remove_unused_io_vars,
847 nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
848 NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
849 v3d_optimize_nir(c->s);
850 NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
851
852 /* This must go before nir_lower_io */
853 if (c->gs_key->per_vertex_point_size)
854 NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
855
856 NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
857 type_size_vec4,
858 (nir_lower_io_options)0);
859 /* clean up nir_lower_io's deref_var remains */
860 NIR_PASS_V(c->s, nir_opt_dce);
861 }
862
863 static void
864 v3d_fixup_fs_output_types(struct v3d_compile *c)
865 {
866 nir_foreach_shader_out_variable(var, c->s) {
867 uint32_t mask = 0;
868
869 switch (var->data.location) {
870 case FRAG_RESULT_COLOR:
871 mask = ~0;
872 break;
873 case FRAG_RESULT_DATA0:
874 case FRAG_RESULT_DATA1:
875 case FRAG_RESULT_DATA2:
876 case FRAG_RESULT_DATA3:
877 mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
878 break;
879 }
880
881 if (c->fs_key->int_color_rb & mask) {
882 var->type =
883 glsl_vector_type(GLSL_TYPE_INT,
884 glsl_get_components(var->type));
885 } else if (c->fs_key->uint_color_rb & mask) {
886 var->type =
887 glsl_vector_type(GLSL_TYPE_UINT,
888 glsl_get_components(var->type));
889 }
890 }
891 }
892
893 static void
894 v3d_nir_lower_fs_early(struct v3d_compile *c)
895 {
896 if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
897 v3d_fixup_fs_output_types(c);
898
899 NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);
900
901 if (c->fs_key->line_smoothing) {
902 v3d_nir_lower_line_smooth(c->s);
903 NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
904 /* The lowering pass can introduce new sysval reads */
905 nir_shader_gather_info(c->s, nir_shader_get_entrypoint(c->s));
906 }
907
908 /* If the shader has no non-TLB side effects, we can promote it to
909 * enabling early_fragment_tests even if the user didn't.
910 */
911 if (!(c->s->info.num_images ||
912 c->s->info.num_ssbos)) {
913 c->s->info.fs.early_fragment_tests = true;
914 }
915 }
916
917 static void
918 v3d_nir_lower_gs_late(struct v3d_compile *c)
919 {
920 if (c->key->ucp_enables) {
921 NIR_PASS_V(c->s, nir_lower_clip_gs, c->key->ucp_enables,
922 false, NULL);
923 }
924
925 /* Note: GS output scalarizing must happen after nir_lower_clip_gs. */
926 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
927 }
928
929 static void
930 v3d_nir_lower_vs_late(struct v3d_compile *c)
931 {
932 if (c->vs_key->clamp_color)
933 NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
934
935 if (c->key->ucp_enables) {
936 NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
937 false, false, NULL);
938 NIR_PASS_V(c->s, nir_lower_io_to_scalar,
939 nir_var_shader_out);
940 }
941
942 /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
943 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
944 }
945
946 static void
947 v3d_nir_lower_fs_late(struct v3d_compile *c)
948 {
949 if (c->fs_key->light_twoside)
950 NIR_PASS_V(c->s, nir_lower_two_sided_color, true);
951
952 if (c->fs_key->clamp_color)
953 NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
954
955 if (c->fs_key->alpha_test) {
956 NIR_PASS_V(c->s, nir_lower_alpha_test,
957 c->fs_key->alpha_test_func,
958 false, NULL);
959 }
960
961 /* In OpenGL the fragment shader can't read gl_ClipDistance[], but
962 * Vulkan allows it, in which case the SPIR-V compiler will declare
963                  * VARYING_SLOT_CLIP_DIST0 as a compact array variable. Pass true as
964                  * the last parameter to always operate with a compact array in both
965                  * OpenGL and Vulkan so we don't have to care about the API we
966 * are using.
967 */
968 if (c->key->ucp_enables)
969 NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables, true);
970
971 /* Note: FS input scalarizing must happen after
972 * nir_lower_two_sided_color, which only handles a vec4 at a time.
973 */
974 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
975 }
976
977 static uint32_t
978 vir_get_max_temps(struct v3d_compile *c)
979 {
980 int max_ip = 0;
981 vir_for_each_inst_inorder(inst, c)
982 max_ip++;
983
984 uint32_t *pressure = rzalloc_array(NULL, uint32_t, max_ip);
985
986 for (int t = 0; t < c->num_temps; t++) {
987 for (int i = c->temp_start[t]; (i < c->temp_end[t] &&
988 i < max_ip); i++) {
989 if (i > max_ip)
990 break;
991 pressure[i]++;
992 }
993 }
994
995 uint32_t max_temps = 0;
996 for (int i = 0; i < max_ip; i++)
997 max_temps = MAX2(max_temps, pressure[i]);
998
999 ralloc_free(pressure);
1000
1001 return max_temps;
1002 }
1003
1004 enum v3d_dependency_class {
1005 V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0
1006 };
1007
1008 static bool
1009 v3d_intrinsic_dependency_cb(nir_intrinsic_instr *intr,
1010 nir_schedule_dependency *dep,
1011 void *user_data)
1012 {
1013 struct v3d_compile *c = user_data;
1014
1015 switch (intr->intrinsic) {
1016 case nir_intrinsic_store_output:
1017 /* Writing to location 0 overwrites the value passed in for
1018 * gl_PrimitiveID on geometry shaders
1019 */
1020 if (c->s->info.stage != MESA_SHADER_GEOMETRY ||
1021 nir_intrinsic_base(intr) != 0)
1022 break;
1023
1024 nir_const_value *const_value =
1025 nir_src_as_const_value(intr->src[1]);
1026
1027 if (const_value == NULL)
1028 break;
1029
1030 uint64_t offset =
1031 nir_const_value_as_uint(*const_value,
1032 nir_src_bit_size(intr->src[1]));
1033 if (offset != 0)
1034 break;
1035
1036 dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;
1037 dep->type = NIR_SCHEDULE_WRITE_DEPENDENCY;
1038 return true;
1039
1040 case nir_intrinsic_load_primitive_id:
1041 if (c->s->info.stage != MESA_SHADER_GEOMETRY)
1042 break;
1043
1044 dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;
1045 dep->type = NIR_SCHEDULE_READ_DEPENDENCY;
1046 return true;
1047
1048 default:
1049 break;
1050 }
1051
1052 return false;
1053 }
1054
1055 static void
1056 v3d_attempt_compile(struct v3d_compile *c)
1057 {
1058 switch (c->s->info.stage) {
1059 case MESA_SHADER_VERTEX:
1060 c->vs_key = (struct v3d_vs_key *) c->key;
1061 break;
1062 case MESA_SHADER_GEOMETRY:
1063 c->gs_key = (struct v3d_gs_key *) c->key;
1064 break;
1065 case MESA_SHADER_FRAGMENT:
1066 c->fs_key = (struct v3d_fs_key *) c->key;
1067 break;
1068 case MESA_SHADER_COMPUTE:
1069 break;
1070 default:
1071 unreachable("unsupported shader stage");
1072 }
1073
1074 switch (c->s->info.stage) {
1075 case MESA_SHADER_VERTEX:
1076 v3d_nir_lower_vs_early(c);
1077 break;
1078 case MESA_SHADER_GEOMETRY:
1079 v3d_nir_lower_gs_early(c);
1080 break;
1081 case MESA_SHADER_FRAGMENT:
1082 v3d_nir_lower_fs_early(c);
1083 break;
1084 default:
1085 break;
1086 }
1087
1088 v3d_lower_nir(c);
1089
1090 switch (c->s->info.stage) {
1091 case MESA_SHADER_VERTEX:
1092 v3d_nir_lower_vs_late(c);
1093 break;
1094 case MESA_SHADER_GEOMETRY:
1095 v3d_nir_lower_gs_late(c);
1096 break;
1097 case MESA_SHADER_FRAGMENT:
1098 v3d_nir_lower_fs_late(c);
1099 break;
1100 default:
1101 break;
1102 }
1103
1104 NIR_PASS_V(c->s, v3d_nir_lower_io, c);
1105 NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
1106 NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
1107 NIR_PASS_V(c->s, nir_lower_idiv, nir_lower_idiv_fast);
1108
1109 v3d_optimize_nir(c->s);
1110
1111 /* Do late algebraic optimization to turn add(a, neg(b)) back into
1112 * subs, then the mandatory cleanup after algebraic. Note that it may
1113 * produce fnegs, and if so then we need to keep running to squash
1114 * fneg(fneg(a)).
1115 */
1116 bool more_late_algebraic = true;
1117 while (more_late_algebraic) {
1118 more_late_algebraic = false;
1119 NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
1120 NIR_PASS_V(c->s, nir_opt_constant_folding);
1121 NIR_PASS_V(c->s, nir_copy_prop);
1122 NIR_PASS_V(c->s, nir_opt_dce);
1123 NIR_PASS_V(c->s, nir_opt_cse);
1124 }
1125
1126 NIR_PASS_V(c->s, nir_lower_bool_to_int32);
1127 NIR_PASS_V(c->s, nir_convert_from_ssa, true);
1128
1129 struct nir_schedule_options schedule_options = {
1130 /* Schedule for about half our register space, to enable more
1131 * shaders to hit 4 threads.
1132 */
1133 .threshold = 24,
1134
1135                 /* Vertex shaders share the same memory for inputs and outputs;
1136                  * fragment and geometry shaders do not.
1137 */
1138 .stages_with_shared_io_memory =
1139 (((1 << MESA_ALL_SHADER_STAGES) - 1) &
1140 ~((1 << MESA_SHADER_FRAGMENT) |
1141 (1 << MESA_SHADER_GEOMETRY))),
1142
1143 .fallback = c->fallback_scheduler,
1144
1145 .intrinsic_cb = v3d_intrinsic_dependency_cb,
1146 .intrinsic_cb_data = c,
1147 };
1148 NIR_PASS_V(c->s, nir_schedule, &schedule_options);
1149
1150 v3d_nir_to_vir(c);
1151 }
1152
1153 uint32_t
1154 v3d_prog_data_size(gl_shader_stage stage)
1155 {
1156 static const int prog_data_size[] = {
1157 [MESA_SHADER_VERTEX] = sizeof(struct v3d_vs_prog_data),
1158 [MESA_SHADER_GEOMETRY] = sizeof(struct v3d_gs_prog_data),
1159 [MESA_SHADER_FRAGMENT] = sizeof(struct v3d_fs_prog_data),
1160 [MESA_SHADER_COMPUTE] = sizeof(struct v3d_compute_prog_data),
1161 };
1162
1163 assert(stage >= 0 &&
1164 stage < ARRAY_SIZE(prog_data_size) &&
1165 prog_data_size[stage]);
1166
1167 return prog_data_size[stage];
1168 }
1169
1170 uint64_t *v3d_compile(const struct v3d_compiler *compiler,
1171 struct v3d_key *key,
1172 struct v3d_prog_data **out_prog_data,
1173 nir_shader *s,
1174 void (*debug_output)(const char *msg,
1175 void *debug_output_data),
1176 void *debug_output_data,
1177 int program_id, int variant_id,
1178 uint32_t *final_assembly_size)
1179 {
1180 struct v3d_compile *c;
1181
1182 for (int i = 0; true; i++) {
1183 c = vir_compile_init(compiler, key, s,
1184 debug_output, debug_output_data,
1185 program_id, variant_id,
1186 i > 0 /* fallback_scheduler */);
1187
1188 v3d_attempt_compile(c);
1189
1190 if (i > 0 ||
1191 c->compilation_result !=
1192 V3D_COMPILATION_FAILED_REGISTER_ALLOCATION)
1193 break;
1194
1195 char *debug_msg;
1196 int ret = asprintf(&debug_msg,
1197 "Using fallback scheduler for %s",
1198 vir_get_stage_name(c));
1199
1200 if (ret >= 0) {
1201 if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF))
1202 fprintf(stderr, "%s\n", debug_msg);
1203
1204 c->debug_output(debug_msg, c->debug_output_data);
1205 free(debug_msg);
1206 }
1207
1208 vir_compile_destroy(c);
1209 }
1210
1211 struct v3d_prog_data *prog_data;
1212
1213 prog_data = rzalloc_size(NULL, v3d_prog_data_size(c->s->info.stage));
1214
1215 v3d_set_prog_data(c, prog_data);
1216
1217 *out_prog_data = prog_data;
1218
1219 char *shaderdb;
1220 int ret = asprintf(&shaderdb,
1221 "%s shader: %d inst, %d threads, %d loops, "
1222 "%d uniforms, %d max-temps, %d:%d spills:fills, "
1223 "%d sfu-stalls, %d inst-and-stalls",
1224 vir_get_stage_name(c),
1225 c->qpu_inst_count,
1226 c->threads,
1227 c->loops,
1228 c->num_uniforms,
1229 vir_get_max_temps(c),
1230 c->spills,
1231 c->fills,
1232 c->qpu_inst_stalled_count,
1233 c->qpu_inst_count + c->qpu_inst_stalled_count);
1234 if (ret >= 0) {
1235 if (V3D_DEBUG & V3D_DEBUG_SHADERDB)
1236 fprintf(stderr, "SHADER-DB: %s\n", shaderdb);
1237
1238 c->debug_output(shaderdb, c->debug_output_data);
1239 free(shaderdb);
1240 }
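        /* Editor's illustrative note, not part of the original source: with the
         * format string above, a shader-db line printed under
         * V3D_DEBUG_SHADERDB looks like the following (all numbers invented):
         *
         *   SHADER-DB: MESA_SHADER_FRAGMENT shader: 48 inst, 4 threads, 0 loops,
         *   10 uniforms, 12 max-temps, 0:0 spills:fills, 2 sfu-stalls,
         *   50 inst-and-stalls
         */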
1241
1242 return v3d_return_qpu_insts(c, final_assembly_size);
1243 }
1244
1245 void
1246 vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
1247 {
1248 if (qinst->dst.file == QFILE_TEMP)
1249 c->defs[qinst->dst.index] = NULL;
1250
1251 assert(&qinst->link != c->cursor.link);
1252
1253 list_del(&qinst->link);
1254 free(qinst);
1255
1256 c->live_intervals_valid = false;
1257 }
1258
1259 struct qreg
1260 vir_follow_movs(struct v3d_compile *c, struct qreg reg)
1261 {
1262 /* XXX
1263 int pack = reg.pack;
1264
1265 while (reg.file == QFILE_TEMP &&
1266 c->defs[reg.index] &&
1267 (c->defs[reg.index]->op == QOP_MOV ||
1268 c->defs[reg.index]->op == QOP_FMOV) &&
1269 !c->defs[reg.index]->dst.pack &&
1270 !c->defs[reg.index]->src[0].pack) {
1271 reg = c->defs[reg.index]->src[0];
1272 }
1273
1274 reg.pack = pack;
1275 */
1276 return reg;
1277 }
1278
1279 void
1280 vir_compile_destroy(struct v3d_compile *c)
1281 {
1282 /* Defuse the assert that we aren't removing the cursor's instruction.
1283 */
1284 c->cursor.link = NULL;
1285
1286 vir_for_each_block(block, c) {
1287 while (!list_is_empty(&block->instructions)) {
1288 struct qinst *qinst =
1289 list_first_entry(&block->instructions,
1290 struct qinst, link);
1291 vir_remove_instruction(c, qinst);
1292 }
1293 }
1294
1295 ralloc_free(c);
1296 }
1297
1298 uint32_t
1299 vir_get_uniform_index(struct v3d_compile *c,
1300 enum quniform_contents contents,
1301 uint32_t data)
1302 {
1303 for (int i = 0; i < c->num_uniforms; i++) {
1304 if (c->uniform_contents[i] == contents &&
1305 c->uniform_data[i] == data) {
1306 return i;
1307 }
1308 }
1309
1310 uint32_t uniform = c->num_uniforms++;
1311
1312 if (uniform >= c->uniform_array_size) {
1313 c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
1314 c->uniform_array_size * 2);
1315
1316 c->uniform_data = reralloc(c, c->uniform_data,
1317 uint32_t,
1318 c->uniform_array_size);
1319 c->uniform_contents = reralloc(c, c->uniform_contents,
1320 enum quniform_contents,
1321 c->uniform_array_size);
1322 }
1323
1324 c->uniform_contents[uniform] = contents;
1325 c->uniform_data[uniform] = data;
1326
1327 return uniform;
1328 }
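/* Editor's illustrative note, not part of the original source: the linear search
 * above deduplicates requests, e.g.
 *
 *     uint32_t a = vir_get_uniform_index(c, QUNIFORM_CONSTANT, 0);
 *     uint32_t b = vir_get_uniform_index(c, QUNIFORM_CONSTANT, 0);
 *     assert(a == b);
 *
 * so every branch built by vir_branch_inst() shares a single uniform slot.
 */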
1329
1330 struct qreg
1331 vir_uniform(struct v3d_compile *c,
1332 enum quniform_contents contents,
1333 uint32_t data)
1334 {
1335 struct qinst *inst = vir_NOP(c);
1336 inst->qpu.sig.ldunif = true;
1337 inst->uniform = vir_get_uniform_index(c, contents, data);
1338 inst->dst = vir_get_temp(c);
1339 c->defs[inst->dst.index] = inst;
1340 return inst->dst;
1341 }
1342
1343 #define OPTPASS(func) \
1344 do { \
1345 bool stage_progress = func(c); \
1346 if (stage_progress) { \
1347 progress = true; \
1348 if (print_opt_debug) { \
1349 fprintf(stderr, \
1350 "VIR opt pass %2d: %s progress\n", \
1351 pass, #func); \
1352 } \
1353 /*XXX vir_validate(c);*/ \
1354 } \
1355 } while (0)
1356
1357 void
1358 vir_optimize(struct v3d_compile *c)
1359 {
1360 bool print_opt_debug = false;
1361 int pass = 1;
1362
1363 while (true) {
1364 bool progress = false;
1365
1366 OPTPASS(vir_opt_copy_propagate);
1367 OPTPASS(vir_opt_redundant_flags);
1368 OPTPASS(vir_opt_dead_code);
1369 OPTPASS(vir_opt_small_immediates);
1370
1371 if (!progress)
1372 break;
1373
1374 pass++;
1375 }
1376 }
1377
1378 const char *
1379 vir_get_stage_name(struct v3d_compile *c)
1380 {
1381 if (c->vs_key && c->vs_key->is_coord)
1382 return "MESA_SHADER_VERTEX_BIN";
1383 else if (c->gs_key && c->gs_key->is_coord)
1384 return "MESA_SHADER_GEOMETRY_BIN";
1385 else
1386 return gl_shader_stage_name(c->s->info.stage);
1387 }