v3d: Return an invalid src number if asked for a missing implicit uniform.
[mesa.git] / src / broadcom / compiler / vir.c
/*
 * Copyright © 2016-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "broadcom/common/v3d_device_info.h"
#include "v3d_compiler.h"

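/* Returns the number of explicit sources an instruction reads, not counting
 * the implicit ("sideband") uniform that some instructions carry in their
 * last source slot.  For branches all the interesting state comes from the
 * uniform stream, so the non-sideband count is 0.
 */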
int
vir_get_non_sideband_nsrc(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return 0;
        case V3D_QPU_INSTR_TYPE_ALU:
                if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
                        return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
                else
                        return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
        }

        return 0;
}

int
vir_get_nsrc(struct qinst *inst)
{
        int nsrc = vir_get_non_sideband_nsrc(inst);

        if (vir_has_implicit_uniform(inst))
                nsrc++;

        return nsrc;
}

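/* Branches always consume a uniform (the branch target comes from the
 * uniform stream), and TLB writes to the config ("TLBU") address do too.
 * Other instructions, such as texture setup, flag it explicitly on the
 * qinst.
 */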
bool
vir_has_implicit_uniform(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->dst.file) {
                case QFILE_TLBU:
                        return true;
                default:
                        return inst->has_implicit_uniform;
                }
        }
        return false;
}

/* The sideband uniform for textures gets stored after the normal ALU
 * arguments.
 */
int
vir_get_implicit_uniform_src(struct qinst *inst)
{
        if (!vir_has_implicit_uniform(inst))
                return -1;
        return vir_get_nsrc(inst) - 1;
}

/**
 * Returns whether the instruction has any side effects that must be
 * preserved.
 */
bool
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_SETREVF:
                case V3D_QPU_A_SETMSF:
                case V3D_QPU_A_VPMSETUP:
                case V3D_QPU_A_STVPMV:
                case V3D_QPU_A_STVPMD:
                case V3D_QPU_A_STVPMP:
                case V3D_QPU_A_VPMWT:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_MULTOP:
                        return true;
                default:
                        break;
                }
        }

        if (inst->qpu.sig.ldtmu ||
            inst->qpu.sig.ldvary ||
            inst->qpu.sig.wrtmuc ||
            inst->qpu.sig.thrsw) {
                return true;
        }

        return false;
}

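/* Returns whether the instruction interprets its source operands as
 * floating point.  Note the XXX below: only the most common float ops are
 * listed so far.
 */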
bool
vir_is_float_input(struct qinst *inst)
{
        /* XXX: More instrs */
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return false;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_FADD:
                case V3D_QPU_A_FSUB:
                case V3D_QPU_A_FMIN:
                case V3D_QPU_A_FMAX:
                case V3D_QPU_A_FTOIN:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_FMOV:
                case V3D_QPU_M_VFMUL:
                case V3D_QPU_M_FMUL:
                        return true;
                default:
                        break;
                }
        }

        return false;
}

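/* Returns whether the instruction is a plain (F)MOV on the mul unit with no
 * output pack modifier and no conditional execution (a likely candidate for
 * copy propagation).
 */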
bool
vir_is_raw_mov(struct qinst *inst)
{
        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
             inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
                return false;
        }

        if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
            inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
                return false;
        }

        if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
            inst->qpu.flags.mc != V3D_QPU_COND_NONE)
                return false;

        return true;
}

bool
vir_is_add(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.add.op != V3D_QPU_A_NOP);
}

bool
vir_is_mul(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
}

bool
vir_is_tex(struct qinst *inst)
{
        if (inst->dst.file == QFILE_MAGIC)
                return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);

        return false;
}

bool
vir_depends_on_flags(struct qinst *inst)
{
        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH) {
                return (inst->qpu.branch.cond != V3D_QPU_BRANCH_COND_ALWAYS);
        } else {
                /* An ALU instruction depends on the flags if either its add
                 * or its mul half is conditional, so OR (not AND) the two
                 * checks together.
                 */
                return (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
                        inst->qpu.flags.mc != V3D_QPU_COND_NONE);
        }
}

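/* Returns whether the instruction (implicitly) clobbers the r3 accumulator:
 * reading the VPM goes through r3, and before V3D 4.1 the ldvary/ldtlb/
 * ldvpm signals return their results there as well.
 */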
bool
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_VPM:
                        return true;
                default:
                        break;
                }
        }

        if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
                                  inst->qpu.sig.ldtlb ||
                                  inst->qpu.sig.ldtlbu ||
                                  inst->qpu.sig.ldvpm)) {
                return true;
        }

        return false;
}

bool
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        switch (inst->dst.file) {
        case QFILE_MAGIC:
                switch (inst->dst.index) {
                case V3D_QPU_WADDR_RECIP:
                case V3D_QPU_WADDR_RSQRT:
                case V3D_QPU_WADDR_EXP:
                case V3D_QPU_WADDR_LOG:
                case V3D_QPU_WADDR_SIN:
                        return true;
                }
                break;
        default:
                break;
        }

        if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
                return true;

        return false;
}

void
vir_set_unpack(struct qinst *inst, int src,
               enum v3d_qpu_input_unpack unpack)
{
        assert(src == 0 || src == 1);

        if (vir_is_add(inst)) {
                if (src == 0)
                        inst->qpu.alu.add.a_unpack = unpack;
                else
                        inst->qpu.alu.add.b_unpack = unpack;
        } else {
                assert(vir_is_mul(inst));
                if (src == 0)
                        inst->qpu.alu.mul.a_unpack = unpack;
                else
                        inst->qpu.alu.mul.b_unpack = unpack;
        }
}

void
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.ac = cond;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mc = cond;
        }
}

void
vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.apf = pf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mpf = pf;
        }
}

#if 0
uint8_t
vir_channels_written(struct qinst *inst)
{
        if (vir_is_mul(inst)) {
                switch (inst->dst.pack) {
                case QPU_PACK_MUL_NOP:
                case QPU_PACK_MUL_8888:
                        return 0xf;
                case QPU_PACK_MUL_8A:
                        return 0x1;
                case QPU_PACK_MUL_8B:
                        return 0x2;
                case QPU_PACK_MUL_8C:
                        return 0x4;
                case QPU_PACK_MUL_8D:
                        return 0x8;
                }
        } else {
                switch (inst->dst.pack) {
                case QPU_PACK_A_NOP:
                case QPU_PACK_A_8888:
                case QPU_PACK_A_8888_SAT:
                case QPU_PACK_A_32_SAT:
                        return 0xf;
                case QPU_PACK_A_8A:
                case QPU_PACK_A_8A_SAT:
                        return 0x1;
                case QPU_PACK_A_8B:
                case QPU_PACK_A_8B_SAT:
                        return 0x2;
                case QPU_PACK_A_8C:
                case QPU_PACK_A_8C_SAT:
                        return 0x4;
                case QPU_PACK_A_8D:
                case QPU_PACK_A_8D_SAT:
                        return 0x8;
                case QPU_PACK_A_16A:
                case QPU_PACK_A_16A_SAT:
                        return 0x3;
                case QPU_PACK_A_16B:
                case QPU_PACK_A_16B_SAT:
                        return 0xc;
                }
        }
        unreachable("Bad pack field");
}
#endif

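/* Allocates a fresh temporary register, doubling the defs/spillable
 * tracking arrays whenever the temp count outgrows them.  New temps start
 * out marked spillable.
 */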
struct qreg
vir_get_temp(struct v3d_compile *c)
{
        struct qreg reg;

        reg.file = QFILE_TEMP;
        reg.index = c->num_temps++;

        if (c->num_temps > c->defs_array_size) {
                uint32_t old_size = c->defs_array_size;
                c->defs_array_size = MAX2(old_size * 2, 16);

                c->defs = reralloc(c, c->defs, struct qinst *,
                                   c->defs_array_size);
                memset(&c->defs[old_size], 0,
                       sizeof(c->defs[0]) * (c->defs_array_size - old_size));

                c->spillable = reralloc(c, c->spillable,
                                        BITSET_WORD,
                                        BITSET_WORDS(c->defs_array_size));
                for (int i = old_size; i < c->defs_array_size; i++)
                        BITSET_SET(c->spillable, i);
        }

        return reg;
}

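/* Constructors for the three instruction flavors: add-unit ALU, mul-unit
 * ALU, and branch.  Each starts from a NOP template and marks the uniform
 * slot as unused (~0).
 */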
struct qinst *
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.add.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.mul.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_branch_inst(enum v3d_qpu_branch_cond cond, struct qreg src)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
        inst->qpu.branch.cond = cond;
        inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
        inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
        inst->qpu.branch.ub = true;
        inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;

        inst->dst = vir_reg(QFILE_NULL, 0);
        inst->src[0] = src;
        inst->uniform = ~0;

        return inst;
}

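/* Inserts an instruction at the compile cursor and advances the cursor past
 * it, so consecutive emits land in program order.
 */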
static void
vir_emit(struct v3d_compile *c, struct qinst *inst)
{
        switch (c->cursor.mode) {
        case vir_cursor_add:
                list_add(&inst->link, c->cursor.link);
                break;
        case vir_cursor_addtail:
                list_addtail(&inst->link, c->cursor.link);
                break;
        }

        c->cursor = vir_after_inst(inst);
        c->live_intervals_valid = false;
}

/* Updates inst to write to a new temporary, emits it, and notes the def. */
struct qreg
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
{
        assert(inst->dst.file == QFILE_NULL);

        inst->dst = vir_get_temp(c);

        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = inst;

        vir_emit(c, inst);

        return inst->dst;
}

struct qinst *
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
{
        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = NULL;

        vir_emit(c, inst);

        return inst;
}

struct qblock *
vir_new_block(struct v3d_compile *c)
{
        struct qblock *block = rzalloc(c, struct qblock);

        list_inithead(&block->instructions);

        block->predecessors = _mesa_set_create(block,
                                               _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

        block->index = c->next_block_index++;

        return block;
}

void
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
{
        c->cur_block = block;
        c->cursor = vir_after_block(block);
        list_addtail(&block->link, &c->blocks);
}

struct qblock *
vir_entry_block(struct v3d_compile *c)
{
        return list_first_entry(&c->blocks, struct qblock, link);
}

struct qblock *
vir_exit_block(struct v3d_compile *c)
{
        return list_last_entry(&c->blocks, struct qblock, link);
}

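/* Records a CFG edge.  A block has at most two successors (e.g. the branch
 * target and the fallthrough path), so linking a second one asserts that
 * the slot is actually free.
 */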
void
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
{
        _mesa_set_add(successor->predecessors, predecessor);
        if (predecessor->successors[0]) {
                assert(!predecessor->successors[1]);
                predecessor->successors[1] = successor;
        } else {
                predecessor->successors[0] = successor;
        }
}

const struct v3d_compiler *
v3d_compiler_init(const struct v3d_device_info *devinfo)
{
        struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
        if (!compiler)
                return NULL;

        compiler->devinfo = devinfo;

        if (!vir_init_reg_sets(compiler)) {
                ralloc_free(compiler);
                return NULL;
        }

        return compiler;
}

void
v3d_compiler_free(const struct v3d_compiler *compiler)
{
        ralloc_free((void *)compiler);
}

static struct v3d_compile *
vir_compile_init(const struct v3d_compiler *compiler,
                 struct v3d_key *key,
                 nir_shader *s,
                 int program_id, int variant_id)
{
        struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);

        c->compiler = compiler;
        c->devinfo = compiler->devinfo;
        c->key = key;
        c->program_id = program_id;
        c->variant_id = variant_id;
        c->threads = 4;

        s = nir_shader_clone(c, s);
        c->s = s;

        list_inithead(&c->blocks);
        vir_set_emit_block(c, vir_new_block(c));

        c->output_position_index = -1;
        c->output_point_size_index = -1;
        c->output_sample_mask_index = -1;

        c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

        return c;
}

static void
v3d_lower_nir(struct v3d_compile *c)
{
        struct nir_lower_tex_options tex_options = {
                .lower_txd = true,
                .lower_rect = false, /* XXX: Use this on V3D 3.x */
                .lower_txp = ~0,
                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and (for 32-bit returns)
         * ARB_texture_swizzle-style swizzle.
         */
        for (int i = 0; i < ARRAY_SIZE(c->key->tex); i++) {
                for (int j = 0; j < 4; j++)
                        tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];

                if (c->key->tex[i].clamp_s)
                        tex_options.saturate_s |= 1 << i;
                if (c->key->tex[i].clamp_t)
                        tex_options.saturate_t |= 1 << i;
                if (c->key->tex[i].clamp_r)
                        tex_options.saturate_r |= 1 << i;
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
}

static void
v3d_lower_nir_late(struct v3d_compile *c)
{
        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, nir_lower_idiv);
}

static void
v3d_set_prog_data_uniforms(struct v3d_compile *c,
                           struct v3d_prog_data *prog_data)
{
        int count = c->num_uniforms;
        struct v3d_uniform_list *ulist = &prog_data->uniforms;

        ulist->count = count;
        ulist->data = ralloc_array(prog_data, uint32_t, count);
        memcpy(ulist->data, c->uniform_data,
               count * sizeof(*ulist->data));
        ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
        memcpy(ulist->contents, c->uniform_contents,
               count * sizeof(*ulist->contents));
}

/* Copy the compiler UBO range state to the compiled shader, dropping out
 * arrays that were never referenced by an indirect load.
 *
 * (Note that VIR dead code elimination of an array access still leaves that
 * array alive, though.)
 */
static void
v3d_set_prog_data_ubo(struct v3d_compile *c,
                      struct v3d_prog_data *prog_data)
{
        if (!c->num_ubo_ranges)
                return;

        prog_data->num_ubo_ranges = 0;
        prog_data->ubo_ranges = ralloc_array(prog_data, struct v3d_ubo_range,
                                             c->num_ubo_ranges);
        for (int i = 0; i < c->num_ubo_ranges; i++) {
                if (!c->ubo_range_used[i])
                        continue;

                struct v3d_ubo_range *range = &c->ubo_ranges[i];
                prog_data->ubo_ranges[prog_data->num_ubo_ranges++] = *range;
                prog_data->ubo_size += range->size;
        }

        if (prog_data->ubo_size) {
                if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                vir_get_stage_name(c),
                                c->program_id, c->variant_id,
                                prog_data->ubo_size / 4);
                }
        }
}

static void
v3d_set_prog_data(struct v3d_compile *c,
                  struct v3d_prog_data *prog_data)
{
        prog_data->threads = c->threads;
        prog_data->single_seg = !c->last_thrsw;
        prog_data->spill_size = c->spill_size;

        v3d_set_prog_data_uniforms(c, prog_data);
        v3d_set_prog_data_ubo(c, prog_data);
}

static uint64_t *
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
{
        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);

        uint64_t *qpu_insts = malloc(*final_assembly_size);
        if (!qpu_insts)
                return NULL;

        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);

        vir_compile_destroy(c);

        return qpu_insts;
}

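/* Per-stage compile entry points.  Both follow the same shape: set up the
 * compile context, run stage-specific NIR lowering, optimize, leave SSA,
 * generate VIR/QPU code, and return the instructions alongside the
 * filled-in prog_data.
 */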
uint64_t *v3d_compile_vs(const struct v3d_compiler *compiler,
                         struct v3d_vs_key *key,
                         struct v3d_vs_prog_data *prog_data,
                         nir_shader *s,
                         int program_id, int variant_id,
                         uint32_t *final_assembly_size)
{
        struct v3d_compile *c = vir_compile_init(compiler, &key->base, s,
                                                 program_id, variant_id);

        c->vs_key = key;

        v3d_lower_nir(c);

        if (key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (key->base.ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_vs, key->base.ucp_enables);
                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                           nir_var_shader_out);
        }

        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);

        v3d_lower_nir_late(c);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, &prog_data->base);

        prog_data->base.num_inputs = c->num_inputs;

        /* The vertex data gets format converted by the VPM so that
         * each attribute channel takes up a VPM column.  Precompute
         * the sizes for the shader record.
         */
        for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
                prog_data->vattr_sizes[i] = c->vattr_sizes[i];
                prog_data->vpm_input_size += c->vattr_sizes[i];
        }

        prog_data->uses_vid = (s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_VERTEX_ID));
        prog_data->uses_iid = (s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_INSTANCE_ID));

        if (prog_data->uses_vid)
                prog_data->vpm_input_size++;
        if (prog_data->uses_iid)
                prog_data->vpm_input_size++;

        /* Input/output segment sizes are in 8x32-bit multiples. */
        prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
        prog_data->vpm_output_size = align(c->num_vpm_writes, 8) / 8;

        return v3d_return_qpu_insts(c, final_assembly_size);
}

static void
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
                            struct v3d_fs_prog_data *prog_data)
{
        prog_data->base.num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
                      (V3D_MAX_FS_INPUTS - 1) / 24);
        for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
                if (BITSET_TEST(c->flat_shade_flags, i))
                        prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->noperspective_flags, i))
                        prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->centroid_flags, i))
                        prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
        }
}

static void
v3d_fixup_fs_output_types(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                uint32_t mask = 0;

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        mask = ~0;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
                        break;
                }

                if (c->fs_key->int_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_INT,
                                                 glsl_get_components(var->type));
                } else if (c->fs_key->uint_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_UINT,
                                                 glsl_get_components(var->type));
                }
        }
}

uint64_t *v3d_compile_fs(const struct v3d_compiler *compiler,
                         struct v3d_fs_key *key,
                         struct v3d_fs_prog_data *prog_data,
                         nir_shader *s,
                         int program_id, int variant_id,
                         uint32_t *final_assembly_size)
{
        struct v3d_compile *c = vir_compile_init(compiler, &key->base, s,
                                                 program_id, variant_id);

        c->fs_key = key;

        if (key->int_color_rb || key->uint_color_rb)
                v3d_fixup_fs_output_types(c);

        v3d_lower_nir(c);

        if (key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (key->alpha_test) {
                NIR_PASS_V(c->s, nir_lower_alpha_test, key->alpha_test_func,
                           false);
        }

        if (key->base.ucp_enables)
                NIR_PASS_V(c->s, nir_lower_clip_fs, key->base.ucp_enables);

        /* Note: FS input scalarizing must happen after
         * nir_lower_two_sided_color, which only handles a vec4 at a time.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);

        v3d_lower_nir_late(c);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, &prog_data->base);
        v3d_set_fs_prog_data_inputs(c, prog_data);
        prog_data->writes_z = (c->s->info.outputs_written &
                               (1 << FRAG_RESULT_DEPTH));
        prog_data->discard = (c->s->info.fs.uses_discard ||
                              c->fs_key->sample_alpha_to_coverage);
        prog_data->uses_center_w = c->uses_center_w;

        return v3d_return_qpu_insts(c, final_assembly_size);
}

void
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
{
        if (qinst->dst.file == QFILE_TEMP)
                c->defs[qinst->dst.index] = NULL;

        assert(&qinst->link != c->cursor.link);

        list_del(&qinst->link);
        free(qinst);

        c->live_intervals_valid = false;
}

struct qreg
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
{
        /* XXX
        int pack = reg.pack;

        while (reg.file == QFILE_TEMP &&
               c->defs[reg.index] &&
               (c->defs[reg.index]->op == QOP_MOV ||
                c->defs[reg.index]->op == QOP_FMOV) &&
               !c->defs[reg.index]->dst.pack &&
               !c->defs[reg.index]->src[0].pack) {
                reg = c->defs[reg.index]->src[0];
        }

        reg.pack = pack;
        */
        return reg;
}

void
vir_compile_destroy(struct v3d_compile *c)
{
        /* Defuse the assert that we aren't removing the cursor's
         * instruction.
         */
        c->cursor.link = NULL;

        vir_for_each_block(block, c) {
                while (!list_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
                        vir_remove_instruction(c, qinst);
                }
        }

        ralloc_free(c);
}

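/* Returns a QFILE_UNIF reference for the given (contents, data) pair,
 * reusing an existing slot when one matches so that identical uniforms are
 * only streamed once; the backing arrays grow geometrically on demand.
 *
 * Hypothetical usage sketch (names from v3d_compiler.h):
 *
 *     struct qreg half = vir_uniform(c, QUNIFORM_CONSTANT, fui(0.5));
 *
 * A second request with the same contents/data returns the same index.
 */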
struct qreg
vir_uniform(struct v3d_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return vir_reg(QFILE_UNIF, i);
                }
        }

        uint32_t uniform = c->num_uniforms++;

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           uint32_t,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);
        }

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;

        return vir_reg(QFILE_UNIF, uniform);
}

static bool
vir_can_set_flags(struct v3d_compile *c, struct qinst *inst)
{
        if (c->devinfo->ver >= 40 && (v3d_qpu_reads_vpm(&inst->qpu) ||
                                      v3d_qpu_uses_sfu(&inst->qpu))) {
                return false;
        }

        return true;
}

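/* Emits a flags update (pf) for src: if the cursor sits right after the
 * instruction that defined src and that instruction is allowed to set
 * flags, the pf is folded onto it; otherwise a MOV of src to the null
 * register is emitted purely to generate the flags.
 */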
void
vir_PF(struct v3d_compile *c, struct qreg src, enum v3d_qpu_pf pf)
{
        struct qinst *last_inst = NULL;

        if (!list_empty(&c->cur_block->instructions)) {
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

                /* Can't stuff the PF into the last inst if our cursor
                 * isn't pointing after it.
                 */
                struct vir_cursor after_inst = vir_after_inst(last_inst);
                if (c->cursor.mode != after_inst.mode ||
                    c->cursor.link != after_inst.link)
                        last_inst = NULL;
        }

        if (src.file != QFILE_TEMP ||
            !c->defs[src.index] ||
            last_inst != c->defs[src.index] ||
            !vir_can_set_flags(c, last_inst)) {
                /* XXX: Make the MOV be the appropriate type */
                last_inst = vir_MOV_dest(c, vir_reg(QFILE_NULL, 0), src);
        }

        vir_set_pf(last_inst, pf);
}

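/* Runs the VIR-level optimization passes to a fixed point: each iteration
 * applies every pass once, and the loop stops on the first iteration in
 * which no pass makes progress.
 */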
#define OPTPASS(func)                                                   \
        do {                                                            \
                bool stage_progress = func(c);                          \
                if (stage_progress) {                                   \
                        progress = true;                                \
                        if (print_opt_debug) {                          \
                                fprintf(stderr,                         \
                                        "VIR opt pass %2d: %s progress\n", \
                                        pass, #func);                   \
                        }                                               \
                        /*XXX vir_validate(c);*/                        \
                }                                                       \
        } while (0)

void
vir_optimize(struct v3d_compile *c)
{
        bool print_opt_debug = false;
        int pass = 1;

        while (true) {
                bool progress = false;

                OPTPASS(vir_opt_copy_propagate);
                OPTPASS(vir_opt_dead_code);

                if (!progress)
                        break;

                pass++;
        }
}

const char *
vir_get_stage_name(struct v3d_compile *c)
{
        if (c->vs_key && c->vs_key->is_coord)
                return "MESA_SHADER_COORD";
        else
                return gl_shader_stage_name(c->s->info.stage);
}