v3d: Add support for the TMUWT instruction.
[mesa.git] / src / broadcom / compiler / vir.c
/*
 * Copyright © 2016-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "broadcom/common/v3d_device_info.h"
#include "v3d_compiler.h"

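/* Returns the number of sources consumed by the ALU portion of the
 * instruction, not counting any trailing sideband (implicit) uniform.
 */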
int
vir_get_non_sideband_nsrc(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return 0;
        case V3D_QPU_INSTR_TYPE_ALU:
                if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
                        return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
                else
                        return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
        }

        return 0;
}

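/* Returns the total number of sources, including the implicit uniform at
 * the end when the instruction has one.
 */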
int
vir_get_nsrc(struct qinst *inst)
{
        int nsrc = vir_get_non_sideband_nsrc(inst);

        if (vir_has_implicit_uniform(inst))
                nsrc++;

        return nsrc;
}

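/* Returns whether the instruction consumes a uniform from the uniform
 * stream as a side effect: branches always do, as do ALU instructions
 * writing the TLBU.
 */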
bool
vir_has_implicit_uniform(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->dst.file) {
                case QFILE_TLBU:
                        return true;
                default:
                        return inst->has_implicit_uniform;
                }
        }
        return false;
}

/* The sideband uniform for textures gets stored after the normal ALU
 * arguments.
 */
int
vir_get_implicit_uniform_src(struct qinst *inst)
{
        if (!vir_has_implicit_uniform(inst))
                return -1;
        return vir_get_nsrc(inst) - 1;
}

/**
 * Returns whether the instruction has any side effects that must be
 * preserved.
 */
bool
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_SETREVF:
                case V3D_QPU_A_SETMSF:
                case V3D_QPU_A_VPMSETUP:
                case V3D_QPU_A_STVPMV:
                case V3D_QPU_A_STVPMD:
                case V3D_QPU_A_STVPMP:
                case V3D_QPU_A_VPMWT:
                case V3D_QPU_A_TMUWT:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_MULTOP:
                        return true;
                default:
                        break;
                }
        }

        if (inst->qpu.sig.ldtmu ||
            inst->qpu.sig.ldvary ||
            inst->qpu.sig.wrtmuc ||
            inst->qpu.sig.thrsw) {
                return true;
        }

        return false;
}

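/* Returns whether the instruction interprets its sources as
 * floating-point values.
 */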
bool
vir_is_float_input(struct qinst *inst)
{
        /* XXX: More instrs */
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return false;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_FADD:
                case V3D_QPU_A_FSUB:
                case V3D_QPU_A_FMIN:
                case V3D_QPU_A_FMAX:
                case V3D_QPU_A_FTOIN:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_FMOV:
                case V3D_QPU_M_VFMUL:
                case V3D_QPU_M_FMUL:
                        return true;
                default:
                        break;
                }
        }

        return false;
}

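/* Returns whether the instruction is a plain MOV/FMOV with no pack,
 * unpack, or condition applied, so that its source value can be
 * substituted for its destination.
 */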
bool
vir_is_raw_mov(struct qinst *inst)
{
        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
             inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
                return false;
        }

        if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
            inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
                return false;
        }

        /* An input unpack (e.g. absolute value) modifies the value as
         * well, so the mov isn't raw in that case either.
         */
        if (inst->qpu.alu.mul.a_unpack != V3D_QPU_UNPACK_NONE ||
            inst->qpu.alu.mul.b_unpack != V3D_QPU_UNPACK_NONE) {
                return false;
        }

        if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
            inst->qpu.flags.mc != V3D_QPU_COND_NONE)
                return false;

        return true;
}

bool
vir_is_add(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.add.op != V3D_QPU_A_NOP);
}

bool
vir_is_mul(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
}

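/* Returns whether the instruction is part of a texture operation: either
 * a write to one of the TMU magic registers, or a TMUWT waiting for the
 * TMU write queue to drain.
 */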
bool
vir_is_tex(struct qinst *inst)
{
        if (inst->dst.file == QFILE_MAGIC)
                return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);

        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
                return true;
        }

        return false;
}

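/* Returns whether the instruction's execution is conditional on the
 * flags: a conditional branch, or an ALU op with a condition on either
 * the add or the mul half.
 */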
bool
vir_depends_on_flags(struct qinst *inst)
{
        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH) {
                return (inst->qpu.branch.cond != V3D_QPU_BRANCH_COND_ALWAYS);
        } else {
                return (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
                        inst->qpu.flags.mc != V3D_QPU_COND_NONE);
        }
}

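/* Returns whether the instruction clobbers the accumulator r3 as a side
 * effect: VPM reads land there, as do the ldvary/ldtlb/ldtlbu/ldvpm
 * signals before V3D 4.1.
 */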
bool
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_VPM:
                        return true;
                default:
                        break;
                }
        }

        if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
                                  inst->qpu.sig.ldtlb ||
                                  inst->qpu.sig.ldtlbu ||
                                  inst->qpu.sig.ldvpm)) {
                return true;
        }

        return false;
}

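/* Returns whether the instruction clobbers the accumulator r4 as a side
 * effect: SFU operations return their results there, as does the ldtmu
 * signal before V3D 4.1.
 */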
bool
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        switch (inst->dst.file) {
        case QFILE_MAGIC:
                switch (inst->dst.index) {
                case V3D_QPU_WADDR_RECIP:
                case V3D_QPU_WADDR_RSQRT:
                case V3D_QPU_WADDR_EXP:
                case V3D_QPU_WADDR_LOG:
                case V3D_QPU_WADDR_SIN:
                        return true;
                }
                break;
        default:
                break;
        }

        if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
                return true;

        return false;
}

void
vir_set_unpack(struct qinst *inst, int src,
               enum v3d_qpu_input_unpack unpack)
{
        assert(src == 0 || src == 1);

        if (vir_is_add(inst)) {
                if (src == 0)
                        inst->qpu.alu.add.a_unpack = unpack;
                else
                        inst->qpu.alu.add.b_unpack = unpack;
        } else {
                assert(vir_is_mul(inst));
                if (src == 0)
                        inst->qpu.alu.mul.a_unpack = unpack;
                else
                        inst->qpu.alu.mul.b_unpack = unpack;
        }
}

void
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.ac = cond;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mc = cond;
        }
}

void
vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.apf = pf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mpf = pf;
        }
}

#if 0
uint8_t
vir_channels_written(struct qinst *inst)
{
        if (vir_is_mul(inst)) {
                switch (inst->dst.pack) {
                case QPU_PACK_MUL_NOP:
                case QPU_PACK_MUL_8888:
                        return 0xf;
                case QPU_PACK_MUL_8A:
                        return 0x1;
                case QPU_PACK_MUL_8B:
                        return 0x2;
                case QPU_PACK_MUL_8C:
                        return 0x4;
                case QPU_PACK_MUL_8D:
                        return 0x8;
                }
        } else {
                switch (inst->dst.pack) {
                case QPU_PACK_A_NOP:
                case QPU_PACK_A_8888:
                case QPU_PACK_A_8888_SAT:
                case QPU_PACK_A_32_SAT:
                        return 0xf;
                case QPU_PACK_A_8A:
                case QPU_PACK_A_8A_SAT:
                        return 0x1;
                case QPU_PACK_A_8B:
                case QPU_PACK_A_8B_SAT:
                        return 0x2;
                case QPU_PACK_A_8C:
                case QPU_PACK_A_8C_SAT:
                        return 0x4;
                case QPU_PACK_A_8D:
                case QPU_PACK_A_8D_SAT:
                        return 0x8;
                case QPU_PACK_A_16A:
                case QPU_PACK_A_16A_SAT:
                        return 0x3;
                case QPU_PACK_A_16B:
                case QPU_PACK_A_16B_SAT:
                        return 0xc;
                }
        }
        unreachable("Bad pack field");
}
#endif

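/* Allocates a fresh temporary register, growing the def and spillable
 * tracking arrays as needed.
 */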
struct qreg
vir_get_temp(struct v3d_compile *c)
{
        struct qreg reg;

        reg.file = QFILE_TEMP;
        reg.index = c->num_temps++;

        if (c->num_temps > c->defs_array_size) {
                uint32_t old_size = c->defs_array_size;
                c->defs_array_size = MAX2(old_size * 2, 16);

                c->defs = reralloc(c, c->defs, struct qinst *,
                                   c->defs_array_size);
                memset(&c->defs[old_size], 0,
                       sizeof(c->defs[0]) * (c->defs_array_size - old_size));

                c->spillable = reralloc(c, c->spillable,
                                        BITSET_WORD,
                                        BITSET_WORDS(c->defs_array_size));
                for (int i = old_size; i < c->defs_array_size; i++)
                        BITSET_SET(c->spillable, i);
        }

        return reg;
}

struct qinst *
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.add.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.mul.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_branch_inst(enum v3d_qpu_branch_cond cond, struct qreg src)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
        inst->qpu.branch.cond = cond;
        inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
        inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
        inst->qpu.branch.ub = true;
        inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;

        inst->dst = vir_reg(QFILE_NULL, 0);
        inst->src[0] = src;
        inst->uniform = ~0;

        return inst;
}

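/* Emits the instruction at the current cursor position and advances the
 * cursor past it.
 */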
static void
vir_emit(struct v3d_compile *c, struct qinst *inst)
{
        switch (c->cursor.mode) {
        case vir_cursor_add:
                list_add(&inst->link, c->cursor.link);
                break;
        case vir_cursor_addtail:
                list_addtail(&inst->link, c->cursor.link);
                break;
        }

        c->cursor = vir_after_inst(inst);
        c->live_intervals_valid = false;
}

/* Updates inst to write to a new temporary, emits it, and notes the def. */
struct qreg
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
{
        assert(inst->dst.file == QFILE_NULL);

        inst->dst = vir_get_temp(c);

        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = inst;

        vir_emit(c, inst);

        return inst->dst;
}

struct qinst *
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
{
        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = NULL;

        vir_emit(c, inst);

        return inst;
}

struct qblock *
vir_new_block(struct v3d_compile *c)
{
        struct qblock *block = rzalloc(c, struct qblock);

        list_inithead(&block->instructions);

        block->predecessors = _mesa_set_create(block,
                                               _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

        block->index = c->next_block_index++;

        return block;
}

void
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
{
        c->cur_block = block;
        c->cursor = vir_after_block(block);
        list_addtail(&block->link, &c->blocks);
}

struct qblock *
vir_entry_block(struct v3d_compile *c)
{
        return list_first_entry(&c->blocks, struct qblock, link);
}

struct qblock *
vir_exit_block(struct v3d_compile *c)
{
        return list_last_entry(&c->blocks, struct qblock, link);
}

void
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
{
        _mesa_set_add(successor->predecessors, predecessor);
        if (predecessor->successors[0]) {
                assert(!predecessor->successors[1]);
                predecessor->successors[1] = successor;
        } else {
                predecessor->successors[0] = successor;
        }
}

const struct v3d_compiler *
v3d_compiler_init(const struct v3d_device_info *devinfo)
{
        struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
        if (!compiler)
                return NULL;

        compiler->devinfo = devinfo;

        if (!vir_init_reg_sets(compiler)) {
                ralloc_free(compiler);
                return NULL;
        }

        return compiler;
}

void
v3d_compiler_free(const struct v3d_compiler *compiler)
{
        ralloc_free((void *)compiler);
}

static struct v3d_compile *
vir_compile_init(const struct v3d_compiler *compiler,
                 struct v3d_key *key,
                 nir_shader *s,
                 int program_id, int variant_id)
{
        struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);

        c->compiler = compiler;
        c->devinfo = compiler->devinfo;
        c->key = key;
        c->program_id = program_id;
        c->variant_id = variant_id;
        c->threads = 4;

        s = nir_shader_clone(c, s);
        c->s = s;

        list_inithead(&c->blocks);
        vir_set_emit_block(c, vir_new_block(c));

        c->output_position_index = -1;
        c->output_point_size_index = -1;
        c->output_sample_mask_index = -1;

        c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

        return c;
}

static void
v3d_lower_nir(struct v3d_compile *c)
{
        struct nir_lower_tex_options tex_options = {
                .lower_txd = true,
                .lower_rect = false, /* XXX: Use this on V3D 3.x */
                .lower_txp = ~0,
                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and (for 32-bit returns)
         * ARB_texture_swizzle-style swizzle.
         */
        for (int i = 0; i < ARRAY_SIZE(c->key->tex); i++) {
                for (int j = 0; j < 4; j++)
                        tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];

                if (c->key->tex[i].clamp_s)
                        tex_options.saturate_s |= 1 << i;
                if (c->key->tex[i].clamp_t)
                        tex_options.saturate_t |= 1 << i;
                if (c->key->tex[i].clamp_r)
                        tex_options.saturate_r |= 1 << i;
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
}

static void
v3d_lower_nir_late(struct v3d_compile *c)
{
        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, nir_lower_idiv);
}

static void
v3d_set_prog_data_uniforms(struct v3d_compile *c,
                           struct v3d_prog_data *prog_data)
{
        int count = c->num_uniforms;
        struct v3d_uniform_list *ulist = &prog_data->uniforms;

        ulist->count = count;
        ulist->data = ralloc_array(prog_data, uint32_t, count);
        memcpy(ulist->data, c->uniform_data,
               count * sizeof(*ulist->data));
        ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
        memcpy(ulist->contents, c->uniform_contents,
               count * sizeof(*ulist->contents));
}

/* Copy the compiler UBO range state to the compiled shader, dropping out
 * arrays that were never referenced by an indirect load.
 *
 * (Note that QIR dead code elimination of an array access still leaves that
 * array alive, though)
 */
static void
v3d_set_prog_data_ubo(struct v3d_compile *c,
                      struct v3d_prog_data *prog_data)
{
        if (!c->num_ubo_ranges)
                return;

        prog_data->num_ubo_ranges = 0;
        prog_data->ubo_ranges = ralloc_array(prog_data, struct v3d_ubo_range,
                                             c->num_ubo_ranges);
        for (int i = 0; i < c->num_ubo_ranges; i++) {
                if (!c->ubo_range_used[i])
                        continue;

                struct v3d_ubo_range *range = &c->ubo_ranges[i];
                prog_data->ubo_ranges[prog_data->num_ubo_ranges++] = *range;
                prog_data->ubo_size += range->size;
        }

        if (prog_data->ubo_size) {
                if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                vir_get_stage_name(c),
                                c->program_id, c->variant_id,
                                prog_data->ubo_size / 4);
                }
        }
}

static void
v3d_set_prog_data(struct v3d_compile *c,
                  struct v3d_prog_data *prog_data)
{
        prog_data->threads = c->threads;
        prog_data->single_seg = !c->last_thrsw;
        prog_data->spill_size = c->spill_size;

        v3d_set_prog_data_uniforms(c, prog_data);
        v3d_set_prog_data_ubo(c, prog_data);
}

static uint64_t *
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
{
        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);

        uint64_t *qpu_insts = malloc(*final_assembly_size);
        if (!qpu_insts)
                return NULL;

        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);

        vir_compile_destroy(c);

        return qpu_insts;
}

uint64_t *v3d_compile_vs(const struct v3d_compiler *compiler,
                         struct v3d_vs_key *key,
                         struct v3d_vs_prog_data *prog_data,
                         nir_shader *s,
                         int program_id, int variant_id,
                         uint32_t *final_assembly_size)
{
        struct v3d_compile *c = vir_compile_init(compiler, &key->base, s,
                                                 program_id, variant_id);

        c->vs_key = key;

        v3d_lower_nir(c);

        if (key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (key->base.ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_vs, key->base.ucp_enables);
                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                           nir_var_shader_out);
        }

        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);

        v3d_lower_nir_late(c);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, &prog_data->base);

        prog_data->base.num_inputs = c->num_inputs;

        /* The vertex data gets format converted by the VPM so that
         * each attribute channel takes up a VPM column.  Precompute
         * the sizes for the shader record.
         */
        for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
                prog_data->vattr_sizes[i] = c->vattr_sizes[i];
                prog_data->vpm_input_size += c->vattr_sizes[i];
        }

        prog_data->uses_vid = (s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_VERTEX_ID));
        prog_data->uses_iid = (s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_INSTANCE_ID));

        if (prog_data->uses_vid)
                prog_data->vpm_input_size++;
        if (prog_data->uses_iid)
                prog_data->vpm_input_size++;

        /* Input/output segment sizes are in 8x32-bit multiples. */
        prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
        prog_data->vpm_output_size = align(c->num_vpm_writes, 8) / 8;

        return v3d_return_qpu_insts(c, final_assembly_size);
}

static void
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
                            struct v3d_fs_prog_data *prog_data)
{
        prog_data->base.num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
                      (V3D_MAX_FS_INPUTS - 1) / 24);
        for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
                if (BITSET_TEST(c->flat_shade_flags, i))
                        prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->noperspective_flags, i))
                        prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->centroid_flags, i))
                        prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
        }
}

static void
v3d_fixup_fs_output_types(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                uint32_t mask = 0;

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        mask = ~0;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
                        break;
                }

                if (c->fs_key->int_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_INT,
                                                 glsl_get_components(var->type));
                } else if (c->fs_key->uint_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_UINT,
                                                 glsl_get_components(var->type));
                }
        }
}

uint64_t *v3d_compile_fs(const struct v3d_compiler *compiler,
                         struct v3d_fs_key *key,
                         struct v3d_fs_prog_data *prog_data,
                         nir_shader *s,
                         int program_id, int variant_id,
                         uint32_t *final_assembly_size)
{
        struct v3d_compile *c = vir_compile_init(compiler, &key->base, s,
                                                 program_id, variant_id);

        c->fs_key = key;

        if (key->int_color_rb || key->uint_color_rb)
                v3d_fixup_fs_output_types(c);

        v3d_lower_nir(c);

        if (key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (key->alpha_test) {
                NIR_PASS_V(c->s, nir_lower_alpha_test, key->alpha_test_func,
                           false);
        }

        if (key->base.ucp_enables)
                NIR_PASS_V(c->s, nir_lower_clip_fs, key->base.ucp_enables);

        /* Note: FS input scalarizing must happen after
         * nir_lower_two_sided_color, which only handles a vec4 at a time.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);

        v3d_lower_nir_late(c);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, &prog_data->base);
        v3d_set_fs_prog_data_inputs(c, prog_data);
        prog_data->writes_z = (c->s->info.outputs_written &
                               (1 << FRAG_RESULT_DEPTH));
        prog_data->discard = (c->s->info.fs.uses_discard ||
                              c->fs_key->sample_alpha_to_coverage);
        prog_data->uses_center_w = c->uses_center_w;

        return v3d_return_qpu_insts(c, final_assembly_size);
}

void
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
{
        if (qinst->dst.file == QFILE_TEMP)
                c->defs[qinst->dst.index] = NULL;

        assert(&qinst->link != c->cursor.link);

        list_del(&qinst->link);
        free(qinst);

        c->live_intervals_valid = false;
}

struct qreg
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
{
        /* XXX
        int pack = reg.pack;

        while (reg.file == QFILE_TEMP &&
               c->defs[reg.index] &&
               (c->defs[reg.index]->op == QOP_MOV ||
                c->defs[reg.index]->op == QOP_FMOV) &&
               !c->defs[reg.index]->dst.pack &&
               !c->defs[reg.index]->src[0].pack) {
                reg = c->defs[reg.index]->src[0];
        }

        reg.pack = pack;
        */
        return reg;
}

void
vir_compile_destroy(struct v3d_compile *c)
{
        /* Defuse the assert that we aren't removing the cursor's
         * instruction.
         */
        c->cursor.link = NULL;

        vir_for_each_block(block, c) {
                while (!list_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
                        vir_remove_instruction(c, qinst);
                }
        }

        ralloc_free(c);
}

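/* Returns a reference into the uniform stream for the given
 * contents/data pair, reusing an existing entry when one matches, so
 * repeated requests for the same uniform only take one stream slot.
 */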
struct qreg
vir_uniform(struct v3d_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return vir_reg(QFILE_UNIF, i);
                }
        }

        uint32_t uniform = c->num_uniforms++;

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           uint32_t,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);
        }

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;

        return vir_reg(QFILE_UNIF, uniform);
}

static bool
vir_can_set_flags(struct v3d_compile *c, struct qinst *inst)
{
        if (c->devinfo->ver >= 40 && (v3d_qpu_reads_vpm(&inst->qpu) ||
                                      v3d_qpu_uses_sfu(&inst->qpu))) {
                return false;
        }

        return true;
}

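/* Updates the flags from src: folds the push-flags field into src's
 * defining instruction when the cursor still points right after it,
 * and otherwise emits a MOV to a null register to generate them.
 */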
void
vir_PF(struct v3d_compile *c, struct qreg src, enum v3d_qpu_pf pf)
{
        struct qinst *last_inst = NULL;

        if (!list_empty(&c->cur_block->instructions)) {
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

                /* Can't stuff the PF into the last inst if our cursor
                 * isn't pointing after it.
                 */
                struct vir_cursor after_inst = vir_after_inst(last_inst);
                if (c->cursor.mode != after_inst.mode ||
                    c->cursor.link != after_inst.link)
                        last_inst = NULL;
        }

        if (src.file != QFILE_TEMP ||
            !c->defs[src.index] ||
            last_inst != c->defs[src.index] ||
            !vir_can_set_flags(c, last_inst)) {
                /* XXX: Make the MOV be the appropriate type */
                last_inst = vir_MOV_dest(c, vir_reg(QFILE_NULL, 0), src);
        }

        vir_set_pf(last_inst, pf);
}

#define OPTPASS(func)                                                  \
        do {                                                           \
                bool stage_progress = func(c);                         \
                if (stage_progress) {                                  \
                        progress = true;                               \
                        if (print_opt_debug) {                         \
                                fprintf(stderr,                        \
                                        "VIR opt pass %2d: %s progress\n", \
                                        pass, #func);                  \
                        }                                              \
                        /*XXX vir_validate(c);*/                       \
                }                                                      \
        } while (0)

void
vir_optimize(struct v3d_compile *c)
{
        bool print_opt_debug = false;
        int pass = 1;

        while (true) {
                bool progress = false;

                OPTPASS(vir_opt_copy_propagate);
                OPTPASS(vir_opt_dead_code);
                OPTPASS(vir_opt_small_immediates);

                if (!progress)
                        break;

                pass++;
        }
}

const char *
vir_get_stage_name(struct v3d_compile *c)
{
        if (c->vs_key && c->vs_key->is_coord)
                return "MESA_SHADER_COORD";
        else
                return gl_shader_stage_name(c->s->info.stage);
}