v3d: Fix vir_is_raw_mov() for input unpacks.
[mesa.git] / src / broadcom / compiler / vir.c
/*
 * Copyright © 2016-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "broadcom/common/v3d_device_info.h"
#include "v3d_compiler.h"

int
vir_get_non_sideband_nsrc(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return 0;
        case V3D_QPU_INSTR_TYPE_ALU:
                if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
                        return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
                else
                        return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
        }

        return 0;
}

int
vir_get_nsrc(struct qinst *inst)
{
        int nsrc = vir_get_non_sideband_nsrc(inst);

        if (vir_has_implicit_uniform(inst))
                nsrc++;

        return nsrc;
}

bool
vir_has_implicit_uniform(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->dst.file) {
                case QFILE_TLBU:
                        return true;
                case QFILE_MAGIC:
                        switch (inst->dst.index) {
                        case V3D_QPU_WADDR_TLBU:
                        case V3D_QPU_WADDR_TMUAU:
                        case V3D_QPU_WADDR_SYNCU:
                                return true;
                        default:
                                break;
                        }
                        break;
                default:
                        return inst->has_implicit_uniform;
                }
        }
        return false;
}

/* The sideband uniform for textures gets stored after the normal ALU
 * arguments.
 */
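/* For example, a MOV that writes V3D_QPU_WADDR_TMUAU has one ALU operand,
 * so vir_get_nsrc() reports 2 and the sideband uniform occupies src[1].
 */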
int
vir_get_implicit_uniform_src(struct qinst *inst)
{
        if (!vir_has_implicit_uniform(inst))
                return -1;
        return vir_get_nsrc(inst) - 1;
}

/**
 * Returns whether the instruction has any side effects that must be
 * preserved.
 */
bool
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_SETREVF:
                case V3D_QPU_A_SETMSF:
                case V3D_QPU_A_VPMSETUP:
                case V3D_QPU_A_STVPMV:
                case V3D_QPU_A_STVPMD:
                case V3D_QPU_A_STVPMP:
                case V3D_QPU_A_VPMWT:
                case V3D_QPU_A_TMUWT:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_MULTOP:
                        return true;
                default:
                        break;
                }
        }

        if (inst->qpu.sig.ldtmu ||
            inst->qpu.sig.ldvary ||
            inst->qpu.sig.wrtmuc ||
            inst->qpu.sig.thrsw) {
                return true;
        }

        return false;
}

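/* Returns whether the instruction is a MOV/FMOV whose result is bit-for-bit
 * its source. An illustrative counterexample: an FMOV with a_unpack ==
 * V3D_QPU_UNPACK_L reads only the low 16 bits of its source, so treating it
 * as a plain copy (e.g. in copy propagation) would be wrong.
 */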
bool
vir_is_raw_mov(struct qinst *inst)
{
        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
             inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
                return false;
        }

        if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
            inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
                return false;
        }

        if (inst->qpu.alu.add.a_unpack != V3D_QPU_UNPACK_NONE ||
            inst->qpu.alu.add.b_unpack != V3D_QPU_UNPACK_NONE ||
            inst->qpu.alu.mul.a_unpack != V3D_QPU_UNPACK_NONE ||
            inst->qpu.alu.mul.b_unpack != V3D_QPU_UNPACK_NONE) {
                return false;
        }

        if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
            inst->qpu.flags.mc != V3D_QPU_COND_NONE)
                return false;

        return true;
}

bool
vir_is_add(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.add.op != V3D_QPU_A_NOP);
}

bool
vir_is_mul(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
}

bool
vir_is_tex(struct qinst *inst)
{
        if (inst->dst.file == QFILE_MAGIC)
                return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);

        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
                return true;
        }

        return false;
}

bool
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_VPM:
                        return true;
                default:
                        break;
                }
        }

        if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
                                  inst->qpu.sig.ldtlb ||
                                  inst->qpu.sig.ldtlbu ||
                                  inst->qpu.sig.ldvpm)) {
                return true;
        }

        return false;
}

bool
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        switch (inst->dst.file) {
        case QFILE_MAGIC:
                switch (inst->dst.index) {
                case V3D_QPU_WADDR_RECIP:
                case V3D_QPU_WADDR_RSQRT:
                case V3D_QPU_WADDR_EXP:
                case V3D_QPU_WADDR_LOG:
                case V3D_QPU_WADDR_SIN:
                        return true;
                }
                break;
        default:
                break;
        }

        if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
                return true;

        return false;
}

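/* Sets the input unpack on ALU source slot src (0 or 1). As an example of
 * how this is used elsewhere in the compiler (nir_to_vir), nir fabs is
 * implemented by emitting an FMOV and then calling
 * vir_set_unpack(inst, 0, V3D_QPU_UNPACK_ABS) on it.
 */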
void
vir_set_unpack(struct qinst *inst, int src,
               enum v3d_qpu_input_unpack unpack)
{
        assert(src == 0 || src == 1);

        if (vir_is_add(inst)) {
                if (src == 0)
                        inst->qpu.alu.add.a_unpack = unpack;
                else
                        inst->qpu.alu.add.b_unpack = unpack;
        } else {
                assert(vir_is_mul(inst));
                if (src == 0)
                        inst->qpu.alu.mul.a_unpack = unpack;
                else
                        inst->qpu.alu.mul.b_unpack = unpack;
        }
}

void
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.ac = cond;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mc = cond;
        }
}

void
vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.apf = pf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mpf = pf;
        }
}

void
vir_set_uf(struct qinst *inst, enum v3d_qpu_uf uf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.auf = uf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.muf = uf;
        }
}

#if 0
uint8_t
vir_channels_written(struct qinst *inst)
{
        if (vir_is_mul(inst)) {
                switch (inst->dst.pack) {
                case QPU_PACK_MUL_NOP:
                case QPU_PACK_MUL_8888:
                        return 0xf;
                case QPU_PACK_MUL_8A:
                        return 0x1;
                case QPU_PACK_MUL_8B:
                        return 0x2;
                case QPU_PACK_MUL_8C:
                        return 0x4;
                case QPU_PACK_MUL_8D:
                        return 0x8;
                }
        } else {
                switch (inst->dst.pack) {
                case QPU_PACK_A_NOP:
                case QPU_PACK_A_8888:
                case QPU_PACK_A_8888_SAT:
                case QPU_PACK_A_32_SAT:
                        return 0xf;
                case QPU_PACK_A_8A:
                case QPU_PACK_A_8A_SAT:
                        return 0x1;
                case QPU_PACK_A_8B:
                case QPU_PACK_A_8B_SAT:
                        return 0x2;
                case QPU_PACK_A_8C:
                case QPU_PACK_A_8C_SAT:
                        return 0x4;
                case QPU_PACK_A_8D:
                case QPU_PACK_A_8D_SAT:
                        return 0x8;
                case QPU_PACK_A_16A:
                case QPU_PACK_A_16A_SAT:
                        return 0x3;
                case QPU_PACK_A_16B:
                case QPU_PACK_A_16B_SAT:
                        return 0xc;
                }
        }
        unreachable("Bad pack field");
}
#endif

struct qreg
vir_get_temp(struct v3d_compile *c)
{
        struct qreg reg;

        reg.file = QFILE_TEMP;
        reg.index = c->num_temps++;

        if (c->num_temps > c->defs_array_size) {
                uint32_t old_size = c->defs_array_size;
                c->defs_array_size = MAX2(old_size * 2, 16);

                c->defs = reralloc(c, c->defs, struct qinst *,
                                   c->defs_array_size);
                memset(&c->defs[old_size], 0,
                       sizeof(c->defs[0]) * (c->defs_array_size - old_size));

                c->spillable = reralloc(c, c->spillable,
                                        BITSET_WORD,
                                        BITSET_WORDS(c->defs_array_size));
                for (int i = old_size; i < c->defs_array_size; i++)
                        BITSET_SET(c->spillable, i);
        }

        return reg;
}

struct qinst *
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.add.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.mul.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_branch_inst(enum v3d_qpu_branch_cond cond, struct qreg src)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
        inst->qpu.branch.cond = cond;
        inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
        inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
        inst->qpu.branch.ub = true;
        inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;

        inst->dst = vir_nop_reg();
        inst->src[0] = src;
        inst->uniform = ~0;

        return inst;
}

static void
vir_emit(struct v3d_compile *c, struct qinst *inst)
{
        switch (c->cursor.mode) {
        case vir_cursor_add:
                list_add(&inst->link, c->cursor.link);
                break;
        case vir_cursor_addtail:
                list_addtail(&inst->link, c->cursor.link);
                break;
        }

        c->cursor = vir_after_inst(inst);
        c->live_intervals_valid = false;
}

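/* A usage sketch (this mirrors the VIR_ALU* wrappers in v3d_compiler.h):
 * value-producing code emits through vir_emit_def() below, which allocates
 * the destination temporary:
 *
 *        struct qreg sum = vir_emit_def(c, vir_add_inst(V3D_QPU_A_FADD,
 *                                                       c->undef, a, b));
 *
 * while writes to fixed or magic registers go through vir_emit_nondef().
 */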
/* Updates inst to write to a new temporary, emits it, and notes the def. */
struct qreg
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
{
        assert(inst->dst.file == QFILE_NULL);

        /* If we're emitting an instruction that's a def, it had better be
         * writing a register.
         */
        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||
                       v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));
                assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||
                       v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));
        }

        inst->dst = vir_get_temp(c);

        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = inst;

        vir_emit(c, inst);

        return inst->dst;
}

struct qinst *
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
{
        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = NULL;

        vir_emit(c, inst);

        return inst;
}

struct qblock *
vir_new_block(struct v3d_compile *c)
{
        struct qblock *block = rzalloc(c, struct qblock);

        list_inithead(&block->instructions);

        block->predecessors = _mesa_set_create(block,
                                               _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

        block->index = c->next_block_index++;

        return block;
}

void
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
{
        c->cur_block = block;
        c->cursor = vir_after_block(block);
        list_addtail(&block->link, &c->blocks);
}

struct qblock *
vir_entry_block(struct v3d_compile *c)
{
        return list_first_entry(&c->blocks, struct qblock, link);
}

struct qblock *
vir_exit_block(struct v3d_compile *c)
{
        return list_last_entry(&c->blocks, struct qblock, link);
}

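/* Records a CFG edge from predecessor to successor. A block has at most two
 * successors (e.g. a conditional branch's taken path plus the fall-through
 * path), which the assert below enforces.
 */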
void
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
{
        _mesa_set_add(successor->predecessors, predecessor);
        if (predecessor->successors[0]) {
                assert(!predecessor->successors[1]);
                predecessor->successors[1] = successor;
        } else {
                predecessor->successors[0] = successor;
        }
}

const struct v3d_compiler *
v3d_compiler_init(const struct v3d_device_info *devinfo)
{
        struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
        if (!compiler)
                return NULL;

        compiler->devinfo = devinfo;

        if (!vir_init_reg_sets(compiler)) {
                ralloc_free(compiler);
                return NULL;
        }

        return compiler;
}

void
v3d_compiler_free(const struct v3d_compiler *compiler)
{
        ralloc_free((void *)compiler);
}

static struct v3d_compile *
vir_compile_init(const struct v3d_compiler *compiler,
                 struct v3d_key *key,
                 nir_shader *s,
                 void (*debug_output)(const char *msg,
                                      void *debug_output_data),
                 void *debug_output_data,
                 int program_id, int variant_id)
{
        struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);

        c->compiler = compiler;
        c->devinfo = compiler->devinfo;
        c->key = key;
        c->program_id = program_id;
        c->variant_id = variant_id;
        c->threads = 4;
        c->debug_output = debug_output;
        c->debug_output_data = debug_output_data;

        s = nir_shader_clone(c, s);
        c->s = s;

        list_inithead(&c->blocks);
        vir_set_emit_block(c, vir_new_block(c));

        c->output_position_index = -1;
        c->output_point_size_index = -1;
        c->output_sample_mask_index = -1;

        c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

        return c;
}

static int
type_size_vec4(const struct glsl_type *type)
{
        return glsl_count_attribute_slots(type, false);
}

static void
v3d_lower_nir(struct v3d_compile *c)
{
        struct nir_lower_tex_options tex_options = {
                .lower_txd = true,
                .lower_tg4_broadcom_swizzle = true,

                .lower_rect = false, /* XXX: Use this on V3D 3.x */
                .lower_txp = ~0,
                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and (for 32-bit returns)
         * ARB_texture_swizzle-style swizzle.
         */
        for (int i = 0; i < ARRAY_SIZE(c->key->tex); i++) {
                for (int j = 0; j < 4; j++)
                        tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];

                if (c->key->tex[i].clamp_s)
                        tex_options.saturate_s |= 1 << i;
                if (c->key->tex[i].clamp_t)
                        tex_options.saturate_t |= 1 << i;
                if (c->key->tex[i].clamp_r)
                        tex_options.saturate_r |= 1 << i;
                if (c->key->tex[i].return_size == 16) {
                        tex_options.lower_tex_packing[i] =
                                nir_lower_tex_packing_16;
                }
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
        NIR_PASS_V(c->s, nir_lower_system_values);
}

static void
v3d_set_prog_data_uniforms(struct v3d_compile *c,
                           struct v3d_prog_data *prog_data)
{
        int count = c->num_uniforms;
        struct v3d_uniform_list *ulist = &prog_data->uniforms;

        ulist->count = count;
        ulist->data = ralloc_array(prog_data, uint32_t, count);
        memcpy(ulist->data, c->uniform_data,
               count * sizeof(*ulist->data));
        ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
        memcpy(ulist->contents, c->uniform_contents,
               count * sizeof(*ulist->contents));
}

/* Copy the compiler UBO range state to the compiled shader, dropping out
 * arrays that were never referenced by an indirect load.
 *
 * (Note that VIR dead code elimination of an array access still leaves that
 * array alive, though)
 */
static void
v3d_set_prog_data_ubo(struct v3d_compile *c,
                      struct v3d_prog_data *prog_data)
{
        if (!c->num_ubo_ranges)
                return;

        prog_data->num_ubo_ranges = 0;
        prog_data->ubo_ranges = ralloc_array(prog_data, struct v3d_ubo_range,
                                             c->num_ubo_ranges);
        for (int i = 0; i < c->num_ubo_ranges; i++) {
                if (!c->ubo_range_used[i])
                        continue;

                struct v3d_ubo_range *range = &c->ubo_ranges[i];
                prog_data->ubo_ranges[prog_data->num_ubo_ranges++] = *range;
                prog_data->ubo_size += range->size;
        }

        if (prog_data->ubo_size) {
                if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                vir_get_stage_name(c),
                                c->program_id, c->variant_id,
                                prog_data->ubo_size / 4);
                }
        }
}

static void
v3d_vs_set_prog_data(struct v3d_compile *c,
                     struct v3d_vs_prog_data *prog_data)
{
        /* The vertex data gets format converted by the VPM so that
         * each attribute channel takes up a VPM column. Precompute
         * the sizes for the shader record.
         */
        for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
                prog_data->vattr_sizes[i] = c->vattr_sizes[i];
                prog_data->vpm_input_size += c->vattr_sizes[i];
        }

        prog_data->uses_vid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_VERTEX_ID));
        prog_data->uses_iid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_INSTANCE_ID));

        if (prog_data->uses_vid)
                prog_data->vpm_input_size++;
        if (prog_data->uses_iid)
                prog_data->vpm_input_size++;

        /* Input/output segment sizes are in sectors (8 rows of 32 bits per
         * channel).
         */
        prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
        prog_data->vpm_output_size = align(c->num_vpm_writes, 8) / 8;

        /* Set us up for shared input/output segments. This is apparently
         * necessary for our VCM setup to avoid varying corruption.
         */
        prog_data->separate_segments = false;
        prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
                                          prog_data->vpm_input_size);
        prog_data->vpm_input_size = 0;

        /* Compute VCM cache size. We set up our program to take up less than
         * half of the VPM, so that any set of bin and render programs won't
         * run out of space. We need space for at least one input segment,
         * and then allocate the rest to output segments (one for the current
         * program, the rest to VCM). The valid range of the VCM cache size
         * field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4
         * batches.
         */
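        /* A worked example with illustrative numbers: a sector is 16 lanes x
         * 4 bytes x 8 rows = 512 bytes, so an 8 KB VPM would be 16 sectors
         * and half_vpm would be 8. With shared segments (vpm_input_size ==
         * 0) and a 2-sector output segment, that gives 8 / 2 = 4 output
         * batches and vcm_cache_size = CLAMP(4 - 1, 2, 4) = 3.
         */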
        assert(c->devinfo->vpm_size);
        int sector_size = 16 * sizeof(uint32_t) * 8;
        int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
        int half_vpm = vpm_size_in_sectors / 2;
        int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
        int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
        assert(vpm_output_batches >= 2);
        prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);
}

static void
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
                            struct v3d_fs_prog_data *prog_data)
{
        prog_data->num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
                      (V3D_MAX_FS_INPUTS - 1) / 24);
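        /* These flag words pack 24 inputs apiece, hence the i / 24 and
         * 1 << (i % 24) below: for example, FS input 30 lands in word 1,
         * bit 6.
         */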
        for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
                if (BITSET_TEST(c->flat_shade_flags, i))
                        prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->noperspective_flags, i))
                        prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->centroid_flags, i))
                        prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
        }
}

static void
v3d_fs_set_prog_data(struct v3d_compile *c,
                     struct v3d_fs_prog_data *prog_data)
{
        v3d_set_fs_prog_data_inputs(c, prog_data);
        prog_data->writes_z = c->writes_z;
        prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;
        prog_data->uses_center_w = c->uses_center_w;
}

static void
v3d_set_prog_data(struct v3d_compile *c,
                  struct v3d_prog_data *prog_data)
{
        prog_data->threads = c->threads;
        prog_data->single_seg = !c->last_thrsw;
        prog_data->spill_size = c->spill_size;

        v3d_set_prog_data_uniforms(c, prog_data);
        v3d_set_prog_data_ubo(c, prog_data);

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
        } else {
                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
                v3d_fs_set_prog_data(c, (struct v3d_fs_prog_data *)prog_data);
        }
}

static uint64_t *
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
{
        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);

        uint64_t *qpu_insts = malloc(*final_assembly_size);
        if (!qpu_insts)
                return NULL;

        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);

        vir_compile_destroy(c);

        return qpu_insts;
}

static void
v3d_nir_lower_vs_early(struct v3d_compile *c)
{
        /* Split our I/O vars and dead code eliminate the unused
         * components.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
                   nir_var_shader_in | nir_var_shader_out);
        uint64_t used_outputs[4] = {0};
        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                int slot = v3d_slot_get_slot(c->vs_key->fs_inputs[i]);
                int comp = v3d_slot_get_component(c->vs_key->fs_inputs[i]);
                used_outputs[comp] |= 1ull << slot;
        }
        NIR_PASS_V(c->s, nir_remove_unused_io_vars,
                   &c->s->outputs, used_outputs, NULL); /* demotes to globals */
        NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);
        NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                   type_size_vec4,
                   (nir_lower_io_options)0);
}

static void
v3d_fixup_fs_output_types(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                uint32_t mask = 0;

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        mask = ~0;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
                        break;
                }

                if (c->fs_key->int_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_INT,
                                                 glsl_get_components(var->type));
                } else if (c->fs_key->uint_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_UINT,
                                                 glsl_get_components(var->type));
                }
        }
}

static void
v3d_nir_lower_fs_early(struct v3d_compile *c)
{
        if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
                v3d_fixup_fs_output_types(c);

        /* If the shader has no non-TLB side effects, we can promote it to
         * enabling early_fragment_tests even if the user didn't.
         */
        if (!(c->s->info.num_images ||
              c->s->info.num_ssbos ||
              c->s->info.num_abos)) {
                c->s->info.fs.early_fragment_tests = true;
        }
}

static void
v3d_nir_lower_vs_late(struct v3d_compile *c)
{
        if (c->vs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->key->ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
                           false);
                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                           nir_var_shader_out);
        }

        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
}

static void
v3d_nir_lower_fs_late(struct v3d_compile *c)
{
        if (c->fs_key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (c->fs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->fs_key->alpha_test) {
                NIR_PASS_V(c->s, nir_lower_alpha_test,
                           c->fs_key->alpha_test_func,
                           false);
        }

        if (c->key->ucp_enables)
                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);

        /* Note: FS input scalarizing must happen after
         * nir_lower_two_sided_color, which only handles a vec4 at a time.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
}

uint64_t *v3d_compile(const struct v3d_compiler *compiler,
                      struct v3d_key *key,
                      struct v3d_prog_data **out_prog_data,
                      nir_shader *s,
                      void (*debug_output)(const char *msg,
                                           void *debug_output_data),
                      void *debug_output_data,
                      int program_id, int variant_id,
                      uint32_t *final_assembly_size)
{
        struct v3d_prog_data *prog_data;
        struct v3d_compile *c = vir_compile_init(compiler, key, s,
                                                 debug_output, debug_output_data,
                                                 program_id, variant_id);

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                c->vs_key = (struct v3d_vs_key *)key;
                prog_data = rzalloc_size(NULL, sizeof(struct v3d_vs_prog_data));
                break;
        case MESA_SHADER_FRAGMENT:
                c->fs_key = (struct v3d_fs_key *)key;
                prog_data = rzalloc_size(NULL, sizeof(struct v3d_fs_prog_data));
                break;
        default:
                unreachable("unsupported shader stage");
        }

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                v3d_nir_lower_vs_early(c);
        } else {
                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
                v3d_nir_lower_fs_early(c);
        }

        v3d_lower_nir(c);

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                v3d_nir_lower_vs_late(c);
        } else {
                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
                v3d_nir_lower_fs_late(c);
        }

        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
        NIR_PASS_V(c->s, nir_lower_idiv);

        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, prog_data);

        *out_prog_data = prog_data;

        char *shaderdb;
        int ret = asprintf(&shaderdb,
                           "%s shader: %d inst, %d threads, %d loops, "
                           "%d uniforms, %d:%d spills:fills",
                           vir_get_stage_name(c),
                           c->qpu_inst_count,
                           c->threads,
                           c->loops,
                           c->num_uniforms,
                           c->spills,
                           c->fills);
        if (ret >= 0) {
                c->debug_output(shaderdb, c->debug_output_data);
                free(shaderdb);
        }

        return v3d_return_qpu_insts(c, final_assembly_size);
}

void
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
{
        if (qinst->dst.file == QFILE_TEMP)
                c->defs[qinst->dst.index] = NULL;

        assert(&qinst->link != c->cursor.link);

        list_del(&qinst->link);
        free(qinst);

        c->live_intervals_valid = false;
}

struct qreg
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
{
        /* XXX
        int pack = reg.pack;

        while (reg.file == QFILE_TEMP &&
               c->defs[reg.index] &&
               (c->defs[reg.index]->op == QOP_MOV ||
                c->defs[reg.index]->op == QOP_FMOV) &&
               !c->defs[reg.index]->dst.pack &&
               !c->defs[reg.index]->src[0].pack) {
                reg = c->defs[reg.index]->src[0];
        }

        reg.pack = pack;
        */
        return reg;
}

void
vir_compile_destroy(struct v3d_compile *c)
{
        /* Defuse the assert that we aren't removing the cursor's instruction.
         */
        c->cursor.link = NULL;

        vir_for_each_block(block, c) {
                while (!list_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
                        vir_remove_instruction(c, qinst);
                }
        }

        ralloc_free(c);
}

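/* Returns a QFILE_UNIF reference for the given contents/data pair, reusing
 * an existing uniform slot when one matches so each constant is stored only
 * once in the uniform stream. A sketch:
 *
 *        struct qreg a = vir_uniform(c, QUNIFORM_CONSTANT, 0x3f800000);
 *        struct qreg b = vir_uniform(c, QUNIFORM_CONSTANT, 0x3f800000);
 *        assert(a.index == b.index);
 */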
struct qreg
vir_uniform(struct v3d_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return vir_reg(QFILE_UNIF, i);
                }
        }

        uint32_t uniform = c->num_uniforms++;

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           uint32_t,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);
        }

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;

        return vir_reg(QFILE_UNIF, uniform);
}

#define OPTPASS(func)                                                   \
        do {                                                            \
                bool stage_progress = func(c);                          \
                if (stage_progress) {                                   \
                        progress = true;                                \
                        if (print_opt_debug) {                          \
                                fprintf(stderr,                         \
                                        "VIR opt pass %2d: %s progress\n", \
                                        pass, #func);                   \
                        }                                               \
                        /*XXX vir_validate(c);*/                        \
                }                                                       \
        } while (0)

void
vir_optimize(struct v3d_compile *c)
{
        bool print_opt_debug = false;
        int pass = 1;

        while (true) {
                bool progress = false;

                OPTPASS(vir_opt_copy_propagate);
                OPTPASS(vir_opt_dead_code);
                OPTPASS(vir_opt_small_immediates);

                if (!progress)
                        break;

                pass++;
        }
}

const char *
vir_get_stage_name(struct v3d_compile *c)
{
        if (c->vs_key && c->vs_key->is_coord)
                return "MESA_SHADER_COORD";
        else
                return gl_shader_stage_name(c->s->info.stage);
}
1108 }