v3d: Add support for textureSize() on MSAA textures.
[mesa.git] src/broadcom/compiler/vir.c
/*
 * Copyright © 2016-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "broadcom/common/v3d_device_info.h"
#include "v3d_compiler.h"

int
vir_get_non_sideband_nsrc(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return 0;
        case V3D_QPU_INSTR_TYPE_ALU:
                if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
                        return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
                else
                        return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
        }

        return 0;
}

int
vir_get_nsrc(struct qinst *inst)
{
        int nsrc = vir_get_non_sideband_nsrc(inst);

        if (vir_has_implicit_uniform(inst))
                nsrc++;

        return nsrc;
}

bool
vir_has_implicit_uniform(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->dst.file) {
                case QFILE_TLBU:
                        return true;
                default:
                        return inst->has_implicit_uniform;
                }
        }
        return false;
}

/* The sideband uniform for textures gets stored after the normal ALU
 * arguments.
 */
int
vir_get_implicit_uniform_src(struct qinst *inst)
{
        if (!vir_has_implicit_uniform(inst))
                return -1;
        return vir_get_nsrc(inst) - 1;
}
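
/* Illustrative layout (not tied to any particular opcode): for an
 * instruction with one ALU operand plus an implicit sideband uniform,
 * vir_get_non_sideband_nsrc() returns 1 and vir_get_nsrc() returns 2, so
 * the uniform lives in src[1] == src[vir_get_implicit_uniform_src(inst)].
 */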

/**
 * Returns whether the instruction has any side effects that must be
 * preserved.
 */
bool
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_SETREVF:
                case V3D_QPU_A_SETMSF:
                case V3D_QPU_A_VPMSETUP:
                case V3D_QPU_A_STVPMV:
                case V3D_QPU_A_STVPMD:
                case V3D_QPU_A_STVPMP:
                case V3D_QPU_A_VPMWT:
                case V3D_QPU_A_TMUWT:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_MULTOP:
                        return true;
                default:
                        break;
                }
        }

        if (inst->qpu.sig.ldtmu ||
            inst->qpu.sig.ldvary ||
            inst->qpu.sig.wrtmuc ||
            inst->qpu.sig.thrsw) {
                return true;
        }

        return false;
}

bool
vir_is_float_input(struct qinst *inst)
{
        /* XXX: More instrs */
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return false;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_FADD:
                case V3D_QPU_A_FSUB:
                case V3D_QPU_A_FMIN:
                case V3D_QPU_A_FMAX:
                case V3D_QPU_A_FTOIN:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_FMOV:
                case V3D_QPU_M_VFMUL:
                case V3D_QPU_M_FMUL:
                        return true;
                default:
                        break;
                }
        }

        return false;
}

bool
vir_is_raw_mov(struct qinst *inst)
{
        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
             inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
                return false;
        }

        if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
            inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
                return false;
        }

        if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
            inst->qpu.flags.mc != V3D_QPU_COND_NONE)
                return false;

        return true;
}

bool
vir_is_add(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.add.op != V3D_QPU_A_NOP);
}

bool
vir_is_mul(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
}

bool
vir_is_tex(struct qinst *inst)
{
        if (inst->dst.file == QFILE_MAGIC)
                return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);

        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
                return true;
        }

        return false;
}

bool
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_VPM:
                        return true;
                default:
                        break;
                }
        }

        if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
                                  inst->qpu.sig.ldtlb ||
                                  inst->qpu.sig.ldtlbu ||
                                  inst->qpu.sig.ldvpm)) {
                return true;
        }

        return false;
}

bool
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        switch (inst->dst.file) {
        case QFILE_MAGIC:
                switch (inst->dst.index) {
                case V3D_QPU_WADDR_RECIP:
                case V3D_QPU_WADDR_RSQRT:
                case V3D_QPU_WADDR_EXP:
                case V3D_QPU_WADDR_LOG:
                case V3D_QPU_WADDR_SIN:
                        return true;
                }
                break;
        default:
                break;
        }

        if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
                return true;

        return false;
}

void
vir_set_unpack(struct qinst *inst, int src,
               enum v3d_qpu_input_unpack unpack)
{
        assert(src == 0 || src == 1);

        if (vir_is_add(inst)) {
                if (src == 0)
                        inst->qpu.alu.add.a_unpack = unpack;
                else
                        inst->qpu.alu.add.b_unpack = unpack;
        } else {
                assert(vir_is_mul(inst));
                if (src == 0)
                        inst->qpu.alu.mul.a_unpack = unpack;
                else
                        inst->qpu.alu.mul.b_unpack = unpack;
        }
}

void
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.ac = cond;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mc = cond;
        }
}

void
vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.apf = pf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mpf = pf;
        }
}

#if 0
uint8_t
vir_channels_written(struct qinst *inst)
{
        if (vir_is_mul(inst)) {
                switch (inst->dst.pack) {
                case QPU_PACK_MUL_NOP:
                case QPU_PACK_MUL_8888:
                        return 0xf;
                case QPU_PACK_MUL_8A:
                        return 0x1;
                case QPU_PACK_MUL_8B:
                        return 0x2;
                case QPU_PACK_MUL_8C:
                        return 0x4;
                case QPU_PACK_MUL_8D:
                        return 0x8;
                }
        } else {
                switch (inst->dst.pack) {
                case QPU_PACK_A_NOP:
                case QPU_PACK_A_8888:
                case QPU_PACK_A_8888_SAT:
                case QPU_PACK_A_32_SAT:
                        return 0xf;
                case QPU_PACK_A_8A:
                case QPU_PACK_A_8A_SAT:
                        return 0x1;
                case QPU_PACK_A_8B:
                case QPU_PACK_A_8B_SAT:
                        return 0x2;
                case QPU_PACK_A_8C:
                case QPU_PACK_A_8C_SAT:
                        return 0x4;
                case QPU_PACK_A_8D:
                case QPU_PACK_A_8D_SAT:
                        return 0x8;
                case QPU_PACK_A_16A:
                case QPU_PACK_A_16A_SAT:
                        return 0x3;
                case QPU_PACK_A_16B:
                case QPU_PACK_A_16B_SAT:
                        return 0xc;
                }
        }
        unreachable("Bad pack field");
}
#endif

struct qreg
vir_get_temp(struct v3d_compile *c)
{
        struct qreg reg;

        reg.file = QFILE_TEMP;
        reg.index = c->num_temps++;

        if (c->num_temps > c->defs_array_size) {
                uint32_t old_size = c->defs_array_size;
                c->defs_array_size = MAX2(old_size * 2, 16);

                c->defs = reralloc(c, c->defs, struct qinst *,
                                   c->defs_array_size);
                memset(&c->defs[old_size], 0,
                       sizeof(c->defs[0]) * (c->defs_array_size - old_size));

                c->spillable = reralloc(c, c->spillable,
                                        BITSET_WORD,
                                        BITSET_WORDS(c->defs_array_size));
                for (int i = old_size; i < c->defs_array_size; i++)
                        BITSET_SET(c->spillable, i);
        }

        return reg;
}
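
/* Growth sketch: the defs/spillable arrays double on demand (16, 32, 64,
 * ...).  Newly added defs[] slots start out NULL, and every new temp is
 * initially marked spillable.
 */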

struct qinst *
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.add.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.mul.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_branch_inst(enum v3d_qpu_branch_cond cond, struct qreg src)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
        inst->qpu.branch.cond = cond;
        inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
        inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
        inst->qpu.branch.ub = true;
        inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;

        inst->dst = vir_reg(QFILE_NULL, 0);
        inst->src[0] = src;
        inst->uniform = ~0;

        return inst;
}

static void
vir_emit(struct v3d_compile *c, struct qinst *inst)
{
        switch (c->cursor.mode) {
        case vir_cursor_add:
                list_add(&inst->link, c->cursor.link);
                break;
        case vir_cursor_addtail:
                list_addtail(&inst->link, c->cursor.link);
                break;
        }

        c->cursor = vir_after_inst(inst);
        c->live_intervals_valid = false;
}

/* Updates inst to write to a new temporary, emits it, and notes the def. */
struct qreg
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
{
        assert(inst->dst.file == QFILE_NULL);

        /* If we're emitting an instruction that's a def, it had better be
         * writing a register.
         */
        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||
                       v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));
                assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||
                       v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));
        }

        inst->dst = vir_get_temp(c);

        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = inst;

        vir_emit(c, inst);

        return inst->dst;
}
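
/* Usage sketch (most callers actually go through generated wrappers such
 * as vir_FADD(); a and b are illustrative operands):
 *
 *     struct qreg sum =
 *             vir_emit_def(c, vir_add_inst(V3D_QPU_A_FADD,
 *                                          vir_reg(QFILE_NULL, 0), a, b));
 */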

struct qinst *
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
{
        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = NULL;

        vir_emit(c, inst);

        return inst;
}

struct qblock *
vir_new_block(struct v3d_compile *c)
{
        struct qblock *block = rzalloc(c, struct qblock);

        list_inithead(&block->instructions);

        block->predecessors = _mesa_set_create(block,
                                               _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

        block->index = c->next_block_index++;

        return block;
}

void
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
{
        c->cur_block = block;
        c->cursor = vir_after_block(block);
        list_addtail(&block->link, &c->blocks);
}

struct qblock *
vir_entry_block(struct v3d_compile *c)
{
        return list_first_entry(&c->blocks, struct qblock, link);
}

struct qblock *
vir_exit_block(struct v3d_compile *c)
{
        return list_last_entry(&c->blocks, struct qblock, link);
}

void
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
{
        _mesa_set_add(successor->predecessors, predecessor);
        if (predecessor->successors[0]) {
                assert(!predecessor->successors[1]);
                predecessor->successors[1] = successor;
        } else {
                predecessor->successors[0] = successor;
        }
}
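
/* Example: linking a two-way branch at the end of a block fills the two
 * successor slots in order (block names illustrative):
 *
 *     vir_link_blocks(cond_block, then_block);   // successors[0]
 *     vir_link_blocks(cond_block, else_block);   // successors[1]
 *
 * A third link would trip the assert above, since a block can have at
 * most two successors.
 */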

const struct v3d_compiler *
v3d_compiler_init(const struct v3d_device_info *devinfo)
{
        struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
        if (!compiler)
                return NULL;

        compiler->devinfo = devinfo;

        if (!vir_init_reg_sets(compiler)) {
                ralloc_free(compiler);
                return NULL;
        }

        return compiler;
}

void
v3d_compiler_free(const struct v3d_compiler *compiler)
{
        ralloc_free((void *)compiler);
}
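
/* Lifecycle sketch (error handling elided):
 *
 *     const struct v3d_compiler *compiler = v3d_compiler_init(devinfo);
 *     ... any number of v3d_compile_vs()/v3d_compile_fs() calls ...
 *     v3d_compiler_free(compiler);
 */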

static struct v3d_compile *
vir_compile_init(const struct v3d_compiler *compiler,
                 struct v3d_key *key,
                 nir_shader *s,
                 void (*debug_output)(const char *msg,
                                      void *debug_output_data),
                 void *debug_output_data,
                 int program_id, int variant_id)
{
        struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);

        c->compiler = compiler;
        c->devinfo = compiler->devinfo;
        c->key = key;
        c->program_id = program_id;
        c->variant_id = variant_id;
        c->threads = 4;
        c->debug_output = debug_output;
        c->debug_output_data = debug_output_data;

        s = nir_shader_clone(c, s);
        c->s = s;

        list_inithead(&c->blocks);
        vir_set_emit_block(c, vir_new_block(c));

        c->output_position_index = -1;
        c->output_point_size_index = -1;
        c->output_sample_mask_index = -1;

        c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

        return c;
}

static int
type_size_vec4(const struct glsl_type *type)
{
        return glsl_count_attribute_slots(type, false);
}

static void
v3d_lower_nir(struct v3d_compile *c)
{
        struct nir_lower_tex_options tex_options = {
                .lower_txd = true,
                .lower_rect = false, /* XXX: Use this on V3D 3.x */
                .lower_txp = ~0,
                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and (for 32-bit returns)
         * ARB_texture_swizzle-style swizzle.
         */
        for (int i = 0; i < ARRAY_SIZE(c->key->tex); i++) {
                for (int j = 0; j < 4; j++)
                        tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];

                if (c->key->tex[i].clamp_s)
                        tex_options.saturate_s |= 1 << i;
                if (c->key->tex[i].clamp_t)
                        tex_options.saturate_t |= 1 << i;
                if (c->key->tex[i].clamp_r)
                        tex_options.saturate_r |= 1 << i;
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
}

static void
v3d_lower_nir_late(struct v3d_compile *c)
{
        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, nir_lower_idiv);
}

static void
v3d_set_prog_data_uniforms(struct v3d_compile *c,
                           struct v3d_prog_data *prog_data)
{
        int count = c->num_uniforms;
        struct v3d_uniform_list *ulist = &prog_data->uniforms;

        ulist->count = count;
        ulist->data = ralloc_array(prog_data, uint32_t, count);
        memcpy(ulist->data, c->uniform_data,
               count * sizeof(*ulist->data));
        ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
        memcpy(ulist->contents, c->uniform_contents,
               count * sizeof(*ulist->contents));
}
/* Copy the compiler UBO range state to the compiled shader, dropping
 * ranges for arrays that were never referenced by an indirect load.
 *
 * (Note that VIR dead code elimination of an array access still leaves
 * the array alive, though.)
 */
static void
v3d_set_prog_data_ubo(struct v3d_compile *c,
                      struct v3d_prog_data *prog_data)
{
        if (!c->num_ubo_ranges)
                return;

        prog_data->num_ubo_ranges = 0;
        prog_data->ubo_ranges = ralloc_array(prog_data, struct v3d_ubo_range,
                                             c->num_ubo_ranges);
        for (int i = 0; i < c->num_ubo_ranges; i++) {
                if (!c->ubo_range_used[i])
                        continue;

                struct v3d_ubo_range *range = &c->ubo_ranges[i];
                prog_data->ubo_ranges[prog_data->num_ubo_ranges++] = *range;
                prog_data->ubo_size += range->size;
        }

        if (prog_data->ubo_size) {
                if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                vir_get_stage_name(c),
                                c->program_id, c->variant_id,
                                prog_data->ubo_size / 4);
                }
        }
}

static void
v3d_set_prog_data(struct v3d_compile *c,
                  struct v3d_prog_data *prog_data)
{
        prog_data->threads = c->threads;
        prog_data->single_seg = !c->last_thrsw;
        prog_data->spill_size = c->spill_size;

        v3d_set_prog_data_uniforms(c, prog_data);
        v3d_set_prog_data_ubo(c, prog_data);
}

static uint64_t *
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
{
        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);

        uint64_t *qpu_insts = malloc(*final_assembly_size);
        if (!qpu_insts)
                return NULL;

        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);

        char *shaderdb;
        int ret = asprintf(&shaderdb,
                           "%s shader: %d inst, %d threads, %d loops, "
                           "%d uniforms, %d:%d spills:fills",
                           vir_get_stage_name(c),
                           c->qpu_inst_count,
                           c->threads,
                           c->loops,
                           c->num_uniforms,
                           c->spills,
                           c->fills);
        if (ret >= 0) {
                c->debug_output(shaderdb, c->debug_output_data);
                free(shaderdb);
        }

        vir_compile_destroy(c);

        return qpu_insts;
}
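
/* The resulting debug line looks roughly like (numbers illustrative):
 *
 *     MESA_SHADER_FRAGMENT shader: 92 inst, 4 threads, 0 loops, 12 uniforms, 0:0 spills:fills
 */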

uint64_t *v3d_compile_vs(const struct v3d_compiler *compiler,
                         struct v3d_vs_key *key,
                         struct v3d_vs_prog_data *prog_data,
                         nir_shader *s,
                         void (*debug_output)(const char *msg,
                                              void *debug_output_data),
                         void *debug_output_data,
                         int program_id, int variant_id,
                         uint32_t *final_assembly_size)
{
        struct v3d_compile *c = vir_compile_init(compiler, &key->base, s,
                                                 debug_output, debug_output_data,
                                                 program_id, variant_id);

        c->vs_key = key;

        /* Split our I/O vars and dead code eliminate the unused
         * components.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
                   nir_var_shader_in | nir_var_shader_out);
        uint64_t used_outputs[4] = {0};
        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                int slot = v3d_slot_get_slot(c->vs_key->fs_inputs[i]);
                int comp = v3d_slot_get_component(c->vs_key->fs_inputs[i]);
                used_outputs[comp] |= 1ull << slot;
        }
        NIR_PASS_V(c->s, nir_remove_unused_io_vars,
                   &c->s->outputs, used_outputs, NULL); /* demotes to globals */
        NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);
        NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                   type_size_vec4,
                   (nir_lower_io_options)0);

        v3d_lower_nir(c);

        if (key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (key->base.ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_vs, key->base.ucp_enables,
                           false);
                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                           nir_var_shader_out);
        }

        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);

        v3d_lower_nir_late(c);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, &prog_data->base);

        prog_data->base.num_inputs = c->num_inputs;

        /* The vertex data gets format converted by the VPM so that
         * each attribute channel takes up a VPM column.  Precompute
         * the sizes for the shader record.
         */
        for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
                prog_data->vattr_sizes[i] = c->vattr_sizes[i];
                prog_data->vpm_input_size += c->vattr_sizes[i];
        }

        prog_data->uses_vid = (s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_VERTEX_ID));
        prog_data->uses_iid = (s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_INSTANCE_ID));

        if (prog_data->uses_vid)
                prog_data->vpm_input_size++;
        if (prog_data->uses_iid)
                prog_data->vpm_input_size++;

        /* Input/output segment sizes are in sectors (8 rows of 32 bits per
         * channel).
         */
        prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
        prog_data->vpm_output_size = align(c->num_vpm_writes, 8) / 8;
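
        /* e.g., a hypothetical 13-row input segment pads to
         * align(13, 8) = 16 rows, i.e. 2 sectors.
         */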

        /* Set us up for shared input/output segments.  This is apparently
         * necessary for our VCM setup to avoid varying corruption.
         */
        prog_data->separate_segments = false;
        prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
                                          prog_data->vpm_input_size);
        prog_data->vpm_input_size = 0;

        /* Compute VCM cache size.  We set up our program to take up less than
         * half of the VPM, so that any set of bin and render programs won't
         * run out of space.  We need space for at least one input segment,
         * and then allocate the rest to output segments (one for the current
         * program, the rest to VCM).  The valid range of the VCM cache size
         * field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4
         * batches.
         */
        assert(c->devinfo->vpm_size);
        int sector_size = 16 * sizeof(uint32_t) * 8;
        int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
        int half_vpm = vpm_size_in_sectors / 2;
        int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
        int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
        assert(vpm_output_batches >= 2);
        prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);
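
        /* Worked example, assuming an 8KB VPM for illustration:
         * sector_size = 16 * 4 * 8 = 512 bytes, so vpm_size_in_sectors = 16
         * and half_vpm = 8.  With the shared-segment setup above
         * (vpm_input_size == 0) and a 2-sector output segment,
         * vpm_output_batches = 8 / 2 = 4 and
         * vcm_cache_size = CLAMP(3, 2, 4) = 3.
         */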

        return v3d_return_qpu_insts(c, final_assembly_size);
}

static void
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
                            struct v3d_fs_prog_data *prog_data)
{
        prog_data->base.num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
                      (V3D_MAX_FS_INPUTS - 1) / 24);
        for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
                if (BITSET_TEST(c->flat_shade_flags, i))
                        prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->noperspective_flags, i))
                        prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->centroid_flags, i))
                        prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
        }
}
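
/* The flags are packed 24 to a word; e.g. a flat-shaded input at slot 30
 * sets flat_shade_flags[30 / 24] == word 1, bit (30 % 24) == 6.
 */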

static void
v3d_fixup_fs_output_types(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                uint32_t mask = 0;

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        mask = ~0;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
                        break;
                }

                if (c->fs_key->int_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_INT,
                                                 glsl_get_components(var->type));
                } else if (c->fs_key->uint_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_UINT,
                                                 glsl_get_components(var->type));
                }
        }
}

uint64_t *v3d_compile_fs(const struct v3d_compiler *compiler,
                         struct v3d_fs_key *key,
                         struct v3d_fs_prog_data *prog_data,
                         nir_shader *s,
                         void (*debug_output)(const char *msg,
                                              void *debug_output_data),
                         void *debug_output_data,
                         int program_id, int variant_id,
                         uint32_t *final_assembly_size)
{
        struct v3d_compile *c = vir_compile_init(compiler, &key->base, s,
                                                 debug_output, debug_output_data,
                                                 program_id, variant_id);

        c->fs_key = key;

        if (key->int_color_rb || key->uint_color_rb)
                v3d_fixup_fs_output_types(c);

        v3d_lower_nir(c);

        if (key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (key->alpha_test) {
                NIR_PASS_V(c->s, nir_lower_alpha_test, key->alpha_test_func,
                           false);
        }

        if (key->base.ucp_enables)
                NIR_PASS_V(c->s, nir_lower_clip_fs, key->base.ucp_enables);

        /* Note: FS input scalarizing must happen after
         * nir_lower_two_sided_color, which only handles a vec4 at a time.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);

        v3d_lower_nir_late(c);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, &prog_data->base);
        v3d_set_fs_prog_data_inputs(c, prog_data);
        prog_data->writes_z = (c->s->info.outputs_written &
                               (1 << FRAG_RESULT_DEPTH));
        prog_data->discard = (c->s->info.fs.uses_discard ||
                              c->fs_key->sample_alpha_to_coverage);
        prog_data->uses_center_w = c->uses_center_w;

        return v3d_return_qpu_insts(c, final_assembly_size);
}

void
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
{
        if (qinst->dst.file == QFILE_TEMP)
                c->defs[qinst->dst.index] = NULL;

        assert(&qinst->link != c->cursor.link);

        list_del(&qinst->link);
        free(qinst);

        c->live_intervals_valid = false;
}

struct qreg
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
{
        /* XXX
        int pack = reg.pack;

        while (reg.file == QFILE_TEMP &&
               c->defs[reg.index] &&
               (c->defs[reg.index]->op == QOP_MOV ||
                c->defs[reg.index]->op == QOP_FMOV) &&
               !c->defs[reg.index]->dst.pack &&
               !c->defs[reg.index]->src[0].pack) {
                reg = c->defs[reg.index]->src[0];
        }

        reg.pack = pack;
        */
        return reg;
}

void
vir_compile_destroy(struct v3d_compile *c)
{
        /* Defuse the assert that we aren't removing the cursor's instruction.
         */
        c->cursor.link = NULL;

        vir_for_each_block(block, c) {
                while (!list_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
                        vir_remove_instruction(c, qinst);
                }
        }

        ralloc_free(c);
}

struct qreg
vir_uniform(struct v3d_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return vir_reg(QFILE_UNIF, i);
                }
        }

        uint32_t uniform = c->num_uniforms++;

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           uint32_t,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);
        }

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;

        return vir_reg(QFILE_UNIF, uniform);
}
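
/* Usage sketch: identical (contents, data) requests are deduplicated, so
 * e.g. (QUNIFORM_CONSTANT and fui() shown for illustration)
 *
 *     struct qreg one = vir_uniform(c, QUNIFORM_CONSTANT, fui(1.0));
 *
 * returns the same QFILE_UNIF register on every call.
 */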

static bool
vir_can_set_flags(struct v3d_compile *c, struct qinst *inst)
{
        if (c->devinfo->ver >= 40 && (v3d_qpu_reads_vpm(&inst->qpu) ||
                                      v3d_qpu_uses_sfu(&inst->qpu))) {
                return false;
        }

        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.add.op == V3D_QPU_A_NOP &&
             inst->qpu.alu.mul.op == V3D_QPU_M_NOP)) {
                return false;
        }

        return true;
}

void
vir_PF(struct v3d_compile *c, struct qreg src, enum v3d_qpu_pf pf)
{
        struct qinst *last_inst = NULL;

        if (!list_empty(&c->cur_block->instructions)) {
                last_inst = (struct qinst *)c->cur_block->instructions.prev;
                /* Can't stuff the PF into the last inst if our cursor
                 * isn't pointing after it.
                 */
                struct vir_cursor after_inst = vir_after_inst(last_inst);
                if (c->cursor.mode != after_inst.mode ||
                    c->cursor.link != after_inst.link)
                        last_inst = NULL;
        }

        if (src.file != QFILE_TEMP ||
            !c->defs[src.index] ||
            last_inst != c->defs[src.index] ||
            !vir_can_set_flags(c, last_inst)) {
                /* XXX: Make the MOV be the appropriate type */
                last_inst = vir_MOV_dest(c, vir_reg(QFILE_NULL, 0), src);
        }

        vir_set_pf(last_inst, pf);
}

#define OPTPASS(func)                                                   \
        do {                                                            \
                bool stage_progress = func(c);                          \
                if (stage_progress) {                                   \
                        progress = true;                                \
                        if (print_opt_debug) {                          \
                                fprintf(stderr,                         \
                                        "VIR opt pass %2d: %s progress\n", \
                                        pass, #func);                   \
                        }                                               \
                        /*XXX vir_validate(c);*/                        \
                }                                                       \
        } while (0)

void
vir_optimize(struct v3d_compile *c)
{
        bool print_opt_debug = false;
        int pass = 1;

        while (true) {
                bool progress = false;

                OPTPASS(vir_opt_copy_propagate);
                OPTPASS(vir_opt_dead_code);
                OPTPASS(vir_opt_small_immediates);

                if (!progress)
                        break;

                pass++;
        }
}

const char *
vir_get_stage_name(struct v3d_compile *c)
{
        if (c->vs_key && c->vs_key->is_coord)
                return "MESA_SHADER_COORD";
        else
                return gl_shader_stage_name(c->s->info.stage);
}