src/broadcom/compiler/vir.c
/*
 * Copyright © 2016-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "broadcom/common/v3d_device_info.h"
#include "v3d_compiler.h"
#include "util/u_prim.h"

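/* Returns the number of sources consumed by this instruction: zero for
 * branches, otherwise the source count of whichever ALU op (add or mul)
 * the instruction encodes.
 */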
int
vir_get_nsrc(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return 0;
        case V3D_QPU_INSTR_TYPE_ALU:
                if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
                        return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
                else
                        return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
        }

        return 0;
}

/**
 * Returns whether the instruction has any side effects that must be
 * preserved.
 */
bool
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_SETREVF:
                case V3D_QPU_A_SETMSF:
                case V3D_QPU_A_VPMSETUP:
                case V3D_QPU_A_STVPMV:
                case V3D_QPU_A_STVPMD:
                case V3D_QPU_A_STVPMP:
                case V3D_QPU_A_VPMWT:
                case V3D_QPU_A_TMUWT:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_MULTOP:
                        return true;
                default:
                        break;
                }
        }

        if (inst->qpu.sig.ldtmu ||
            inst->qpu.sig.ldvary ||
            inst->qpu.sig.ldtlbu ||
            inst->qpu.sig.ldtlb ||
            inst->qpu.sig.wrtmuc ||
            inst->qpu.sig.thrsw) {
                return true;
        }

        return false;
}

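/* Returns whether this instruction is a plain MOV/FMOV: no pack or unpack
 * modifiers and no condition, so the destination is a bit-exact copy of
 * the source.
 */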
bool
vir_is_raw_mov(struct qinst *inst)
{
        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
             inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
                return false;
        }

        if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
            inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
                return false;
        }

        if (inst->qpu.alu.add.a_unpack != V3D_QPU_UNPACK_NONE ||
            inst->qpu.alu.add.b_unpack != V3D_QPU_UNPACK_NONE ||
            inst->qpu.alu.mul.a_unpack != V3D_QPU_UNPACK_NONE ||
            inst->qpu.alu.mul.b_unpack != V3D_QPU_UNPACK_NONE) {
                return false;
        }

        if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
            inst->qpu.flags.mc != V3D_QPU_COND_NONE)
                return false;

        return true;
}

bool
vir_is_add(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.add.op != V3D_QPU_A_NOP);
}

bool
vir_is_mul(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
}

bool
vir_is_tex(struct qinst *inst)
{
        if (inst->dst.file == QFILE_MAGIC)
                return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);

        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
                return true;
        }

        return false;
}

bool
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_VPM:
                        return true;
                default:
                        break;
                }
        }

        if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
                                  inst->qpu.sig.ldtlb ||
                                  inst->qpu.sig.ldtlbu ||
                                  inst->qpu.sig.ldvpm)) {
                return true;
        }

        return false;
}

bool
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        switch (inst->dst.file) {
        case QFILE_MAGIC:
                switch (inst->dst.index) {
                case V3D_QPU_WADDR_RECIP:
                case V3D_QPU_WADDR_RSQRT:
                case V3D_QPU_WADDR_EXP:
                case V3D_QPU_WADDR_LOG:
                case V3D_QPU_WADDR_SIN:
                        return true;
                }
                break;
        default:
                break;
        }

        if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
                return true;

        return false;
}

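/* The helpers below set per-instruction QPU fields on whichever ALU half
 * (add or mul) the instruction actually uses.
 */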
void
vir_set_unpack(struct qinst *inst, int src,
               enum v3d_qpu_input_unpack unpack)
{
        assert(src == 0 || src == 1);

        if (vir_is_add(inst)) {
                if (src == 0)
                        inst->qpu.alu.add.a_unpack = unpack;
                else
                        inst->qpu.alu.add.b_unpack = unpack;
        } else {
                assert(vir_is_mul(inst));
                if (src == 0)
                        inst->qpu.alu.mul.a_unpack = unpack;
                else
                        inst->qpu.alu.mul.b_unpack = unpack;
        }
}

void
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.ac = cond;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mc = cond;
        }
}

void
vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.apf = pf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mpf = pf;
        }
}

void
vir_set_uf(struct qinst *inst, enum v3d_qpu_uf uf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.auf = uf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.muf = uf;
        }
}

#if 0
uint8_t
vir_channels_written(struct qinst *inst)
{
        if (vir_is_mul(inst)) {
                switch (inst->dst.pack) {
                case QPU_PACK_MUL_NOP:
                case QPU_PACK_MUL_8888:
                        return 0xf;
                case QPU_PACK_MUL_8A:
                        return 0x1;
                case QPU_PACK_MUL_8B:
                        return 0x2;
                case QPU_PACK_MUL_8C:
                        return 0x4;
                case QPU_PACK_MUL_8D:
                        return 0x8;
                }
        } else {
                switch (inst->dst.pack) {
                case QPU_PACK_A_NOP:
                case QPU_PACK_A_8888:
                case QPU_PACK_A_8888_SAT:
                case QPU_PACK_A_32_SAT:
                        return 0xf;
                case QPU_PACK_A_8A:
                case QPU_PACK_A_8A_SAT:
                        return 0x1;
                case QPU_PACK_A_8B:
                case QPU_PACK_A_8B_SAT:
                        return 0x2;
                case QPU_PACK_A_8C:
                case QPU_PACK_A_8C_SAT:
                        return 0x4;
                case QPU_PACK_A_8D:
                case QPU_PACK_A_8D_SAT:
                        return 0x8;
                case QPU_PACK_A_16A:
                case QPU_PACK_A_16A_SAT:
                        return 0x3;
                case QPU_PACK_A_16B:
                case QPU_PACK_A_16B_SAT:
                        return 0xc;
                }
        }
        unreachable("Bad pack field");
}
#endif

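/* Allocates a fresh temporary register, growing the defs and spillable
 * arrays as needed. New temps start out marked as spillable.
 */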
struct qreg
vir_get_temp(struct v3d_compile *c)
{
        struct qreg reg;

        reg.file = QFILE_TEMP;
        reg.index = c->num_temps++;

        if (c->num_temps > c->defs_array_size) {
                uint32_t old_size = c->defs_array_size;
                c->defs_array_size = MAX2(old_size * 2, 16);

                c->defs = reralloc(c, c->defs, struct qinst *,
                                   c->defs_array_size);
                memset(&c->defs[old_size], 0,
                       sizeof(c->defs[0]) * (c->defs_array_size - old_size));

                c->spillable = reralloc(c, c->spillable,
                                        BITSET_WORD,
                                        BITSET_WORDS(c->defs_array_size));
                for (int i = old_size; i < c->defs_array_size; i++)
                        BITSET_SET(c->spillable, i);
        }

        return reg;
}

struct qinst *
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.add.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.mul.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

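/* Usage sketch (cf. the inline emit helpers in v3d_compiler.h, such as
 * vir_MOV_dest()): a mul-ALU move would be built and emitted as
 *
 *     vir_emit_nondef(c, vir_mul_inst(V3D_QPU_M_MOV, dest, src, c->undef));
 */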
struct qinst *
vir_branch_inst(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
        inst->qpu.branch.cond = cond;
        inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
        inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
        inst->qpu.branch.ub = true;
        inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;

        inst->dst = vir_nop_reg();
        inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, 0);

        return inst;
}

static void
vir_emit(struct v3d_compile *c, struct qinst *inst)
{
        switch (c->cursor.mode) {
        case vir_cursor_add:
                list_add(&inst->link, c->cursor.link);
                break;
        case vir_cursor_addtail:
                list_addtail(&inst->link, c->cursor.link);
                break;
        }

        c->cursor = vir_after_inst(inst);
        c->live_intervals_valid = false;
}

/* Updates inst to write to a new temporary, emits it, and notes the def. */
struct qreg
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
{
        assert(inst->dst.file == QFILE_NULL);

        /* If we're emitting an instruction that's a def, it had better be
         * writing a register.
         */
        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||
                       v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));
                assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||
                       v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));
        }

        inst->dst = vir_get_temp(c);

        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = inst;

        vir_emit(c, inst);

        return inst->dst;
}

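/* Emits an instruction that writes to a pre-assigned destination. Any def
 * note for the temp is cleared, since it no longer has a single definition.
 */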
struct qinst *
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
{
        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = NULL;

        vir_emit(c, inst);

        return inst;
}

struct qblock *
vir_new_block(struct v3d_compile *c)
{
        struct qblock *block = rzalloc(c, struct qblock);

        list_inithead(&block->instructions);

        block->predecessors = _mesa_set_create(block,
                                               _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

        block->index = c->next_block_index++;

        return block;
}

void
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
{
        c->cur_block = block;
        c->cursor = vir_after_block(block);
        list_addtail(&block->link, &c->blocks);
}

struct qblock *
vir_entry_block(struct v3d_compile *c)
{
        return list_first_entry(&c->blocks, struct qblock, link);
}

struct qblock *
vir_exit_block(struct v3d_compile *c)
{
        return list_last_entry(&c->blocks, struct qblock, link);
}

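/* Records the CFG edge in both directions: the successor gains an entry in
 * its predecessor set, and the predecessor fills its first free successor
 * slot (of two).
 */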
void
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
{
        _mesa_set_add(successor->predecessors, predecessor);
        if (predecessor->successors[0]) {
                assert(!predecessor->successors[1]);
                predecessor->successors[1] = successor;
        } else {
                predecessor->successors[0] = successor;
        }
}

const struct v3d_compiler *
v3d_compiler_init(const struct v3d_device_info *devinfo)
{
        struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
        if (!compiler)
                return NULL;

        compiler->devinfo = devinfo;

        if (!vir_init_reg_sets(compiler)) {
                ralloc_free(compiler);
                return NULL;
        }

        return compiler;
}

void
v3d_compiler_free(const struct v3d_compiler *compiler)
{
        ralloc_free((void *)compiler);
}

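/* Allocates and initializes the per-shader compile state. The NIR shader
 * is cloned into the compile context so later lowering can modify it
 * freely.
 */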
static struct v3d_compile *
vir_compile_init(const struct v3d_compiler *compiler,
                 struct v3d_key *key,
                 nir_shader *s,
                 void (*debug_output)(const char *msg,
                                      void *debug_output_data),
                 void *debug_output_data,
                 int program_id, int variant_id)
{
        struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);

        c->compiler = compiler;
        c->devinfo = compiler->devinfo;
        c->key = key;
        c->program_id = program_id;
        c->variant_id = variant_id;
        c->threads = 4;
        c->debug_output = debug_output;
        c->debug_output_data = debug_output_data;

        s = nir_shader_clone(c, s);
        c->s = s;

        list_inithead(&c->blocks);
        vir_set_emit_block(c, vir_new_block(c));

        c->output_position_index = -1;
        c->output_sample_mask_index = -1;

        c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

        return c;
}

static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}

static void
v3d_lower_nir(struct v3d_compile *c)
{
        struct nir_lower_tex_options tex_options = {
                .lower_txd = true,
                .lower_tg4_broadcom_swizzle = true,

                .lower_rect = false, /* XXX: Use this on V3D 3.x */
                .lower_txp = ~0,
                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and (for 32-bit returns)
         * ARB_texture_swizzle-style swizzle.
         */
        for (int i = 0; i < ARRAY_SIZE(c->key->tex); i++) {
                for (int j = 0; j < 4; j++)
                        tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];

                if (c->key->tex[i].clamp_s)
                        tex_options.saturate_s |= 1 << i;
                if (c->key->tex[i].clamp_t)
                        tex_options.saturate_t |= 1 << i;
                if (c->key->tex[i].clamp_r)
                        tex_options.saturate_r |= 1 << i;
                if (c->key->tex[i].return_size == 16) {
                        tex_options.lower_tex_packing[i] =
                                nir_lower_tex_packing_16;
                }
        }

        /* CS textures may not have return_size reflecting the shadow state. */
        nir_foreach_variable(var, &c->s->uniforms) {
                const struct glsl_type *type = glsl_without_array(var->type);
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);

                if (!glsl_type_is_sampler(type) ||
                    !glsl_sampler_type_is_shadow(type))
                        continue;

                for (int i = 0; i < array_len; i++) {
                        tex_options.lower_tex_packing[var->data.binding + i] =
                                nir_lower_tex_packing_16;
                }
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
        NIR_PASS_V(c->s, nir_lower_system_values);

        NIR_PASS_V(c->s, nir_lower_vars_to_scratch,
                   nir_var_function_temp,
                   0,
                   glsl_get_natural_size_align_bytes);
        NIR_PASS_V(c->s, v3d_nir_lower_scratch);
}

static void
v3d_set_prog_data_uniforms(struct v3d_compile *c,
                           struct v3d_prog_data *prog_data)
{
        int count = c->num_uniforms;
        struct v3d_uniform_list *ulist = &prog_data->uniforms;

        ulist->count = count;
        ulist->data = ralloc_array(prog_data, uint32_t, count);
        memcpy(ulist->data, c->uniform_data,
               count * sizeof(*ulist->data));
        ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
        memcpy(ulist->contents, c->uniform_contents,
               count * sizeof(*ulist->contents));
}

static void
v3d_vs_set_prog_data(struct v3d_compile *c,
                     struct v3d_vs_prog_data *prog_data)
{
        /* The vertex data gets format converted by the VPM so that
         * each attribute channel takes up a VPM column. Precompute
         * the sizes for the shader record.
         */
        for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
                prog_data->vattr_sizes[i] = c->vattr_sizes[i];
                prog_data->vpm_input_size += c->vattr_sizes[i];
        }

        prog_data->uses_vid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_VERTEX_ID));
        prog_data->uses_iid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_INSTANCE_ID));

        if (prog_data->uses_vid)
                prog_data->vpm_input_size++;
        if (prog_data->uses_iid)
                prog_data->vpm_input_size++;

        /* Input/output segment sizes are in sectors (8 rows of 32 bits per
         * channel).
         */
        prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
        prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;
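        /* e.g. a hypothetical 13-row input segment rounds up to
         * align(13, 8) / 8 = 2 sectors.
         */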

        /* Set us up for shared input/output segments. This is apparently
         * necessary for our VCM setup to avoid varying corruption.
         */
        prog_data->separate_segments = false;
        prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
                                          prog_data->vpm_input_size);
        prog_data->vpm_input_size = 0;

        /* Compute VCM cache size. We set up our program to take up less than
         * half of the VPM, so that any set of bin and render programs won't
         * run out of space. We need space for at least one input segment,
         * and then allocate the rest to output segments (one for the current
         * program, the rest to VCM). The valid range of the VCM cache size
         * field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4
         * batches.
         */
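        /* Illustrative arithmetic (hypothetical sizes): with a 16 KB VPM,
         * sector_size is 16 channels * 4 bytes * 8 rows = 512 bytes, so
         * vpm_size_in_sectors = 32 and half_vpm = 16. With the input folded
         * into a 2-sector shared output segment, that leaves 16 / 2 = 8
         * output batches and a VCM cache size of CLAMP(7, 2, 4) = 4.
         */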
        assert(c->devinfo->vpm_size);
        int sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;
        int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
        int half_vpm = vpm_size_in_sectors / 2;
        int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
        int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
        assert(vpm_output_batches >= 2);
        prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);
}

static void
v3d_gs_set_prog_data(struct v3d_compile *c,
                     struct v3d_gs_prog_data *prog_data)
{
        prog_data->num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        /* gl_PrimitiveIdIn is written by the GBG into the first word of the
         * VPM output header automatically and the shader will overwrite
         * it after reading it if necessary, so it doesn't add to the VPM
         * size requirements.
         */
        prog_data->uses_pid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_PRIMITIVE_ID));

        /* Output segment size is in sectors (8 rows of 32 bits per channel) */
        prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;

        prog_data->out_prim_type = c->s->info.gs.output_primitive;
}

static void
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
                            struct v3d_fs_prog_data *prog_data)
{
        prog_data->num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
                      (V3D_MAX_FS_INPUTS - 1) / 24);
        for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
                if (BITSET_TEST(c->flat_shade_flags, i))
                        prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->noperspective_flags, i))
                        prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->centroid_flags, i))
                        prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
        }
}

static void
v3d_fs_set_prog_data(struct v3d_compile *c,
                     struct v3d_fs_prog_data *prog_data)
{
        v3d_set_fs_prog_data_inputs(c, prog_data);
        prog_data->writes_z = c->writes_z;
        prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;
        prog_data->uses_center_w = c->uses_center_w;
        prog_data->uses_implicit_point_line_varyings =
                c->uses_implicit_point_line_varyings;
        prog_data->lock_scoreboard_on_first_thrsw =
                c->lock_scoreboard_on_first_thrsw;
}

static void
v3d_cs_set_prog_data(struct v3d_compile *c,
                     struct v3d_compute_prog_data *prog_data)
{
        prog_data->shared_size = c->s->info.cs.shared_size;
}

static void
v3d_set_prog_data(struct v3d_compile *c,
                  struct v3d_prog_data *prog_data)
{
        prog_data->threads = c->threads;
        prog_data->single_seg = !c->last_thrsw;
        prog_data->spill_size = c->spill_size;
        prog_data->tmu_dirty_rcl = c->tmu_dirty_rcl;

        v3d_set_prog_data_uniforms(c, prog_data);

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
                break;
        case MESA_SHADER_GEOMETRY:
                v3d_gs_set_prog_data(c, (struct v3d_gs_prog_data *)prog_data);
                break;
        case MESA_SHADER_FRAGMENT:
                v3d_fs_set_prog_data(c, (struct v3d_fs_prog_data *)prog_data);
                break;
        case MESA_SHADER_COMPUTE:
                v3d_cs_set_prog_data(c, (struct v3d_compute_prog_data *)prog_data);
                break;
        default:
                unreachable("unsupported shader stage");
        }
}

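/* Copies the final QPU assembly into a malloc'd buffer owned by the caller
 * and destroys the compile. Returns NULL on allocation failure.
 */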
static uint64_t *
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
{
        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);

        uint64_t *qpu_insts = malloc(*final_assembly_size);
        if (!qpu_insts)
                return NULL;

        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);

        vir_compile_destroy(c);

        return qpu_insts;
}

static void
v3d_nir_lower_vs_early(struct v3d_compile *c)
{
        /* Split our I/O vars and dead code eliminate the unused
         * components.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
                   nir_var_shader_in | nir_var_shader_out);
        uint64_t used_outputs[4] = {0};
        for (int i = 0; i < c->vs_key->num_used_outputs; i++) {
                int slot = v3d_slot_get_slot(c->vs_key->used_outputs[i]);
                int comp = v3d_slot_get_component(c->vs_key->used_outputs[i]);
                used_outputs[comp] |= 1ull << slot;
        }
        NIR_PASS_V(c->s, nir_remove_unused_io_vars,
                   &c->s->outputs, used_outputs, NULL); /* demotes to globals */
        NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);

        /* This must go before nir_lower_io */
        if (c->vs_key->per_vertex_point_size)
                NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);

        NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                   type_size_vec4,
                   (nir_lower_io_options)0);
        /* clean up nir_lower_io's deref_var remains */
        NIR_PASS_V(c->s, nir_opt_dce);
}

static void
v3d_nir_lower_gs_early(struct v3d_compile *c)
{
        /* Split our I/O vars and dead code eliminate the unused
         * components.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
                   nir_var_shader_in | nir_var_shader_out);
        uint64_t used_outputs[4] = {0};
        for (int i = 0; i < c->gs_key->num_used_outputs; i++) {
                int slot = v3d_slot_get_slot(c->gs_key->used_outputs[i]);
                int comp = v3d_slot_get_component(c->gs_key->used_outputs[i]);
                used_outputs[comp] |= 1ull << slot;
        }
        NIR_PASS_V(c->s, nir_remove_unused_io_vars,
                   &c->s->outputs, used_outputs, NULL); /* demotes to globals */
        NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);

        /* This must go before nir_lower_io */
        if (c->gs_key->per_vertex_point_size)
                NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);

        NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                   type_size_vec4,
                   (nir_lower_io_options)0);
        /* clean up nir_lower_io's deref_var remains */
        NIR_PASS_V(c->s, nir_opt_dce);
}

static void
v3d_fixup_fs_output_types(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                uint32_t mask = 0;

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        mask = ~0;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
                        break;
                }

                if (c->fs_key->int_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_INT,
                                                 glsl_get_components(var->type));
                } else if (c->fs_key->uint_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_UINT,
                                                 glsl_get_components(var->type));
                }
        }
}

static void
v3d_nir_lower_fs_early(struct v3d_compile *c)
{
        if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
                v3d_fixup_fs_output_types(c);

        NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);

        /* If the shader has no non-TLB side effects, we can promote it to
         * enabling early_fragment_tests even if the user didn't.
         */
        if (!(c->s->info.num_images ||
              c->s->info.num_ssbos)) {
                c->s->info.fs.early_fragment_tests = true;
        }
}

static void
v3d_nir_lower_gs_late(struct v3d_compile *c)
{
        if (c->key->ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_gs, c->key->ucp_enables,
                           false, NULL);
        }

        /* Note: GS output scalarizing must happen after nir_lower_clip_gs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
}

static void
v3d_nir_lower_vs_late(struct v3d_compile *c)
{
        if (c->vs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->key->ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
                           false, false, NULL);
                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                           nir_var_shader_out);
        }

        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
}

static void
v3d_nir_lower_fs_late(struct v3d_compile *c)
{
        if (c->fs_key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (c->fs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->fs_key->alpha_test) {
                NIR_PASS_V(c->s, nir_lower_alpha_test,
                           c->fs_key->alpha_test_func,
                           false, NULL);
        }

        if (c->key->ucp_enables)
                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables,
                           false);

        /* Note: FS input scalarizing must happen after
         * nir_lower_two_sided_color, which only handles a vec4 at a time.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
}

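/* Returns the peak number of simultaneously live temps, computed by summing
 * each temp's live range into a per-instruction pressure array.
 */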
static uint32_t
vir_get_max_temps(struct v3d_compile *c)
{
        int max_ip = 0;
        vir_for_each_inst_inorder(inst, c)
                max_ip++;

        uint32_t *pressure = rzalloc_array(NULL, uint32_t, max_ip);

        for (int t = 0; t < c->num_temps; t++) {
                for (int i = c->temp_start[t]; (i < c->temp_end[t] &&
                                                i < max_ip); i++) {
                        pressure[i]++;
                }
        }

        uint32_t max_temps = 0;
        for (int i = 0; i < max_ip; i++)
                max_temps = MAX2(max_temps, pressure[i]);

        ralloc_free(pressure);

        return max_temps;
}

uint64_t *v3d_compile(const struct v3d_compiler *compiler,
                      struct v3d_key *key,
                      struct v3d_prog_data **out_prog_data,
                      nir_shader *s,
                      void (*debug_output)(const char *msg,
                                           void *debug_output_data),
                      void *debug_output_data,
                      int program_id, int variant_id,
                      uint32_t *final_assembly_size)
{
        struct v3d_prog_data *prog_data;
        struct v3d_compile *c = vir_compile_init(compiler, key, s,
                                                 debug_output, debug_output_data,
                                                 program_id, variant_id);

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                c->vs_key = (struct v3d_vs_key *)key;
                prog_data = rzalloc_size(NULL, sizeof(struct v3d_vs_prog_data));
                break;
        case MESA_SHADER_GEOMETRY:
                c->gs_key = (struct v3d_gs_key *)key;
                prog_data = rzalloc_size(NULL, sizeof(struct v3d_gs_prog_data));
                break;
        case MESA_SHADER_FRAGMENT:
                c->fs_key = (struct v3d_fs_key *)key;
                prog_data = rzalloc_size(NULL, sizeof(struct v3d_fs_prog_data));
                break;
        case MESA_SHADER_COMPUTE:
                prog_data = rzalloc_size(NULL,
                                         sizeof(struct v3d_compute_prog_data));
                break;
        default:
                unreachable("unsupported shader stage");
        }

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                v3d_nir_lower_vs_early(c);
                break;
        case MESA_SHADER_GEOMETRY:
                v3d_nir_lower_gs_early(c);
                break;
        case MESA_SHADER_FRAGMENT:
                v3d_nir_lower_fs_early(c);
                break;
        default:
                break;
        }

        v3d_lower_nir(c);

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                v3d_nir_lower_vs_late(c);
                break;
        case MESA_SHADER_GEOMETRY:
                v3d_nir_lower_gs_late(c);
                break;
        case MESA_SHADER_FRAGMENT:
                v3d_nir_lower_fs_late(c);
                break;
        default:
                break;
        }

        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
        NIR_PASS_V(c->s, nir_lower_idiv, nir_lower_idiv_fast);

        v3d_optimize_nir(c->s);

        /* Do late algebraic optimization to turn add(a, neg(b)) back into
         * subs, then the mandatory cleanup after algebraic. Note that it may
         * produce fnegs, and if so then we need to keep running to squash
         * fneg(fneg(a)).
         */
        bool more_late_algebraic = true;
        while (more_late_algebraic) {
                more_late_algebraic = false;
                NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
                NIR_PASS_V(c->s, nir_opt_constant_folding);
                NIR_PASS_V(c->s, nir_copy_prop);
                NIR_PASS_V(c->s, nir_opt_dce);
                NIR_PASS_V(c->s, nir_opt_cse);
        }

        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        /* Schedule for about half our register space, to enable more shaders
         * to hit 4 threads.
         */
        NIR_PASS_V(c->s, nir_schedule, 24);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, prog_data);

        *out_prog_data = prog_data;

        char *shaderdb;
        int ret = asprintf(&shaderdb,
                           "%s shader: %d inst, %d threads, %d loops, "
                           "%d uniforms, %d max-temps, %d:%d spills:fills, "
                           "%d sfu-stalls, %d inst-and-stalls",
                           vir_get_stage_name(c),
                           c->qpu_inst_count,
                           c->threads,
                           c->loops,
                           c->num_uniforms,
                           vir_get_max_temps(c),
                           c->spills,
                           c->fills,
                           c->qpu_inst_stalled_count,
                           c->qpu_inst_count + c->qpu_inst_stalled_count);
        if (ret >= 0) {
                if (V3D_DEBUG & V3D_DEBUG_SHADERDB)
                        fprintf(stderr, "SHADER-DB: %s\n", shaderdb);

                c->debug_output(shaderdb, c->debug_output_data);
                free(shaderdb);
        }

        return v3d_return_qpu_insts(c, final_assembly_size);
}

void
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
{
        if (qinst->dst.file == QFILE_TEMP)
                c->defs[qinst->dst.index] = NULL;

        assert(&qinst->link != c->cursor.link);

        list_del(&qinst->link);
        free(qinst);

        c->live_intervals_valid = false;
}

struct qreg
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
{
        /* XXX
        int pack = reg.pack;

        while (reg.file == QFILE_TEMP &&
               c->defs[reg.index] &&
               (c->defs[reg.index]->op == QOP_MOV ||
                c->defs[reg.index]->op == QOP_FMOV) &&
               !c->defs[reg.index]->dst.pack &&
               !c->defs[reg.index]->src[0].pack) {
                reg = c->defs[reg.index]->src[0];
        }

        reg.pack = pack;
        */
        return reg;
}

void
vir_compile_destroy(struct v3d_compile *c)
{
        /* Defuse the assert that we aren't removing the cursor's
         * instruction.
         */
        c->cursor.link = NULL;

        vir_for_each_block(block, c) {
                while (!list_is_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
                        vir_remove_instruction(c, qinst);
                }
        }

        ralloc_free(c);
}

uint32_t
vir_get_uniform_index(struct v3d_compile *c,
                      enum quniform_contents contents,
                      uint32_t data)
{
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return i;
                }
        }

        uint32_t uniform = c->num_uniforms++;

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           uint32_t,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);
        }

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;

        return uniform;
}

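/* Returns a fresh temp whose value is loaded from the given uniform, using
 * the LDUNIF signal on a NOP instruction.
 */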
struct qreg
vir_uniform(struct v3d_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.ldunif = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
        inst->dst = vir_get_temp(c);
        c->defs[inst->dst.index] = inst;
        return inst->dst;
}

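/* Runs one optimization pass, folding its result into the loop's overall
 * progress flag and optionally logging which pass made progress.
 */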
#define OPTPASS(func)                                                  \
        do {                                                           \
                bool stage_progress = func(c);                         \
                if (stage_progress) {                                  \
                        progress = true;                               \
                        if (print_opt_debug) {                         \
                                fprintf(stderr,                        \
                                        "VIR opt pass %2d: %s progress\n", \
                                        pass, #func);                  \
                        }                                              \
                        /*XXX vir_validate(c);*/                       \
                }                                                      \
        } while (0)

void
vir_optimize(struct v3d_compile *c)
{
        bool print_opt_debug = false;
        int pass = 1;

        while (true) {
                bool progress = false;

                OPTPASS(vir_opt_copy_propagate);
                OPTPASS(vir_opt_redundant_flags);
                OPTPASS(vir_opt_dead_code);
                OPTPASS(vir_opt_small_immediates);

                if (!progress)
                        break;

                pass++;
        }
}

const char *
vir_get_stage_name(struct v3d_compile *c)
{
        if (c->vs_key && c->vs_key->is_coord)
                return "MESA_SHADER_VERTEX_BIN";
        else if (c->gs_key && c->gs_key->is_coord)
                return "MESA_SHADER_GEOMETRY_BIN";
        else
                return gl_shader_stage_name(c->s->info.stage);
}