[mesa.git] / src/broadcom/compiler/vir.c
/*
 * Copyright © 2016-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "broadcom/common/v3d_device_info.h"
#include "v3d_compiler.h"

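/* Returns the number of explicit ALU sources for the instruction, not
 * counting the implicit (sideband) uniform source that vir_get_nsrc()
 * appends for some destinations.
 */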
int
vir_get_non_sideband_nsrc(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return 0;
        case V3D_QPU_INSTR_TYPE_ALU:
                if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
                        return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
                else
                        return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
        }

        return 0;
}

int
vir_get_nsrc(struct qinst *inst)
{
        int nsrc = vir_get_non_sideband_nsrc(inst);

        if (vir_has_implicit_uniform(inst))
                nsrc++;

        return nsrc;
}

bool
vir_has_implicit_uniform(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->dst.file) {
                case QFILE_TLBU:
                        return true;
                case QFILE_MAGIC:
                        switch (inst->dst.index) {
                        case V3D_QPU_WADDR_TLBU:
                        case V3D_QPU_WADDR_TMUAU:
                        case V3D_QPU_WADDR_SYNCU:
                                return true;
                        default:
                                break;
                        }
                        break;
                default:
                        return inst->has_implicit_uniform;
                }
        }
        return false;
}

/* The sideband uniform for textures gets stored after the normal ALU
 * arguments.
 */
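/* For illustration: a TMU-targeting instruction whose ALU op takes two
 * explicit sources plus a sideband uniform reports vir_get_nsrc() == 3
 * and vir_get_implicit_uniform_src() == 2, i.e. the uniform rides in
 * src[2] after the two real operands.
 */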
int
vir_get_implicit_uniform_src(struct qinst *inst)
{
        if (!vir_has_implicit_uniform(inst))
                return -1;
        return vir_get_nsrc(inst) - 1;
}

/**
 * Returns whether the instruction has any side effects that must be
 * preserved.
 */
bool
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_SETREVF:
                case V3D_QPU_A_SETMSF:
                case V3D_QPU_A_VPMSETUP:
                case V3D_QPU_A_STVPMV:
                case V3D_QPU_A_STVPMD:
                case V3D_QPU_A_STVPMP:
                case V3D_QPU_A_VPMWT:
                case V3D_QPU_A_TMUWT:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_MULTOP:
                        return true;
                default:
                        break;
                }
        }

        if (inst->qpu.sig.ldtmu ||
            inst->qpu.sig.ldvary ||
            inst->qpu.sig.wrtmuc ||
            inst->qpu.sig.thrsw) {
                return true;
        }

        return false;
}

bool
vir_is_float_input(struct qinst *inst)
{
        /* XXX: More instrs */
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return false;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_FADD:
                case V3D_QPU_A_FSUB:
                case V3D_QPU_A_FMIN:
                case V3D_QPU_A_FMAX:
                case V3D_QPU_A_FTOIN:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_FMOV:
                case V3D_QPU_M_VFMUL:
                case V3D_QPU_M_FMUL:
                        return true;
                default:
                        break;
                }
        }

        return false;
}

bool
vir_is_raw_mov(struct qinst *inst)
{
        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
             inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
                return false;
        }

        if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
            inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
                return false;
        }

        if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
            inst->qpu.flags.mc != V3D_QPU_COND_NONE)
                return false;

        return true;
}

bool
vir_is_add(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.add.op != V3D_QPU_A_NOP);
}

bool
vir_is_mul(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
}

bool
vir_is_tex(struct qinst *inst)
{
        if (inst->dst.file == QFILE_MAGIC)
                return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);

        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
                return true;
        }

        return false;
}

bool
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_VPM:
                        return true;
                default:
                        break;
                }
        }

        if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
                                  inst->qpu.sig.ldtlb ||
                                  inst->qpu.sig.ldtlbu ||
                                  inst->qpu.sig.ldvpm)) {
                return true;
        }

        return false;
}

bool
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        switch (inst->dst.file) {
        case QFILE_MAGIC:
                switch (inst->dst.index) {
                case V3D_QPU_WADDR_RECIP:
                case V3D_QPU_WADDR_RSQRT:
                case V3D_QPU_WADDR_EXP:
                case V3D_QPU_WADDR_LOG:
                case V3D_QPU_WADDR_SIN:
                        return true;
                }
                break;
        default:
                break;
        }

        if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
                return true;

        return false;
}

void
vir_set_unpack(struct qinst *inst, int src,
               enum v3d_qpu_input_unpack unpack)
{
        assert(src == 0 || src == 1);

        if (vir_is_add(inst)) {
                if (src == 0)
                        inst->qpu.alu.add.a_unpack = unpack;
                else
                        inst->qpu.alu.add.b_unpack = unpack;
        } else {
                assert(vir_is_mul(inst));
                if (src == 0)
                        inst->qpu.alu.mul.a_unpack = unpack;
                else
                        inst->qpu.alu.mul.b_unpack = unpack;
        }
}

void
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.ac = cond;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mc = cond;
        }
}

void
vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.apf = pf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mpf = pf;
        }
}

void
vir_set_uf(struct qinst *inst, enum v3d_qpu_uf uf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.auf = uf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.muf = uf;
        }
}

#if 0
uint8_t
vir_channels_written(struct qinst *inst)
{
        if (vir_is_mul(inst)) {
                switch (inst->dst.pack) {
                case QPU_PACK_MUL_NOP:
                case QPU_PACK_MUL_8888:
                        return 0xf;
                case QPU_PACK_MUL_8A:
                        return 0x1;
                case QPU_PACK_MUL_8B:
                        return 0x2;
                case QPU_PACK_MUL_8C:
                        return 0x4;
                case QPU_PACK_MUL_8D:
                        return 0x8;
                }
        } else {
                switch (inst->dst.pack) {
                case QPU_PACK_A_NOP:
                case QPU_PACK_A_8888:
                case QPU_PACK_A_8888_SAT:
                case QPU_PACK_A_32_SAT:
                        return 0xf;
                case QPU_PACK_A_8A:
                case QPU_PACK_A_8A_SAT:
                        return 0x1;
                case QPU_PACK_A_8B:
                case QPU_PACK_A_8B_SAT:
                        return 0x2;
                case QPU_PACK_A_8C:
                case QPU_PACK_A_8C_SAT:
                        return 0x4;
                case QPU_PACK_A_8D:
                case QPU_PACK_A_8D_SAT:
                        return 0x8;
                case QPU_PACK_A_16A:
                case QPU_PACK_A_16A_SAT:
                        return 0x3;
                case QPU_PACK_A_16B:
                case QPU_PACK_A_16B_SAT:
                        return 0xc;
                }
        }
        unreachable("Bad pack field");
}
#endif

struct qreg
vir_get_temp(struct v3d_compile *c)
{
        struct qreg reg;

        reg.file = QFILE_TEMP;
        reg.index = c->num_temps++;

        if (c->num_temps > c->defs_array_size) {
                uint32_t old_size = c->defs_array_size;
                c->defs_array_size = MAX2(old_size * 2, 16);

                c->defs = reralloc(c, c->defs, struct qinst *,
                                   c->defs_array_size);
                memset(&c->defs[old_size], 0,
                       sizeof(c->defs[0]) * (c->defs_array_size - old_size));

                c->spillable = reralloc(c, c->spillable,
                                        BITSET_WORD,
                                        BITSET_WORDS(c->defs_array_size));
                for (int i = old_size; i < c->defs_array_size; i++)
                        BITSET_SET(c->spillable, i);
        }

        return reg;
}

struct qinst *
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.add.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.mul.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_branch_inst(enum v3d_qpu_branch_cond cond, struct qreg src)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
        inst->qpu.branch.cond = cond;
        inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
        inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
        inst->qpu.branch.ub = true;
        inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;

        inst->dst = vir_reg(QFILE_NULL, 0);
        inst->src[0] = src;
        inst->uniform = ~0;

        return inst;
}

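/* Inserts the instruction at the current cursor position, then advances
 * the cursor past it and invalidates the cached live intervals, since the
 * program has changed.
 */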
static void
vir_emit(struct v3d_compile *c, struct qinst *inst)
{
        switch (c->cursor.mode) {
        case vir_cursor_add:
                list_add(&inst->link, c->cursor.link);
                break;
        case vir_cursor_addtail:
                list_addtail(&inst->link, c->cursor.link);
                break;
        }

        c->cursor = vir_after_inst(inst);
        c->live_intervals_valid = false;
}

/* Updates inst to write to a new temporary, emits it, and notes the def. */
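/* A sketch of typical usage (a and b stand in for arbitrary operands):
 *
 *     struct qreg sum =
 *             vir_emit_def(c, vir_add_inst(V3D_QPU_A_FADD,
 *                                          vir_reg(QFILE_NULL, 0), a, b));
 *
 * The QFILE_NULL destination satisfies the assert below and gets replaced
 * with a fresh temporary.
 */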
struct qreg
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
{
        assert(inst->dst.file == QFILE_NULL);

        /* If we're emitting an instruction that's a def, it had better be
         * writing a register.
         */
        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||
                       v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));
                assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||
                       v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));
        }

        inst->dst = vir_get_temp(c);

        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = inst;

        vir_emit(c, inst);

        return inst->dst;
}

struct qinst *
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
{
        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = NULL;

        vir_emit(c, inst);

        return inst;
}

struct qblock *
vir_new_block(struct v3d_compile *c)
{
        struct qblock *block = rzalloc(c, struct qblock);

        list_inithead(&block->instructions);

        block->predecessors = _mesa_set_create(block,
                                               _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

        block->index = c->next_block_index++;

        return block;
}

void
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
{
        c->cur_block = block;
        c->cursor = vir_after_block(block);
        list_addtail(&block->link, &c->blocks);
}

struct qblock *
vir_entry_block(struct v3d_compile *c)
{
        return list_first_entry(&c->blocks, struct qblock, link);
}

struct qblock *
vir_exit_block(struct v3d_compile *c)
{
        return list_last_entry(&c->blocks, struct qblock, link);
}

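/* Records the CFG edge from predecessor to successor. A block can have at
 * most two successors (e.g. the two targets of a conditional branch).
 */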
void
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
{
        _mesa_set_add(successor->predecessors, predecessor);
        if (predecessor->successors[0]) {
                assert(!predecessor->successors[1]);
                predecessor->successors[1] = successor;
        } else {
                predecessor->successors[0] = successor;
        }
}

const struct v3d_compiler *
v3d_compiler_init(const struct v3d_device_info *devinfo)
{
        struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
        if (!compiler)
                return NULL;

        compiler->devinfo = devinfo;

        if (!vir_init_reg_sets(compiler)) {
                ralloc_free(compiler);
                return NULL;
        }

        return compiler;
}

void
v3d_compiler_free(const struct v3d_compiler *compiler)
{
        ralloc_free((void *)compiler);
}

static struct v3d_compile *
vir_compile_init(const struct v3d_compiler *compiler,
                 struct v3d_key *key,
                 nir_shader *s,
                 void (*debug_output)(const char *msg,
                                      void *debug_output_data),
                 void *debug_output_data,
                 int program_id, int variant_id)
{
        struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);

        c->compiler = compiler;
        c->devinfo = compiler->devinfo;
        c->key = key;
        c->program_id = program_id;
        c->variant_id = variant_id;
        c->threads = 4;
        c->debug_output = debug_output;
        c->debug_output_data = debug_output_data;

        s = nir_shader_clone(c, s);
        c->s = s;

        list_inithead(&c->blocks);
        vir_set_emit_block(c, vir_new_block(c));

        c->output_position_index = -1;
        c->output_point_size_index = -1;
        c->output_sample_mask_index = -1;

        c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

        return c;
}

static int
type_size_vec4(const struct glsl_type *type)
{
        return glsl_count_attribute_slots(type, false);
}

static void
v3d_lower_nir(struct v3d_compile *c)
{
        struct nir_lower_tex_options tex_options = {
                .lower_txd = true,
                .lower_tg4_broadcom_swizzle = true,

                .lower_rect = false, /* XXX: Use this on V3D 3.x */
                .lower_txp = ~0,
                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and (for 32-bit returns)
         * ARB_texture_swizzle-style swizzle.
         */
        for (int i = 0; i < ARRAY_SIZE(c->key->tex); i++) {
                for (int j = 0; j < 4; j++)
                        tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];

                if (c->key->tex[i].clamp_s)
                        tex_options.saturate_s |= 1 << i;
                if (c->key->tex[i].clamp_t)
                        tex_options.saturate_t |= 1 << i;
                if (c->key->tex[i].clamp_r)
                        tex_options.saturate_r |= 1 << i;
                if (c->key->tex[i].return_size == 16) {
                        tex_options.lower_tex_packing[i] =
                                nir_lower_tex_packing_16;
                }
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
        NIR_PASS_V(c->s, nir_lower_system_values);
}

static void
v3d_set_prog_data_uniforms(struct v3d_compile *c,
                           struct v3d_prog_data *prog_data)
{
        int count = c->num_uniforms;
        struct v3d_uniform_list *ulist = &prog_data->uniforms;

        ulist->count = count;
        ulist->data = ralloc_array(prog_data, uint32_t, count);
        memcpy(ulist->data, c->uniform_data,
               count * sizeof(*ulist->data));
        ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
        memcpy(ulist->contents, c->uniform_contents,
               count * sizeof(*ulist->contents));
}

/* Copy the compiler UBO range state to the compiled shader, dropping out
 * arrays that were never referenced by an indirect load.
 *
 * (Note that VIR dead code elimination of an array access still leaves that
 * array alive, though)
 */
static void
v3d_set_prog_data_ubo(struct v3d_compile *c,
                      struct v3d_prog_data *prog_data)
{
        if (!c->num_ubo_ranges)
                return;

        prog_data->num_ubo_ranges = 0;
        prog_data->ubo_ranges = ralloc_array(prog_data, struct v3d_ubo_range,
                                             c->num_ubo_ranges);
        for (int i = 0; i < c->num_ubo_ranges; i++) {
                if (!c->ubo_range_used[i])
                        continue;

                struct v3d_ubo_range *range = &c->ubo_ranges[i];
                prog_data->ubo_ranges[prog_data->num_ubo_ranges++] = *range;
                prog_data->ubo_size += range->size;
        }

        if (prog_data->ubo_size) {
                if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                vir_get_stage_name(c),
                                c->program_id, c->variant_id,
                                prog_data->ubo_size / 4);
                }
        }
}

static void
v3d_vs_set_prog_data(struct v3d_compile *c,
                     struct v3d_vs_prog_data *prog_data)
{
        prog_data->base.num_inputs = c->num_inputs;

        /* The vertex data gets format converted by the VPM so that
         * each attribute channel takes up a VPM column. Precompute
         * the sizes for the shader record.
         */
        for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
                prog_data->vattr_sizes[i] = c->vattr_sizes[i];
                prog_data->vpm_input_size += c->vattr_sizes[i];
        }

        prog_data->uses_vid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_VERTEX_ID));
        prog_data->uses_iid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_INSTANCE_ID));

        if (prog_data->uses_vid)
                prog_data->vpm_input_size++;
        if (prog_data->uses_iid)
                prog_data->vpm_input_size++;

        /* Input/output segment sizes are in sectors (8 rows of 32 bits per
         * channel).
         */
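        /* For example, 13 rows of VPM input occupy align(13, 8) / 8 == 2
         * sectors.
         */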
        prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
        prog_data->vpm_output_size = align(c->num_vpm_writes, 8) / 8;

        /* Set us up for shared input/output segments. This is apparently
         * necessary for our VCM setup to avoid varying corruption.
         */
        prog_data->separate_segments = false;
        prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
                                          prog_data->vpm_input_size);
        prog_data->vpm_input_size = 0;

        /* Compute VCM cache size. We set up our program to take up less than
         * half of the VPM, so that any set of bin and render programs won't
         * run out of space. We need space for at least one input segment,
         * and then allocate the rest to output segments (one for the current
         * program, the rest to VCM). The valid range of the VCM cache size
         * field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4
         * batches.
         */
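        /* Worked example, assuming (purely for illustration) a 16kb VPM:
         * sector_size is 16 * 4 * 8 = 512 bytes, so vpm_size_in_sectors is
         * 32 and half_vpm is 16. With shared segments (vpm_input_size == 0)
         * and a 2-sector output segment, vpm_output_batches is 16 / 2 = 8,
         * giving vcm_cache_size = CLAMP(8 - 1, 2, 4) = 4.
         */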
        assert(c->devinfo->vpm_size);
        int sector_size = 16 * sizeof(uint32_t) * 8;
        int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
        int half_vpm = vpm_size_in_sectors / 2;
        int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
        int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
        assert(vpm_output_batches >= 2);
        prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);
}

static void
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
                            struct v3d_fs_prog_data *prog_data)
{
        prog_data->base.num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
                      (V3D_MAX_FS_INPUTS - 1) / 24);
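        /* Each flags word packs 24 consecutive inputs, so e.g. input 30
         * lands in word 30 / 24 = 1, bit 30 % 24 = 6.
         */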
        for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
                if (BITSET_TEST(c->flat_shade_flags, i))
                        prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->noperspective_flags, i))
                        prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->centroid_flags, i))
                        prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
        }
}

static void
v3d_fs_set_prog_data(struct v3d_compile *c,
                     struct v3d_fs_prog_data *prog_data)
{
        v3d_set_fs_prog_data_inputs(c, prog_data);
        prog_data->writes_z = (c->s->info.outputs_written &
                               (1 << FRAG_RESULT_DEPTH));
        prog_data->discard = (c->s->info.fs.uses_discard ||
                              c->fs_key->sample_alpha_to_coverage);
        prog_data->uses_center_w = c->uses_center_w;

        /* If the shader has some side effects and hasn't allowed early
         * fragment tests, disable them.
         */
        if (!c->s->info.fs.early_fragment_tests &&
            (c->s->info.num_images ||
             c->s->info.num_ssbos ||
             c->s->info.num_abos)) {
                prog_data->discard = true;
        }
}

static void
v3d_set_prog_data(struct v3d_compile *c,
                  struct v3d_prog_data *prog_data)
{
        prog_data->threads = c->threads;
        prog_data->single_seg = !c->last_thrsw;
        prog_data->spill_size = c->spill_size;

        v3d_set_prog_data_uniforms(c, prog_data);
        v3d_set_prog_data_ubo(c, prog_data);

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
        } else {
                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
                v3d_fs_set_prog_data(c, (struct v3d_fs_prog_data *)prog_data);
        }
}

static uint64_t *
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
{
        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);

        uint64_t *qpu_insts = malloc(*final_assembly_size);
        if (!qpu_insts)
                return NULL;

        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);

        vir_compile_destroy(c);

        return qpu_insts;
}

static void
v3d_nir_lower_vs_early(struct v3d_compile *c)
{
        /* Split our I/O vars and dead code eliminate the unused
         * components.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
                   nir_var_shader_in | nir_var_shader_out);
        uint64_t used_outputs[4] = {0};
        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                int slot = v3d_slot_get_slot(c->vs_key->fs_inputs[i]);
                int comp = v3d_slot_get_component(c->vs_key->fs_inputs[i]);
                used_outputs[comp] |= 1ull << slot;
        }
        NIR_PASS_V(c->s, nir_remove_unused_io_vars,
                   &c->s->outputs, used_outputs, NULL); /* demotes to globals */
        NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);
        NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                   type_size_vec4,
                   (nir_lower_io_options)0);
}

static void
v3d_fixup_fs_output_types(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                uint32_t mask = 0;

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        mask = ~0;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
                        break;
                }

                if (c->fs_key->int_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_INT,
                                                 glsl_get_components(var->type));
                } else if (c->fs_key->uint_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_UINT,
                                                 glsl_get_components(var->type));
                }
        }
}

static void
v3d_nir_lower_fs_early(struct v3d_compile *c)
{
        if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
                v3d_fixup_fs_output_types(c);
}

static void
v3d_nir_lower_vs_late(struct v3d_compile *c)
{
        if (c->vs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->key->ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
                           false);
                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                           nir_var_shader_out);
        }

        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
}

static void
v3d_nir_lower_fs_late(struct v3d_compile *c)
{
        if (c->fs_key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (c->fs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->fs_key->alpha_test) {
                NIR_PASS_V(c->s, nir_lower_alpha_test,
                           c->fs_key->alpha_test_func,
                           false);
        }

        if (c->key->ucp_enables)
                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);

        /* Note: FS input scalarizing must happen after
         * nir_lower_two_sided_color, which only handles a vec4 at a time.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
}

uint64_t *v3d_compile(const struct v3d_compiler *compiler,
                      struct v3d_key *key,
                      struct v3d_prog_data **out_prog_data,
                      nir_shader *s,
                      void (*debug_output)(const char *msg,
                                           void *debug_output_data),
                      void *debug_output_data,
                      int program_id, int variant_id,
                      uint32_t *final_assembly_size)
{
        struct v3d_prog_data *prog_data;
        struct v3d_compile *c = vir_compile_init(compiler, key, s,
                                                 debug_output, debug_output_data,
                                                 program_id, variant_id);

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                c->vs_key = (struct v3d_vs_key *)key;
                prog_data = rzalloc_size(NULL, sizeof(struct v3d_vs_prog_data));
                break;
        case MESA_SHADER_FRAGMENT:
                c->fs_key = (struct v3d_fs_key *)key;
                prog_data = rzalloc_size(NULL, sizeof(struct v3d_fs_prog_data));
                break;
        default:
                unreachable("unsupported shader stage");
        }

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                v3d_nir_lower_vs_early(c);
        } else {
                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
                v3d_nir_lower_fs_early(c);
        }

        v3d_lower_nir(c);

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                v3d_nir_lower_vs_late(c);
        } else {
                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
                v3d_nir_lower_fs_late(c);
        }

        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
        NIR_PASS_V(c->s, nir_lower_idiv);

        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, prog_data);

        *out_prog_data = prog_data;

        char *shaderdb;
        int ret = asprintf(&shaderdb,
                           "%s shader: %d inst, %d threads, %d loops, "
                           "%d uniforms, %d:%d spills:fills",
                           vir_get_stage_name(c),
                           c->qpu_inst_count,
                           c->threads,
                           c->loops,
                           c->num_uniforms,
                           c->spills,
                           c->fills);
        if (ret >= 0) {
                c->debug_output(shaderdb, c->debug_output_data);
                free(shaderdb);
        }

        return v3d_return_qpu_insts(c, final_assembly_size);
}

void
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
{
        if (qinst->dst.file == QFILE_TEMP)
                c->defs[qinst->dst.index] = NULL;

        assert(&qinst->link != c->cursor.link);

        list_del(&qinst->link);
        free(qinst);

        c->live_intervals_valid = false;
}

struct qreg
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
{
        /* XXX
        int pack = reg.pack;

        while (reg.file == QFILE_TEMP &&
               c->defs[reg.index] &&
               (c->defs[reg.index]->op == QOP_MOV ||
                c->defs[reg.index]->op == QOP_FMOV) &&
               !c->defs[reg.index]->dst.pack &&
               !c->defs[reg.index]->src[0].pack) {
                reg = c->defs[reg.index]->src[0];
        }

        reg.pack = pack;
        */
        return reg;
}

void
vir_compile_destroy(struct v3d_compile *c)
{
        /* Defuse the assert that we aren't removing the cursor's instruction.
         */
        c->cursor.link = NULL;

        vir_for_each_block(block, c) {
                while (!list_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
                        vir_remove_instruction(c, qinst);
                }
        }

        ralloc_free(c);
}

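/* Returns a QFILE_UNIF reference for (contents, data), reusing an existing
 * entry when one matches so that repeated requests for the same uniform
 * don't grow the uniform stream.
 */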
struct qreg
vir_uniform(struct v3d_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return vir_reg(QFILE_UNIF, i);
                }
        }

        uint32_t uniform = c->num_uniforms++;

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           uint32_t,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);
        }

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;

        return vir_reg(QFILE_UNIF, uniform);
}

static bool
vir_can_set_flags(struct v3d_compile *c, struct qinst *inst)
{
        if (c->devinfo->ver >= 40 && (v3d_qpu_reads_vpm(&inst->qpu) ||
                                      v3d_qpu_uses_sfu(&inst->qpu))) {
                return false;
        }

        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.add.op == V3D_QPU_A_NOP &&
             inst->qpu.alu.mul.op == V3D_QPU_M_NOP)) {
                return false;
        }

        return true;
}

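/* Sets the given flag-setting (pf) condition on the instruction that
 * produced src, if it is the last instruction emitted and is allowed to
 * set flags; otherwise emits a MOV of src to a NULL register purely to
 * update the flags.
 */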
void
vir_PF(struct v3d_compile *c, struct qreg src, enum v3d_qpu_pf pf)
{
        struct qinst *last_inst = NULL;

        if (!list_empty(&c->cur_block->instructions)) {
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

                /* Can't stuff the PF into the last inst if our cursor
                 * isn't pointing after it.
                 */
                struct vir_cursor after_inst = vir_after_inst(last_inst);
                if (c->cursor.mode != after_inst.mode ||
                    c->cursor.link != after_inst.link)
                        last_inst = NULL;
        }

        if (src.file != QFILE_TEMP ||
            !c->defs[src.index] ||
            last_inst != c->defs[src.index] ||
            !vir_can_set_flags(c, last_inst)) {
                /* XXX: Make the MOV be the appropriate type */
                last_inst = vir_MOV_dest(c, vir_reg(QFILE_NULL, 0), src);
        }

        vir_set_pf(last_inst, pf);
}

#define OPTPASS(func)                                                   \
        do {                                                            \
                bool stage_progress = func(c);                          \
                if (stage_progress) {                                   \
                        progress = true;                                \
                        if (print_opt_debug) {                          \
                                fprintf(stderr,                         \
                                        "VIR opt pass %2d: %s progress\n", \
                                        pass, #func);                   \
                        }                                               \
                        /*XXX vir_validate(c);*/                        \
                }                                                       \
        } while (0)

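/* Runs the VIR optimization passes repeatedly until none of them makes
 * further progress (i.e. to a fixed point).
 */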
void
vir_optimize(struct v3d_compile *c)
{
        bool print_opt_debug = false;
        int pass = 1;

        while (true) {
                bool progress = false;

                OPTPASS(vir_opt_copy_propagate);
                OPTPASS(vir_opt_dead_code);
                OPTPASS(vir_opt_small_immediates);

                if (!progress)
                        break;

                pass++;
        }
}

const char *
vir_get_stage_name(struct v3d_compile *c)
{
        if (c->vs_key && c->vs_key->is_coord)
                return "MESA_SHADER_COORD";
        else
                return gl_shader_stage_name(c->s->info.stage);
}