src/broadcom/compiler/nir_to_vir.c (mesa.git, commit de1ee07be7186c49f6a2132b3b4047d0d36753a6)
1 /*
2 * Copyright © 2016 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <inttypes.h>
25 #include "util/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "compiler/nir/nir.h"
31 #include "compiler/nir/nir_builder.h"
32 #include "common/v3d_device_info.h"
33 #include "v3d_compiler.h"
34
35 #define GENERAL_TMU_LOOKUP_PER_QUAD (0 << 7)
36 #define GENERAL_TMU_LOOKUP_PER_PIXEL (1 << 7)
37 #define GENERAL_TMU_READ_OP_PREFETCH (0 << 3)
38 #define GENERAL_TMU_READ_OP_CACHE_CLEAR (1 << 3)
40 #define GENERAL_TMU_READ_OP_CACHE_FLUSH (2 << 3)
40 #define GENERAL_TMU_READ_OP_CACHE_CLEAN (3 << 3)
41 #define GENERAL_TMU_READ_OP_CACHE_L1T_CLEAR (4 << 3)
42 #define GENERAL_TMU_READ_OP_CACHE_L1T_FLUSH_AGGREGATION (5 << 3)
43 #define GENERAL_TMU_READ_OP_ATOMIC_INC (8 << 3)
44 #define GENERAL_TMU_READ_OP_ATOMIC_DEC (9 << 3)
45 #define GENERAL_TMU_READ_OP_ATOMIC_NOT (10 << 3)
46 #define GENERAL_TMU_READ_OP_READ (15 << 3)
47 #define GENERAL_TMU_LOOKUP_TYPE_8BIT_I (0 << 0)
48 #define GENERAL_TMU_LOOKUP_TYPE_16BIT_I (1 << 0)
49 #define GENERAL_TMU_LOOKUP_TYPE_VEC2 (2 << 0)
50 #define GENERAL_TMU_LOOKUP_TYPE_VEC3 (3 << 0)
51 #define GENERAL_TMU_LOOKUP_TYPE_VEC4 (4 << 0)
52 #define GENERAL_TMU_LOOKUP_TYPE_8BIT_UI (5 << 0)
53 #define GENERAL_TMU_LOOKUP_TYPE_16BIT_UI (6 << 0)
54 #define GENERAL_TMU_LOOKUP_TYPE_32BIT_UI (7 << 0)
55
56 #define GENERAL_TMU_WRITE_OP_ATOMIC_ADD_WRAP (0 << 3)
57 #define GENERAL_TMU_WRITE_OP_ATOMIC_SUB_WRAP (1 << 3)
58 #define GENERAL_TMU_WRITE_OP_ATOMIC_XCHG (2 << 3)
59 #define GENERAL_TMU_WRITE_OP_ATOMIC_CMPXCHG (3 << 3)
60 #define GENERAL_TMU_WRITE_OP_ATOMIC_UMIN (4 << 3)
61 #define GENERAL_TMU_WRITE_OP_ATOMIC_UMAX (5 << 3)
62 #define GENERAL_TMU_WRITE_OP_ATOMIC_SMIN (6 << 3)
63 #define GENERAL_TMU_WRITE_OP_ATOMIC_SMAX (7 << 3)
64 #define GENERAL_TMU_WRITE_OP_ATOMIC_AND (8 << 3)
65 #define GENERAL_TMU_WRITE_OP_ATOMIC_OR (9 << 3)
66 #define GENERAL_TMU_WRITE_OP_ATOMIC_XOR (10 << 3)
67 #define GENERAL_TMU_WRITE_OP_WRITE (15 << 3)
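/* These GENERAL_TMU_* fields are OR'd into the low byte of the TMU config
 * word built in ntq_emit_tmu_general(): bit 7 selects per-quad vs per-pixel
 * lookup, bits 6:3 hold the read/write op, and bits 2:0 the lookup data
 * type, as the shift amounts above show.
 */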
68
69 #define V3D_TSY_SET_QUORUM 0
70 #define V3D_TSY_INC_WAITERS 1
71 #define V3D_TSY_DEC_WAITERS 2
72 #define V3D_TSY_INC_QUORUM 3
73 #define V3D_TSY_DEC_QUORUM 4
74 #define V3D_TSY_FREE_ALL 5
75 #define V3D_TSY_RELEASE 6
76 #define V3D_TSY_ACQUIRE 7
77 #define V3D_TSY_WAIT 8
78 #define V3D_TSY_WAIT_INC 9
79 #define V3D_TSY_WAIT_CHECK 10
80 #define V3D_TSY_WAIT_INC_CHECK 11
81 #define V3D_TSY_WAIT_CV 12
82 #define V3D_TSY_INC_SEMAPHORE 13
83 #define V3D_TSY_DEC_SEMAPHORE 14
84 #define V3D_TSY_SET_QUORUM_FREE_ALL 15
85
86 static void
87 ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
88
89 static void
90 resize_qreg_array(struct v3d_compile *c,
91 struct qreg **regs,
92 uint32_t *size,
93 uint32_t decl_size)
94 {
95 if (*size >= decl_size)
96 return;
97
98 uint32_t old_size = *size;
99 *size = MAX2(*size * 2, decl_size);
100 *regs = reralloc(c, *regs, struct qreg, *size);
101 if (!*regs) {
102 fprintf(stderr, "Malloc failure\n");
103 abort();
104 }
105
106 for (uint32_t i = old_size; i < *size; i++)
107 (*regs)[i] = c->undef;
108 }
109
110 void
111 vir_emit_thrsw(struct v3d_compile *c)
112 {
113 if (c->threads == 1)
114 return;
115
116 /* Always thread switch after each texture operation for now.
117 *
118 * We could do better by batching a bunch of texture fetches up and
119 * then doing one thread switch and collecting all their results
120 * afterward.
121 */
122 c->last_thrsw = vir_NOP(c);
123 c->last_thrsw->qpu.sig.thrsw = true;
124 c->last_thrsw_at_top_level = !c->in_control_flow;
125 }
126
127 static uint32_t
128 v3d_general_tmu_op(nir_intrinsic_instr *instr)
129 {
130 switch (instr->intrinsic) {
131 case nir_intrinsic_load_ssbo:
132 case nir_intrinsic_load_ubo:
133 case nir_intrinsic_load_uniform:
134 case nir_intrinsic_load_shared:
135 return GENERAL_TMU_READ_OP_READ;
136 case nir_intrinsic_store_ssbo:
137 case nir_intrinsic_store_shared:
138 return GENERAL_TMU_WRITE_OP_WRITE;
139 case nir_intrinsic_ssbo_atomic_add:
140 case nir_intrinsic_shared_atomic_add:
141 return GENERAL_TMU_WRITE_OP_ATOMIC_ADD_WRAP;
142 case nir_intrinsic_ssbo_atomic_imin:
143 case nir_intrinsic_shared_atomic_imin:
144 return GENERAL_TMU_WRITE_OP_ATOMIC_SMIN;
145 case nir_intrinsic_ssbo_atomic_umin:
146 case nir_intrinsic_shared_atomic_umin:
147 return GENERAL_TMU_WRITE_OP_ATOMIC_UMIN;
148 case nir_intrinsic_ssbo_atomic_imax:
149 case nir_intrinsic_shared_atomic_imax:
150 return GENERAL_TMU_WRITE_OP_ATOMIC_SMAX;
151 case nir_intrinsic_ssbo_atomic_umax:
152 case nir_intrinsic_shared_atomic_umax:
153 return GENERAL_TMU_WRITE_OP_ATOMIC_UMAX;
154 case nir_intrinsic_ssbo_atomic_and:
155 case nir_intrinsic_shared_atomic_and:
156 return GENERAL_TMU_WRITE_OP_ATOMIC_AND;
157 case nir_intrinsic_ssbo_atomic_or:
158 case nir_intrinsic_shared_atomic_or:
159 return GENERAL_TMU_WRITE_OP_ATOMIC_OR;
160 case nir_intrinsic_ssbo_atomic_xor:
161 case nir_intrinsic_shared_atomic_xor:
162 return GENERAL_TMU_WRITE_OP_ATOMIC_XOR;
163 case nir_intrinsic_ssbo_atomic_exchange:
164 case nir_intrinsic_shared_atomic_exchange:
165 return GENERAL_TMU_WRITE_OP_ATOMIC_XCHG;
166 case nir_intrinsic_ssbo_atomic_comp_swap:
167 case nir_intrinsic_shared_atomic_comp_swap:
168 return GENERAL_TMU_WRITE_OP_ATOMIC_CMPXCHG;
169 default:
170 unreachable("unknown intrinsic op");
171 }
172 }
173
174 /**
175 * Implements indirect uniform loads and SSBO accesses through the TMU general
176 * memory access interface.
177 */
178 static void
179 ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
180 bool is_shared)
181 {
182 /* XXX perf: We should turn add/sub of 1 to inc/dec. Perhaps NIR
183 * wants to have support for inc/dec?
184 */
185
186 uint32_t tmu_op = v3d_general_tmu_op(instr);
187 bool is_store = (instr->intrinsic == nir_intrinsic_store_ssbo ||
188 instr->intrinsic == nir_intrinsic_store_shared);
189 bool has_index = !is_shared;
190
191 int offset_src;
192 int tmu_writes = 1; /* address */
193 if (instr->intrinsic == nir_intrinsic_load_uniform) {
194 offset_src = 0;
195 } else if (instr->intrinsic == nir_intrinsic_load_ssbo ||
196 instr->intrinsic == nir_intrinsic_load_ubo ||
197 instr->intrinsic == nir_intrinsic_load_shared) {
198 offset_src = 0 + has_index;
199 } else if (is_store) {
200 offset_src = 1 + has_index;
201 for (int i = 0; i < instr->num_components; i++) {
202 vir_MOV_dest(c,
203 vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
204 ntq_get_src(c, instr->src[0], i));
205 tmu_writes++;
206 }
207 } else {
208 offset_src = 0 + has_index;
209 vir_MOV_dest(c,
210 vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
211 ntq_get_src(c, instr->src[1 + has_index], 0));
212 tmu_writes++;
213 if (tmu_op == GENERAL_TMU_WRITE_OP_ATOMIC_CMPXCHG) {
214 vir_MOV_dest(c,
215 vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
216 ntq_get_src(c, instr->src[2 + has_index],
217 0));
218 tmu_writes++;
219 }
220 }
221
222 bool dynamic_src = !nir_src_is_const(instr->src[offset_src]);
223 uint32_t const_offset = 0;
224 if (!dynamic_src)
225 const_offset = nir_src_as_uint(instr->src[offset_src]);
226
227 /* Make sure we won't exceed the 16-entry TMU fifo if each thread is
228 * storing at the same time.
229 */
230 while (tmu_writes > 16 / c->threads)
231 c->threads /= 2;
232
233 struct qreg offset;
234 if (instr->intrinsic == nir_intrinsic_load_uniform) {
235 const_offset += nir_intrinsic_base(instr);
236 offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
237 v3d_unit_data_create(0, const_offset));
238 const_offset = 0;
239 } else if (instr->intrinsic == nir_intrinsic_load_ubo) {
240 uint32_t index = nir_src_as_uint(instr->src[0]) + 1;
241 /* Note that QUNIFORM_UBO_ADDR takes a UBO index shifted up by
242 * 1 (0 is gallium's constant buffer 0).
243 */
244 offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
245 v3d_unit_data_create(index, const_offset));
246 const_offset = 0;
247 } else if (is_shared) {
248 const_offset += nir_intrinsic_base(instr);
249
250 /* Shared variables have no buffer index, and all start from a
251 * common base that we set up at the start of dispatch
252 */
253 offset = c->cs_shared_offset;
254 } else {
255 offset = vir_uniform(c, QUNIFORM_SSBO_OFFSET,
256 nir_src_as_uint(instr->src[is_store ?
257 1 : 0]));
258 }
259
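/* Build the per-access config word: only the low byte varies (the op,
 * lookup mode and data type from the defines above); the upper 24 bits stay
 * all-ones. A plain per-pixel 32-bit read or write works out to ~0, in
 * which case the unconfigured TMUA register is used below instead of TMUAU
 * and no config uniform is emitted.
 */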
260 uint32_t config = (0xffffff00 |
261 tmu_op |
262 GENERAL_TMU_LOOKUP_PER_PIXEL);
263 if (instr->num_components == 1) {
264 config |= GENERAL_TMU_LOOKUP_TYPE_32BIT_UI;
265 } else {
266 config |= (GENERAL_TMU_LOOKUP_TYPE_VEC2 +
267 instr->num_components - 2);
268 }
269
270 if (vir_in_nonuniform_control_flow(c)) {
271 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
272 V3D_QPU_PF_PUSHZ);
273 }
274
275 struct qreg tmua;
276 if (config == ~0)
277 tmua = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA);
278 else
279 tmua = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUAU);
280
281 struct qinst *tmu;
282 if (dynamic_src) {
283 if (const_offset != 0) {
284 offset = vir_ADD(c, offset,
285 vir_uniform_ui(c, const_offset));
286 }
287 tmu = vir_ADD_dest(c, tmua, offset,
288 ntq_get_src(c, instr->src[offset_src], 0));
289 } else {
290 if (const_offset != 0) {
291 tmu = vir_ADD_dest(c, tmua, offset,
292 vir_uniform_ui(c, const_offset));
293 } else {
294 tmu = vir_MOV_dest(c, tmua, offset);
295 }
296 }
297
298 if (config != ~0) {
299 tmu->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
300 config);
301 }
302
303 if (vir_in_nonuniform_control_flow(c))
304 vir_set_cond(tmu, V3D_QPU_COND_IFA);
305
306 vir_emit_thrsw(c);
307
308 /* Read the result, or wait for the TMU op to complete. */
309 for (int i = 0; i < nir_intrinsic_dest_components(instr); i++)
310 ntq_store_dest(c, &instr->dest, i, vir_MOV(c, vir_LDTMU(c)));
311
312 if (nir_intrinsic_dest_components(instr) == 0)
313 vir_TMUWT(c);
314 }
315
316 static struct qreg *
317 ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
318 {
319 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
320 def->num_components);
321 _mesa_hash_table_insert(c->def_ht, def, qregs);
322 return qregs;
323 }
324
325 /**
326 * This function is responsible for getting VIR results into the associated
327 * storage for a NIR instruction.
328 *
329 * If it's a NIR SSA def, then we just set the associated hash table entry to
330 * the new result.
331 *
332 * If it's a NIR reg, then we need to update the existing qreg assigned to the
333 * NIR destination with the incoming value. To do that without introducing
334 * new MOVs, we require that the incoming qreg be SSA-defined by the previous
335 * VIR instruction in the block and rewritable by
336 * this function. That lets us sneak ahead and insert the SF flag beforehand
337 * (knowing that the previous instruction doesn't depend on flags) and rewrite
338 * its destination to be the NIR reg's destination
339 */
340 void
341 ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
342 struct qreg result)
343 {
344 struct qinst *last_inst = NULL;
345 if (!list_empty(&c->cur_block->instructions))
346 last_inst = (struct qinst *)c->cur_block->instructions.prev;
347
348 assert((result.file == QFILE_TEMP &&
349 last_inst && last_inst == c->defs[result.index]));
350
351 if (dest->is_ssa) {
352 assert(chan < dest->ssa.num_components);
353
354 struct qreg *qregs;
355 struct hash_entry *entry =
356 _mesa_hash_table_search(c->def_ht, &dest->ssa);
357
358 if (entry)
359 qregs = entry->data;
360 else
361 qregs = ntq_init_ssa_def(c, &dest->ssa);
362
363 qregs[chan] = result;
364 } else {
365 nir_register *reg = dest->reg.reg;
366 assert(dest->reg.base_offset == 0);
367 assert(reg->num_array_elems == 0);
368 struct hash_entry *entry =
369 _mesa_hash_table_search(c->def_ht, reg);
370 struct qreg *qregs = entry->data;
371
372 /* If the previous instruction can't be predicated for the store into
373 * the nir_register (it's an ldunif), emit a MOV that can be.
374 */
375 if (vir_in_nonuniform_control_flow(c) &&
376 c->defs[last_inst->dst.index]->qpu.sig.ldunif) {
377 result = vir_MOV(c, result);
378 last_inst = c->defs[result.index];
379 }
380
381 /* We know they're both temps, so just rewrite index. */
382 c->defs[last_inst->dst.index] = NULL;
383 last_inst->dst.index = qregs[chan].index;
384
385 /* If we're in control flow, then make this update of the reg
386 * conditional on the execution mask.
387 */
388 if (vir_in_nonuniform_control_flow(c)) {
390
391 /* Set the flags to the current exec mask.
392 */
393 c->cursor = vir_before_inst(last_inst);
394 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
395 V3D_QPU_PF_PUSHZ);
396 c->cursor = vir_after_inst(last_inst);
397
398 vir_set_cond(last_inst, V3D_QPU_COND_IFA);
399 }
400 }
401 }
402
403 struct qreg
404 ntq_get_src(struct v3d_compile *c, nir_src src, int i)
405 {
406 struct hash_entry *entry;
407 if (src.is_ssa) {
408 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
409 assert(i < src.ssa->num_components);
410 } else {
411 nir_register *reg = src.reg.reg;
412 entry = _mesa_hash_table_search(c->def_ht, reg);
413 assert(reg->num_array_elems == 0);
414 assert(src.reg.base_offset == 0);
415 assert(i < reg->num_components);
416 }
417
418 struct qreg *qregs = entry->data;
419 return qregs[i];
420 }
421
422 static struct qreg
423 ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
424 unsigned src)
425 {
426 assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
427 unsigned chan = ffs(instr->dest.write_mask) - 1;
428 struct qreg r = ntq_get_src(c, instr->src[src].src,
429 instr->src[src].swizzle[chan]);
430
431 assert(!instr->src[src].abs);
432 assert(!instr->src[src].negate);
433
434 return r;
435 };
436
437 static struct qreg
438 ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
439 {
440 return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
441 }
442
443 static void
444 ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
445 {
446 unsigned unit = instr->texture_index;
447 int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
448 int dest_size = nir_tex_instr_dest_size(instr);
449
450 struct qreg lod = c->undef;
451 if (lod_index != -1)
452 lod = ntq_get_src(c, instr->src[lod_index].src, 0);
453
454 for (int i = 0; i < dest_size; i++) {
455 assert(i < 3);
456 enum quniform_contents contents;
457
458 if (instr->is_array && i == dest_size - 1)
459 contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
460 else
461 contents = QUNIFORM_TEXTURE_WIDTH + i;
462
463 struct qreg size = vir_uniform(c, contents, unit);
464
465 switch (instr->sampler_dim) {
466 case GLSL_SAMPLER_DIM_1D:
467 case GLSL_SAMPLER_DIM_2D:
468 case GLSL_SAMPLER_DIM_MS:
469 case GLSL_SAMPLER_DIM_3D:
470 case GLSL_SAMPLER_DIM_CUBE:
471 /* Don't minify the array size. */
472 if (!(instr->is_array && i == dest_size - 1)) {
473 size = ntq_minify(c, size, lod);
474 }
475 break;
476
477 case GLSL_SAMPLER_DIM_RECT:
478 /* There's no LOD field for rects */
479 break;
480
481 default:
482 unreachable("Bad sampler type");
483 }
484
485 ntq_store_dest(c, &instr->dest, i, size);
486 }
487 }
488
489 static void
490 ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
491 {
492 unsigned unit = instr->texture_index;
493
494 /* Since each texture sampling op requires uploading uniforms to
495 * reference the texture, there's no HW support for texture size and
496 * you just upload uniforms containing the size.
497 */
498 switch (instr->op) {
499 case nir_texop_query_levels:
500 ntq_store_dest(c, &instr->dest, 0,
501 vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
502 return;
503 case nir_texop_txs:
504 ntq_emit_txs(c, instr);
505 return;
506 default:
507 break;
508 }
509
510 if (c->devinfo->ver >= 40)
511 v3d40_vir_emit_tex(c, instr);
512 else
513 v3d33_vir_emit_tex(c, instr);
514 }
515
516 static struct qreg
517 ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
518 {
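/* Scale the angle to units of pi (biasing by 0.5 for cosine), split off the
 * integer period count, and evaluate SIN on just the remainder in
 * [-0.5, 0.5]. The result's sign bit is then flipped via XOR whenever the
 * period count is odd: the SHL by -1 acts as a shift by 31, moving that
 * parity bit up into the float sign position.
 */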
519 struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
520 if (is_cos)
521 input = vir_FADD(c, input, vir_uniform_f(c, 0.5));
522
523 struct qreg periods = vir_FROUND(c, input);
524 struct qreg sin_output = vir_SIN(c, vir_FSUB(c, input, periods));
525 return vir_XOR(c, sin_output, vir_SHL(c,
526 vir_FTOIN(c, periods),
527 vir_uniform_ui(c, -1)));
528 }
529
530 static struct qreg
531 ntq_fsign(struct v3d_compile *c, struct qreg src)
532 {
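/* Build sign(x) out of conditional moves: start from 0.0, overwrite with
 * 1.0 for channels where x is non-zero (PUSHZ + IFNA), then overwrite with
 * -1.0 for channels where x is negative (PUSHN + IFA).
 */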
533 struct qreg t = vir_get_temp(c);
534
535 vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
536 vir_set_pf(vir_FMOV_dest(c, vir_nop_reg(), src), V3D_QPU_PF_PUSHZ);
537 vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
538 vir_set_pf(vir_FMOV_dest(c, vir_nop_reg(), src), V3D_QPU_PF_PUSHN);
539 vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
540 return vir_MOV(c, t);
541 }
542
543 static void
544 emit_fragcoord_input(struct v3d_compile *c, int attr)
545 {
546 c->inputs[attr * 4 + 0] = vir_FXCD(c);
547 c->inputs[attr * 4 + 1] = vir_FYCD(c);
548 c->inputs[attr * 4 + 2] = c->payload_z;
549 c->inputs[attr * 4 + 3] = vir_RECIP(c, c->payload_w);
550 }
551
552 static struct qreg
553 emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
554 uint8_t swizzle, int array_index)
555 {
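/* The ldvary signal produces two values: the per-fragment varying term (the
 * instruction's result on 4.1+, r3 on 3.3) and a second term that lands in
 * r5. The interpolation modes below combine them as vary * W + r5 (smooth),
 * vary + r5 (noperspective), or just r5 (flat).
 */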
556 struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
557 struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);
558
559 struct qreg vary;
560 if (c->devinfo->ver >= 41) {
561 struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
562 c->undef, c->undef);
563 ldvary->qpu.sig.ldvary = true;
564 vary = vir_emit_def(c, ldvary);
565 } else {
566 vir_NOP(c)->qpu.sig.ldvary = true;
567 vary = r3;
568 }
569
570 /* For gl_PointCoord input or distance along a line, we'll be called
571 * with no nir_variable, and we don't count toward VPM size so we
572 * don't track an input slot.
573 */
574 if (!var) {
575 return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
576 }
577
578 int i = c->num_inputs++;
579 c->input_slots[i] =
580 v3d_slot_from_slot_and_component(var->data.location +
581 array_index, swizzle);
582
583 switch (var->data.interpolation) {
584 case INTERP_MODE_NONE:
585 /* If a gl_FrontColor or gl_BackColor input has no interp
586 * qualifier, then if we're using glShadeModel(GL_FLAT) it
587 * needs to be flat shaded.
588 */
589 switch (var->data.location + array_index) {
590 case VARYING_SLOT_COL0:
591 case VARYING_SLOT_COL1:
592 case VARYING_SLOT_BFC0:
593 case VARYING_SLOT_BFC1:
594 if (c->fs_key->shade_model_flat) {
595 BITSET_SET(c->flat_shade_flags, i);
596 vir_MOV_dest(c, c->undef, vary);
597 return vir_MOV(c, r5);
598 } else {
599 return vir_FADD(c, vir_FMUL(c, vary,
600 c->payload_w), r5);
601 }
602 default:
603 break;
604 }
605 /* FALLTHROUGH */
606 case INTERP_MODE_SMOOTH:
607 if (var->data.centroid) {
608 BITSET_SET(c->centroid_flags, i);
609 return vir_FADD(c, vir_FMUL(c, vary,
610 c->payload_w_centroid), r5);
611 } else {
612 return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
613 }
614 case INTERP_MODE_NOPERSPECTIVE:
615 BITSET_SET(c->noperspective_flags, i);
616 return vir_FADD(c, vir_MOV(c, vary), r5);
617 case INTERP_MODE_FLAT:
618 BITSET_SET(c->flat_shade_flags, i);
619 vir_MOV_dest(c, c->undef, vary);
620 return vir_MOV(c, r5);
621 default:
622 unreachable("Bad interp mode");
623 }
624 }
625
626 static void
627 emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var,
628 int array_index)
629 {
630 for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
631 int chan = var->data.location_frac + i;
632 c->inputs[attr * 4 + chan] =
633 emit_fragment_varying(c, var, chan, array_index);
634 }
635 }
636
637 static void
638 add_output(struct v3d_compile *c,
639 uint32_t decl_offset,
640 uint8_t slot,
641 uint8_t swizzle)
642 {
643 uint32_t old_array_size = c->outputs_array_size;
644 resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
645 decl_offset + 1);
646
647 if (old_array_size != c->outputs_array_size) {
648 c->output_slots = reralloc(c,
649 c->output_slots,
650 struct v3d_varying_slot,
651 c->outputs_array_size);
652 }
653
654 c->output_slots[decl_offset] =
655 v3d_slot_from_slot_and_component(slot, swizzle);
656 }
657
658 /**
659 * If compare_instr is a valid comparison instruction, emits the
660 * comparison, sets *out_cond to the condition code to predicate on, and
661 * returns true. Returns false if the comparison isn't supported.
662 */
663 static bool
664 ntq_emit_comparison(struct v3d_compile *c,
665 nir_alu_instr *compare_instr,
666 enum v3d_qpu_cond *out_cond)
667 {
668 struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
669 struct qreg src1;
670 if (nir_op_infos[compare_instr->op].num_inputs > 1)
671 src1 = ntq_get_alu_src(c, compare_instr, 1);
672 bool cond_invert = false;
673 struct qreg nop = vir_nop_reg();
674
675 switch (compare_instr->op) {
676 case nir_op_feq32:
677 case nir_op_seq:
678 vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
679 break;
680 case nir_op_ieq32:
681 vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
682 break;
683
684 case nir_op_fne32:
685 case nir_op_sne:
686 vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
687 cond_invert = true;
688 break;
689 case nir_op_ine32:
690 vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
691 cond_invert = true;
692 break;
693
694 case nir_op_fge32:
695 case nir_op_sge:
696 vir_set_pf(vir_FCMP_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
697 break;
698 case nir_op_ige32:
699 vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
700 cond_invert = true;
701 break;
702 case nir_op_uge32:
703 vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
704 cond_invert = true;
705 break;
706
707 case nir_op_slt:
708 case nir_op_flt32:
709 vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHN);
710 break;
711 case nir_op_ilt32:
712 vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
713 break;
714 case nir_op_ult32:
715 vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
716 break;
717
718 case nir_op_i2b32:
719 vir_set_pf(vir_MOV_dest(c, nop, src0), V3D_QPU_PF_PUSHZ);
720 cond_invert = true;
721 break;
722
723 case nir_op_f2b32:
724 vir_set_pf(vir_FMOV_dest(c, nop, src0), V3D_QPU_PF_PUSHZ);
725 cond_invert = true;
726 break;
727
728 default:
729 return false;
730 }
731
732 *out_cond = cond_invert ? V3D_QPU_COND_IFNA : V3D_QPU_COND_IFA;
733
734 return true;
735 }
736
737 /* Finds an ALU instruction that generates our src value that could
738 * (potentially) be greedily emitted in the consuming instruction.
739 */
740 static struct nir_alu_instr *
741 ntq_get_alu_parent(nir_src src)
742 {
743 if (!src.is_ssa || src.ssa->parent_instr->type != nir_instr_type_alu)
744 return NULL;
745 nir_alu_instr *instr = nir_instr_as_alu(src.ssa->parent_instr);
746 if (!instr)
747 return NULL;
748
749 /* If the ALU instr's srcs are non-SSA, then we would have to avoid
750 * moving emission of the ALU instr down past another write of the
751 * src.
752 */
753 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
754 if (!instr->src[i].src.is_ssa)
755 return NULL;
756 }
757
758 return instr;
759 }
760
761 /* Turns a NIR bool into a condition code to predicate on. */
762 static enum v3d_qpu_cond
763 ntq_emit_bool_to_cond(struct v3d_compile *c, nir_src src)
764 {
765 nir_alu_instr *compare = ntq_get_alu_parent(src);
766 if (!compare)
767 goto out;
768
769 enum v3d_qpu_cond cond;
770 if (ntq_emit_comparison(c, compare, &cond))
771 return cond;
772
773 out:
774 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), ntq_get_src(c, src, 0)),
775 V3D_QPU_PF_PUSHZ);
776 return V3D_QPU_COND_IFNA;
777 }
778
779 static void
780 ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
781 {
782 /* This should always be lowered to ALU operations for V3D. */
783 assert(!instr->dest.saturate);
784
785 /* Vectors are special in that they have non-scalarized writemasks,
786 * and just take the first swizzle channel for each argument in order
787 * into each writemask channel.
788 */
789 if (instr->op == nir_op_vec2 ||
790 instr->op == nir_op_vec3 ||
791 instr->op == nir_op_vec4) {
792 struct qreg srcs[4];
793 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
794 srcs[i] = ntq_get_src(c, instr->src[i].src,
795 instr->src[i].swizzle[0]);
796 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
797 ntq_store_dest(c, &instr->dest.dest, i,
798 vir_MOV(c, srcs[i]));
799 return;
800 }
801
802 /* General case: We can just grab the one used channel per src. */
803 struct qreg src[nir_op_infos[instr->op].num_inputs];
804 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
805 src[i] = ntq_get_alu_src(c, instr, i);
806 }
807
808 struct qreg result;
809
810 switch (instr->op) {
811 case nir_op_fmov:
812 case nir_op_imov:
813 result = vir_MOV(c, src[0]);
814 break;
815
816 case nir_op_fneg:
817 result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
818 break;
819 case nir_op_ineg:
820 result = vir_NEG(c, src[0]);
821 break;
822
823 case nir_op_fmul:
824 result = vir_FMUL(c, src[0], src[1]);
825 break;
826 case nir_op_fadd:
827 result = vir_FADD(c, src[0], src[1]);
828 break;
829 case nir_op_fsub:
830 result = vir_FSUB(c, src[0], src[1]);
831 break;
832 case nir_op_fmin:
833 result = vir_FMIN(c, src[0], src[1]);
834 break;
835 case nir_op_fmax:
836 result = vir_FMAX(c, src[0], src[1]);
837 break;
838
839 case nir_op_f2i32: {
840 nir_alu_instr *src0_alu = ntq_get_alu_parent(instr->src[0].src);
841 if (src0_alu && src0_alu->op == nir_op_fround_even) {
842 result = vir_FTOIN(c, ntq_get_alu_src(c, src0_alu, 0));
843 } else {
844 result = vir_FTOIZ(c, src[0]);
845 }
846 break;
847 }
848
849 case nir_op_f2u32:
850 result = vir_FTOUZ(c, src[0]);
851 break;
852 case nir_op_i2f32:
853 result = vir_ITOF(c, src[0]);
854 break;
855 case nir_op_u2f32:
856 result = vir_UTOF(c, src[0]);
857 break;
858 case nir_op_b2f32:
859 result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
860 break;
861 case nir_op_b2i32:
862 result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
863 break;
864
865 case nir_op_iadd:
866 result = vir_ADD(c, src[0], src[1]);
867 break;
868 case nir_op_ushr:
869 result = vir_SHR(c, src[0], src[1]);
870 break;
871 case nir_op_isub:
872 result = vir_SUB(c, src[0], src[1]);
873 break;
874 case nir_op_ishr:
875 result = vir_ASR(c, src[0], src[1]);
876 break;
877 case nir_op_ishl:
878 result = vir_SHL(c, src[0], src[1]);
879 break;
880 case nir_op_imin:
881 result = vir_MIN(c, src[0], src[1]);
882 break;
883 case nir_op_umin:
884 result = vir_UMIN(c, src[0], src[1]);
885 break;
886 case nir_op_imax:
887 result = vir_MAX(c, src[0], src[1]);
888 break;
889 case nir_op_umax:
890 result = vir_UMAX(c, src[0], src[1]);
891 break;
892 case nir_op_iand:
893 result = vir_AND(c, src[0], src[1]);
894 break;
895 case nir_op_ior:
896 result = vir_OR(c, src[0], src[1]);
897 break;
898 case nir_op_ixor:
899 result = vir_XOR(c, src[0], src[1]);
900 break;
901 case nir_op_inot:
902 result = vir_NOT(c, src[0]);
903 break;
904
905 case nir_op_ufind_msb:
906 result = vir_SUB(c, vir_uniform_ui(c, 31), vir_CLZ(c, src[0]));
907 break;
908
909 case nir_op_imul:
910 result = vir_UMUL(c, src[0], src[1]);
911 break;
912
913 case nir_op_seq:
914 case nir_op_sne:
915 case nir_op_sge:
916 case nir_op_slt: {
917 enum v3d_qpu_cond cond;
918 MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
919 assert(ok);
920 result = vir_MOV(c, vir_SEL(c, cond,
921 vir_uniform_f(c, 1.0),
922 vir_uniform_f(c, 0.0)));
923 break;
924 }
925
926 case nir_op_i2b32:
927 case nir_op_f2b32:
928 case nir_op_feq32:
929 case nir_op_fne32:
930 case nir_op_fge32:
931 case nir_op_flt32:
932 case nir_op_ieq32:
933 case nir_op_ine32:
934 case nir_op_ige32:
935 case nir_op_uge32:
936 case nir_op_ilt32:
937 case nir_op_ult32: {
938 enum v3d_qpu_cond cond;
939 MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
940 assert(ok);
941 result = vir_MOV(c, vir_SEL(c, cond,
942 vir_uniform_ui(c, ~0),
943 vir_uniform_ui(c, 0)));
944 break;
945 }
946
947 case nir_op_b32csel:
948 result = vir_MOV(c,
949 vir_SEL(c,
950 ntq_emit_bool_to_cond(c, instr->src[0].src),
951 src[1], src[2]));
952 break;
953
954 case nir_op_fcsel:
955 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), src[0]),
956 V3D_QPU_PF_PUSHZ);
957 result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
958 src[1], src[2]));
959 break;
960
961 case nir_op_frcp:
962 result = vir_RECIP(c, src[0]);
963 break;
964 case nir_op_frsq:
965 result = vir_RSQRT(c, src[0]);
966 break;
967 case nir_op_fexp2:
968 result = vir_EXP(c, src[0]);
969 break;
970 case nir_op_flog2:
971 result = vir_LOG(c, src[0]);
972 break;
973
974 case nir_op_fceil:
975 result = vir_FCEIL(c, src[0]);
976 break;
977 case nir_op_ffloor:
978 result = vir_FFLOOR(c, src[0]);
979 break;
980 case nir_op_fround_even:
981 result = vir_FROUND(c, src[0]);
982 break;
983 case nir_op_ftrunc:
984 result = vir_FTRUNC(c, src[0]);
985 break;
986
987 case nir_op_fsin:
988 result = ntq_fsincos(c, src[0], false);
989 break;
990 case nir_op_fcos:
991 result = ntq_fsincos(c, src[0], true);
992 break;
993
994 case nir_op_fsign:
995 result = ntq_fsign(c, src[0]);
996 break;
997
998 case nir_op_fabs: {
999 result = vir_FMOV(c, src[0]);
1000 vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
1001 break;
1002 }
1003
1004 case nir_op_iabs:
1005 result = vir_MAX(c, src[0], vir_NEG(c, src[0]));
1006 break;
1007
1008 case nir_op_fddx:
1009 case nir_op_fddx_coarse:
1010 case nir_op_fddx_fine:
1011 result = vir_FDX(c, src[0]);
1012 break;
1013
1014 case nir_op_fddy:
1015 case nir_op_fddy_coarse:
1016 case nir_op_fddy_fine:
1017 result = vir_FDY(c, src[0]);
1018 break;
1019
1020 case nir_op_uadd_carry:
1021 vir_set_pf(vir_ADD_dest(c, vir_nop_reg(), src[0], src[1]),
1022 V3D_QPU_PF_PUSHC);
1023 result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
1024 vir_uniform_ui(c, ~0),
1025 vir_uniform_ui(c, 0)));
1026 break;
1027
1028 case nir_op_pack_half_2x16_split:
1029 result = vir_VFPACK(c, src[0], src[1]);
1030 break;
1031
1032 case nir_op_unpack_half_2x16_split_x:
1033 result = vir_FMOV(c, src[0]);
1034 vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_L);
1035 break;
1036
1037 case nir_op_unpack_half_2x16_split_y:
1038 result = vir_FMOV(c, src[0]);
1039 vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_H);
1040 break;
1041
1042 default:
1043 fprintf(stderr, "unknown NIR ALU inst: ");
1044 nir_print_instr(&instr->instr, stderr);
1045 fprintf(stderr, "\n");
1046 abort();
1047 }
1048
1049 /* We have a scalar result, so the instruction should only have a
1050 * single channel written to.
1051 */
1052 assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
1053 ntq_store_dest(c, &instr->dest.dest,
1054 ffs(instr->dest.write_mask) - 1, result);
1055 }
1056
1057 /* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
1058 * specifier. They come from a register that's preloaded with 0xffffffff
1059 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
1060 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
1061 */
1062 #define TLB_TYPE_F16_COLOR (3 << 6)
1063 #define TLB_TYPE_I32_COLOR (1 << 6)
1064 #define TLB_TYPE_F32_COLOR (0 << 6)
1065 #define TLB_RENDER_TARGET_SHIFT 3 /* Reversed! 7 = RT 0, 0 = RT 7. */
1066 #define TLB_SAMPLE_MODE_PER_SAMPLE (0 << 2)
1067 #define TLB_SAMPLE_MODE_PER_PIXEL (1 << 2)
1068 #define TLB_F16_SWAP_HI_LO (1 << 1)
1069 #define TLB_VEC_SIZE_4_F16 (1 << 0)
1070 #define TLB_VEC_SIZE_2_F16 (0 << 0)
1071 #define TLB_VEC_SIZE_MINUS_1_SHIFT 0
1072
1073 /* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
1074 * flag is set.
1075 */
1076 #define TLB_TYPE_DEPTH ((2 << 6) | (0 << 4))
1077 #define TLB_DEPTH_TYPE_INVARIANT (0 << 2) /* Unmodified sideband input used */
1078 #define TLB_DEPTH_TYPE_PER_PIXEL (1 << 2) /* QPU result used */
1079 #define TLB_V42_DEPTH_TYPE_INVARIANT (0 << 3) /* Unmodified sideband input used */
1080 #define TLB_V42_DEPTH_TYPE_PER_PIXEL (1 << 3) /* QPU result used */
1081
1082 /* Stencil is a single 32-bit write. */
1083 #define TLB_TYPE_STENCIL_ALPHA ((2 << 6) | (1 << 4))
1084
1085 static void
1086 emit_frag_end(struct v3d_compile *c)
1087 {
1088 /* XXX
1089 if (c->output_sample_mask_index != -1) {
1090 vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1091 }
1092 */
1093
1094 bool has_any_tlb_color_write = false;
1095 for (int rt = 0; rt < V3D_MAX_DRAW_BUFFERS; rt++) {
1096 if (c->fs_key->cbufs & (1 << rt) && c->output_color_var[rt])
1097 has_any_tlb_color_write = true;
1098 }
1099
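/* Alpha-to-coverage: turn the output alpha into a sample coverage mask
 * (FTOC) and AND it into the current multisample flags with SETMSF.
 */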
1100 if (c->fs_key->sample_alpha_to_coverage && c->output_color_var[0]) {
1101 struct nir_variable *var = c->output_color_var[0];
1102 struct qreg *color = &c->outputs[var->data.driver_location * 4];
1103
1104 vir_SETMSF_dest(c, vir_nop_reg(),
1105 vir_AND(c,
1106 vir_MSF(c),
1107 vir_FTOC(c, color[3])));
1108 }
1109
1110 struct qreg tlb_reg = vir_magic_reg(V3D_QPU_WADDR_TLB);
1111 struct qreg tlbu_reg = vir_magic_reg(V3D_QPU_WADDR_TLBU);
1112 if (c->output_position_index != -1) {
1113 struct qinst *inst = vir_MOV_dest(c, tlbu_reg,
1114 c->outputs[c->output_position_index]);
1115 uint8_t tlb_specifier = TLB_TYPE_DEPTH;
1116
1117 if (c->devinfo->ver >= 42) {
1118 tlb_specifier |= (TLB_V42_DEPTH_TYPE_PER_PIXEL |
1119 TLB_SAMPLE_MODE_PER_PIXEL);
1120 } else
1121 tlb_specifier |= TLB_DEPTH_TYPE_PER_PIXEL;
1122
1123 inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
1124 tlb_specifier |
1125 0xffffff00);
1126 c->writes_z = true;
1127 } else if (c->s->info.fs.uses_discard ||
1128 !c->s->info.fs.early_fragment_tests ||
1129 c->fs_key->sample_alpha_to_coverage ||
1130 !has_any_tlb_color_write) {
1131 /* Emit passthrough Z if it needed to be delayed until shader
1132 * end due to potential discards.
1133 *
1134 * Since (single-threaded) fragment shaders always need a TLB
1135 * write, emit passthrough Z if we didn't have any color
1136 * buffers and flag us as potentially discarding, so that we
1137 * can use Z as the TLB write.
1138 */
1139 c->s->info.fs.uses_discard = true;
1140
1141 struct qinst *inst = vir_MOV_dest(c, tlbu_reg,
1142 vir_nop_reg());
1143 uint8_t tlb_specifier = TLB_TYPE_DEPTH;
1144
1145 if (c->devinfo->ver >= 42) {
1146 /* The spec says the PER_PIXEL flag is ignored for
1147 * invariant writes, but the simulator demands it.
1148 */
1149 tlb_specifier |= (TLB_V42_DEPTH_TYPE_INVARIANT |
1150 TLB_SAMPLE_MODE_PER_PIXEL);
1151 } else {
1152 tlb_specifier |= TLB_DEPTH_TYPE_INVARIANT;
1153 }
1154
1155 inst->uniform = vir_get_uniform_index(c,
1156 QUNIFORM_CONSTANT,
1157 tlb_specifier |
1158 0xffffff00);
1159 c->writes_z = true;
1160 }
1161
1162 /* XXX: Performance improvement: Merge Z write and color writes TLB
1163 * uniform setup
1164 */
1165
1166 for (int rt = 0; rt < V3D_MAX_DRAW_BUFFERS; rt++) {
1167 if (!(c->fs_key->cbufs & (1 << rt)) || !c->output_color_var[rt])
1168 continue;
1169
1170 nir_variable *var = c->output_color_var[rt];
1171 struct qreg *color = &c->outputs[var->data.driver_location * 4];
1172 int num_components = glsl_get_vector_elements(var->type);
1173 uint32_t conf = 0xffffff00;
1174 struct qinst *inst;
1175
1176 conf |= TLB_SAMPLE_MODE_PER_PIXEL;
1177 conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;
1178
1179 if (c->fs_key->swap_color_rb & (1 << rt))
1180 num_components = MAX2(num_components, 3);
1181
1182 assert(num_components != 0);
1183 switch (glsl_get_base_type(var->type)) {
1184 case GLSL_TYPE_UINT:
1185 case GLSL_TYPE_INT:
1186 /* The F32 vs I32 distinction was dropped in 4.2. */
1187 if (c->devinfo->ver < 42)
1188 conf |= TLB_TYPE_I32_COLOR;
1189 else
1190 conf |= TLB_TYPE_F32_COLOR;
1191 conf |= ((num_components - 1) <<
1192 TLB_VEC_SIZE_MINUS_1_SHIFT);
1193
1194 inst = vir_MOV_dest(c, tlbu_reg, color[0]);
1195 inst->uniform = vir_get_uniform_index(c,
1196 QUNIFORM_CONSTANT,
1197 conf);
1198
1199 for (int i = 1; i < num_components; i++) {
1200 inst = vir_MOV_dest(c, tlb_reg, color[i]);
1201 }
1202 break;
1203
1204 default: {
1205 struct qreg r = color[0];
1206 struct qreg g = color[1];
1207 struct qreg b = color[2];
1208 struct qreg a = color[3];
1209
1210 if (c->fs_key->f32_color_rb & (1 << rt)) {
1211 conf |= TLB_TYPE_F32_COLOR;
1212 conf |= ((num_components - 1) <<
1213 TLB_VEC_SIZE_MINUS_1_SHIFT);
1214 } else {
1215 conf |= TLB_TYPE_F16_COLOR;
1216 conf |= TLB_F16_SWAP_HI_LO;
1217 if (num_components >= 3)
1218 conf |= TLB_VEC_SIZE_4_F16;
1219 else
1220 conf |= TLB_VEC_SIZE_2_F16;
1221 }
1222
1223 if (c->fs_key->swap_color_rb & (1 << rt)) {
1224 r = color[2];
1225 b = color[0];
1226 }
1227
1228 if (c->fs_key->sample_alpha_to_one)
1229 a = vir_uniform_f(c, 1.0);
1230
1231 if (c->fs_key->f32_color_rb & (1 << rt)) {
1232 inst = vir_MOV_dest(c, tlbu_reg, r);
1233 inst->uniform = vir_get_uniform_index(c,
1234 QUNIFORM_CONSTANT,
1235 conf);
1236
1237 if (num_components >= 2)
1238 vir_MOV_dest(c, tlb_reg, g);
1239 if (num_components >= 3)
1240 vir_MOV_dest(c, tlb_reg, b);
1241 if (num_components >= 4)
1242 vir_MOV_dest(c, tlb_reg, a);
1243 } else {
1244 inst = vir_VFPACK_dest(c, tlb_reg, r, g);
1245 if (conf != ~0) {
1246 inst->dst = tlbu_reg;
1247 inst->uniform = vir_get_uniform_index(c,
1248 QUNIFORM_CONSTANT,
1249 conf);
1250 }
1251
1252 if (num_components >= 3)
1253 inst = vir_VFPACK_dest(c, tlb_reg, b, a);
1254 }
1255 break;
1256 }
1257 }
1258 }
1259 }
1260
1261 static void
1262 vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t vpm_index)
1263 {
1264 if (c->devinfo->ver >= 40) {
1265 vir_STVPMV(c, vir_uniform_ui(c, vpm_index), val);
1266 } else {
1267 /* XXX: v3d33_vir_vpm_write_setup(c); */
1268 vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
1269 }
1270 }
1271
1272 static void
1273 emit_vert_end(struct v3d_compile *c)
1274 {
1275 /* GFXH-1684: VPM writes need to be complete by the end of the shader.
1276 */
1277 if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
1278 vir_VPMWT(c);
1279 }
1280
1281 void
1282 v3d_optimize_nir(struct nir_shader *s)
1283 {
1284 bool progress;
1285
1286 do {
1287 progress = false;
1288
1289 NIR_PASS_V(s, nir_lower_vars_to_ssa);
1290 NIR_PASS(progress, s, nir_lower_alu_to_scalar);
1291 NIR_PASS(progress, s, nir_lower_phis_to_scalar);
1292 NIR_PASS(progress, s, nir_copy_prop);
1293 NIR_PASS(progress, s, nir_opt_remove_phis);
1294 NIR_PASS(progress, s, nir_opt_dce);
1295 NIR_PASS(progress, s, nir_opt_dead_cf);
1296 NIR_PASS(progress, s, nir_opt_cse);
1297 NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
1298 NIR_PASS(progress, s, nir_opt_algebraic);
1299 NIR_PASS(progress, s, nir_opt_constant_folding);
1300 NIR_PASS(progress, s, nir_opt_undef);
1301 } while (progress);
1302
1303 NIR_PASS(progress, s, nir_opt_move_load_ubo);
1304 }
1305
1306 static int
1307 driver_location_compare(const void *in_a, const void *in_b)
1308 {
1309 const nir_variable *const *a = in_a;
1310 const nir_variable *const *b = in_b;
1311
1312 return (*a)->data.driver_location - (*b)->data.driver_location;
1313 }
1314
1315 static struct qreg
1316 ntq_emit_vpm_read(struct v3d_compile *c,
1317 uint32_t *num_components_queued,
1318 uint32_t *remaining,
1319 uint32_t vpm_index)
1320 {
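/* On V3D 4.x each read is an explicit LDVPMV with its own element offset,
 * so just hand out the next index. On 3.3 reads pop from the VPM FIFO
 * instead: batch up to 32 components per vpm_read_setup call and count the
 * queued components back down as they're consumed.
 */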
1321 struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);
1322
1323 if (c->devinfo->ver >= 40) {
1324 return vir_LDVPMV_IN(c,
1325 vir_uniform_ui(c,
1326 (*num_components_queued)++));
1327 }
1328
1329 if (*num_components_queued != 0) {
1330 (*num_components_queued)--;
1331 return vir_MOV(c, vpm);
1332 }
1333
1334 uint32_t num_components = MIN2(*remaining, 32);
1335
1336 v3d33_vir_vpm_read_setup(c, num_components);
1337
1338 *num_components_queued = num_components - 1;
1339 *remaining -= num_components;
1340
1341 return vir_MOV(c, vpm);
1342 }
1343
1344 static void
1345 ntq_setup_vpm_inputs(struct v3d_compile *c)
1346 {
1347 /* Figure out how many components of each vertex attribute the shader
1348 * uses. Each variable should have been split to individual
1349 * components and unused ones DCEed. The vertex fetcher will load
1350 * from the start of the attribute to the number of components we
1351 * declare we need in c->vattr_sizes[].
1352 */
1353 nir_foreach_variable(var, &c->s->inputs) {
1354 /* No VS attribute array support. */
1355 assert(MAX2(glsl_get_length(var->type), 1) == 1);
1356
1357 unsigned loc = var->data.driver_location;
1358 int start_component = var->data.location_frac;
1359 int num_components = glsl_get_components(var->type);
1360
1361 c->vattr_sizes[loc] = MAX2(c->vattr_sizes[loc],
1362 start_component + num_components);
1363 }
1364
1365 unsigned num_components = 0;
1366 uint32_t vpm_components_queued = 0;
1367 bool uses_iid = c->s->info.system_values_read &
1368 (1ull << SYSTEM_VALUE_INSTANCE_ID);
1369 bool uses_vid = c->s->info.system_values_read &
1370 (1ull << SYSTEM_VALUE_VERTEX_ID);
1371 num_components += uses_iid;
1372 num_components += uses_vid;
1373
1374 for (int i = 0; i < ARRAY_SIZE(c->vattr_sizes); i++)
1375 num_components += c->vattr_sizes[i];
1376
1377 if (uses_iid) {
1378 c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
1379 &num_components, ~0);
1380 }
1381
1382 if (uses_vid) {
1383 c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
1384 &num_components, ~0);
1385 }
1386
1387 /* The actual loads will happen directly in nir_intrinsic_load_input
1388 * on newer versions.
1389 */
1390 if (c->devinfo->ver >= 40)
1391 return;
1392
1393 for (int loc = 0; loc < ARRAY_SIZE(c->vattr_sizes); loc++) {
1394 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1395 (loc + 1) * 4);
1396
1397 for (int i = 0; i < c->vattr_sizes[loc]; i++) {
1398 c->inputs[loc * 4 + i] =
1399 ntq_emit_vpm_read(c,
1400 &vpm_components_queued,
1401 &num_components,
1402 loc * 4 + i);
1403
1404 }
1405 }
1406
1407 if (c->devinfo->ver >= 40) {
1408 assert(vpm_components_queued == num_components);
1409 } else {
1410 assert(vpm_components_queued == 0);
1411 assert(num_components == 0);
1412 }
1413 }
1414
1415 static void
1416 ntq_setup_fs_inputs(struct v3d_compile *c)
1417 {
1418 unsigned num_entries = 0;
1419 unsigned num_components = 0;
1420 nir_foreach_variable(var, &c->s->inputs) {
1421 num_entries++;
1422 num_components += glsl_get_components(var->type);
1423 }
1424
1425 nir_variable *vars[num_entries];
1426
1427 unsigned i = 0;
1428 nir_foreach_variable(var, &c->s->inputs)
1429 vars[i++] = var;
1430
1431 /* Sort the variables so that we emit the input setup in
1432 * driver_location order. This is required for VPM reads, whose data
1433 * is fetched into the VPM in driver_location (TGSI register index)
1434 * order.
1435 */
1436 qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1437
1438 for (unsigned i = 0; i < num_entries; i++) {
1439 nir_variable *var = vars[i];
1440 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1441 unsigned loc = var->data.driver_location;
1442
1443 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1444 (loc + array_len) * 4);
1445
1446 if (var->data.location == VARYING_SLOT_POS) {
1447 emit_fragcoord_input(c, loc);
1448 } else if (var->data.location == VARYING_SLOT_PNTC ||
1449 (var->data.location >= VARYING_SLOT_VAR0 &&
1450 (c->fs_key->point_sprite_mask &
1451 (1 << (var->data.location -
1452 VARYING_SLOT_VAR0))))) {
1453 c->inputs[loc * 4 + 0] = c->point_x;
1454 c->inputs[loc * 4 + 1] = c->point_y;
1455 } else {
1456 for (int j = 0; j < array_len; j++)
1457 emit_fragment_input(c, loc + j, var, j);
1458 }
1459 }
1460 }
1461
1462 static void
1463 ntq_setup_outputs(struct v3d_compile *c)
1464 {
1465 if (c->s->info.stage != MESA_SHADER_FRAGMENT)
1466 return;
1467
1468 nir_foreach_variable(var, &c->s->outputs) {
1469 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1470 unsigned loc = var->data.driver_location * 4;
1471
1472 assert(array_len == 1);
1473 (void)array_len;
1474
1475 for (int i = 0; i < 4 - var->data.location_frac; i++) {
1476 add_output(c, loc + var->data.location_frac + i,
1477 var->data.location,
1478 var->data.location_frac + i);
1479 }
1480
1481 switch (var->data.location) {
1482 case FRAG_RESULT_COLOR:
1483 c->output_color_var[0] = var;
1484 c->output_color_var[1] = var;
1485 c->output_color_var[2] = var;
1486 c->output_color_var[3] = var;
1487 break;
1488 case FRAG_RESULT_DATA0:
1489 case FRAG_RESULT_DATA1:
1490 case FRAG_RESULT_DATA2:
1491 case FRAG_RESULT_DATA3:
1492 c->output_color_var[var->data.location -
1493 FRAG_RESULT_DATA0] = var;
1494 break;
1495 case FRAG_RESULT_DEPTH:
1496 c->output_position_index = loc;
1497 break;
1498 case FRAG_RESULT_SAMPLE_MASK:
1499 c->output_sample_mask_index = loc;
1500 break;
1501 }
1502 }
1503 }
1504
1505 /**
1506 * Sets up the mapping from nir_register to struct qreg *.
1507 *
1508 * Each nir_register gets a struct qreg per 32-bit component being stored.
1509 */
1510 static void
1511 ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
1512 {
1513 foreach_list_typed(nir_register, nir_reg, node, list) {
1514 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1515 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1516 array_len *
1517 nir_reg->num_components);
1518
1519 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1520
1521 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1522 qregs[i] = vir_get_temp(c);
1523 }
1524 }
1525
1526 static void
1527 ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
1528 {
1529 /* XXX perf: Experiment with using immediate loads to avoid having
1530 * these end up in the uniform stream. Watch out for breaking the
1531 * small immediates optimization in the process!
1532 */
1533 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1534 for (int i = 0; i < instr->def.num_components; i++)
1535 qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);
1536
1537 _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1538 }
1539
1540 static void
1541 ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
1542 {
1543 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1544
1545 /* VIR needs there to be *some* value, so pick 0 (same as for
1546 * ntq_setup_registers()).
1547 */
1548 for (int i = 0; i < instr->def.num_components; i++)
1549 qregs[i] = vir_uniform_ui(c, 0);
1550 }
1551
1552 static void
1553 ntq_emit_image_size(struct v3d_compile *c, nir_intrinsic_instr *instr)
1554 {
1555 assert(instr->intrinsic == nir_intrinsic_image_deref_size);
1556 nir_variable *var = nir_intrinsic_get_var(instr, 0);
1557 unsigned image_index = var->data.driver_location;
1558 const struct glsl_type *sampler_type = glsl_without_array(var->type);
1559 bool is_array = glsl_sampler_type_is_array(sampler_type);
1560
1561 ntq_store_dest(c, &instr->dest, 0,
1562 vir_uniform(c, QUNIFORM_IMAGE_WIDTH, image_index));
1563 if (instr->num_components > 1) {
1564 ntq_store_dest(c, &instr->dest, 1,
1565 vir_uniform(c, QUNIFORM_IMAGE_HEIGHT,
1566 image_index));
1567 }
1568 if (instr->num_components > 2) {
1569 ntq_store_dest(c, &instr->dest, 2,
1570 vir_uniform(c,
1571 is_array ?
1572 QUNIFORM_IMAGE_ARRAY_SIZE :
1573 QUNIFORM_IMAGE_DEPTH,
1574 image_index));
1575 }
1576 }
1577
1578 static void
1579 ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
1580 {
1581 unsigned offset;
1582
1583 switch (instr->intrinsic) {
1584 case nir_intrinsic_load_uniform:
1585 if (nir_src_is_const(instr->src[0])) {
1586 int offset = (nir_intrinsic_base(instr) +
1587 nir_src_as_uint(instr->src[0]));
1588 assert(offset % 4 == 0);
1589 /* We need dwords */
1590 offset = offset / 4;
1591 for (int i = 0; i < instr->num_components; i++) {
1592 ntq_store_dest(c, &instr->dest, i,
1593 vir_uniform(c, QUNIFORM_UNIFORM,
1594 offset + i));
1595 }
1596 } else {
1597 ntq_emit_tmu_general(c, instr, false);
1598 }
1599 break;
1600
1601 case nir_intrinsic_load_ubo:
1602 ntq_emit_tmu_general(c, instr, false);
1603 break;
1604
1605 case nir_intrinsic_ssbo_atomic_add:
1606 case nir_intrinsic_ssbo_atomic_imin:
1607 case nir_intrinsic_ssbo_atomic_umin:
1608 case nir_intrinsic_ssbo_atomic_imax:
1609 case nir_intrinsic_ssbo_atomic_umax:
1610 case nir_intrinsic_ssbo_atomic_and:
1611 case nir_intrinsic_ssbo_atomic_or:
1612 case nir_intrinsic_ssbo_atomic_xor:
1613 case nir_intrinsic_ssbo_atomic_exchange:
1614 case nir_intrinsic_ssbo_atomic_comp_swap:
1615 case nir_intrinsic_load_ssbo:
1616 case nir_intrinsic_store_ssbo:
1617 ntq_emit_tmu_general(c, instr, false);
1618 break;
1619
1620 case nir_intrinsic_shared_atomic_add:
1621 case nir_intrinsic_shared_atomic_imin:
1622 case nir_intrinsic_shared_atomic_umin:
1623 case nir_intrinsic_shared_atomic_imax:
1624 case nir_intrinsic_shared_atomic_umax:
1625 case nir_intrinsic_shared_atomic_and:
1626 case nir_intrinsic_shared_atomic_or:
1627 case nir_intrinsic_shared_atomic_xor:
1628 case nir_intrinsic_shared_atomic_exchange:
1629 case nir_intrinsic_shared_atomic_comp_swap:
1630 case nir_intrinsic_load_shared:
1631 case nir_intrinsic_store_shared:
1632 ntq_emit_tmu_general(c, instr, true);
1633 break;
1634
1635 case nir_intrinsic_image_deref_load:
1636 case nir_intrinsic_image_deref_store:
1637 case nir_intrinsic_image_deref_atomic_add:
1638 case nir_intrinsic_image_deref_atomic_min:
1639 case nir_intrinsic_image_deref_atomic_max:
1640 case nir_intrinsic_image_deref_atomic_and:
1641 case nir_intrinsic_image_deref_atomic_or:
1642 case nir_intrinsic_image_deref_atomic_xor:
1643 case nir_intrinsic_image_deref_atomic_exchange:
1644 case nir_intrinsic_image_deref_atomic_comp_swap:
1645 v3d40_vir_emit_image_load_store(c, instr);
1646 break;
1647
1648 case nir_intrinsic_get_buffer_size:
1649 ntq_store_dest(c, &instr->dest, 0,
1650 vir_uniform(c, QUNIFORM_GET_BUFFER_SIZE,
1651 nir_src_as_uint(instr->src[0])));
1652 break;
1653
1654 case nir_intrinsic_load_user_clip_plane:
1655 for (int i = 0; i < instr->num_components; i++) {
1656 ntq_store_dest(c, &instr->dest, i,
1657 vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1658 nir_intrinsic_ucp_id(instr) *
1659 4 + i));
1660 }
1661 break;
1662
1663 case nir_intrinsic_load_viewport_x_scale:
1664 ntq_store_dest(c, &instr->dest, 0,
1665 vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE, 0));
1666 break;
1667
1668 case nir_intrinsic_load_viewport_y_scale:
1669 ntq_store_dest(c, &instr->dest, 0,
1670 vir_uniform(c, QUNIFORM_VIEWPORT_Y_SCALE, 0));
1671 break;
1672
1673 case nir_intrinsic_load_viewport_z_scale:
1674 ntq_store_dest(c, &instr->dest, 0,
1675 vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0));
1676 break;
1677
1678 case nir_intrinsic_load_viewport_z_offset:
1679 ntq_store_dest(c, &instr->dest, 0,
1680 vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0));
1681 break;
1682
1683 case nir_intrinsic_load_alpha_ref_float:
1684 ntq_store_dest(c, &instr->dest, 0,
1685 vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
1686 break;
1687
1688 case nir_intrinsic_load_sample_mask_in:
1689 ntq_store_dest(c, &instr->dest, 0, vir_MSF(c));
1690 break;
1691
1692 case nir_intrinsic_load_helper_invocation:
1693 vir_set_pf(vir_MSF_dest(c, vir_nop_reg()), V3D_QPU_PF_PUSHZ);
1694 ntq_store_dest(c, &instr->dest, 0,
1695 vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
1696 vir_uniform_ui(c, ~0),
1697 vir_uniform_ui(c, 0))));
1698 break;
1699
1700 case nir_intrinsic_load_front_face:
1701 /* The register contains 0 (front) or 1 (back), and we need to
1702 * turn it into a NIR bool where true means front.
1703 */
1704 ntq_store_dest(c, &instr->dest, 0,
1705 vir_ADD(c,
1706 vir_uniform_ui(c, -1),
1707 vir_REVF(c)));
1708 break;
1709
1710 case nir_intrinsic_load_instance_id:
1711 ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
1712 break;
1713
1714 case nir_intrinsic_load_vertex_id:
1715 ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
1716 break;
1717
1718 case nir_intrinsic_load_input:
1719 offset = (nir_intrinsic_base(instr) +
1720 nir_src_as_uint(instr->src[0]));
1721 if (c->s->info.stage != MESA_SHADER_FRAGMENT &&
1722 c->devinfo->ver >= 40) {
1723 /* Emit the LDVPM directly now, rather than at the top
1724 * of the shader like we did for V3D 3.x (which needs
1725 * vpmsetup when not just taking the next offset).
1726 *
1727 * Note that delaying like this may introduce stalls,
1728 * as LDVPMV takes a minimum of 1 instruction but may
1729 * be slower if the VPM unit is busy with another QPU.
1730 */
1731 int index = 0;
1732 if (c->s->info.system_values_read &
1733 (1ull << SYSTEM_VALUE_INSTANCE_ID)) {
1734 index++;
1735 }
1736 if (c->s->info.system_values_read &
1737 (1ull << SYSTEM_VALUE_VERTEX_ID)) {
1738 index++;
1739 }
1740 for (int i = 0; i < offset; i++)
1741 index += c->vattr_sizes[i];
1742 index += nir_intrinsic_component(instr);
1743 for (int i = 0; i < instr->num_components; i++) {
1744 struct qreg vpm_offset =
1745 vir_uniform_ui(c, index++);
1746 ntq_store_dest(c, &instr->dest, i,
1747 vir_LDVPMV_IN(c, vpm_offset));
1748 }
1749 } else {
1750 for (int i = 0; i < instr->num_components; i++) {
1751 int comp = nir_intrinsic_component(instr) + i;
1752 ntq_store_dest(c, &instr->dest, i,
1753 vir_MOV(c, c->inputs[offset * 4 +
1754 comp]));
1755 }
1756 }
1757 break;
1758
1759 case nir_intrinsic_store_output:
1760 if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
1761 offset = ((nir_intrinsic_base(instr) +
1762 nir_src_as_uint(instr->src[1])) * 4 +
1763 nir_intrinsic_component(instr));
1764 for (int i = 0; i < instr->num_components; i++) {
1765 c->outputs[offset + i] =
1766 vir_MOV(c,
1767 ntq_get_src(c,
1768 instr->src[0], i));
1769 }
1770 } else {
1771 assert(instr->num_components == 1);
1772
1773 vir_VPM_WRITE(c,
1774 ntq_get_src(c, instr->src[0], 0),
1775 nir_intrinsic_base(instr));
1776 }
1777 break;
1778
1779 case nir_intrinsic_image_deref_size:
1780 ntq_emit_image_size(c, instr);
1781 break;
1782
1783 case nir_intrinsic_discard:
1784 if (vir_in_nonuniform_control_flow(c)) {
1785 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
1786 V3D_QPU_PF_PUSHZ);
1787 vir_set_cond(vir_SETMSF_dest(c, vir_nop_reg(),
1788 vir_uniform_ui(c, 0)),
1789 V3D_QPU_COND_IFA);
1790 } else {
1791 vir_SETMSF_dest(c, vir_nop_reg(),
1792 vir_uniform_ui(c, 0));
1793 }
1794 break;
1795
1796 case nir_intrinsic_discard_if: {
1797 enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, instr->src[0]);
1798
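/* In nonuniform control flow the discard must also honor the execute mask:
 * fold "execute == 0" (channel active) into the flags with ANDZ/NORNZ so
 * the SETMSF below only zeroes coverage for channels that are active and
 * whose discard condition holds.
 */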
1799 if (vir_in_nonuniform_control_flow(c)) {
1800 struct qinst *exec_flag = vir_MOV_dest(c, vir_nop_reg(),
1801 c->execute);
1802 if (cond == V3D_QPU_COND_IFA) {
1803 vir_set_uf(exec_flag, V3D_QPU_UF_ANDZ);
1804 } else {
1805 vir_set_uf(exec_flag, V3D_QPU_UF_NORNZ);
1806 cond = V3D_QPU_COND_IFA;
1807 }
1808 }
1809
1810 vir_set_cond(vir_SETMSF_dest(c, vir_nop_reg(),
1811 vir_uniform_ui(c, 0)), cond);
1812
1813 break;
1814 }
1815
1816 case nir_intrinsic_memory_barrier:
1817 case nir_intrinsic_memory_barrier_atomic_counter:
1818 case nir_intrinsic_memory_barrier_buffer:
1819 case nir_intrinsic_memory_barrier_image:
1820 case nir_intrinsic_group_memory_barrier:
1821 /* We don't do any instruction scheduling of these NIR
1822 * instructions between each other, so we just need to make
1823 * sure that the TMU operations before the barrier are flushed
1824 * before the ones after the barrier. That is currently
1825 * handled by having a THRSW in each of them and a LDTMU
1826 * series or a TMUWT after.
1827 */
1828 break;
1829
1830 case nir_intrinsic_barrier:
1831 /* Emit a TSY op to get all invocations in the workgroup
1832 * (actually supergroup) to block until the last invocation
1833 * reaches the TSY op.
1834 */
1835 if (c->devinfo->ver >= 42) {
1836 vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
1837 V3D_QPU_WADDR_SYNCB));
1838 } else {
1839 struct qinst *sync =
1840 vir_BARRIERID_dest(c,
1841 vir_reg(QFILE_MAGIC,
1842 V3D_QPU_WADDR_SYNCU));
1843 sync->uniform =
1844 vir_get_uniform_index(c, QUNIFORM_CONSTANT,
1845 0xffffff00 |
1846 V3D_TSY_WAIT_INC_CHECK);
1847
1848 }
1849
1850 /* The blocking of a TSY op only happens at the next thread
1851 * switch. No texturing may be outstanding at the time of a
1852 * TSY blocking operation.
1853 */
1854 vir_emit_thrsw(c);
1855 break;
1856
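        /* gl_NumWorkGroups is sourced from uniforms, one
         * QUNIFORM_NUM_WORK_GROUPS slot per component.
         */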
1857 case nir_intrinsic_load_num_work_groups:
1858 for (int i = 0; i < 3; i++) {
1859 ntq_store_dest(c, &instr->dest, i,
1860 vir_uniform(c, QUNIFORM_NUM_WORK_GROUPS,
1861 i));
1862 }
1863 break;
1864
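        /* The local invocation index is packed into the top
         * local_invocation_index_bits bits of cs_payload[1], so a right
         * shift by (32 - bits) extracts it.
         */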
1865 case nir_intrinsic_load_local_invocation_index:
1866 ntq_store_dest(c, &instr->dest, 0,
1867 vir_SHR(c, c->cs_payload[1],
1868 vir_uniform_ui(c, 32 - c->local_invocation_index_bits)));
1869 break;
1870
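        /* The workgroup ID is packed into the payload registers: X in the
         * low 16 bits of cs_payload[0], Y in its high 16 bits, and Z in the
         * low 16 bits of cs_payload[1].
         */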
1871 case nir_intrinsic_load_work_group_id:
1872 ntq_store_dest(c, &instr->dest, 0,
1873 vir_AND(c, c->cs_payload[0],
1874 vir_uniform_ui(c, 0xffff)));
1875 ntq_store_dest(c, &instr->dest, 1,
1876 vir_SHR(c, c->cs_payload[0],
1877 vir_uniform_ui(c, 16)));
1878 ntq_store_dest(c, &instr->dest, 2,
1879 vir_AND(c, c->cs_payload[1],
1880 vir_uniform_ui(c, 0xffff)));
1881 break;
1882
1883 case nir_intrinsic_load_subgroup_id:
1884 ntq_store_dest(c, &instr->dest, 0, vir_EIDX(c));
1885 break;
1886
1887 default:
1888 fprintf(stderr, "Unknown intrinsic: ");
1889 nir_print_instr(&instr->instr, stderr);
1890 fprintf(stderr, "\n");
1891 break;
1892 }
1893 }
1894
1895 /* Clears (activates) the execute flags for any channels whose jump target
1896 * matches this block.
1897 *
1898 * XXX perf: Could we be using flpush/flpop somehow for our execution channel
1899 * enabling?
1900 *
1901 * XXX perf: For uniform control flow, we should be able to skip c->execute
1902 * handling entirely.
1903 */
1904 static void
1905 ntq_activate_execute_for_block(struct v3d_compile *c)
1906 {
1907 vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
1908 c->execute, vir_uniform_ui(c, c->cur_block->index)),
1909 V3D_QPU_PF_PUSHZ);
1910
1911 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
1912 }
1913
1914 static void
1915 ntq_emit_uniform_if(struct v3d_compile *c, nir_if *if_stmt)
1916 {
1917 nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
1918 bool empty_else_block =
1919 (nir_else_block == nir_if_last_else_block(if_stmt) &&
1920 exec_list_is_empty(&nir_else_block->instr_list));
1921
1922 struct qblock *then_block = vir_new_block(c);
1923 struct qblock *after_block = vir_new_block(c);
1924 struct qblock *else_block;
1925 if (empty_else_block)
1926 else_block = after_block;
1927 else
1928 else_block = vir_new_block(c);
1929
1930 /* Set up the flags for the IF condition (taking the THEN branch). */
1931 enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);
1932
1933 /* Jump to ELSE. */
1934 vir_BRANCH(c, cond == V3D_QPU_COND_IFA ?
1935 V3D_QPU_BRANCH_COND_ALLNA :
1936 V3D_QPU_BRANCH_COND_ALLA);
1937 vir_link_blocks(c->cur_block, else_block);
1938 vir_link_blocks(c->cur_block, then_block);
1939
1940 /* Process the THEN block. */
1941 vir_set_emit_block(c, then_block);
1942 ntq_emit_cf_list(c, &if_stmt->then_list);
1943
1944 if (!empty_else_block) {
1945 /* At the end of the THEN block, jump to ENDIF */
1946 vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALWAYS);
1947 vir_link_blocks(c->cur_block, after_block);
1948
1949 /* Emit the else block. */
1950 vir_set_emit_block(c, else_block);
1951 ntq_activate_execute_for_block(c);
1952 ntq_emit_cf_list(c, &if_stmt->else_list);
1953 }
1954
1955 vir_link_blocks(c->cur_block, after_block);
1956
1957 vir_set_emit_block(c, after_block);
1958 }
1959
1960 static void
1961 ntq_emit_nonuniform_if(struct v3d_compile *c, nir_if *if_stmt)
1962 {
1963 nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
1964 bool empty_else_block =
1965 (nir_else_block == nir_if_last_else_block(if_stmt) &&
1966 exec_list_is_empty(&nir_else_block->instr_list));
1967
1968 struct qblock *then_block = vir_new_block(c);
1969 struct qblock *after_block = vir_new_block(c);
1970 struct qblock *else_block;
1971 if (empty_else_block)
1972 else_block = after_block;
1973 else
1974 else_block = vir_new_block(c);
1975
1976 bool was_uniform_control_flow = false;
1977 if (!vir_in_nonuniform_control_flow(c)) {
1978 c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
1979 was_uniform_control_flow = true;
1980 }
1981
1982 /* Set up the flags for the IF condition (taking the THEN branch). */
1983 enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);
1984
1985                 /* Update the flags+cond to mean "taking the ELSE branch (!cond)
1986                  * and was previously active (execute Z)" for updating the exec flags.
1987                  */
1988 if (was_uniform_control_flow) {
1989 cond = v3d_qpu_cond_invert(cond);
1990 } else {
1991 struct qinst *inst = vir_MOV_dest(c, vir_nop_reg(), c->execute);
1992 if (cond == V3D_QPU_COND_IFA) {
1993 vir_set_uf(inst, V3D_QPU_UF_NORNZ);
1994 } else {
1995 vir_set_uf(inst, V3D_QPU_UF_ANDZ);
1996 cond = V3D_QPU_COND_IFA;
1997 }
1998 }
1999
2000 vir_MOV_cond(c, cond,
2001 c->execute,
2002 vir_uniform_ui(c, else_block->index));
2003
2004 /* Jump to ELSE if nothing is active for THEN, otherwise fall
2005 * through.
2006 */
2007 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);
2008 vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
2009 vir_link_blocks(c->cur_block, else_block);
2010 vir_link_blocks(c->cur_block, then_block);
2011
2012 /* Process the THEN block. */
2013 vir_set_emit_block(c, then_block);
2014 ntq_emit_cf_list(c, &if_stmt->then_list);
2015
2016 if (!empty_else_block) {
2017 /* Handle the end of the THEN block. First, all currently
2018 * active channels update their execute flags to point to
2019                  * ENDIF.
2020 */
2021 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
2022 V3D_QPU_PF_PUSHZ);
2023 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
2024 vir_uniform_ui(c, after_block->index));
2025
2026 /* If everything points at ENDIF, then jump there immediately. */
2027 vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
2028 c->execute,
2029 vir_uniform_ui(c, after_block->index)),
2030 V3D_QPU_PF_PUSHZ);
2031 vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
2032 vir_link_blocks(c->cur_block, after_block);
2033 vir_link_blocks(c->cur_block, else_block);
2034
2035 vir_set_emit_block(c, else_block);
2036 ntq_activate_execute_for_block(c);
2037 ntq_emit_cf_list(c, &if_stmt->else_list);
2038 }
2039
2040 vir_link_blocks(c->cur_block, after_block);
2041
2042 vir_set_emit_block(c, after_block);
2043 if (was_uniform_control_flow)
2044 c->execute = c->undef;
2045 else
2046 ntq_activate_execute_for_block(c);
2047 }
2048
2049 static void
2050 ntq_emit_if(struct v3d_compile *c, nir_if *nif)
2051 {
2052 bool was_in_control_flow = c->in_control_flow;
2053 c->in_control_flow = true;
2054 if (!vir_in_nonuniform_control_flow(c) &&
2055 nir_src_is_dynamically_uniform(nif->condition)) {
2056 ntq_emit_uniform_if(c, nif);
2057 } else {
2058 ntq_emit_nonuniform_if(c, nif);
2059 }
2060 c->in_control_flow = was_in_control_flow;
2061 }
2062
2063 static void
2064 ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
2065 {
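        /* break/continue don't branch directly; they retarget each active
         * channel's execute value at the loop's break or continue block so
         * the channel is re-enabled when emission reaches that block.
         */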
2066 switch (jump->type) {
2067 case nir_jump_break:
2068 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
2069 V3D_QPU_PF_PUSHZ);
2070 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
2071 vir_uniform_ui(c, c->loop_break_block->index));
2072 break;
2073
2074 case nir_jump_continue:
2075 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
2076 V3D_QPU_PF_PUSHZ);
2077 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
2078 vir_uniform_ui(c, c->loop_cont_block->index));
2079 break;
2080
2081 case nir_jump_return:
2082                 unreachable("All returns should be lowered\n");
2083 }
2084 }
2085
2086 static void
2087 ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
2088 {
2089 switch (instr->type) {
2090 case nir_instr_type_deref:
2091 /* ignored, will be walked by the intrinsic using it. */
2092 break;
2093
2094 case nir_instr_type_alu:
2095 ntq_emit_alu(c, nir_instr_as_alu(instr));
2096 break;
2097
2098 case nir_instr_type_intrinsic:
2099 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
2100 break;
2101
2102 case nir_instr_type_load_const:
2103 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
2104 break;
2105
2106 case nir_instr_type_ssa_undef:
2107 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
2108 break;
2109
2110 case nir_instr_type_tex:
2111 ntq_emit_tex(c, nir_instr_as_tex(instr));
2112 break;
2113
2114 case nir_instr_type_jump:
2115 ntq_emit_jump(c, nir_instr_as_jump(instr));
2116 break;
2117
2118 default:
2119 fprintf(stderr, "Unknown NIR instr type: ");
2120 nir_print_instr(instr, stderr);
2121 fprintf(stderr, "\n");
2122 abort();
2123 }
2124 }
2125
2126 static void
2127 ntq_emit_block(struct v3d_compile *c, nir_block *block)
2128 {
2129 nir_foreach_instr(instr, block) {
2130 ntq_emit_instr(c, instr);
2131 }
2132 }
2133
2134 static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
2135
2136 static void
2137 ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
2138 {
2139 bool was_in_control_flow = c->in_control_flow;
2140 c->in_control_flow = true;
2141
2142 bool was_uniform_control_flow = false;
2143 if (!vir_in_nonuniform_control_flow(c)) {
2144 c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
2145 was_uniform_control_flow = true;
2146 }
2147
2148 struct qblock *save_loop_cont_block = c->loop_cont_block;
2149 struct qblock *save_loop_break_block = c->loop_break_block;
2150
2151 c->loop_cont_block = vir_new_block(c);
2152 c->loop_break_block = vir_new_block(c);
2153
2154 vir_link_blocks(c->cur_block, c->loop_cont_block);
2155 vir_set_emit_block(c, c->loop_cont_block);
2156 ntq_activate_execute_for_block(c);
2157
2158 ntq_emit_cf_list(c, &loop->body);
2159
2160 /* Re-enable any previous continues now, so our ANYA check below
2161 * works.
2162 *
2163 * XXX: Use the .ORZ flags update, instead.
2164 */
2165 vir_set_pf(vir_XOR_dest(c,
2166 vir_nop_reg(),
2167 c->execute,
2168 vir_uniform_ui(c, c->loop_cont_block->index)),
2169 V3D_QPU_PF_PUSHZ);
2170 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
2171
2172 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);
2173
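        /* Branch back to the top of the loop if any channel is still active
         * (execute == 0); otherwise fall through to the break block.
         */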
2174 struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
2175 /* Pixels that were not dispatched or have been discarded should not
2176 * contribute to looping again.
2177 */
2178 branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
2179 vir_link_blocks(c->cur_block, c->loop_cont_block);
2180 vir_link_blocks(c->cur_block, c->loop_break_block);
2181
2182 vir_set_emit_block(c, c->loop_break_block);
2183 if (was_uniform_control_flow)
2184 c->execute = c->undef;
2185 else
2186 ntq_activate_execute_for_block(c);
2187
2188 c->loop_break_block = save_loop_break_block;
2189 c->loop_cont_block = save_loop_cont_block;
2190
2191 c->loops++;
2192
2193 c->in_control_flow = was_in_control_flow;
2194 }
2195
2196 static void
2197 ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
2198 {
2199 fprintf(stderr, "FUNCTIONS not handled.\n");
2200 abort();
2201 }
2202
2203 static void
2204 ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
2205 {
2206 foreach_list_typed(nir_cf_node, node, node, list) {
2207 switch (node->type) {
2208 case nir_cf_node_block:
2209 ntq_emit_block(c, nir_cf_node_as_block(node));
2210 break;
2211
2212 case nir_cf_node_if:
2213 ntq_emit_if(c, nir_cf_node_as_if(node));
2214 break;
2215
2216 case nir_cf_node_loop:
2217 ntq_emit_loop(c, nir_cf_node_as_loop(node));
2218 break;
2219
2220 case nir_cf_node_function:
2221 ntq_emit_function(c, nir_cf_node_as_function(node));
2222 break;
2223
2224 default:
2225 fprintf(stderr, "Unknown NIR node type\n");
2226 abort();
2227 }
2228 }
2229 }
2230
2231 static void
2232 ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
2233 {
2234 ntq_setup_registers(c, &impl->registers);
2235 ntq_emit_cf_list(c, &impl->body);
2236 }
2237
2238 static void
2239 nir_to_vir(struct v3d_compile *c)
2240 {
2241 switch (c->s->info.stage) {
2242 case MESA_SHADER_FRAGMENT:
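                /* The fragment shader payload is read from the first
                 * register-file entries: rf0 holds W, rf1 centroid W, and
                 * rf2 Z.
                 */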
2243 c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
2244 c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
2245 c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));
2246
2247 /* XXX perf: We could set the "disable implicit point/line
2248 * varyings" field in the shader record and not emit these, if
2249 * they're not going to be used.
2250 */
2251 if (c->fs_key->is_points) {
2252 c->point_x = emit_fragment_varying(c, NULL, 0, 0);
2253 c->point_y = emit_fragment_varying(c, NULL, 0, 0);
2254 } else if (c->fs_key->is_lines) {
2255 c->line_x = emit_fragment_varying(c, NULL, 0, 0);
2256 }
2257 break;
2258 case MESA_SHADER_COMPUTE:
2259 /* Set up the TSO for barriers, assuming we do some. */
2260 if (c->devinfo->ver < 42) {
2261 vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
2262 V3D_QPU_WADDR_SYNC));
2263 }
2264
2265 if (c->s->info.system_values_read &
2266 ((1ull << SYSTEM_VALUE_LOCAL_INVOCATION_INDEX) |
2267 (1ull << SYSTEM_VALUE_WORK_GROUP_ID))) {
2268 c->cs_payload[0] = vir_MOV(c, vir_reg(QFILE_REG, 0));
2269 }
2270 if ((c->s->info.system_values_read &
2271 ((1ull << SYSTEM_VALUE_WORK_GROUP_ID))) ||
2272 c->s->info.cs.shared_size) {
2273 c->cs_payload[1] = vir_MOV(c, vir_reg(QFILE_REG, 2));
2274 }
2275
2276 /* Set up the division between gl_LocalInvocationIndex and
2277 * wg_in_mem in the payload reg.
2278 */
2279 int wg_size = (c->s->info.cs.local_size[0] *
2280 c->s->info.cs.local_size[1] *
2281 c->s->info.cs.local_size[2]);
2282 c->local_invocation_index_bits =
2283 ffs(util_next_power_of_two(MAX2(wg_size, 64))) - 1;
2284 assert(c->local_invocation_index_bits <= 8);
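                /* e.g. an 8x8x1 workgroup has wg_size == 64, so
                 * local_invocation_index_bits is ffs(64) - 1 == 6 and the
                 * index occupies the top 6 bits of cs_payload[1].
                 */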
2285
2286 if (c->s->info.cs.shared_size) {
2287 struct qreg wg_in_mem = vir_SHR(c, c->cs_payload[1],
2288 vir_uniform_ui(c, 16));
2289 if (c->s->info.cs.local_size[0] != 1 ||
2290 c->s->info.cs.local_size[1] != 1 ||
2291 c->s->info.cs.local_size[2] != 1) {
2292 int wg_bits = (16 -
2293 c->local_invocation_index_bits);
2294 int wg_mask = (1 << wg_bits) - 1;
2295 wg_in_mem = vir_AND(c, wg_in_mem,
2296 vir_uniform_ui(c, wg_mask));
2297 }
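                        /* Each in-flight workgroup gets its own slice of the
                         * shared memory buffer, wg_in_mem * shared_size
                         * bytes past the base QUNIFORM_SHARED_OFFSET.
                         */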
2298 struct qreg shared_per_wg =
2299 vir_uniform_ui(c, c->s->info.cs.shared_size);
2300
2301 c->cs_shared_offset =
2302 vir_ADD(c,
2303                                         vir_uniform(c, QUNIFORM_SHARED_OFFSET, 0),
2304 vir_UMUL(c, wg_in_mem, shared_per_wg));
2305 }
2306 break;
2307 default:
2308 break;
2309 }
2310
2311 if (c->s->info.stage == MESA_SHADER_FRAGMENT)
2312 ntq_setup_fs_inputs(c);
2313 else
2314 ntq_setup_vpm_inputs(c);
2315
2316 ntq_setup_outputs(c);
2317
2318 /* Find the main function and emit the body. */
2319 nir_foreach_function(function, c->s) {
2320 assert(strcmp(function->name, "main") == 0);
2321 assert(function->impl);
2322 ntq_emit_impl(c, function->impl);
2323 }
2324 }
2325
2326 const nir_shader_compiler_options v3d_nir_options = {
2327 .lower_all_io_to_temps = true,
2328 .lower_extract_byte = true,
2329 .lower_extract_word = true,
2330 .lower_bfm = true,
2331 .lower_bitfield_insert_to_shifts = true,
2332 .lower_bitfield_extract_to_shifts = true,
2333 .lower_bitfield_reverse = true,
2334 .lower_bit_count = true,
2335 .lower_cs_local_id_from_index = true,
2336 .lower_ffract = true,
2337 .lower_pack_unorm_2x16 = true,
2338 .lower_pack_snorm_2x16 = true,
2339 .lower_pack_unorm_4x8 = true,
2340 .lower_pack_snorm_4x8 = true,
2341 .lower_unpack_unorm_4x8 = true,
2342 .lower_unpack_snorm_4x8 = true,
2343 .lower_pack_half_2x16 = true,
2344 .lower_unpack_half_2x16 = true,
2345 .lower_fdiv = true,
2346 .lower_find_lsb = true,
2347 .lower_ffma = true,
2348 .lower_flrp32 = true,
2349 .lower_fpow = true,
2350 .lower_fsat = true,
2351 .lower_fsqrt = true,
2352 .lower_ifind_msb = true,
2353 .lower_isign = true,
2354 .lower_ldexp = true,
2355 .lower_mul_high = true,
2356 .lower_wpos_pntc = true,
2357 .native_integers = true,
2358 };
2359
2360 /**
2361 * When demoting a shader down to single-threaded, removes the THRSW
2362 * instructions (one will still be inserted at v3d_vir_to_qpu() for the
2363 * program end).
2364 */
2365 static void
2366 vir_remove_thrsw(struct v3d_compile *c)
2367 {
2368 vir_for_each_block(block, c) {
2369 vir_for_each_inst_safe(inst, block) {
2370 if (inst->qpu.sig.thrsw)
2371 vir_remove_instruction(c, inst);
2372 }
2373 }
2374
2375 c->last_thrsw = NULL;
2376 }
2377
2378 void
2379 vir_emit_last_thrsw(struct v3d_compile *c)
2380 {
2381 /* On V3D before 4.1, we need a TMU op to be outstanding when thread
2382 * switching, so disable threads if we didn't do any TMU ops (each of
2383 * which would have emitted a THRSW).
2384 */
2385 if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
2386 c->threads = 1;
2387 if (c->last_thrsw)
2388 vir_remove_thrsw(c);
2389 return;
2390 }
2391
2392 /* If we're threaded and the last THRSW was in conditional code, then
2393 * we need to emit another one so that we can flag it as the last
2394 * thrsw.
2395 */
2396 if (c->last_thrsw && !c->last_thrsw_at_top_level) {
2397 assert(c->devinfo->ver >= 41);
2398 vir_emit_thrsw(c);
2399 }
2400
2401 /* If we're threaded, then we need to mark the last THRSW instruction
2402 * so we can emit a pair of them at QPU emit time.
2403 *
2404 * For V3D 4.x, we can spawn the non-fragment shaders already in the
2405 * post-last-THRSW state, so we can skip this.
2406 */
2407 if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
2408 assert(c->devinfo->ver >= 41);
2409 vir_emit_thrsw(c);
2410 }
2411
2412 if (c->last_thrsw)
2413 c->last_thrsw->is_last_thrsw = true;
2414 }
2415
2416 /* There's a flag in the shader for "center W is needed for reasons other than
2417 * non-centroid varyings", so we just walk the program after VIR optimization
2418 * to see if it's used. It should be harmless to set even if we only use
2419 * center W for varyings.
2420 */
2421 static void
2422 vir_check_payload_w(struct v3d_compile *c)
2423 {
2424 if (c->s->info.stage != MESA_SHADER_FRAGMENT)
2425 return;
2426
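        /* Payload W was loaded from register file entry 0 in nir_to_vir(),
         * so any instruction sourcing rf0 means center W is used.
         */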
2427 vir_for_each_inst_inorder(inst, c) {
2428 for (int i = 0; i < vir_get_nsrc(inst); i++) {
2429 if (inst->src[i].file == QFILE_REG &&
2430 inst->src[i].index == 0) {
2431 c->uses_center_w = true;
2432 return;
2433 }
2434 }
2435 }
2437 }
2438
2439 void
2440 v3d_nir_to_vir(struct v3d_compile *c)
2441 {
2442 if (V3D_DEBUG & (V3D_DEBUG_NIR |
2443 v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
2444 fprintf(stderr, "%s prog %d/%d NIR:\n",
2445 vir_get_stage_name(c),
2446 c->program_id, c->variant_id);
2447 nir_print_shader(c->s, stderr);
2448 }
2449
2450 nir_to_vir(c);
2451
2452 /* Emit the last THRSW before STVPM and TLB writes. */
2453 vir_emit_last_thrsw(c);
2454
2455 switch (c->s->info.stage) {
2456 case MESA_SHADER_FRAGMENT:
2457 emit_frag_end(c);
2458 break;
2459 case MESA_SHADER_VERTEX:
2460 emit_vert_end(c);
2461 break;
2462 case MESA_SHADER_COMPUTE:
2463 break;
2464 default:
2465 unreachable("bad stage");
2466 }
2467
2468 if (V3D_DEBUG & (V3D_DEBUG_VIR |
2469 v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
2470 fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
2471 vir_get_stage_name(c),
2472 c->program_id, c->variant_id);
2473 vir_dump(c);
2474 fprintf(stderr, "\n");
2475 }
2476
2477 vir_optimize(c);
2478
2479 vir_check_payload_w(c);
2480
2481 /* XXX perf: On VC4, we do a VIR-level instruction scheduling here.
2482 * We used that on that platform to pipeline TMU writes and reduce the
2483 * number of thread switches, as well as try (mostly successfully) to
2484 * reduce maximum register pressure to allow more threads. We should
2485 * do something of that sort for V3D -- either instruction scheduling
2486          * here, or delay the THRSW and LDTMUs from our texture
2487 * instructions until the results are needed.
2488 */
2489
2490 if (V3D_DEBUG & (V3D_DEBUG_VIR |
2491 v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
2492 fprintf(stderr, "%s prog %d/%d VIR:\n",
2493 vir_get_stage_name(c),
2494 c->program_id, c->variant_id);
2495 vir_dump(c);
2496 fprintf(stderr, "\n");
2497 }
2498
2499 /* Attempt to allocate registers for the temporaries. If we fail,
2500 * reduce thread count and try again.
2501 */
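        /* A successful spill changes the program, so retry at the same
         * thread count; only when allocation fails outright do we halve the
         * thread count (never dropping below min_threads).
         */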
2502 int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
2503 struct qpu_reg *temp_registers;
2504 while (true) {
2505 bool spilled;
2506 temp_registers = v3d_register_allocate(c, &spilled);
2507 if (spilled)
2508 continue;
2509
2510 if (temp_registers)
2511 break;
2512
2513 if (c->threads == min_threads) {
2514 fprintf(stderr, "Failed to register allocate at %d threads:\n",
2515 c->threads);
2516 vir_dump(c);
2517 c->failed = true;
2518 return;
2519 }
2520
2521 c->threads /= 2;
2522
2523 if (c->threads == 1)
2524 vir_remove_thrsw(c);
2525 }
2526
2527 if (c->spill_size &&
2528 (V3D_DEBUG & (V3D_DEBUG_VIR |
2529 v3d_debug_flag_for_shader_stage(c->s->info.stage)))) {
2530 fprintf(stderr, "%s prog %d/%d spilled VIR:\n",
2531 vir_get_stage_name(c),
2532 c->program_id, c->variant_id);
2533 vir_dump(c);
2534 fprintf(stderr, "\n");
2535 }
2536
2537 v3d_vir_to_qpu(c, temp_registers);
2538 }