v3d: honor the write mask on store operations
[mesa.git] / src/broadcom/compiler/nir_to_vir.c
1 /*
2 * Copyright © 2016 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <inttypes.h>
25 #include "util/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "compiler/nir/nir.h"
31 #include "compiler/nir/nir_builder.h"
32 #include "common/v3d_device_info.h"
33 #include "v3d_compiler.h"
34
35 /* We don't do any address packing. */
36 #define __gen_user_data void
37 #define __gen_address_type uint32_t
38 #define __gen_address_offset(reloc) (*reloc)
39 #define __gen_emit_reloc(cl, reloc)
40 #include "cle/v3d_packet_v41_pack.h"
41
42 #define GENERAL_TMU_LOOKUP_PER_QUAD (0 << 7)
43 #define GENERAL_TMU_LOOKUP_PER_PIXEL (1 << 7)
44 #define GENERAL_TMU_LOOKUP_TYPE_8BIT_I (0 << 0)
45 #define GENERAL_TMU_LOOKUP_TYPE_16BIT_I (1 << 0)
46 #define GENERAL_TMU_LOOKUP_TYPE_VEC2 (2 << 0)
47 #define GENERAL_TMU_LOOKUP_TYPE_VEC3 (3 << 0)
48 #define GENERAL_TMU_LOOKUP_TYPE_VEC4 (4 << 0)
49 #define GENERAL_TMU_LOOKUP_TYPE_8BIT_UI (5 << 0)
50 #define GENERAL_TMU_LOOKUP_TYPE_16BIT_UI (6 << 0)
51 #define GENERAL_TMU_LOOKUP_TYPE_32BIT_UI (7 << 0)
52
53 #define V3D_TSY_SET_QUORUM 0
54 #define V3D_TSY_INC_WAITERS 1
55 #define V3D_TSY_DEC_WAITERS 2
56 #define V3D_TSY_INC_QUORUM 3
57 #define V3D_TSY_DEC_QUORUM 4
58 #define V3D_TSY_FREE_ALL 5
59 #define V3D_TSY_RELEASE 6
60 #define V3D_TSY_ACQUIRE 7
61 #define V3D_TSY_WAIT 8
62 #define V3D_TSY_WAIT_INC 9
63 #define V3D_TSY_WAIT_CHECK 10
64 #define V3D_TSY_WAIT_INC_CHECK 11
65 #define V3D_TSY_WAIT_CV 12
66 #define V3D_TSY_INC_SEMAPHORE 13
67 #define V3D_TSY_DEC_SEMAPHORE 14
68 #define V3D_TSY_SET_QUORUM_FREE_ALL 15
69
70 static void
71 ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
72
73 static void
74 resize_qreg_array(struct v3d_compile *c,
75 struct qreg **regs,
76 uint32_t *size,
77 uint32_t decl_size)
78 {
79 if (*size >= decl_size)
80 return;
81
82 uint32_t old_size = *size;
83 *size = MAX2(*size * 2, decl_size);
84 *regs = reralloc(c, *regs, struct qreg, *size);
85 if (!*regs) {
86 fprintf(stderr, "Malloc failure\n");
87 abort();
88 }
89
90 for (uint32_t i = old_size; i < *size; i++)
91 (*regs)[i] = c->undef;
92 }
93
94 void
95 vir_emit_thrsw(struct v3d_compile *c)
96 {
97 if (c->threads == 1)
98 return;
99
100 /* Always thread switch after each texture operation for now.
101 *
102 * We could do better by batching a bunch of texture fetches up and
103 * then doing one thread switch and collecting all their results
104 * afterward.
105 */
106 c->last_thrsw = vir_NOP(c);
107 c->last_thrsw->qpu.sig.thrsw = true;
108 c->last_thrsw_at_top_level = !c->in_control_flow;
109
110 /* We need to lock the scoreboard before any tlb access happens. If this
111 * thread switch comes after we have emitted a tlb load, then it means
112 * that we can't lock on the last thread switch any more.
113 */
114 if (c->emitted_tlb_load)
115 c->lock_scoreboard_on_first_thrsw = true;
116 }
117
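/* Returns the TMU op to use for an atomic add: a constant +1 or -1 operand
 * lets us use the dedicated increment/decrement ops, otherwise we fall back
 * to the generic add op.
 */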
118 uint32_t
119 v3d_get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src)
120 {
121 if (nir_src_is_const(instr->src[src])) {
122 int64_t add_val = nir_src_as_int(instr->src[src]);
123 if (add_val == 1)
124 return V3D_TMU_OP_WRITE_AND_READ_INC;
125 else if (add_val == -1)
126 return V3D_TMU_OP_WRITE_OR_READ_DEC;
127 }
128
129 return V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
130 }
131
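/* Maps a NIR memory-access intrinsic to the TMU operation field: plain
 * loads and stores use the regular op, while atomics map to their dedicated
 * read-modify-write ops (atomic_add may further specialize to inc/dec).
 */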
132 static uint32_t
133 v3d_general_tmu_op(nir_intrinsic_instr *instr)
134 {
135 switch (instr->intrinsic) {
136 case nir_intrinsic_load_ssbo:
137 case nir_intrinsic_load_ubo:
138 case nir_intrinsic_load_uniform:
139 case nir_intrinsic_load_shared:
140 case nir_intrinsic_load_scratch:
141 case nir_intrinsic_store_ssbo:
142 case nir_intrinsic_store_shared:
143 case nir_intrinsic_store_scratch:
144 return V3D_TMU_OP_REGULAR;
145 case nir_intrinsic_ssbo_atomic_add:
146 return v3d_get_op_for_atomic_add(instr, 2);
147 case nir_intrinsic_shared_atomic_add:
148 return v3d_get_op_for_atomic_add(instr, 1);
149 case nir_intrinsic_ssbo_atomic_imin:
150 case nir_intrinsic_shared_atomic_imin:
151 return V3D_TMU_OP_WRITE_SMIN;
152 case nir_intrinsic_ssbo_atomic_umin:
153 case nir_intrinsic_shared_atomic_umin:
154 return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
155 case nir_intrinsic_ssbo_atomic_imax:
156 case nir_intrinsic_shared_atomic_imax:
157 return V3D_TMU_OP_WRITE_SMAX;
158 case nir_intrinsic_ssbo_atomic_umax:
159 case nir_intrinsic_shared_atomic_umax:
160 return V3D_TMU_OP_WRITE_UMAX;
161 case nir_intrinsic_ssbo_atomic_and:
162 case nir_intrinsic_shared_atomic_and:
163 return V3D_TMU_OP_WRITE_AND_READ_INC;
164 case nir_intrinsic_ssbo_atomic_or:
165 case nir_intrinsic_shared_atomic_or:
166 return V3D_TMU_OP_WRITE_OR_READ_DEC;
167 case nir_intrinsic_ssbo_atomic_xor:
168 case nir_intrinsic_shared_atomic_xor:
169 return V3D_TMU_OP_WRITE_XOR_READ_NOT;
170 case nir_intrinsic_ssbo_atomic_exchange:
171 case nir_intrinsic_shared_atomic_exchange:
172 return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
173 case nir_intrinsic_ssbo_atomic_comp_swap:
174 case nir_intrinsic_shared_atomic_comp_swap:
175 return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
176 default:
177 unreachable("unknown intrinsic op");
178 }
179 }
180
181 /**
182 * Implements indirect uniform loads and SSBO accesses through the TMU general
183 * memory access interface.
184 */
185 static void
186 ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
187 bool is_shared_or_scratch)
188 {
189 uint32_t tmu_op = v3d_general_tmu_op(instr);
190
191 /* If we were able to replace an atomic_add with an inc/dec, then we
192 * can (and need to) do things slightly differently, such as not loading
193 * the amount to add/sub, as that is implicit.
194 */
195 bool atomic_add_replaced = ((instr->intrinsic == nir_intrinsic_ssbo_atomic_add ||
196 instr->intrinsic == nir_intrinsic_shared_atomic_add) &&
197 (tmu_op == V3D_TMU_OP_WRITE_AND_READ_INC ||
198 tmu_op == V3D_TMU_OP_WRITE_OR_READ_DEC));
199
200 bool is_store = (instr->intrinsic == nir_intrinsic_store_ssbo ||
201 instr->intrinsic == nir_intrinsic_store_scratch ||
202 instr->intrinsic == nir_intrinsic_store_shared);
203
204 bool is_load = (instr->intrinsic == nir_intrinsic_load_uniform ||
205 instr->intrinsic == nir_intrinsic_load_ubo ||
206 instr->intrinsic == nir_intrinsic_load_ssbo ||
207 instr->intrinsic == nir_intrinsic_load_scratch ||
208 instr->intrinsic == nir_intrinsic_load_shared);
209
210 bool has_index = !is_shared_or_scratch;
211
212 int offset_src;
213 if (instr->intrinsic == nir_intrinsic_load_uniform) {
214 offset_src = 0;
215 } else if (instr->intrinsic == nir_intrinsic_load_ssbo ||
216 instr->intrinsic == nir_intrinsic_load_ubo ||
217 instr->intrinsic == nir_intrinsic_load_scratch ||
218 instr->intrinsic == nir_intrinsic_load_shared ||
219 atomic_add_replaced) {
220 offset_src = 0 + has_index;
221 } else if (is_store) {
222 offset_src = 1 + has_index;
223 } else {
224 offset_src = 0 + has_index;
225 }
226
227 bool dynamic_src = !nir_src_is_const(instr->src[offset_src]);
228 uint32_t const_offset = 0;
229 if (!dynamic_src)
230 const_offset = nir_src_as_uint(instr->src[offset_src]);
231
232 struct qreg base_offset;
233 if (instr->intrinsic == nir_intrinsic_load_uniform) {
234 const_offset += nir_intrinsic_base(instr);
235 base_offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
236 v3d_unit_data_create(0, const_offset));
237 const_offset = 0;
238 } else if (instr->intrinsic == nir_intrinsic_load_ubo) {
239 uint32_t index = nir_src_as_uint(instr->src[0]) + 1;
240 /* Note that QUNIFORM_UBO_ADDR takes a UBO index shifted up by
241 * 1 (0 is gallium's constant buffer 0).
242 */
243 base_offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
244 v3d_unit_data_create(index, const_offset));
245 const_offset = 0;
246 } else if (is_shared_or_scratch) {
247 /* Shared and scratch variables have no buffer index, and all
248 * start from a common base that we set up at the start of
249 * dispatch.
250 */
251 if (instr->intrinsic == nir_intrinsic_load_scratch ||
252 instr->intrinsic == nir_intrinsic_store_scratch) {
253 base_offset = c->spill_base;
254 } else {
255 base_offset = c->cs_shared_offset;
256 const_offset += nir_intrinsic_base(instr);
257 }
258 } else {
259 base_offset = vir_uniform(c, QUNIFORM_SSBO_OFFSET,
260 nir_src_as_uint(instr->src[is_store ?
261 1 : 0]));
262 }
263
264 unsigned writemask = is_store ? nir_intrinsic_write_mask(instr) : 0;
265 uint32_t base_const_offset = const_offset;
266 int first_component = -1;
267 int last_component = -1;
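/* For stores, loop over the writemask and emit one TMU transaction per run
 * of consecutive enabled components; loads and atomics go through the loop
 * body exactly once.
 */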
268 do {
269 int tmu_writes = 1; /* address */
270
271 if (is_store) {
272 /* Find the first set of consecutive components that
273 * are enabled in the writemask and emit the TMUD
274 * instructions for them.
275 */
276 first_component = ffs(writemask) - 1;
277 last_component = first_component;
278 while (writemask & BITFIELD_BIT(last_component + 1))
279 last_component++;
280
281 assert(first_component >= 0 &&
282 first_component <= last_component &&
283 last_component < instr->num_components);
284
285 struct qreg tmud = vir_reg(QFILE_MAGIC,
286 V3D_QPU_WADDR_TMUD);
287 for (int i = first_component; i <= last_component; i++) {
288 struct qreg data =
289 ntq_get_src(c, instr->src[0], i);
290 vir_MOV_dest(c, tmud, data);
291 tmu_writes++;
292 }
293
294 /* Update the offset for the TMU write based on the
295 * first component we are writing.
296 */
297 const_offset = base_const_offset + first_component * 4;
298
299 /* Clear these components from the writemask */
300 uint32_t written_mask =
301 BITFIELD_RANGE(first_component, tmu_writes - 1);
302 writemask &= ~written_mask;
303 } else if (!is_load && !atomic_add_replaced) {
304 vir_MOV_dest(c,
305 vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
306 ntq_get_src(c, instr->src[1 + has_index], 0));
307 tmu_writes++;
308 if (tmu_op == V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH) {
309 vir_MOV_dest(c,
310 vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
311 ntq_get_src(c, instr->src[2 + has_index],
312 0));
313 tmu_writes++;
314 }
315 }
316
317 /* Make sure we won't exceed the 16-entry TMU fifo if each thread is
318 * storing at the same time.
319 */
320 while (tmu_writes > 16 / c->threads)
321 c->threads /= 2;
322
323 /* The spec says that for atomics, the TYPE field is ignored, but that
324 * doesn't seem to be the case for CMPXCHG. Just use the number of
325 * tmud writes we did to decide the type (or choose "32bit" for atomic
326 * reads, which has been fine).
327 */
328 uint32_t num_components;
329 if (is_load || atomic_add_replaced) {
330 num_components = instr->num_components;
331 } else {
332 assert(tmu_writes > 1);
333 num_components = tmu_writes - 1;
334 }
335
336 uint32_t config = (0xffffff00 |
337 tmu_op << 3|
338 GENERAL_TMU_LOOKUP_PER_PIXEL);
339 if (num_components == 1) {
340 config |= GENERAL_TMU_LOOKUP_TYPE_32BIT_UI;
341 } else {
342 config |= GENERAL_TMU_LOOKUP_TYPE_VEC2 + num_components - 2;
343 }
344
345 if (vir_in_nonuniform_control_flow(c)) {
346 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
347 V3D_QPU_PF_PUSHZ);
348 }
349
350 struct qreg tmua;
351 if (config == ~0)
352 tmua = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA);
353 else
354 tmua = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUAU);
355
356 struct qinst *tmu;
357 if (dynamic_src) {
358 struct qreg offset = base_offset;
359 if (const_offset != 0) {
360 offset = vir_ADD(c, offset,
361 vir_uniform_ui(c, const_offset));
362 }
363 tmu = vir_ADD_dest(c, tmua, offset,
364 ntq_get_src(c, instr->src[offset_src], 0));
365 } else {
366 if (const_offset != 0) {
367 tmu = vir_ADD_dest(c, tmua, base_offset,
368 vir_uniform_ui(c, const_offset));
369 } else {
370 tmu = vir_MOV_dest(c, tmua, base_offset);
371 }
372 }
373
374 if (config != ~0) {
375 tmu->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
376 config);
377 }
378
379 if (vir_in_nonuniform_control_flow(c))
380 vir_set_cond(tmu, V3D_QPU_COND_IFA);
381
382 vir_emit_thrsw(c);
383
384 /* Read the result, or wait for the TMU op to complete. */
385 for (int i = 0; i < nir_intrinsic_dest_components(instr); i++)
386 ntq_store_dest(c, &instr->dest, i, vir_MOV(c, vir_LDTMU(c)));
387
388 if (nir_intrinsic_dest_components(instr) == 0)
389 vir_TMUWT(c);
390 } while (is_store && writemask != 0);
391 }
392
393 static struct qreg *
394 ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
395 {
396 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
397 def->num_components);
398 _mesa_hash_table_insert(c->def_ht, def, qregs);
399 return qregs;
400 }
401
402 /**
403 * This function is responsible for getting VIR results into the associated
404 * storage for a NIR instruction.
405 *
406 * If it's a NIR SSA def, then we just set the associated hash table entry to
407 * the new result.
408 *
409 * If it's a NIR reg, then we need to update the existing qreg assigned to the
410 * NIR destination with the incoming value. To do that without introducing
411 * new MOVs, we require that the incoming qreg either be a uniform, or be
412 * SSA-defined by the previous VIR instruction in the block and rewritable by
413 * this function. That lets us sneak ahead and insert the SF flag beforehand
414 * (knowing that the previous instruction doesn't depend on flags) and rewrite
415 * its destination to be the NIR reg's destination
416 */
417 void
418 ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
419 struct qreg result)
420 {
421 struct qinst *last_inst = NULL;
422 if (!list_empty(&c->cur_block->instructions))
423 last_inst = (struct qinst *)c->cur_block->instructions.prev;
424
425 assert((result.file == QFILE_TEMP &&
426 last_inst && last_inst == c->defs[result.index]));
427
428 if (dest->is_ssa) {
429 assert(chan < dest->ssa.num_components);
430
431 struct qreg *qregs;
432 struct hash_entry *entry =
433 _mesa_hash_table_search(c->def_ht, &dest->ssa);
434
435 if (entry)
436 qregs = entry->data;
437 else
438 qregs = ntq_init_ssa_def(c, &dest->ssa);
439
440 qregs[chan] = result;
441 } else {
442 nir_register *reg = dest->reg.reg;
443 assert(dest->reg.base_offset == 0);
444 assert(reg->num_array_elems == 0);
445 struct hash_entry *entry =
446 _mesa_hash_table_search(c->def_ht, reg);
447 struct qreg *qregs = entry->data;
448
449 /* Insert a MOV if the source wasn't an SSA def in the
450 * previous instruction.
451 */
452 if ((vir_in_nonuniform_control_flow(c) &&
453 c->defs[last_inst->dst.index]->qpu.sig.ldunif)) {
454 result = vir_MOV(c, result);
455 last_inst = c->defs[result.index];
456 }
457
458 /* We know they're both temps, so just rewrite index. */
459 c->defs[last_inst->dst.index] = NULL;
460 last_inst->dst.index = qregs[chan].index;
461
462 /* If we're in control flow, then make this update of the reg
463 * conditional on the execution mask.
464 */
465 if (vir_in_nonuniform_control_flow(c)) {
466 last_inst->dst.index = qregs[chan].index;
467
468 /* Set the flags to the current exec mask.
469 */
470 c->cursor = vir_before_inst(last_inst);
471 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
472 V3D_QPU_PF_PUSHZ);
473 c->cursor = vir_after_inst(last_inst);
474
475 vir_set_cond(last_inst, V3D_QPU_COND_IFA);
476 }
477 }
478 }
479
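/* Returns the qreg holding component i of a NIR source, whether that source
 * is an SSA def or a NIR register.
 */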
480 struct qreg
481 ntq_get_src(struct v3d_compile *c, nir_src src, int i)
482 {
483 struct hash_entry *entry;
484 if (src.is_ssa) {
485 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
486 assert(i < src.ssa->num_components);
487 } else {
488 nir_register *reg = src.reg.reg;
489 entry = _mesa_hash_table_search(c->def_ht, reg);
490 assert(reg->num_array_elems == 0);
491 assert(src.reg.base_offset == 0);
492 assert(i < reg->num_components);
493 }
494
495 struct qreg *qregs = entry->data;
496 return qregs[i];
497 }
498
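/* Fetches the single source channel used by a scalarized ALU instruction,
 * i.e. the swizzle channel selected by the one bit set in the writemask.
 */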
499 static struct qreg
500 ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
501 unsigned src)
502 {
503 assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
504 unsigned chan = ffs(instr->dest.write_mask) - 1;
505 struct qreg r = ntq_get_src(c, instr->src[src].src,
506 instr->src[src].swizzle[chan]);
507
508 assert(!instr->src[src].abs);
509 assert(!instr->src[src].negate);
510
511 return r;
512 };
513
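/* Computes MAX(size >> level, 1): the minified dimension at the given LOD. */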
514 static struct qreg
515 ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
516 {
517 return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
518 }
519
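/* Implements nir_texop_txs by reading each dimension from a uniform and
 * minifying it by the requested LOD (array sizes and rect textures are not
 * minified).
 */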
520 static void
521 ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
522 {
523 unsigned unit = instr->texture_index;
524 int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
525 int dest_size = nir_tex_instr_dest_size(instr);
526
527 struct qreg lod = c->undef;
528 if (lod_index != -1)
529 lod = ntq_get_src(c, instr->src[lod_index].src, 0);
530
531 for (int i = 0; i < dest_size; i++) {
532 assert(i < 3);
533 enum quniform_contents contents;
534
535 if (instr->is_array && i == dest_size - 1)
536 contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
537 else
538 contents = QUNIFORM_TEXTURE_WIDTH + i;
539
540 struct qreg size = vir_uniform(c, contents, unit);
541
542 switch (instr->sampler_dim) {
543 case GLSL_SAMPLER_DIM_1D:
544 case GLSL_SAMPLER_DIM_2D:
545 case GLSL_SAMPLER_DIM_MS:
546 case GLSL_SAMPLER_DIM_3D:
547 case GLSL_SAMPLER_DIM_CUBE:
548 /* Don't minify the array size. */
549 if (!(instr->is_array && i == dest_size - 1)) {
550 size = ntq_minify(c, size, lod);
551 }
552 break;
553
554 case GLSL_SAMPLER_DIM_RECT:
555 /* There's no LOD field for rects */
556 break;
557
558 default:
559 unreachable("Bad sampler type");
560 }
561
562 ntq_store_dest(c, &instr->dest, i, size);
563 }
564 }
565
566 static void
567 ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
568 {
569 unsigned unit = instr->texture_index;
570
571 /* Since each texture sampling op already requires uploading uniforms to
572 * reference the texture, there's no HW support for querying the texture
573 * size; we just upload uniforms containing the size instead.
574 */
575 switch (instr->op) {
576 case nir_texop_query_levels:
577 ntq_store_dest(c, &instr->dest, 0,
578 vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
579 return;
580 case nir_texop_txs:
581 ntq_emit_txs(c, instr);
582 return;
583 default:
584 break;
585 }
586
587 if (c->devinfo->ver >= 40)
588 v3d40_vir_emit_tex(c, instr);
589 else
590 v3d33_vir_emit_tex(c, instr);
591 }
592
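/* Implements sin/cos using the SFU's SIN unit: the input is scaled by 1/pi
 * (with a 0.5 phase offset for cos), reduced to [-0.5, 0.5] by subtracting
 * the rounded period count, and the result's sign is flipped (by XORing the
 * sign bit) whenever the period count is odd.
 */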
593 static struct qreg
594 ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
595 {
596 struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
597 if (is_cos)
598 input = vir_FADD(c, input, vir_uniform_f(c, 0.5));
599
600 struct qreg periods = vir_FROUND(c, input);
601 struct qreg sin_output = vir_SIN(c, vir_FSUB(c, input, periods));
602 return vir_XOR(c, sin_output, vir_SHL(c,
603 vir_FTOIN(c, periods),
604 vir_uniform_ui(c, -1)));
605 }
606
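/* Computes sign(x) for floats: start from 0.0, switch to 1.0 if the source
 * is non-zero, then to -1.0 if it is negative.
 */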
607 static struct qreg
608 ntq_fsign(struct v3d_compile *c, struct qreg src)
609 {
610 struct qreg t = vir_get_temp(c);
611
612 vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
613 vir_set_pf(vir_FMOV_dest(c, vir_nop_reg(), src), V3D_QPU_PF_PUSHZ);
614 vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
615 vir_set_pf(vir_FMOV_dest(c, vir_nop_reg(), src), V3D_QPU_PF_PUSHN);
616 vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
617 return vir_MOV(c, t);
618 }
619
620 static void
621 emit_fragcoord_input(struct v3d_compile *c, int attr)
622 {
623 c->inputs[attr * 4 + 0] = vir_FXCD(c);
624 c->inputs[attr * 4 + 1] = vir_FYCD(c);
625 c->inputs[attr * 4 + 2] = c->payload_z;
626 c->inputs[attr * 4 + 3] = vir_RECIP(c, c->payload_w);
627 }
628
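/* Emits the ldvary sequence to fetch one scalar fragment varying. The
 * interpolated value is built as vary * W + r5 for perspective-correct
 * modes, vary + r5 for noperspective, and just r5 for flat shading (the
 * ldvary result still has to be consumed in that case).
 */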
629 static struct qreg
630 emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
631 uint8_t swizzle, int array_index)
632 {
633 struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
634 struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);
635
636 struct qreg vary;
637 if (c->devinfo->ver >= 41) {
638 struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
639 c->undef, c->undef);
640 ldvary->qpu.sig.ldvary = true;
641 vary = vir_emit_def(c, ldvary);
642 } else {
643 vir_NOP(c)->qpu.sig.ldvary = true;
644 vary = r3;
645 }
646
647 /* For gl_PointCoord input or distance along a line, we'll be called
648 * with no nir_variable, and since it doesn't count toward VPM size we
649 * don't track an input slot.
650 */
651 if (!var) {
652 return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
653 }
654
655 int i = c->num_inputs++;
656 c->input_slots[i] =
657 v3d_slot_from_slot_and_component(var->data.location +
658 array_index, swizzle);
659
660 switch (var->data.interpolation) {
661 case INTERP_MODE_NONE:
662 /* If a gl_FrontColor or gl_BackColor input has no interp
663 * qualifier, then if we're using glShadeModel(GL_FLAT) it
664 * needs to be flat shaded.
665 */
666 switch (var->data.location + array_index) {
667 case VARYING_SLOT_COL0:
668 case VARYING_SLOT_COL1:
669 case VARYING_SLOT_BFC0:
670 case VARYING_SLOT_BFC1:
671 if (c->fs_key->shade_model_flat) {
672 BITSET_SET(c->flat_shade_flags, i);
673 vir_MOV_dest(c, c->undef, vary);
674 return vir_MOV(c, r5);
675 } else {
676 return vir_FADD(c, vir_FMUL(c, vary,
677 c->payload_w), r5);
678 }
679 default:
680 break;
681 }
682 /* FALLTHROUGH */
683 case INTERP_MODE_SMOOTH:
684 if (var->data.centroid) {
685 BITSET_SET(c->centroid_flags, i);
686 return vir_FADD(c, vir_FMUL(c, vary,
687 c->payload_w_centroid), r5);
688 } else {
689 return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
690 }
691 case INTERP_MODE_NOPERSPECTIVE:
692 BITSET_SET(c->noperspective_flags, i);
693 return vir_FADD(c, vir_MOV(c, vary), r5);
694 case INTERP_MODE_FLAT:
695 BITSET_SET(c->flat_shade_flags, i);
696 vir_MOV_dest(c, c->undef, vary);
697 return vir_MOV(c, r5);
698 default:
699 unreachable("Bad interp mode");
700 }
701 }
702
703 static void
704 emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var,
705 int array_index)
706 {
707 for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
708 int chan = var->data.location_frac + i;
709 c->inputs[attr * 4 + chan] =
710 emit_fragment_varying(c, var, chan, array_index);
711 }
712 }
713
714 static void
715 add_output(struct v3d_compile *c,
716 uint32_t decl_offset,
717 uint8_t slot,
718 uint8_t swizzle)
719 {
720 uint32_t old_array_size = c->outputs_array_size;
721 resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
722 decl_offset + 1);
723
724 if (old_array_size != c->outputs_array_size) {
725 c->output_slots = reralloc(c,
726 c->output_slots,
727 struct v3d_varying_slot,
728 c->outputs_array_size);
729 }
730
731 c->output_slots[decl_offset] =
732 v3d_slot_from_slot_and_component(slot, swizzle);
733 }
734
735 /**
736 * If compare_instr is a valid comparison instruction, emits the
737 * comparison and returns true, storing in *out_cond the condition code
738 * that will hold its result.
739 */
740 static bool
741 ntq_emit_comparison(struct v3d_compile *c,
742 nir_alu_instr *compare_instr,
743 enum v3d_qpu_cond *out_cond)
744 {
745 struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
746 struct qreg src1;
747 if (nir_op_infos[compare_instr->op].num_inputs > 1)
748 src1 = ntq_get_alu_src(c, compare_instr, 1);
749 bool cond_invert = false;
750 struct qreg nop = vir_nop_reg();
751
752 switch (compare_instr->op) {
753 case nir_op_feq32:
754 case nir_op_seq:
755 vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
756 break;
757 case nir_op_ieq32:
758 vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
759 break;
760
761 case nir_op_fne32:
762 case nir_op_sne:
763 vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
764 cond_invert = true;
765 break;
766 case nir_op_ine32:
767 vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
768 cond_invert = true;
769 break;
770
771 case nir_op_fge32:
772 case nir_op_sge:
773 vir_set_pf(vir_FCMP_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
774 break;
775 case nir_op_ige32:
776 vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
777 cond_invert = true;
778 break;
779 case nir_op_uge32:
780 vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
781 cond_invert = true;
782 break;
783
784 case nir_op_slt:
785 case nir_op_flt32:
786 vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHN);
787 break;
788 case nir_op_ilt32:
789 vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
790 break;
791 case nir_op_ult32:
792 vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
793 break;
794
795 case nir_op_i2b32:
796 vir_set_pf(vir_MOV_dest(c, nop, src0), V3D_QPU_PF_PUSHZ);
797 cond_invert = true;
798 break;
799
800 case nir_op_f2b32:
801 vir_set_pf(vir_FMOV_dest(c, nop, src0), V3D_QPU_PF_PUSHZ);
802 cond_invert = true;
803 break;
804
805 default:
806 return false;
807 }
808
809 *out_cond = cond_invert ? V3D_QPU_COND_IFNA : V3D_QPU_COND_IFA;
810
811 return true;
812 }
813
814 /* Finds an ALU instruction that generates our src value that could
815 * (potentially) be greedily emitted in the consuming instruction.
816 */
817 static struct nir_alu_instr *
818 ntq_get_alu_parent(nir_src src)
819 {
820 if (!src.is_ssa || src.ssa->parent_instr->type != nir_instr_type_alu)
821 return NULL;
822 nir_alu_instr *instr = nir_instr_as_alu(src.ssa->parent_instr);
823 if (!instr)
824 return NULL;
825
826 /* If the ALU instr's srcs are non-SSA, then we would have to avoid
827 * moving emission of the ALU instr down past another write of the
828 * src.
829 */
830 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
831 if (!instr->src[i].src.is_ssa)
832 return NULL;
833 }
834
835 return instr;
836 }
837
838 /* Turns a NIR bool into a condition code to predicate on. */
839 static enum v3d_qpu_cond
840 ntq_emit_bool_to_cond(struct v3d_compile *c, nir_src src)
841 {
842 nir_alu_instr *compare = ntq_get_alu_parent(src);
843 if (!compare)
844 goto out;
845
846 enum v3d_qpu_cond cond;
847 if (ntq_emit_comparison(c, compare, &cond))
848 return cond;
849
850 out:
851 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), ntq_get_src(c, src, 0)),
852 V3D_QPU_PF_PUSHZ);
853 return V3D_QPU_COND_IFNA;
854 }
855
856 static void
857 ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
858 {
859 /* This should always be lowered to ALU operations for V3D. */
860 assert(!instr->dest.saturate);
861
862 /* Vectors are special in that they have non-scalarized writemasks:
863 * the first swizzle channel of each argument is moved, in order, into
864 * the corresponding writemask channel.
865 */
866 if (instr->op == nir_op_vec2 ||
867 instr->op == nir_op_vec3 ||
868 instr->op == nir_op_vec4) {
869 struct qreg srcs[4];
870 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
871 srcs[i] = ntq_get_src(c, instr->src[i].src,
872 instr->src[i].swizzle[0]);
873 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
874 ntq_store_dest(c, &instr->dest.dest, i,
875 vir_MOV(c, srcs[i]));
876 return;
877 }
878
879 /* General case: We can just grab the one used channel per src. */
880 struct qreg src[nir_op_infos[instr->op].num_inputs];
881 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
882 src[i] = ntq_get_alu_src(c, instr, i);
883 }
884
885 struct qreg result;
886
887 switch (instr->op) {
888 case nir_op_mov:
889 result = vir_MOV(c, src[0]);
890 break;
891
892 case nir_op_fneg:
893 result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
894 break;
895 case nir_op_ineg:
896 result = vir_NEG(c, src[0]);
897 break;
898
899 case nir_op_fmul:
900 result = vir_FMUL(c, src[0], src[1]);
901 break;
902 case nir_op_fadd:
903 result = vir_FADD(c, src[0], src[1]);
904 break;
905 case nir_op_fsub:
906 result = vir_FSUB(c, src[0], src[1]);
907 break;
908 case nir_op_fmin:
909 result = vir_FMIN(c, src[0], src[1]);
910 break;
911 case nir_op_fmax:
912 result = vir_FMAX(c, src[0], src[1]);
913 break;
914
915 case nir_op_f2i32: {
916 nir_alu_instr *src0_alu = ntq_get_alu_parent(instr->src[0].src);
917 if (src0_alu && src0_alu->op == nir_op_fround_even) {
918 result = vir_FTOIN(c, ntq_get_alu_src(c, src0_alu, 0));
919 } else {
920 result = vir_FTOIZ(c, src[0]);
921 }
922 break;
923 }
924
925 case nir_op_f2u32:
926 result = vir_FTOUZ(c, src[0]);
927 break;
928 case nir_op_i2f32:
929 result = vir_ITOF(c, src[0]);
930 break;
931 case nir_op_u2f32:
932 result = vir_UTOF(c, src[0]);
933 break;
934 case nir_op_b2f32:
935 result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
936 break;
937 case nir_op_b2i32:
938 result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
939 break;
940
941 case nir_op_iadd:
942 result = vir_ADD(c, src[0], src[1]);
943 break;
944 case nir_op_ushr:
945 result = vir_SHR(c, src[0], src[1]);
946 break;
947 case nir_op_isub:
948 result = vir_SUB(c, src[0], src[1]);
949 break;
950 case nir_op_ishr:
951 result = vir_ASR(c, src[0], src[1]);
952 break;
953 case nir_op_ishl:
954 result = vir_SHL(c, src[0], src[1]);
955 break;
956 case nir_op_imin:
957 result = vir_MIN(c, src[0], src[1]);
958 break;
959 case nir_op_umin:
960 result = vir_UMIN(c, src[0], src[1]);
961 break;
962 case nir_op_imax:
963 result = vir_MAX(c, src[0], src[1]);
964 break;
965 case nir_op_umax:
966 result = vir_UMAX(c, src[0], src[1]);
967 break;
968 case nir_op_iand:
969 result = vir_AND(c, src[0], src[1]);
970 break;
971 case nir_op_ior:
972 result = vir_OR(c, src[0], src[1]);
973 break;
974 case nir_op_ixor:
975 result = vir_XOR(c, src[0], src[1]);
976 break;
977 case nir_op_inot:
978 result = vir_NOT(c, src[0]);
979 break;
980
981 case nir_op_ufind_msb:
982 result = vir_SUB(c, vir_uniform_ui(c, 31), vir_CLZ(c, src[0]));
983 break;
984
985 case nir_op_imul:
986 result = vir_UMUL(c, src[0], src[1]);
987 break;
988
989 case nir_op_seq:
990 case nir_op_sne:
991 case nir_op_sge:
992 case nir_op_slt: {
993 enum v3d_qpu_cond cond;
994 ASSERTED bool ok = ntq_emit_comparison(c, instr, &cond);
995 assert(ok);
996 result = vir_MOV(c, vir_SEL(c, cond,
997 vir_uniform_f(c, 1.0),
998 vir_uniform_f(c, 0.0)));
999 break;
1000 }
1001
1002 case nir_op_i2b32:
1003 case nir_op_f2b32:
1004 case nir_op_feq32:
1005 case nir_op_fne32:
1006 case nir_op_fge32:
1007 case nir_op_flt32:
1008 case nir_op_ieq32:
1009 case nir_op_ine32:
1010 case nir_op_ige32:
1011 case nir_op_uge32:
1012 case nir_op_ilt32:
1013 case nir_op_ult32: {
1014 enum v3d_qpu_cond cond;
1015 ASSERTED bool ok = ntq_emit_comparison(c, instr, &cond);
1016 assert(ok);
1017 result = vir_MOV(c, vir_SEL(c, cond,
1018 vir_uniform_ui(c, ~0),
1019 vir_uniform_ui(c, 0)));
1020 break;
1021 }
1022
1023 case nir_op_b32csel:
1024 result = vir_MOV(c,
1025 vir_SEL(c,
1026 ntq_emit_bool_to_cond(c, instr->src[0].src),
1027 src[1], src[2]));
1028 break;
1029
1030 case nir_op_fcsel:
1031 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), src[0]),
1032 V3D_QPU_PF_PUSHZ);
1033 result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
1034 src[1], src[2]));
1035 break;
1036
1037 case nir_op_frcp:
1038 result = vir_RECIP(c, src[0]);
1039 break;
1040 case nir_op_frsq:
1041 result = vir_RSQRT(c, src[0]);
1042 break;
1043 case nir_op_fexp2:
1044 result = vir_EXP(c, src[0]);
1045 break;
1046 case nir_op_flog2:
1047 result = vir_LOG(c, src[0]);
1048 break;
1049
1050 case nir_op_fceil:
1051 result = vir_FCEIL(c, src[0]);
1052 break;
1053 case nir_op_ffloor:
1054 result = vir_FFLOOR(c, src[0]);
1055 break;
1056 case nir_op_fround_even:
1057 result = vir_FROUND(c, src[0]);
1058 break;
1059 case nir_op_ftrunc:
1060 result = vir_FTRUNC(c, src[0]);
1061 break;
1062
1063 case nir_op_fsin:
1064 result = ntq_fsincos(c, src[0], false);
1065 break;
1066 case nir_op_fcos:
1067 result = ntq_fsincos(c, src[0], true);
1068 break;
1069
1070 case nir_op_fsign:
1071 result = ntq_fsign(c, src[0]);
1072 break;
1073
1074 case nir_op_fabs: {
1075 result = vir_FMOV(c, src[0]);
1076 vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
1077 break;
1078 }
1079
1080 case nir_op_iabs:
1081 result = vir_MAX(c, src[0], vir_NEG(c, src[0]));
1082 break;
1083
1084 case nir_op_fddx:
1085 case nir_op_fddx_coarse:
1086 case nir_op_fddx_fine:
1087 result = vir_FDX(c, src[0]);
1088 break;
1089
1090 case nir_op_fddy:
1091 case nir_op_fddy_coarse:
1092 case nir_op_fddy_fine:
1093 result = vir_FDY(c, src[0]);
1094 break;
1095
1096 case nir_op_uadd_carry:
1097 vir_set_pf(vir_ADD_dest(c, vir_nop_reg(), src[0], src[1]),
1098 V3D_QPU_PF_PUSHC);
1099 result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
1100 vir_uniform_ui(c, ~0),
1101 vir_uniform_ui(c, 0)));
1102 break;
1103
1104 case nir_op_pack_half_2x16_split:
1105 result = vir_VFPACK(c, src[0], src[1]);
1106 break;
1107
1108 case nir_op_unpack_half_2x16_split_x:
1109 result = vir_FMOV(c, src[0]);
1110 vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_L);
1111 break;
1112
1113 case nir_op_unpack_half_2x16_split_y:
1114 result = vir_FMOV(c, src[0]);
1115 vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_H);
1116 break;
1117
1118 default:
1119 fprintf(stderr, "unknown NIR ALU inst: ");
1120 nir_print_instr(&instr->instr, stderr);
1121 fprintf(stderr, "\n");
1122 abort();
1123 }
1124
1125 /* We have a scalar result, so the instruction should only have a
1126 * single channel written to.
1127 */
1128 assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
1129 ntq_store_dest(c, &instr->dest.dest,
1130 ffs(instr->dest.write_mask) - 1, result);
1131 }
1132
1133 /* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
1134 * specifier. They come from a register that's preloaded with 0xffffffff
1135 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
1136 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
1137 */
1138 #define TLB_TYPE_F16_COLOR (3 << 6)
1139 #define TLB_TYPE_I32_COLOR (1 << 6)
1140 #define TLB_TYPE_F32_COLOR (0 << 6)
1141 #define TLB_RENDER_TARGET_SHIFT 3 /* Reversed! 7 = RT 0, 0 = RT 7. */
1142 #define TLB_SAMPLE_MODE_PER_SAMPLE (0 << 2)
1143 #define TLB_SAMPLE_MODE_PER_PIXEL (1 << 2)
1144 #define TLB_F16_SWAP_HI_LO (1 << 1)
1145 #define TLB_VEC_SIZE_4_F16 (1 << 0)
1146 #define TLB_VEC_SIZE_2_F16 (0 << 0)
1147 #define TLB_VEC_SIZE_MINUS_1_SHIFT 0
1148
1149 /* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
1150 * flag is set.
1151 */
1152 #define TLB_TYPE_DEPTH ((2 << 6) | (0 << 4))
1153 #define TLB_DEPTH_TYPE_INVARIANT (0 << 2) /* Unmodified sideband input used */
1154 #define TLB_DEPTH_TYPE_PER_PIXEL (1 << 2) /* QPU result used */
1155 #define TLB_V42_DEPTH_TYPE_INVARIANT (0 << 3) /* Unmodified sideband input used */
1156 #define TLB_V42_DEPTH_TYPE_PER_PIXEL (1 << 3) /* QPU result used */
1157
1158 /* Stencil is a single 32-bit write. */
1159 #define TLB_TYPE_STENCIL_ALPHA ((2 << 6) | (1 << 4))
1160
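/* Emits the TLB color write(s) for one render target: builds the 8-bit
 * config specifier (type, vec size, per-sample mode, reversed RT index) and
 * writes either one 32-bit value per component or f16 values packed two
 * components per write, repeating per sample when per-sample output is
 * enabled.
 */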
1161 static void
1162 vir_emit_tlb_color_write(struct v3d_compile *c, unsigned rt)
1163 {
1164 if (!(c->fs_key->cbufs & (1 << rt)) || !c->output_color_var[rt])
1165 return;
1166
1167 struct qreg tlb_reg = vir_magic_reg(V3D_QPU_WADDR_TLB);
1168 struct qreg tlbu_reg = vir_magic_reg(V3D_QPU_WADDR_TLBU);
1169
1170 nir_variable *var = c->output_color_var[rt];
1171 int num_components = glsl_get_vector_elements(var->type);
1172 uint32_t conf = 0xffffff00;
1173 struct qinst *inst;
1174
1175 conf |= c->msaa_per_sample_output ? TLB_SAMPLE_MODE_PER_SAMPLE :
1176 TLB_SAMPLE_MODE_PER_PIXEL;
1177 conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;
1178
1179 if (c->fs_key->swap_color_rb & (1 << rt))
1180 num_components = MAX2(num_components, 3);
1181 assert(num_components != 0);
1182
1183 enum glsl_base_type type = glsl_get_base_type(var->type);
1184 bool is_int_format = type == GLSL_TYPE_INT || type == GLSL_TYPE_UINT;
1185 bool is_32b_tlb_format = is_int_format ||
1186 (c->fs_key->f32_color_rb & (1 << rt));
1187
1188 if (is_int_format) {
1189 /* The F32 vs I32 distinction was dropped in 4.2. */
1190 if (c->devinfo->ver < 42)
1191 conf |= TLB_TYPE_I32_COLOR;
1192 else
1193 conf |= TLB_TYPE_F32_COLOR;
1194 conf |= ((num_components - 1) << TLB_VEC_SIZE_MINUS_1_SHIFT);
1195 } else {
1196 if (c->fs_key->f32_color_rb & (1 << rt)) {
1197 conf |= TLB_TYPE_F32_COLOR;
1198 conf |= ((num_components - 1) <<
1199 TLB_VEC_SIZE_MINUS_1_SHIFT);
1200 } else {
1201 conf |= TLB_TYPE_F16_COLOR;
1202 conf |= TLB_F16_SWAP_HI_LO;
1203 if (num_components >= 3)
1204 conf |= TLB_VEC_SIZE_4_F16;
1205 else
1206 conf |= TLB_VEC_SIZE_2_F16;
1207 }
1208 }
1209
1210 int num_samples = c->msaa_per_sample_output ? V3D_MAX_SAMPLES : 1;
1211 for (int i = 0; i < num_samples; i++) {
1212 struct qreg *color = c->msaa_per_sample_output ?
1213 &c->sample_colors[(rt * V3D_MAX_SAMPLES + i) * 4] :
1214 &c->outputs[var->data.driver_location * 4];
1215
1216 struct qreg r = color[0];
1217 struct qreg g = color[1];
1218 struct qreg b = color[2];
1219 struct qreg a = color[3];
1220
1221 if (c->fs_key->swap_color_rb & (1 << rt)) {
1222 r = color[2];
1223 b = color[0];
1224 }
1225
1226 if (c->fs_key->sample_alpha_to_one)
1227 a = vir_uniform_f(c, 1.0);
1228
1229 if (is_32b_tlb_format) {
1230 if (i == 0) {
1231 inst = vir_MOV_dest(c, tlbu_reg, r);
1232 inst->uniform =
1233 vir_get_uniform_index(c,
1234 QUNIFORM_CONSTANT,
1235 conf);
1236 } else {
1237 inst = vir_MOV_dest(c, tlb_reg, r);
1238 }
1239
1240 if (num_components >= 2)
1241 vir_MOV_dest(c, tlb_reg, g);
1242 if (num_components >= 3)
1243 vir_MOV_dest(c, tlb_reg, b);
1244 if (num_components >= 4)
1245 vir_MOV_dest(c, tlb_reg, a);
1246 } else {
1247 inst = vir_VFPACK_dest(c, tlb_reg, r, g);
1248 if (conf != ~0 && i == 0) {
1249 inst->dst = tlbu_reg;
1250 inst->uniform =
1251 vir_get_uniform_index(c,
1252 QUNIFORM_CONSTANT,
1253 conf);
1254 }
1255
1256 if (num_components >= 3)
1257 inst = vir_VFPACK_dest(c, tlb_reg, b, a);
1258 }
1259 }
1260 }
1261
1262 static void
1263 emit_frag_end(struct v3d_compile *c)
1264 {
1265 /* XXX
1266 if (c->output_sample_mask_index != -1) {
1267 vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1268 }
1269 */
1270
1271 bool has_any_tlb_color_write = false;
1272 for (int rt = 0; rt < V3D_MAX_DRAW_BUFFERS; rt++) {
1273 if (c->fs_key->cbufs & (1 << rt) && c->output_color_var[rt])
1274 has_any_tlb_color_write = true;
1275 }
1276
1277 if (c->fs_key->sample_alpha_to_coverage && c->output_color_var[0]) {
1278 struct nir_variable *var = c->output_color_var[0];
1279 struct qreg *color = &c->outputs[var->data.driver_location * 4];
1280
1281 vir_SETMSF_dest(c, vir_nop_reg(),
1282 vir_AND(c,
1283 vir_MSF(c),
1284 vir_FTOC(c, color[3])));
1285 }
1286
1287 struct qreg tlbu_reg = vir_magic_reg(V3D_QPU_WADDR_TLBU);
1288 if (c->output_position_index != -1) {
1289 struct qinst *inst = vir_MOV_dest(c, tlbu_reg,
1290 c->outputs[c->output_position_index]);
1291 uint8_t tlb_specifier = TLB_TYPE_DEPTH;
1292
1293 if (c->devinfo->ver >= 42) {
1294 tlb_specifier |= (TLB_V42_DEPTH_TYPE_PER_PIXEL |
1295 TLB_SAMPLE_MODE_PER_PIXEL);
1296 } else
1297 tlb_specifier |= TLB_DEPTH_TYPE_PER_PIXEL;
1298
1299 inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
1300 tlb_specifier |
1301 0xffffff00);
1302 c->writes_z = true;
1303 } else if (c->s->info.fs.uses_discard ||
1304 !c->s->info.fs.early_fragment_tests ||
1305 c->fs_key->sample_alpha_to_coverage ||
1306 !has_any_tlb_color_write) {
1307 /* Emit passthrough Z if it needed to be delayed until shader
1308 * end due to potential discards.
1309 *
1310 * Since (single-threaded) fragment shaders always need a TLB
1311 * write, emit passthrough Z if we didn't have any color
1312 * buffers, and flag ourselves as potentially discarding, so that we
1313 * can use Z as the TLB write.
1314 */
1315 c->s->info.fs.uses_discard = true;
1316
1317 struct qinst *inst = vir_MOV_dest(c, tlbu_reg,
1318 vir_nop_reg());
1319 uint8_t tlb_specifier = TLB_TYPE_DEPTH;
1320
1321 if (c->devinfo->ver >= 42) {
1322 /* The spec says the PER_PIXEL flag is ignored for
1323 * invariant writes, but the simulator demands it.
1324 */
1325 tlb_specifier |= (TLB_V42_DEPTH_TYPE_INVARIANT |
1326 TLB_SAMPLE_MODE_PER_PIXEL);
1327 } else {
1328 tlb_specifier |= TLB_DEPTH_TYPE_INVARIANT;
1329 }
1330
1331 inst->uniform = vir_get_uniform_index(c,
1332 QUNIFORM_CONSTANT,
1333 tlb_specifier |
1334 0xffffff00);
1335 c->writes_z = true;
1336 }
1337
1338 /* XXX: Performance improvement: Merge Z write and color writes TLB
1339 * uniform setup
1340 */
1341 for (int rt = 0; rt < V3D_MAX_DRAW_BUFFERS; rt++)
1342 vir_emit_tlb_color_write(c, rt);
1343 }
1344
1345 static void
1346 vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t vpm_index)
1347 {
1348 if (c->devinfo->ver >= 40) {
1349 vir_STVPMV(c, vir_uniform_ui(c, vpm_index), val);
1350 } else {
1351 /* XXX: v3d33_vir_vpm_write_setup(c); */
1352 vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
1353 }
1354 }
1355
1356 static void
1357 emit_vert_end(struct v3d_compile *c)
1358 {
1359 /* GFXH-1684: VPM writes need to be complete by the end of the shader.
1360 */
1361 if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
1362 vir_VPMWT(c);
1363 }
1364
1365 void
1366 v3d_optimize_nir(struct nir_shader *s)
1367 {
1368 bool progress;
1369 unsigned lower_flrp =
1370 (s->options->lower_flrp16 ? 16 : 0) |
1371 (s->options->lower_flrp32 ? 32 : 0) |
1372 (s->options->lower_flrp64 ? 64 : 0);
1373
1374 do {
1375 progress = false;
1376
1377 NIR_PASS_V(s, nir_lower_vars_to_ssa);
1378 NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL);
1379 NIR_PASS(progress, s, nir_lower_phis_to_scalar);
1380 NIR_PASS(progress, s, nir_copy_prop);
1381 NIR_PASS(progress, s, nir_opt_remove_phis);
1382 NIR_PASS(progress, s, nir_opt_dce);
1383 NIR_PASS(progress, s, nir_opt_dead_cf);
1384 NIR_PASS(progress, s, nir_opt_cse);
1385 NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
1386 NIR_PASS(progress, s, nir_opt_algebraic);
1387 NIR_PASS(progress, s, nir_opt_constant_folding);
1388
1389 if (lower_flrp != 0) {
1390 bool lower_flrp_progress = false;
1391
1392 NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
1393 lower_flrp,
1394 false /* always_precise */,
1395 s->options->lower_ffma);
1396 if (lower_flrp_progress) {
1397 NIR_PASS(progress, s, nir_opt_constant_folding);
1398 progress = true;
1399 }
1400
1401 /* Nothing should rematerialize any flrps, so we only
1402 * need to do this lowering once.
1403 */
1404 lower_flrp = 0;
1405 }
1406
1407 NIR_PASS(progress, s, nir_opt_undef);
1408 } while (progress);
1409
1410 NIR_PASS(progress, s, nir_opt_move, nir_move_load_ubo);
1411 }
1412
1413 static int
1414 driver_location_compare(const void *in_a, const void *in_b)
1415 {
1416 const nir_variable *const *a = in_a;
1417 const nir_variable *const *b = in_b;
1418
1419 return (*a)->data.driver_location - (*b)->data.driver_location;
1420 }
1421
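/* Reads the next input component from the VPM. On V3D 4.x this is a LDVPMV
 * at an incrementing uniform offset; on 3.3 components are read from the VPM
 * register after queueing a read setup of up to 32 components at a time.
 */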
1422 static struct qreg
1423 ntq_emit_vpm_read(struct v3d_compile *c,
1424 uint32_t *num_components_queued,
1425 uint32_t *remaining,
1426 uint32_t vpm_index)
1427 {
1428 struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);
1429
1430 if (c->devinfo->ver >= 40 ) {
1431 return vir_LDVPMV_IN(c,
1432 vir_uniform_ui(c,
1433 (*num_components_queued)++));
1434 }
1435
1436 if (*num_components_queued != 0) {
1437 (*num_components_queued)--;
1438 return vir_MOV(c, vpm);
1439 }
1440
1441 uint32_t num_components = MIN2(*remaining, 32);
1442
1443 v3d33_vir_vpm_read_setup(c, num_components);
1444
1445 *num_components_queued = num_components - 1;
1446 *remaining -= num_components;
1447
1448 return vir_MOV(c, vpm);
1449 }
1450
1451 static void
1452 ntq_setup_vpm_inputs(struct v3d_compile *c)
1453 {
1454 /* Figure out how many components of each vertex attribute the shader
1455 * uses. Each variable should have been split to individual
1456 * components and unused ones DCEed. The vertex fetcher will load
1457 * from the start of the attribute to the number of components we
1458 * declare we need in c->vattr_sizes[].
1459 */
1460 nir_foreach_variable(var, &c->s->inputs) {
1461 /* No VS attribute array support. */
1462 assert(MAX2(glsl_get_length(var->type), 1) == 1);
1463
1464 unsigned loc = var->data.driver_location;
1465 int start_component = var->data.location_frac;
1466 int num_components = glsl_get_components(var->type);
1467
1468 c->vattr_sizes[loc] = MAX2(c->vattr_sizes[loc],
1469 start_component + num_components);
1470 }
1471
1472 unsigned num_components = 0;
1473 uint32_t vpm_components_queued = 0;
1474 bool uses_iid = c->s->info.system_values_read &
1475 (1ull << SYSTEM_VALUE_INSTANCE_ID);
1476 bool uses_vid = c->s->info.system_values_read &
1477 (1ull << SYSTEM_VALUE_VERTEX_ID);
1478 num_components += uses_iid;
1479 num_components += uses_vid;
1480
1481 for (int i = 0; i < ARRAY_SIZE(c->vattr_sizes); i++)
1482 num_components += c->vattr_sizes[i];
1483
1484 if (uses_iid) {
1485 c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
1486 &num_components, ~0);
1487 }
1488
1489 if (uses_vid) {
1490 c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
1491 &num_components, ~0);
1492 }
1493
1494 /* The actual loads will happen directly in nir_intrinsic_load_input
1495 * on newer versions.
1496 */
1497 if (c->devinfo->ver >= 40)
1498 return;
1499
1500 for (int loc = 0; loc < ARRAY_SIZE(c->vattr_sizes); loc++) {
1501 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1502 (loc + 1) * 4);
1503
1504 for (int i = 0; i < c->vattr_sizes[loc]; i++) {
1505 c->inputs[loc * 4 + i] =
1506 ntq_emit_vpm_read(c,
1507 &vpm_components_queued,
1508 &num_components,
1509 loc * 4 + i);
1510
1511 }
1512 }
1513
1514 if (c->devinfo->ver >= 40) {
1515 assert(vpm_components_queued == num_components);
1516 } else {
1517 assert(vpm_components_queued == 0);
1518 assert(num_components == 0);
1519 }
1520 }
1521
1522 static bool
1523 var_needs_point_coord(struct v3d_compile *c, nir_variable *var)
1524 {
1525 return (var->data.location == VARYING_SLOT_PNTC ||
1526 (var->data.location >= VARYING_SLOT_VAR0 &&
1527 (c->fs_key->point_sprite_mask &
1528 (1 << (var->data.location - VARYING_SLOT_VAR0)))));
1529 }
1530
1531 static bool
1532 program_reads_point_coord(struct v3d_compile *c)
1533 {
1534 nir_foreach_variable(var, &c->s->inputs) {
1535 if (var_needs_point_coord(c, var))
1536 return true;
1537 }
1538
1539 return false;
1540 }
1541
1542 static void
1543 ntq_setup_fs_inputs(struct v3d_compile *c)
1544 {
1545 unsigned num_entries = 0;
1546 unsigned num_components = 0;
1547 nir_foreach_variable(var, &c->s->inputs) {
1548 num_entries++;
1549 num_components += glsl_get_components(var->type);
1550 }
1551
1552 nir_variable *vars[num_entries];
1553
1554 unsigned i = 0;
1555 nir_foreach_variable(var, &c->s->inputs)
1556 vars[i++] = var;
1557
1558 /* Sort the variables so that we emit the input setup in
1559 * driver_location order. This is required for VPM reads, whose data
1560 * is fetched into the VPM in driver_location (TGSI register index)
1561 * order.
1562 */
1563 qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1564
1565 for (unsigned i = 0; i < num_entries; i++) {
1566 nir_variable *var = vars[i];
1567 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1568 unsigned loc = var->data.driver_location;
1569
1570 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1571 (loc + array_len) * 4);
1572
1573 if (var->data.location == VARYING_SLOT_POS) {
1574 emit_fragcoord_input(c, loc);
1575 } else if (var_needs_point_coord(c, var)) {
1576 c->inputs[loc * 4 + 0] = c->point_x;
1577 c->inputs[loc * 4 + 1] = c->point_y;
1578 } else {
1579 for (int j = 0; j < array_len; j++)
1580 emit_fragment_input(c, loc + j, var, j);
1581 }
1582 }
1583 }
1584
1585 static void
1586 ntq_setup_outputs(struct v3d_compile *c)
1587 {
1588 if (c->s->info.stage != MESA_SHADER_FRAGMENT)
1589 return;
1590
1591 nir_foreach_variable(var, &c->s->outputs) {
1592 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1593 unsigned loc = var->data.driver_location * 4;
1594
1595 assert(array_len == 1);
1596 (void)array_len;
1597
1598 for (int i = 0; i < 4 - var->data.location_frac; i++) {
1599 add_output(c, loc + var->data.location_frac + i,
1600 var->data.location,
1601 var->data.location_frac + i);
1602 }
1603
1604 switch (var->data.location) {
1605 case FRAG_RESULT_COLOR:
1606 c->output_color_var[0] = var;
1607 c->output_color_var[1] = var;
1608 c->output_color_var[2] = var;
1609 c->output_color_var[3] = var;
1610 break;
1611 case FRAG_RESULT_DATA0:
1612 case FRAG_RESULT_DATA1:
1613 case FRAG_RESULT_DATA2:
1614 case FRAG_RESULT_DATA3:
1615 c->output_color_var[var->data.location -
1616 FRAG_RESULT_DATA0] = var;
1617 break;
1618 case FRAG_RESULT_DEPTH:
1619 c->output_position_index = loc;
1620 break;
1621 case FRAG_RESULT_SAMPLE_MASK:
1622 c->output_sample_mask_index = loc;
1623 break;
1624 }
1625 }
1626 }
1627
1628 /**
1629 * Sets up the mapping from nir_register to struct qreg *.
1630 *
1631 * Each nir_register gets a struct qreg per 32-bit component being stored.
1632 */
1633 static void
1634 ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
1635 {
1636 foreach_list_typed(nir_register, nir_reg, node, list) {
1637 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1638 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1639 array_len *
1640 nir_reg->num_components);
1641
1642 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1643
1644 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1645 qregs[i] = vir_get_temp(c);
1646 }
1647 }
1648
1649 static void
1650 ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
1651 {
1652 /* XXX perf: Experiment with using immediate loads to avoid having
1653 * these end up in the uniform stream. Watch out for breaking the
1654 * small immediates optimization in the process!
1655 */
1656 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1657 for (int i = 0; i < instr->def.num_components; i++)
1658 qregs[i] = vir_uniform_ui(c, instr->value[i].u32);
1659
1660 _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1661 }
1662
1663 static void
1664 ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
1665 {
1666 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1667
1668 /* VIR needs there to be *some* value, so pick 0 (same as for
1669 * ntq_setup_registers().
1670 */
1671 for (int i = 0; i < instr->def.num_components; i++)
1672 qregs[i] = vir_uniform_ui(c, 0);
1673 }
1674
1675 static void
1676 ntq_emit_image_size(struct v3d_compile *c, nir_intrinsic_instr *instr)
1677 {
1678 assert(instr->intrinsic == nir_intrinsic_image_deref_size);
1679 nir_variable *var = nir_intrinsic_get_var(instr, 0);
1680 unsigned image_index = var->data.driver_location;
1681 const struct glsl_type *sampler_type = glsl_without_array(var->type);
1682 bool is_array = glsl_sampler_type_is_array(sampler_type);
1683
1684 ntq_store_dest(c, &instr->dest, 0,
1685 vir_uniform(c, QUNIFORM_IMAGE_WIDTH, image_index));
1686 if (instr->num_components > 1) {
1687 ntq_store_dest(c, &instr->dest, 1,
1688 vir_uniform(c, QUNIFORM_IMAGE_HEIGHT,
1689 image_index));
1690 }
1691 if (instr->num_components > 2) {
1692 ntq_store_dest(c, &instr->dest, 2,
1693 vir_uniform(c,
1694 is_array ?
1695 QUNIFORM_IMAGE_ARRAY_SIZE :
1696 QUNIFORM_IMAGE_DEPTH,
1697 image_index));
1698 }
1699 }
1700
1701 static void
1702 vir_emit_tlb_color_read(struct v3d_compile *c, nir_intrinsic_instr *instr)
1703 {
1704 assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
1705
1706 int rt = nir_src_as_uint(instr->src[0]);
1707 assert(rt < V3D_MAX_DRAW_BUFFERS);
1708
1709 int sample_index = nir_intrinsic_base(instr) ;
1710 assert(sample_index < V3D_MAX_SAMPLES);
1711
1712 int component = nir_intrinsic_component(instr);
1713 assert(component < 4);
1714
1715 /* We need to emit our TLB reads after we have acquired the scoreboard
1716 * lock, or the GPU will hang. Usually, we do our scoreboard locking on
1717 * the last thread switch to improve parallelism, however, that is only
1718 * guaranteed to happen before the tlb color writes.
1719 *
1720 * To fix that, we make sure we always emit a thread switch before the
1721 * first tlb color read. If that happens to be the last thread switch
1722 * we emit, then everything is fine, but otherwise, if any code after
1723 * this point needs to emit additional thread switches, then we will
1724 * switch the strategy to locking the scoreboard on the first thread
1725 * switch instead -- see vir_emit_thrsw().
1726 */
1727 if (!c->emitted_tlb_load) {
1728 if (!c->last_thrsw_at_top_level) {
1729 assert(c->devinfo->ver >= 41);
1730 vir_emit_thrsw(c);
1731 }
1732
1733 c->emitted_tlb_load = true;
1734 }
1735
1736 struct qreg *color_reads_for_sample =
1737 &c->color_reads[(rt * V3D_MAX_SAMPLES + sample_index) * 4];
1738
1739 if (color_reads_for_sample[component].file == QFILE_NULL) {
1740 enum pipe_format rt_format = c->fs_key->color_fmt[rt].format;
1741 int num_components =
1742 util_format_get_nr_components(rt_format);
1743
1744 const bool swap_rb = c->fs_key->swap_color_rb & (1 << rt);
1745 if (swap_rb)
1746 num_components = MAX2(num_components, 3);
1747
1748 nir_variable *var = c->output_color_var[rt];
1749 enum glsl_base_type type = glsl_get_base_type(var->type);
1750
1751 bool is_int_format = type == GLSL_TYPE_INT ||
1752 type == GLSL_TYPE_UINT;
1753
1754 bool is_32b_tlb_format = is_int_format ||
1755 (c->fs_key->f32_color_rb & (1 << rt));
1756
1757 int num_samples = c->fs_key->msaa ? V3D_MAX_SAMPLES : 1;
1758
1759 uint32_t conf = 0xffffff00;
1760 conf |= c->fs_key->msaa ? TLB_SAMPLE_MODE_PER_SAMPLE :
1761 TLB_SAMPLE_MODE_PER_PIXEL;
1762 conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;
1763
1764 if (is_32b_tlb_format) {
1765 /* The F32 vs I32 distinction was dropped in 4.2. */
1766 conf |= (c->devinfo->ver < 42 && is_int_format) ?
1767 TLB_TYPE_I32_COLOR : TLB_TYPE_F32_COLOR;
1768
1769 conf |= ((num_components - 1) <<
1770 TLB_VEC_SIZE_MINUS_1_SHIFT);
1771 } else {
1772 conf |= TLB_TYPE_F16_COLOR;
1773 conf |= TLB_F16_SWAP_HI_LO;
1774
1775 if (num_components >= 3)
1776 conf |= TLB_VEC_SIZE_4_F16;
1777 else
1778 conf |= TLB_VEC_SIZE_2_F16;
1779 }
1780
1781
1782 for (int i = 0; i < num_samples; i++) {
1783 struct qreg r, g, b, a;
1784 if (is_32b_tlb_format) {
1785 r = conf != 0xffffffff && i == 0?
1786 vir_TLBU_COLOR_READ(c, conf) :
1787 vir_TLB_COLOR_READ(c);
1788 if (num_components >= 2)
1789 g = vir_TLB_COLOR_READ(c);
1790 if (num_components >= 3)
1791 b = vir_TLB_COLOR_READ(c);
1792 if (num_components >= 4)
1793 a = vir_TLB_COLOR_READ(c);
1794 } else {
1795 struct qreg rg = conf != 0xffffffff && i == 0 ?
1796 vir_TLBU_COLOR_READ(c, conf) :
1797 vir_TLB_COLOR_READ(c);
1798 r = vir_FMOV(c, rg);
1799 vir_set_unpack(c->defs[r.index], 0,
1800 V3D_QPU_UNPACK_L);
1801 g = vir_FMOV(c, rg);
1802 vir_set_unpack(c->defs[g.index], 0,
1803 V3D_QPU_UNPACK_H);
1804
1805 if (num_components > 2) {
1806 struct qreg ba = vir_TLB_COLOR_READ(c);
1807 b = vir_FMOV(c, ba);
1808 vir_set_unpack(c->defs[b.index], 0,
1809 V3D_QPU_UNPACK_L);
1810 a = vir_FMOV(c, ba);
1811 vir_set_unpack(c->defs[a.index], 0,
1812 V3D_QPU_UNPACK_H);
1813 }
1814 }
1815
1816 struct qreg *color_reads =
1817 &c->color_reads[(rt * V3D_MAX_SAMPLES + i) * 4];
1818
1819 color_reads[0] = swap_rb ? b : r;
1820 if (num_components >= 2)
1821 color_reads[1] = g;
1822 if (num_components >= 3)
1823 color_reads[2] = swap_rb ? r : b;
1824 if (num_components >= 4)
1825 color_reads[3] = a;
1826 }
1827 }
1828
1829 assert(color_reads_for_sample[component].file != QFILE_NULL);
1830 ntq_store_dest(c, &instr->dest, 0,
1831 vir_MOV(c, color_reads_for_sample[component]));
1832 }
1833
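/* Uniform loads with a constant offset are fetched straight from the uniform
 * stream (one QUNIFORM_UNIFORM entry per dword); indirect offsets go through
 * the general TMU path instead.
 */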
1834 static void
1835 ntq_emit_load_uniform(struct v3d_compile *c, nir_intrinsic_instr *instr)
1836 {
1837 if (nir_src_is_const(instr->src[0])) {
1838 int offset = (nir_intrinsic_base(instr) +
1839 nir_src_as_uint(instr->src[0]));
1840 assert(offset % 4 == 0);
1841 /* We need dwords */
1842 offset = offset / 4;
1843 for (int i = 0; i < instr->num_components; i++) {
1844 ntq_store_dest(c, &instr->dest, i,
1845 vir_uniform(c, QUNIFORM_UNIFORM,
1846 offset + i));
1847 }
1848 } else {
1849 ntq_emit_tmu_general(c, instr, false);
1850 }
1851 }
1852
1853 static void
1854 ntq_emit_load_input(struct v3d_compile *c, nir_intrinsic_instr *instr)
1855 {
1856 /* XXX: Use ldvpmv (uniform offset) or ldvpmd (non-uniform offset)
1857 * and enable PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR.
1858 */
1859 unsigned offset =
1860 nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[0]);
1861
1862 if (c->s->info.stage != MESA_SHADER_FRAGMENT && c->devinfo->ver >= 40) {
1863 /* Emit the LDVPM directly now, rather than at the top
1864 * of the shader like we did for V3D 3.x (which needs
1865 * vpmsetup when not just taking the next offset).
1866 *
1867 * Note that delaying like this may introduce stalls,
1868 * as LDVPMV takes a minimum of 1 instruction but may
1869 * be slower if the VPM unit is busy with another QPU.
1870 */
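                     /* Compute the VPM word index for this input: skip one
                      * word each for the instance ID and vertex ID if they
                      * are read, then the words of all the preceding
                      * attributes, then the component within this one.
                      */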
1871 int index = 0;
1872 if (c->s->info.system_values_read &
1873 (1ull << SYSTEM_VALUE_INSTANCE_ID)) {
1874 index++;
1875 }
1876 if (c->s->info.system_values_read &
1877 (1ull << SYSTEM_VALUE_VERTEX_ID)) {
1878 index++;
1879 }
1880 for (int i = 0; i < offset; i++)
1881 index += c->vattr_sizes[i];
1882 index += nir_intrinsic_component(instr);
1883 for (int i = 0; i < instr->num_components; i++) {
1884 struct qreg vpm_offset = vir_uniform_ui(c, index++);
1885 ntq_store_dest(c, &instr->dest, i,
1886 vir_LDVPMV_IN(c, vpm_offset));
1887 }
1888 } else {
1889 for (int i = 0; i < instr->num_components; i++) {
1890 int comp = nir_intrinsic_component(instr) + i;
1891 ntq_store_dest(c, &instr->dest, i,
1892 vir_MOV(c, c->inputs[offset * 4 + comp]));
1893 }
1894 }
1895 }
1896
1897 static void
1898 ntq_emit_per_sample_color_write(struct v3d_compile *c,
1899 nir_intrinsic_instr *instr)
1900 {
1901 assert(instr->intrinsic == nir_intrinsic_store_tlb_sample_color_v3d);
1902
1903 unsigned rt = nir_src_as_uint(instr->src[1]);
1904 assert(rt < V3D_MAX_DRAW_BUFFERS);
1905
1906 unsigned sample_idx = nir_intrinsic_base(instr);
1907 assert(sample_idx < V3D_MAX_SAMPLES);
1908
1909 unsigned offset = (rt * V3D_MAX_SAMPLES + sample_idx) * 4;
1910 for (int i = 0; i < instr->num_components; i++) {
1911 c->sample_colors[offset + i] =
1912 vir_MOV(c, ntq_get_src(c, instr->src[0], i));
1913 }
1914 }
1915
1916 static void
1917 ntq_emit_color_write(struct v3d_compile *c,
1918 nir_intrinsic_instr *instr)
1919 {
1920 unsigned offset = (nir_intrinsic_base(instr) +
1921 nir_src_as_uint(instr->src[1])) * 4 +
1922 nir_intrinsic_component(instr);
1923 for (int i = 0; i < instr->num_components; i++) {
1924 c->outputs[offset + i] =
1925 vir_MOV(c, ntq_get_src(c, instr->src[0], i));
1926 }
1927 }
1928
1929 static void
1930 ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
1931 {
1932 switch (instr->intrinsic) {
1933 case nir_intrinsic_load_uniform:
1934 ntq_emit_load_uniform(c, instr);
1935 break;
1936
1937 case nir_intrinsic_load_ubo:
1938 ntq_emit_tmu_general(c, instr, false);
1939 break;
1940
1941 case nir_intrinsic_ssbo_atomic_add:
1942 case nir_intrinsic_ssbo_atomic_imin:
1943 case nir_intrinsic_ssbo_atomic_umin:
1944 case nir_intrinsic_ssbo_atomic_imax:
1945 case nir_intrinsic_ssbo_atomic_umax:
1946 case nir_intrinsic_ssbo_atomic_and:
1947 case nir_intrinsic_ssbo_atomic_or:
1948 case nir_intrinsic_ssbo_atomic_xor:
1949 case nir_intrinsic_ssbo_atomic_exchange:
1950 case nir_intrinsic_ssbo_atomic_comp_swap:
1951 case nir_intrinsic_load_ssbo:
1952 case nir_intrinsic_store_ssbo:
1953 ntq_emit_tmu_general(c, instr, false);
1954 break;
1955
1956 case nir_intrinsic_shared_atomic_add:
1957 case nir_intrinsic_shared_atomic_imin:
1958 case nir_intrinsic_shared_atomic_umin:
1959 case nir_intrinsic_shared_atomic_imax:
1960 case nir_intrinsic_shared_atomic_umax:
1961 case nir_intrinsic_shared_atomic_and:
1962 case nir_intrinsic_shared_atomic_or:
1963 case nir_intrinsic_shared_atomic_xor:
1964 case nir_intrinsic_shared_atomic_exchange:
1965 case nir_intrinsic_shared_atomic_comp_swap:
1966 case nir_intrinsic_load_shared:
1967 case nir_intrinsic_store_shared:
1968 case nir_intrinsic_load_scratch:
1969 case nir_intrinsic_store_scratch:
1970 ntq_emit_tmu_general(c, instr, true);
1971 break;
1972
1973 case nir_intrinsic_image_deref_load:
1974 case nir_intrinsic_image_deref_store:
1975 case nir_intrinsic_image_deref_atomic_add:
1976 case nir_intrinsic_image_deref_atomic_min:
1977 case nir_intrinsic_image_deref_atomic_max:
1978 case nir_intrinsic_image_deref_atomic_and:
1979 case nir_intrinsic_image_deref_atomic_or:
1980 case nir_intrinsic_image_deref_atomic_xor:
1981 case nir_intrinsic_image_deref_atomic_exchange:
1982 case nir_intrinsic_image_deref_atomic_comp_swap:
1983 v3d40_vir_emit_image_load_store(c, instr);
1984 break;
1985
1986 case nir_intrinsic_get_buffer_size:
1987 ntq_store_dest(c, &instr->dest, 0,
1988 vir_uniform(c, QUNIFORM_GET_BUFFER_SIZE,
1989 nir_src_as_uint(instr->src[0])));
1990 break;
1991
1992 case nir_intrinsic_load_user_clip_plane:
1993 for (int i = 0; i < instr->num_components; i++) {
1994 ntq_store_dest(c, &instr->dest, i,
1995 vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1996 nir_intrinsic_ucp_id(instr) *
1997 4 + i));
1998 }
1999 break;
2000
2001 case nir_intrinsic_load_viewport_x_scale:
2002 ntq_store_dest(c, &instr->dest, 0,
2003 vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE, 0));
2004 break;
2005
2006 case nir_intrinsic_load_viewport_y_scale:
2007 ntq_store_dest(c, &instr->dest, 0,
2008 vir_uniform(c, QUNIFORM_VIEWPORT_Y_SCALE, 0));
2009 break;
2010
2011 case nir_intrinsic_load_viewport_z_scale:
2012 ntq_store_dest(c, &instr->dest, 0,
2013 vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0));
2014 break;
2015
2016 case nir_intrinsic_load_viewport_z_offset:
2017 ntq_store_dest(c, &instr->dest, 0,
2018 vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0));
2019 break;
2020
2021 case nir_intrinsic_load_alpha_ref_float:
2022 ntq_store_dest(c, &instr->dest, 0,
2023 vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
2024 break;
2025
2026 case nir_intrinsic_load_sample_mask_in:
2027 ntq_store_dest(c, &instr->dest, 0, vir_MSF(c));
2028 break;
2029
2030 case nir_intrinsic_load_helper_invocation:
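                     /* Helper invocations have no sample flags set, so MSF
                      * reads back 0 for them; push that zero test and select
                      * ~0 (true) for those channels.
                      */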
2031 vir_set_pf(vir_MSF_dest(c, vir_nop_reg()), V3D_QPU_PF_PUSHZ);
2032 ntq_store_dest(c, &instr->dest, 0,
2033 vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
2034 vir_uniform_ui(c, ~0),
2035 vir_uniform_ui(c, 0))));
2036 break;
2037
2038 case nir_intrinsic_load_front_face:
2039 /* The register contains 0 (front) or 1 (back), and we need to
2040 * turn it into a NIR bool where true means front.
2041 */
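                     /* For instance, REVF = 0 (front) gives -1 + 0 = ~0
                      * (NIR true) and REVF = 1 (back) gives -1 + 1 = 0
                      * (false).
                      */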
2042 ntq_store_dest(c, &instr->dest, 0,
2043 vir_ADD(c,
2044 vir_uniform_ui(c, -1),
2045 vir_REVF(c)));
2046 break;
2047
2048 case nir_intrinsic_load_instance_id:
2049 ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
2050 break;
2051
2052 case nir_intrinsic_load_vertex_id:
2053 ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
2054 break;
2055
2056 case nir_intrinsic_load_tlb_color_v3d:
2057 vir_emit_tlb_color_read(c, instr);
2058 break;
2059
2060 case nir_intrinsic_load_input:
2061 ntq_emit_load_input(c, instr);
2062 break;
2063
2064 case nir_intrinsic_store_tlb_sample_color_v3d:
2065 ntq_emit_per_sample_color_write(c, instr);
2066 break;
2067
2068 case nir_intrinsic_store_output:
2069 /* XXX perf: Use stvpmv with uniform non-constant offsets and
2070 * stvpmd with non-uniform offsets and enable
2071 * PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR.
2072 */
2073 if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
2074 ntq_emit_color_write(c, instr);
2075 } else {
2076 assert(instr->num_components == 1);
2077
2078 vir_VPM_WRITE(c,
2079 ntq_get_src(c, instr->src[0], 0),
2080 nir_intrinsic_base(instr));
2081 }
2082 break;
2083
2084 case nir_intrinsic_image_deref_size:
2085 ntq_emit_image_size(c, instr);
2086 break;
2087
2088 case nir_intrinsic_discard:
2089 if (vir_in_nonuniform_control_flow(c)) {
2090 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
2091 V3D_QPU_PF_PUSHZ);
2092 vir_set_cond(vir_SETMSF_dest(c, vir_nop_reg(),
2093 vir_uniform_ui(c, 0)),
2094 V3D_QPU_COND_IFA);
2095 } else {
2096 vir_SETMSF_dest(c, vir_nop_reg(),
2097 vir_uniform_ui(c, 0));
2098 }
2099 break;
2100
2101 case nir_intrinsic_discard_if: {
2102 enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, instr->src[0]);
2103
2104 if (vir_in_nonuniform_control_flow(c)) {
2105 struct qinst *exec_flag = vir_MOV_dest(c, vir_nop_reg(),
2106 c->execute);
2107 if (cond == V3D_QPU_COND_IFA) {
2108 vir_set_uf(exec_flag, V3D_QPU_UF_ANDZ);
2109 } else {
2110 vir_set_uf(exec_flag, V3D_QPU_UF_NORNZ);
2111 cond = V3D_QPU_COND_IFA;
2112 }
2113 }
2114
2115 vir_set_cond(vir_SETMSF_dest(c, vir_nop_reg(),
2116 vir_uniform_ui(c, 0)), cond);
2117
2118 break;
2119 }
2120
2121 case nir_intrinsic_memory_barrier:
2122 case nir_intrinsic_memory_barrier_atomic_counter:
2123 case nir_intrinsic_memory_barrier_buffer:
2124 case nir_intrinsic_memory_barrier_image:
2125 case nir_intrinsic_memory_barrier_shared:
2126 case nir_intrinsic_group_memory_barrier:
2127 /* We don't do any instruction scheduling of these NIR
2128 * instructions between each other, so we just need to make
2129 * sure that the TMU operations before the barrier are flushed
2130 * before the ones after the barrier. That is currently
2131          * handled by having a THRSW in each of them and an LDTMU
2132 * series or a TMUWT after.
2133 */
2134 break;
2135
2136 case nir_intrinsic_barrier:
2137 /* Emit a TSY op to get all invocations in the workgroup
2138 * (actually supergroup) to block until the last invocation
2139 * reaches the TSY op.
2140 */
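                     /* On 4.2+ the BARRIERID write to SYNCB needs no extra
                      * configuration; older versions instead attach a
                      * uniform encoding the TSY wait op
                      * (V3D_TSY_WAIT_INC_CHECK) to the SYNCU write.
                      */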
2141 if (c->devinfo->ver >= 42) {
2142 vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
2143 V3D_QPU_WADDR_SYNCB));
2144 } else {
2145 struct qinst *sync =
2146 vir_BARRIERID_dest(c,
2147 vir_reg(QFILE_MAGIC,
2148 V3D_QPU_WADDR_SYNCU));
2149 sync->uniform =
2150 vir_get_uniform_index(c, QUNIFORM_CONSTANT,
2151 0xffffff00 |
2152 V3D_TSY_WAIT_INC_CHECK);
2153
2154 }
2155
2156 /* The blocking of a TSY op only happens at the next thread
2157 * switch. No texturing may be outstanding at the time of a
2158 * TSY blocking operation.
2159 */
2160 vir_emit_thrsw(c);
2161 break;
2162
2163 case nir_intrinsic_load_num_work_groups:
2164 for (int i = 0; i < 3; i++) {
2165 ntq_store_dest(c, &instr->dest, i,
2166 vir_uniform(c, QUNIFORM_NUM_WORK_GROUPS,
2167 i));
2168 }
2169 break;
2170
2171 case nir_intrinsic_load_local_invocation_index:
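                     /* The local invocation index lives in the top
                      * local_invocation_index_bits bits of cs_payload[1]
                      * (see the payload setup in nir_to_vir()), so shifting
                      * right by (32 - bits) extracts it.
                      */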
2172 ntq_store_dest(c, &instr->dest, 0,
2173 vir_SHR(c, c->cs_payload[1],
2174 vir_uniform_ui(c, 32 - c->local_invocation_index_bits)));
2175 break;
2176
2177 case nir_intrinsic_load_work_group_id:
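                     /* The workgroup ID is packed as X in the low 16 bits
                      * of cs_payload[0], Y in its high 16 bits, and Z in the
                      * low 16 bits of cs_payload[1].
                      */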
2178 ntq_store_dest(c, &instr->dest, 0,
2179 vir_AND(c, c->cs_payload[0],
2180 vir_uniform_ui(c, 0xffff)));
2181 ntq_store_dest(c, &instr->dest, 1,
2182 vir_SHR(c, c->cs_payload[0],
2183 vir_uniform_ui(c, 16)));
2184 ntq_store_dest(c, &instr->dest, 2,
2185 vir_AND(c, c->cs_payload[1],
2186 vir_uniform_ui(c, 0xffff)));
2187 break;
2188
2189 case nir_intrinsic_load_subgroup_id:
2190 ntq_store_dest(c, &instr->dest, 0, vir_EIDX(c));
2191 break;
2192
2193 default:
2194 fprintf(stderr, "Unknown intrinsic: ");
2195 nir_print_instr(&instr->instr, stderr);
2196 fprintf(stderr, "\n");
2197 break;
2198 }
2199 }
2200
2201 /* Clears (activates) the execute flags for any channels whose jump target
2202 * matches this block.
2203 *
2204 * XXX perf: Could we be using flpush/flpop somehow for our execution channel
2205 * enabling?
2206 *
2207 * XXX perf: For uniform control flow, we should be able to skip c->execute
2208 * handling entirely.
2209 */
2210 static void
2211 ntq_activate_execute_for_block(struct v3d_compile *c)
2212 {
2213 vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
2214 c->execute, vir_uniform_ui(c, c->cur_block->index)),
2215 V3D_QPU_PF_PUSHZ);
2216
2217 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
2218 }
2219
2220 static void
2221 ntq_emit_uniform_if(struct v3d_compile *c, nir_if *if_stmt)
2222 {
2223 nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
2224 bool empty_else_block =
2225 (nir_else_block == nir_if_last_else_block(if_stmt) &&
2226 exec_list_is_empty(&nir_else_block->instr_list));
2227
2228 struct qblock *then_block = vir_new_block(c);
2229 struct qblock *after_block = vir_new_block(c);
2230 struct qblock *else_block;
2231 if (empty_else_block)
2232 else_block = after_block;
2233 else
2234 else_block = vir_new_block(c);
2235
2236 /* Set up the flags for the IF condition (taking the THEN branch). */
2237 enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);
2238
2239 /* Jump to ELSE. */
2240 vir_BRANCH(c, cond == V3D_QPU_COND_IFA ?
2241 V3D_QPU_BRANCH_COND_ALLNA :
2242 V3D_QPU_BRANCH_COND_ALLA);
2243 vir_link_blocks(c->cur_block, else_block);
2244 vir_link_blocks(c->cur_block, then_block);
2245
2246 /* Process the THEN block. */
2247 vir_set_emit_block(c, then_block);
2248 ntq_emit_cf_list(c, &if_stmt->then_list);
2249
2250 if (!empty_else_block) {
2251 /* At the end of the THEN block, jump to ENDIF */
2252 vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALWAYS);
2253 vir_link_blocks(c->cur_block, after_block);
2254
2255 /* Emit the else block. */
2256 vir_set_emit_block(c, else_block);
2257 ntq_emit_cf_list(c, &if_stmt->else_list);
2258 }
2259
2260 vir_link_blocks(c->cur_block, after_block);
2261
2262 vir_set_emit_block(c, after_block);
2263 }
2264
2265 static void
2266 ntq_emit_nonuniform_if(struct v3d_compile *c, nir_if *if_stmt)
2267 {
2268 nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
2269 bool empty_else_block =
2270 (nir_else_block == nir_if_last_else_block(if_stmt) &&
2271 exec_list_is_empty(&nir_else_block->instr_list));
2272
2273 struct qblock *then_block = vir_new_block(c);
2274 struct qblock *after_block = vir_new_block(c);
2275 struct qblock *else_block;
2276 if (empty_else_block)
2277 else_block = after_block;
2278 else
2279 else_block = vir_new_block(c);
2280
2281 bool was_uniform_control_flow = false;
2282 if (!vir_in_nonuniform_control_flow(c)) {
2283 c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
2284 was_uniform_control_flow = true;
2285 }
2286
2287 /* Set up the flags for the IF condition (taking the THEN branch). */
2288 enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);
2289
2290         /* Update the flags+cond to mean "Taking the ELSE branch (!cond) and
2291          * was previously active (execute Z)" for updating the exec flags.
2292          */
2293 if (was_uniform_control_flow) {
2294 cond = v3d_qpu_cond_invert(cond);
2295 } else {
2296 struct qinst *inst = vir_MOV_dest(c, vir_nop_reg(), c->execute);
2297 if (cond == V3D_QPU_COND_IFA) {
2298 vir_set_uf(inst, V3D_QPU_UF_NORNZ);
2299 } else {
2300 vir_set_uf(inst, V3D_QPU_UF_ANDZ);
2301 cond = V3D_QPU_COND_IFA;
2302 }
2303 }
2304
2305 vir_MOV_cond(c, cond,
2306 c->execute,
2307 vir_uniform_ui(c, else_block->index));
2308
2309 /* Jump to ELSE if nothing is active for THEN, otherwise fall
2310 * through.
2311 */
2312 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);
2313 vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
2314 vir_link_blocks(c->cur_block, else_block);
2315 vir_link_blocks(c->cur_block, then_block);
2316
2317 /* Process the THEN block. */
2318 vir_set_emit_block(c, then_block);
2319 ntq_emit_cf_list(c, &if_stmt->then_list);
2320
2321 if (!empty_else_block) {
2322 /* Handle the end of the THEN block. First, all currently
2323 * active channels update their execute flags to point to
2324 * ENDIF
2325 */
2326 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
2327 V3D_QPU_PF_PUSHZ);
2328 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
2329 vir_uniform_ui(c, after_block->index));
2330
2331 /* If everything points at ENDIF, then jump there immediately. */
2332 vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
2333 c->execute,
2334 vir_uniform_ui(c, after_block->index)),
2335 V3D_QPU_PF_PUSHZ);
2336 vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
2337 vir_link_blocks(c->cur_block, after_block);
2338 vir_link_blocks(c->cur_block, else_block);
2339
2340 vir_set_emit_block(c, else_block);
2341 ntq_activate_execute_for_block(c);
2342 ntq_emit_cf_list(c, &if_stmt->else_list);
2343 }
2344
2345 vir_link_blocks(c->cur_block, after_block);
2346
2347 vir_set_emit_block(c, after_block);
2348 if (was_uniform_control_flow)
2349 c->execute = c->undef;
2350 else
2351 ntq_activate_execute_for_block(c);
2352 }
2353
2354 static void
2355 ntq_emit_if(struct v3d_compile *c, nir_if *nif)
2356 {
2357 bool was_in_control_flow = c->in_control_flow;
2358 c->in_control_flow = true;
2359 if (!vir_in_nonuniform_control_flow(c) &&
2360 nir_src_is_dynamically_uniform(nif->condition)) {
2361 ntq_emit_uniform_if(c, nif);
2362 } else {
2363 ntq_emit_nonuniform_if(c, nif);
2364 }
2365 c->in_control_flow = was_in_control_flow;
2366 }
2367
2368 static void
2369 ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
2370 {
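             /* break/continue don't branch here; they just park the channel
              * by pointing its execute value at the break/continue block,
              * and the loop code re-activates channels whose execute
              * matches that block.
              */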
2371 switch (jump->type) {
2372 case nir_jump_break:
2373 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
2374 V3D_QPU_PF_PUSHZ);
2375 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
2376 vir_uniform_ui(c, c->loop_break_block->index));
2377 break;
2378
2379 case nir_jump_continue:
2380 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
2381 V3D_QPU_PF_PUSHZ);
2382 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
2383 vir_uniform_ui(c, c->loop_cont_block->index));
2384 break;
2385
2386 case nir_jump_return:
2387                 unreachable("All returns should be lowered\n");
2388 }
2389 }
2390
2391 static void
2392 ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
2393 {
2394 switch (instr->type) {
2395 case nir_instr_type_deref:
2396 /* ignored, will be walked by the intrinsic using it. */
2397 break;
2398
2399 case nir_instr_type_alu:
2400 ntq_emit_alu(c, nir_instr_as_alu(instr));
2401 break;
2402
2403 case nir_instr_type_intrinsic:
2404 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
2405 break;
2406
2407 case nir_instr_type_load_const:
2408 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
2409 break;
2410
2411 case nir_instr_type_ssa_undef:
2412 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
2413 break;
2414
2415 case nir_instr_type_tex:
2416 ntq_emit_tex(c, nir_instr_as_tex(instr));
2417 break;
2418
2419 case nir_instr_type_jump:
2420 ntq_emit_jump(c, nir_instr_as_jump(instr));
2421 break;
2422
2423 default:
2424 fprintf(stderr, "Unknown NIR instr type: ");
2425 nir_print_instr(instr, stderr);
2426 fprintf(stderr, "\n");
2427 abort();
2428 }
2429 }
2430
2431 static void
2432 ntq_emit_block(struct v3d_compile *c, nir_block *block)
2433 {
2434 nir_foreach_instr(instr, block) {
2435 ntq_emit_instr(c, instr);
2436 }
2437 }
2438
2439 static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
2440
2441 static void
2442 ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
2443 {
2444 bool was_in_control_flow = c->in_control_flow;
2445 c->in_control_flow = true;
2446
2447 bool was_uniform_control_flow = false;
2448 if (!vir_in_nonuniform_control_flow(c)) {
2449 c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
2450 was_uniform_control_flow = true;
2451 }
2452
2453 struct qblock *save_loop_cont_block = c->loop_cont_block;
2454 struct qblock *save_loop_break_block = c->loop_break_block;
2455
2456 c->loop_cont_block = vir_new_block(c);
2457 c->loop_break_block = vir_new_block(c);
2458
2459 vir_link_blocks(c->cur_block, c->loop_cont_block);
2460 vir_set_emit_block(c, c->loop_cont_block);
2461 ntq_activate_execute_for_block(c);
2462
2463 ntq_emit_cf_list(c, &loop->body);
2464
2465 /* Re-enable any previous continues now, so our ANYA check below
2466 * works.
2467 *
2468 * XXX: Use the .ORZ flags update, instead.
2469 */
2470 vir_set_pf(vir_XOR_dest(c,
2471 vir_nop_reg(),
2472 c->execute,
2473 vir_uniform_ui(c, c->loop_cont_block->index)),
2474 V3D_QPU_PF_PUSHZ);
2475 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
2476
2477 vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);
2478
2479 struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
2480 /* Pixels that were not dispatched or have been discarded should not
2481 * contribute to looping again.
2482 */
2483 branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
2484 vir_link_blocks(c->cur_block, c->loop_cont_block);
2485 vir_link_blocks(c->cur_block, c->loop_break_block);
2486
2487 vir_set_emit_block(c, c->loop_break_block);
2488 if (was_uniform_control_flow)
2489 c->execute = c->undef;
2490 else
2491 ntq_activate_execute_for_block(c);
2492
2493 c->loop_break_block = save_loop_break_block;
2494 c->loop_cont_block = save_loop_cont_block;
2495
2496 c->loops++;
2497
2498 c->in_control_flow = was_in_control_flow;
2499 }
2500
2501 static void
2502 ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
2503 {
2504 fprintf(stderr, "FUNCTIONS not handled.\n");
2505 abort();
2506 }
2507
2508 static void
2509 ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
2510 {
2511 foreach_list_typed(nir_cf_node, node, node, list) {
2512 switch (node->type) {
2513 case nir_cf_node_block:
2514 ntq_emit_block(c, nir_cf_node_as_block(node));
2515 break;
2516
2517 case nir_cf_node_if:
2518 ntq_emit_if(c, nir_cf_node_as_if(node));
2519 break;
2520
2521 case nir_cf_node_loop:
2522 ntq_emit_loop(c, nir_cf_node_as_loop(node));
2523 break;
2524
2525 case nir_cf_node_function:
2526 ntq_emit_function(c, nir_cf_node_as_function(node));
2527 break;
2528
2529 default:
2530 fprintf(stderr, "Unknown NIR node type\n");
2531 abort();
2532 }
2533 }
2534 }
2535
2536 static void
2537 ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
2538 {
2539 ntq_setup_registers(c, &impl->registers);
2540 ntq_emit_cf_list(c, &impl->body);
2541 }
2542
2543 static void
2544 nir_to_vir(struct v3d_compile *c)
2545 {
2546 switch (c->s->info.stage) {
2547 case MESA_SHADER_FRAGMENT:
2548 c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
2549 c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
2550 c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));
2551
2552 /* V3D 4.x can disable implicit point coordinate varyings if
2553 * they are not used.
2554 */
2555 if (c->fs_key->is_points &&
2556 (c->devinfo->ver < 40 || program_reads_point_coord(c))) {
2557 c->point_x = emit_fragment_varying(c, NULL, 0, 0);
2558 c->point_y = emit_fragment_varying(c, NULL, 0, 0);
2559 c->uses_implicit_point_line_varyings = true;
2560 } else if (c->fs_key->is_lines && c->devinfo->ver < 40) {
2561 c->line_x = emit_fragment_varying(c, NULL, 0, 0);
2562 c->uses_implicit_point_line_varyings = true;
2563 }
2564 break;
2565 case MESA_SHADER_COMPUTE:
2566 /* Set up the TSO for barriers, assuming we do some. */
2567 if (c->devinfo->ver < 42) {
2568 vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
2569 V3D_QPU_WADDR_SYNC));
2570 }
2571
2572 c->cs_payload[0] = vir_MOV(c, vir_reg(QFILE_REG, 0));
2573 c->cs_payload[1] = vir_MOV(c, vir_reg(QFILE_REG, 2));
2574
2575 /* Set up the division between gl_LocalInvocationIndex and
2576 * wg_in_mem in the payload reg.
2577 */
2578 int wg_size = (c->s->info.cs.local_size[0] *
2579 c->s->info.cs.local_size[1] *
2580 c->s->info.cs.local_size[2]);
2581 c->local_invocation_index_bits =
2582 ffs(util_next_power_of_two(MAX2(wg_size, 64))) - 1;
2583 assert(c->local_invocation_index_bits <= 8);
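                     /* For example, an 8x8x1 workgroup (64 invocations)
                      * needs 6 index bits, and anything up to 256
                      * invocations fits in the 8 bits asserted above.
                      */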
2584
2585 if (c->s->info.cs.shared_size) {
2586 struct qreg wg_in_mem = vir_SHR(c, c->cs_payload[1],
2587 vir_uniform_ui(c, 16));
2588 if (c->s->info.cs.local_size[0] != 1 ||
2589 c->s->info.cs.local_size[1] != 1 ||
2590 c->s->info.cs.local_size[2] != 1) {
2591 int wg_bits = (16 -
2592 c->local_invocation_index_bits);
2593 int wg_mask = (1 << wg_bits) - 1;
2594 wg_in_mem = vir_AND(c, wg_in_mem,
2595 vir_uniform_ui(c, wg_mask));
2596 }
2597 struct qreg shared_per_wg =
2598 vir_uniform_ui(c, c->s->info.cs.shared_size);
2599
2600 c->cs_shared_offset =
2601 vir_ADD(c,
2602                                 vir_uniform(c, QUNIFORM_SHARED_OFFSET, 0),
2603 vir_UMUL(c, wg_in_mem, shared_per_wg));
2604 }
2605 break;
2606 default:
2607 break;
2608 }
2609
2610 if (c->s->scratch_size) {
2611 v3d_setup_spill_base(c);
2612 c->spill_size += V3D_CHANNELS * c->s->scratch_size;
2613 }
2614
2615 if (c->s->info.stage == MESA_SHADER_FRAGMENT)
2616 ntq_setup_fs_inputs(c);
2617 else
2618 ntq_setup_vpm_inputs(c);
2619
2620 ntq_setup_outputs(c);
2621
2622 /* Find the main function and emit the body. */
2623 nir_foreach_function(function, c->s) {
2624 assert(strcmp(function->name, "main") == 0);
2625 assert(function->impl);
2626 ntq_emit_impl(c, function->impl);
2627 }
2628 }
2629
2630 const nir_shader_compiler_options v3d_nir_options = {
2631 .lower_all_io_to_temps = true,
2632 .lower_extract_byte = true,
2633 .lower_extract_word = true,
2634 .lower_bitfield_insert_to_shifts = true,
2635 .lower_bitfield_extract_to_shifts = true,
2636 .lower_bitfield_reverse = true,
2637 .lower_bit_count = true,
2638 .lower_cs_local_id_from_index = true,
2639 .lower_ffract = true,
2640 .lower_fmod = true,
2641 .lower_pack_unorm_2x16 = true,
2642 .lower_pack_snorm_2x16 = true,
2643 .lower_pack_unorm_4x8 = true,
2644 .lower_pack_snorm_4x8 = true,
2645 .lower_unpack_unorm_4x8 = true,
2646 .lower_unpack_snorm_4x8 = true,
2647 .lower_pack_half_2x16 = true,
2648 .lower_unpack_half_2x16 = true,
2649 .lower_fdiv = true,
2650 .lower_find_lsb = true,
2651 .lower_ffma = true,
2652 .lower_flrp32 = true,
2653 .lower_fpow = true,
2654 .lower_fsat = true,
2655 .lower_fsqrt = true,
2656 .lower_ifind_msb = true,
2657 .lower_isign = true,
2658 .lower_ldexp = true,
2659 .lower_mul_high = true,
2660 .lower_wpos_pntc = true,
2661 .lower_rotate = true,
2662 };
2663
2664 /**
2665  * Removes the THRSW instructions when demoting a shader to single-threaded
2666  * (one THRSW will still be inserted at v3d_vir_to_qpu() for the program
2667  * end).
2668 */
2669 static void
2670 vir_remove_thrsw(struct v3d_compile *c)
2671 {
2672 vir_for_each_block(block, c) {
2673 vir_for_each_inst_safe(inst, block) {
2674 if (inst->qpu.sig.thrsw)
2675 vir_remove_instruction(c, inst);
2676 }
2677 }
2678
2679 c->last_thrsw = NULL;
2680 }
2681
2682 void
2683 vir_emit_last_thrsw(struct v3d_compile *c)
2684 {
2685 /* On V3D before 4.1, we need a TMU op to be outstanding when thread
2686 * switching, so disable threads if we didn't do any TMU ops (each of
2687 * which would have emitted a THRSW).
2688 */
2689 if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
2690 c->threads = 1;
2691 if (c->last_thrsw)
2692 vir_remove_thrsw(c);
2693 return;
2694 }
2695
2696 /* If we're threaded and the last THRSW was in conditional code, then
2697 * we need to emit another one so that we can flag it as the last
2698 * thrsw.
2699 */
2700 if (c->last_thrsw && !c->last_thrsw_at_top_level) {
2701 assert(c->devinfo->ver >= 41);
2702 vir_emit_thrsw(c);
2703 }
2704
2705 /* If we're threaded, then we need to mark the last THRSW instruction
2706 * so we can emit a pair of them at QPU emit time.
2707 *
2708 * For V3D 4.x, we can spawn the non-fragment shaders already in the
2709 * post-last-THRSW state, so we can skip this.
2710 */
2711 if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
2712 assert(c->devinfo->ver >= 41);
2713 vir_emit_thrsw(c);
2714 }
2715
2716 if (c->last_thrsw)
2717 c->last_thrsw->is_last_thrsw = true;
2718 }
2719
2720 /* There's a flag in the shader for "center W is needed for reasons other than
2721 * non-centroid varyings", so we just walk the program after VIR optimization
2722 * to see if it's used. It should be harmless to set even if we only use
2723 * center W for varyings.
2724 */
2725 static void
2726 vir_check_payload_w(struct v3d_compile *c)
2727 {
2728 if (c->s->info.stage != MESA_SHADER_FRAGMENT)
2729 return;
2730
2731 vir_for_each_inst_inorder(inst, c) {
2732 for (int i = 0; i < vir_get_nsrc(inst); i++) {
2733 if (inst->src[i].file == QFILE_REG &&
2734 inst->src[i].index == 0) {
2735 c->uses_center_w = true;
2736 return;
2737 }
2738 }
2739 }
2740
2741 }
2742
2743 void
2744 v3d_nir_to_vir(struct v3d_compile *c)
2745 {
2746 if (V3D_DEBUG & (V3D_DEBUG_NIR |
2747 v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
2748 fprintf(stderr, "%s prog %d/%d NIR:\n",
2749 vir_get_stage_name(c),
2750 c->program_id, c->variant_id);
2751 nir_print_shader(c->s, stderr);
2752 }
2753
2754 nir_to_vir(c);
2755
2756 /* Emit the last THRSW before STVPM and TLB writes. */
2757 vir_emit_last_thrsw(c);
2758
2759 switch (c->s->info.stage) {
2760 case MESA_SHADER_FRAGMENT:
2761 emit_frag_end(c);
2762 break;
2763 case MESA_SHADER_VERTEX:
2764 emit_vert_end(c);
2765 break;
2766 case MESA_SHADER_COMPUTE:
2767 break;
2768 default:
2769 unreachable("bad stage");
2770 }
2771
2772 if (V3D_DEBUG & (V3D_DEBUG_VIR |
2773 v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
2774 fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
2775 vir_get_stage_name(c),
2776 c->program_id, c->variant_id);
2777 vir_dump(c);
2778 fprintf(stderr, "\n");
2779 }
2780
2781 vir_optimize(c);
2782
2783 vir_check_payload_w(c);
2784
2785 /* XXX perf: On VC4, we do a VIR-level instruction scheduling here.
2786 * We used that on that platform to pipeline TMU writes and reduce the
2787 * number of thread switches, as well as try (mostly successfully) to
2788 * reduce maximum register pressure to allow more threads. We should
2789 * do something of that sort for V3D -- either instruction scheduling
2790          * here, or delay the THRSW and LDTMUs from our texture
2791 * instructions until the results are needed.
2792 */
2793
2794 if (V3D_DEBUG & (V3D_DEBUG_VIR |
2795 v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
2796 fprintf(stderr, "%s prog %d/%d VIR:\n",
2797 vir_get_stage_name(c),
2798 c->program_id, c->variant_id);
2799 vir_dump(c);
2800 fprintf(stderr, "\n");
2801 }
2802
2803 /* Attempt to allocate registers for the temporaries. If we fail,
2804 * reduce thread count and try again.
2805 */
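             /* If allocation keeps failing, a shader compiled at 4 threads
              * drops to 2 and, on pre-4.1 hardware, down to 1, at which
              * point the remaining THRSWs are removed.
              */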
2806 int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
2807 struct qpu_reg *temp_registers;
2808 while (true) {
2809 bool spilled;
2810 temp_registers = v3d_register_allocate(c, &spilled);
2811 if (spilled)
2812 continue;
2813
2814 if (temp_registers)
2815 break;
2816
2817 if (c->threads == min_threads) {
2818 fprintf(stderr, "Failed to register allocate at %d threads:\n",
2819 c->threads);
2820 vir_dump(c);
2821 c->failed = true;
2822 return;
2823 }
2824
2825 c->threads /= 2;
2826
2827 if (c->threads == 1)
2828 vir_remove_thrsw(c);
2829 }
2830
2831 if (c->spills &&
2832 (V3D_DEBUG & (V3D_DEBUG_VIR |
2833 v3d_debug_flag_for_shader_stage(c->s->info.stage)))) {
2834 fprintf(stderr, "%s prog %d/%d spilled VIR:\n",
2835 vir_get_stage_name(c),
2836 c->program_id, c->variant_id);
2837 vir_dump(c);
2838 fprintf(stderr, "\n");
2839 }
2840
2841 v3d_vir_to_qpu(c, temp_registers);
2842 }