v3d: Stop scalarizing our uniform loads.
[mesa.git] / src / broadcom / compiler / nir_to_vir.c
1 /*
2 * Copyright © 2016 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <inttypes.h>
25 #include "util/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "compiler/nir/nir.h"
31 #include "compiler/nir/nir_builder.h"
32 #include "common/v3d_device_info.h"
33 #include "v3d_compiler.h"
34
35 #define GENERAL_TMU_LOOKUP_PER_QUAD (0 << 7)
36 #define GENERAL_TMU_LOOKUP_PER_PIXEL (1 << 7)
37 #define GENERAL_TMU_READ_OP_PREFETCH (0 << 3)
38 #define GENERAL_TMU_READ_OP_CACHE_CLEAR (1 << 3)
39 #define GENERAL_TMU_READ_OP_CACHE_FLUSH (3 << 3)
40 #define GENERAL_TMU_READ_OP_CACHE_CLEAN (3 << 3)
41 #define GENERAL_TMU_READ_OP_CACHE_L1T_CLEAR (4 << 3)
42 #define GENERAL_TMU_READ_OP_CACHE_L1T_FLUSH_AGGREGATION (5 << 3)
43 #define GENERAL_TMU_READ_OP_ATOMIC_INC (8 << 3)
44 #define GENERAL_TMU_READ_OP_ATOMIC_DEC (9 << 3)
45 #define GENERAL_TMU_READ_OP_ATOMIC_NOT (10 << 3)
46 #define GENERAL_TMU_READ_OP_READ (15 << 3)
47 #define GENERAL_TMU_LOOKUP_TYPE_8BIT_I (0 << 0)
48 #define GENERAL_TMU_LOOKUP_TYPE_16BIT_I (1 << 0)
49 #define GENERAL_TMU_LOOKUP_TYPE_VEC2 (2 << 0)
50 #define GENERAL_TMU_LOOKUP_TYPE_VEC3 (3 << 0)
51 #define GENERAL_TMU_LOOKUP_TYPE_VEC4 (4 << 0)
52 #define GENERAL_TMU_LOOKUP_TYPE_8BIT_UI (5 << 0)
53 #define GENERAL_TMU_LOOKUP_TYPE_16BIT_UI (6 << 0)
54 #define GENERAL_TMU_LOOKUP_TYPE_32BIT_UI (7 << 0)
55
56 #define GENERAL_TMU_WRITE_OP_ATOMIC_ADD_WRAP (0 << 3)
57 #define GENERAL_TMU_WRITE_OP_ATOMIC_SUB_WRAP (1 << 3)
58 #define GENERAL_TMU_WRITE_OP_ATOMIC_XCHG (2 << 3)
59 #define GENERAL_TMU_WRITE_OP_ATOMIC_CMPXCHG (3 << 3)
60 #define GENERAL_TMU_WRITE_OP_ATOMIC_UMIN (4 << 3)
61 #define GENERAL_TMU_WRITE_OP_ATOMIC_UMAX (5 << 3)
62 #define GENERAL_TMU_WRITE_OP_ATOMIC_SMIN (6 << 3)
63 #define GENERAL_TMU_WRITE_OP_ATOMIC_SMAX (7 << 3)
64 #define GENERAL_TMU_WRITE_OP_ATOMIC_AND (8 << 3)
65 #define GENERAL_TMU_WRITE_OP_ATOMIC_OR (9 << 3)
66 #define GENERAL_TMU_WRITE_OP_ATOMIC_XOR (10 << 3)
67 #define GENERAL_TMU_WRITE_OP_WRITE (15 << 3)
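/*
 * As used by ntq_emit_tmu_general() below: bit 7 selects per-quad vs.
 * per-pixel lookups, bits 3..6 carry the read/write op, and the low bits
 * carry the return data type for reads.  A subset of these gets ORed into
 * a config word that is passed to the TMU as an implicit uniform.
 */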
68
69 static void
70 ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
71
72 static void
73 resize_qreg_array(struct v3d_compile *c,
74 struct qreg **regs,
75 uint32_t *size,
76 uint32_t decl_size)
77 {
78 if (*size >= decl_size)
79 return;
80
81 uint32_t old_size = *size;
82 *size = MAX2(*size * 2, decl_size);
83 *regs = reralloc(c, *regs, struct qreg, *size);
84 if (!*regs) {
85 fprintf(stderr, "Malloc failure\n");
86 abort();
87 }
88
89 for (uint32_t i = old_size; i < *size; i++)
90 (*regs)[i] = c->undef;
91 }
92
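/*
 * Emits a thread-switch signal so another QPU thread can run while this one
 * waits on outstanding TMU results.  Single-threaded shaders have nothing to
 * switch to, so this is skipped for c->threads == 1.
 */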
93 void
94 vir_emit_thrsw(struct v3d_compile *c)
95 {
96 if (c->threads == 1)
97 return;
98
99 /* Always thread switch after each texture operation for now.
100 *
101 * We could do better by batching a bunch of texture fetches up and
102 * then doing one thread switch and collecting all their results
103 * afterward.
104 */
105 c->last_thrsw = vir_NOP(c);
106 c->last_thrsw->qpu.sig.thrsw = true;
107 c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
108 }
109
110 /**
111 * Implements indirect uniform loads through the TMU general memory access
112 * interface.
113 */
114 static void
115 ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr)
116 {
117 uint32_t tmu_op = GENERAL_TMU_READ_OP_READ;
118 bool has_index = instr->intrinsic == nir_intrinsic_load_ubo;
119 int offset_src = 0 + has_index;
120
121 struct qreg offset;
122 if (instr->intrinsic == nir_intrinsic_load_uniform) {
123 offset = vir_uniform(c, QUNIFORM_UBO_ADDR, 0);
124
125 /* Find what variable in the default uniform block this
126 * uniform load is coming from.
127 */
128 uint32_t base = nir_intrinsic_base(instr);
129 int i;
130 struct v3d_ubo_range *range = NULL;
131 for (i = 0; i < c->num_ubo_ranges; i++) {
132 range = &c->ubo_ranges[i];
133 if (base >= range->src_offset &&
134 base < range->src_offset + range->size) {
135 break;
136 }
137 }
138 /* The driver-location-based offset always has to be within a
139 * declared uniform range.
140 */
141 assert(i != c->num_ubo_ranges);
142 if (!c->ubo_range_used[i]) {
143 c->ubo_range_used[i] = true;
144 range->dst_offset = c->next_ubo_dst_offset;
145 c->next_ubo_dst_offset += range->size;
146 }
147
148 base = base - range->src_offset + range->dst_offset;
149
150 if (base != 0)
151 offset = vir_ADD(c, offset, vir_uniform_ui(c, base));
152 } else {
153 /* Note that QUNIFORM_UBO_ADDR takes a UBO index shifted up by
154 * 1 (0 is gallium's constant buffer 0).
155 */
156 offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
157 nir_src_as_uint(instr->src[0]) + 1);
158 }
159
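        /* Build the TMU config word: the top bytes stay 0xffffff and only the
         * low byte (op, per-pixel mode, return type) gets filled in.  If the
         * result happens to be all-ones -- a plain single-component 32-bit
         * read -- the config uniform can be skipped entirely, which is what
         * the config == ~0 checks below are testing for.
         */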
160 uint32_t config = (0xffffff00 |
161 tmu_op |
162 GENERAL_TMU_LOOKUP_PER_PIXEL);
163 if (instr->num_components == 1) {
164 config |= GENERAL_TMU_LOOKUP_TYPE_32BIT_UI;
165 } else {
166 config |= (GENERAL_TMU_LOOKUP_TYPE_VEC2 +
167 instr->num_components - 2);
168 }
169
170 struct qreg dest;
171 if (config == ~0)
172 dest = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA);
173 else
174 dest = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUAU);
175
176 struct qinst *tmu;
177 if (nir_src_is_const(instr->src[offset_src]) &&
178 nir_src_as_uint(instr->src[offset_src]) == 0) {
179 tmu = vir_MOV_dest(c, dest, offset);
180 } else {
181 tmu = vir_ADD_dest(c, dest,
182 offset,
183 ntq_get_src(c, instr->src[offset_src], 0));
184 }
185
186 if (config != ~0) {
187 tmu->src[vir_get_implicit_uniform_src(tmu)] =
188 vir_uniform_ui(c, config);
189 }
190
191 vir_emit_thrsw(c);
192
193 for (int i = 0; i < nir_intrinsic_dest_components(instr); i++)
194 ntq_store_dest(c, &instr->dest, i, vir_MOV(c, vir_LDTMU(c)));
195 }
196
197 static struct qreg *
198 ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
199 {
200 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
201 def->num_components);
202 _mesa_hash_table_insert(c->def_ht, def, qregs);
203 return qregs;
204 }
205
206 /**
207 * This function is responsible for getting VIR results into the associated
208 * storage for a NIR instruction.
209 *
210 * If it's a NIR SSA def, then we just set the associated hash table entry to
211 * the new result.
212 *
213 * If it's a NIR reg, then we need to update the existing qreg assigned to the
214 * NIR destination with the incoming value. To do that without introducing
215 * new MOVs, we require that the incoming qreg either be a uniform, or be
216 * SSA-defined by the previous VIR instruction in the block and rewritable by
217 * this function. That lets us sneak ahead and insert the SF flag beforehand
218 * (knowing that the previous instruction doesn't depend on flags) and rewrite
219  * its destination to be the NIR reg's destination.
220 */
221 void
222 ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
223 struct qreg result)
224 {
225 struct qinst *last_inst = NULL;
226 if (!list_empty(&c->cur_block->instructions))
227 last_inst = (struct qinst *)c->cur_block->instructions.prev;
228
229 assert(result.file == QFILE_UNIF ||
230 (result.file == QFILE_TEMP &&
231 last_inst && last_inst == c->defs[result.index]));
232
233 if (dest->is_ssa) {
234 assert(chan < dest->ssa.num_components);
235
236 struct qreg *qregs;
237 struct hash_entry *entry =
238 _mesa_hash_table_search(c->def_ht, &dest->ssa);
239
240 if (entry)
241 qregs = entry->data;
242 else
243 qregs = ntq_init_ssa_def(c, &dest->ssa);
244
245 qregs[chan] = result;
246 } else {
247 nir_register *reg = dest->reg.reg;
248 assert(dest->reg.base_offset == 0);
249 assert(reg->num_array_elems == 0);
250 struct hash_entry *entry =
251 _mesa_hash_table_search(c->def_ht, reg);
252 struct qreg *qregs = entry->data;
253
254 /* Insert a MOV if the source wasn't an SSA def in the
255 * previous instruction.
256 */
257 if (result.file == QFILE_UNIF) {
258 result = vir_MOV(c, result);
259 last_inst = c->defs[result.index];
260 }
261
262 /* We know they're both temps, so just rewrite index. */
263 c->defs[last_inst->dst.index] = NULL;
264 last_inst->dst.index = qregs[chan].index;
265
266 /* If we're in control flow, then make this update of the reg
267 * conditional on the execution mask.
268 */
269 if (c->execute.file != QFILE_NULL) {
270 last_inst->dst.index = qregs[chan].index;
271
272 /* Set the flags to the current exec mask.
273 */
274 c->cursor = vir_before_inst(last_inst);
275 vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
276 c->cursor = vir_after_inst(last_inst);
277
278 vir_set_cond(last_inst, V3D_QPU_COND_IFA);
279 last_inst->cond_is_exec_mask = true;
280 }
281 }
282 }
283
284 struct qreg
285 ntq_get_src(struct v3d_compile *c, nir_src src, int i)
286 {
287 struct hash_entry *entry;
288 if (src.is_ssa) {
289 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
290 assert(i < src.ssa->num_components);
291 } else {
292 nir_register *reg = src.reg.reg;
293 entry = _mesa_hash_table_search(c->def_ht, reg);
294 assert(reg->num_array_elems == 0);
295 assert(src.reg.base_offset == 0);
296 assert(i < reg->num_components);
297 }
298
299 struct qreg *qregs = entry->data;
300 return qregs[i];
301 }
302
303 static struct qreg
304 ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
305 unsigned src)
306 {
307 assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
308 unsigned chan = ffs(instr->dest.write_mask) - 1;
309 struct qreg r = ntq_get_src(c, instr->src[src].src,
310 instr->src[src].swizzle[chan]);
311
312 assert(!instr->src[src].abs);
313 assert(!instr->src[src].negate);
314
315 return r;
316 };
317
318 static struct qreg
319 ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
320 {
321 return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
322 }
323
324 static void
325 ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
326 {
327 unsigned unit = instr->texture_index;
328 int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
329 int dest_size = nir_tex_instr_dest_size(instr);
330
331 struct qreg lod = c->undef;
332 if (lod_index != -1)
333 lod = ntq_get_src(c, instr->src[lod_index].src, 0);
334
335 for (int i = 0; i < dest_size; i++) {
336 assert(i < 3);
337 enum quniform_contents contents;
338
339 if (instr->is_array && i == dest_size - 1)
340 contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
341 else
342 contents = QUNIFORM_TEXTURE_WIDTH + i;
343
344 struct qreg size = vir_uniform(c, contents, unit);
345
346 switch (instr->sampler_dim) {
347 case GLSL_SAMPLER_DIM_1D:
348 case GLSL_SAMPLER_DIM_2D:
349 case GLSL_SAMPLER_DIM_MS:
350 case GLSL_SAMPLER_DIM_3D:
351 case GLSL_SAMPLER_DIM_CUBE:
352 /* Don't minify the array size. */
353 if (!(instr->is_array && i == dest_size - 1)) {
354 size = ntq_minify(c, size, lod);
355 }
356 break;
357
358 case GLSL_SAMPLER_DIM_RECT:
359 /* There's no LOD field for rects */
360 break;
361
362 default:
363 unreachable("Bad sampler type");
364 }
365
366 ntq_store_dest(c, &instr->dest, i, size);
367 }
368 }
369
370 static void
371 ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
372 {
373 unsigned unit = instr->texture_index;
374
375         /* Since each texture sampling op requires uploading uniforms to
376          * reference the texture anyway, there's no HW support for texture
377          * size queries; we just upload uniforms containing the size instead.
378          */
379 switch (instr->op) {
380 case nir_texop_query_levels:
381 ntq_store_dest(c, &instr->dest, 0,
382 vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
383 return;
384 case nir_texop_txs:
385 ntq_emit_txs(c, instr);
386 return;
387 default:
388 break;
389 }
390
391 if (c->devinfo->ver >= 40)
392 v3d40_vir_emit_tex(c, instr);
393 else
394 v3d33_vir_emit_tex(c, instr);
395 }
396
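/*
 * Range-reduces the argument and evaluates sin/cos with the SFU SIN
 * operation, which appears to compute sin(pi * x) for x in roughly
 * [-0.5, 0.5]: the input is scaled by 1/pi (plus 0.5 for cosine), the
 * nearest whole period is subtracted off, and the sign of the result is
 * flipped for odd periods by XORing the period parity into the sign bit
 * (the shift by -1 presumably wraps to a 31-bit left shift).
 */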
397 static struct qreg
398 ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
399 {
400 struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
401 if (is_cos)
402 input = vir_FADD(c, input, vir_uniform_f(c, 0.5));
403
404 struct qreg periods = vir_FROUND(c, input);
405 struct qreg sin_output = vir_SIN(c, vir_FSUB(c, input, periods));
406 return vir_XOR(c, sin_output, vir_SHL(c,
407 vir_FTOIN(c, periods),
408 vir_uniform_ui(c, -1)));
409 }
410
411 static struct qreg
412 ntq_fsign(struct v3d_compile *c, struct qreg src)
413 {
414 struct qreg t = vir_get_temp(c);
415
416 vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
417 vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHZ);
418 vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
419 vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHN);
420 vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
421 return vir_MOV(c, t);
422 }
423
424 static struct qreg
425 ntq_isign(struct v3d_compile *c, struct qreg src)
426 {
427 struct qreg t = vir_get_temp(c);
428
429 vir_MOV_dest(c, t, vir_uniform_ui(c, 0));
430 vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHZ);
431 vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_ui(c, 1));
432 vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHN);
433 vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_ui(c, -1));
434 return vir_MOV(c, t);
435 }
436
437 static void
438 emit_fragcoord_input(struct v3d_compile *c, int attr)
439 {
440 c->inputs[attr * 4 + 0] = vir_FXCD(c);
441 c->inputs[attr * 4 + 1] = vir_FYCD(c);
442 c->inputs[attr * 4 + 2] = c->payload_z;
443 c->inputs[attr * 4 + 3] = vir_RECIP(c, c->payload_w);
444 }
445
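/*
 * Emits the per-fragment evaluation of one varying component.  The ldvary
 * signal produces two values: "vary" (in r3 on v3d 3.x, or the signalled
 * temp on 4.1+), which appears to be the per-pixel coefficient, and r5, the
 * constant term.  Smooth varyings are computed as vary * W + r5,
 * noperspective ones as vary + r5, and flat-shaded ones just take r5 (the
 * MOV of vary into c->undef only consumes the ldvary result).
 */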
446 static struct qreg
447 emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
448 uint8_t swizzle)
449 {
450 struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
451 struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);
452
453 struct qreg vary;
454 if (c->devinfo->ver >= 41) {
455 struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
456 c->undef, c->undef);
457 ldvary->qpu.sig.ldvary = true;
458 vary = vir_emit_def(c, ldvary);
459 } else {
460 vir_NOP(c)->qpu.sig.ldvary = true;
461 vary = r3;
462 }
463
464 /* For gl_PointCoord input or distance along a line, we'll be called
465          * with no nir_variable; these don't count toward VPM size, so we
466          * don't track an input slot for them.
467 */
468 if (!var) {
469 return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
470 }
471
472 int i = c->num_inputs++;
473 c->input_slots[i] = v3d_slot_from_slot_and_component(var->data.location,
474 swizzle);
475
476 switch (var->data.interpolation) {
477 case INTERP_MODE_NONE:
478 /* If a gl_FrontColor or gl_BackColor input has no interp
479 * qualifier, then if we're using glShadeModel(GL_FLAT) it
480 * needs to be flat shaded.
481 */
482 switch (var->data.location) {
483 case VARYING_SLOT_COL0:
484 case VARYING_SLOT_COL1:
485 case VARYING_SLOT_BFC0:
486 case VARYING_SLOT_BFC1:
487 if (c->fs_key->shade_model_flat) {
488 BITSET_SET(c->flat_shade_flags, i);
489 vir_MOV_dest(c, c->undef, vary);
490 return vir_MOV(c, r5);
491 } else {
492 return vir_FADD(c, vir_FMUL(c, vary,
493 c->payload_w), r5);
494 }
495 default:
496 break;
497 }
498 /* FALLTHROUGH */
499 case INTERP_MODE_SMOOTH:
500 if (var->data.centroid) {
501 BITSET_SET(c->centroid_flags, i);
502 return vir_FADD(c, vir_FMUL(c, vary,
503 c->payload_w_centroid), r5);
504 } else {
505 return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
506 }
507 case INTERP_MODE_NOPERSPECTIVE:
508 BITSET_SET(c->noperspective_flags, i);
509 return vir_FADD(c, vir_MOV(c, vary), r5);
510 case INTERP_MODE_FLAT:
511 BITSET_SET(c->flat_shade_flags, i);
512 vir_MOV_dest(c, c->undef, vary);
513 return vir_MOV(c, r5);
514 default:
515 unreachable("Bad interp mode");
516 }
517 }
518
519 static void
520 emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var)
521 {
522 for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
523 int chan = var->data.location_frac + i;
524 c->inputs[attr * 4 + chan] =
525 emit_fragment_varying(c, var, chan);
526 }
527 }
528
529 static void
530 add_output(struct v3d_compile *c,
531 uint32_t decl_offset,
532 uint8_t slot,
533 uint8_t swizzle)
534 {
535 uint32_t old_array_size = c->outputs_array_size;
536 resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
537 decl_offset + 1);
538
539 if (old_array_size != c->outputs_array_size) {
540 c->output_slots = reralloc(c,
541 c->output_slots,
542 struct v3d_varying_slot,
543 c->outputs_array_size);
544 }
545
546 c->output_slots[decl_offset] =
547 v3d_slot_from_slot_and_component(slot, swizzle);
548 }
549
550 static void
551 declare_uniform_range(struct v3d_compile *c, uint32_t start, uint32_t size)
552 {
553 unsigned array_id = c->num_ubo_ranges++;
554 if (array_id >= c->ubo_ranges_array_size) {
555 c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
556 array_id + 1);
557 c->ubo_ranges = reralloc(c, c->ubo_ranges,
558 struct v3d_ubo_range,
559 c->ubo_ranges_array_size);
560 c->ubo_range_used = reralloc(c, c->ubo_range_used,
561 bool,
562 c->ubo_ranges_array_size);
563 }
564
565 c->ubo_ranges[array_id].dst_offset = 0;
566 c->ubo_ranges[array_id].src_offset = start;
567 c->ubo_ranges[array_id].size = size;
568 c->ubo_range_used[array_id] = false;
569 }
570
571 /**
572  * If compare_instr is a comparison we can handle, emits the comparison and
573  * returns true, setting *out_cond to the QPU condition that will be true
574  * when the comparison passes.
575 */
576 static bool
577 ntq_emit_comparison(struct v3d_compile *c,
578 nir_alu_instr *compare_instr,
579 enum v3d_qpu_cond *out_cond)
580 {
581 struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
582 struct qreg src1;
583 if (nir_op_infos[compare_instr->op].num_inputs > 1)
584 src1 = ntq_get_alu_src(c, compare_instr, 1);
585 bool cond_invert = false;
586 struct qreg nop = vir_reg(QFILE_NULL, 0);
587
588 switch (compare_instr->op) {
589 case nir_op_feq32:
590 case nir_op_seq:
591 vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
592 break;
593 case nir_op_ieq32:
594 vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
595 break;
596
597 case nir_op_fne32:
598 case nir_op_sne:
599 vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
600 cond_invert = true;
601 break;
602 case nir_op_ine32:
603 vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
604 cond_invert = true;
605 break;
606
607 case nir_op_fge32:
608 case nir_op_sge:
609 vir_set_pf(vir_FCMP_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
610 break;
611 case nir_op_ige32:
612 vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
613 cond_invert = true;
614 break;
615 case nir_op_uge32:
616 vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
617 cond_invert = true;
618 break;
619
620 case nir_op_slt:
621 case nir_op_flt32:
622 vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHN);
623 break;
624 case nir_op_ilt32:
625 vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
626 break;
627 case nir_op_ult32:
628 vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
629 break;
630
631 default:
632 return false;
633 }
634
635 *out_cond = cond_invert ? V3D_QPU_COND_IFNA : V3D_QPU_COND_IFA;
636
637 return true;
638 }
639
640 /* Finds an ALU instruction that generates our src value that could
641 * (potentially) be greedily emitted in the consuming instruction.
642 */
643 static struct nir_alu_instr *
644 ntq_get_alu_parent(nir_src src)
645 {
646 if (!src.is_ssa || src.ssa->parent_instr->type != nir_instr_type_alu)
647 return NULL;
648 nir_alu_instr *instr = nir_instr_as_alu(src.ssa->parent_instr);
649 if (!instr)
650 return NULL;
651
652 /* If the ALU instr's srcs are non-SSA, then we would have to avoid
653 * moving emission of the ALU instr down past another write of the
654 * src.
655 */
656 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
657 if (!instr->src[i].src.is_ssa)
658 return NULL;
659 }
660
661 return instr;
662 }
663
664 /**
665 * Attempts to fold a comparison generating a boolean result into the
666 * condition code for selecting between two values, instead of comparing the
667 * boolean result against 0 to generate the condition code.
668 */
669 static struct qreg ntq_emit_bcsel(struct v3d_compile *c, nir_alu_instr *instr,
670 struct qreg *src)
671 {
672 nir_alu_instr *compare = ntq_get_alu_parent(instr->src[0].src);
673 if (!compare)
674 goto out;
675
676 enum v3d_qpu_cond cond;
677 if (ntq_emit_comparison(c, compare, &cond))
678 return vir_MOV(c, vir_SEL(c, cond, src[1], src[2]));
679
680 out:
681 vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
682 return vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA, src[1], src[2]));
683 }
684
685
686 static void
687 ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
688 {
689 /* This should always be lowered to ALU operations for V3D. */
690 assert(!instr->dest.saturate);
691
692         /* Vectors are special in that they have non-scalarized writemasks:
693          * each argument's first swizzle channel goes, in order, into the
694          * corresponding writemask channel.
695 */
696 if (instr->op == nir_op_vec2 ||
697 instr->op == nir_op_vec3 ||
698 instr->op == nir_op_vec4) {
699 struct qreg srcs[4];
700 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
701 srcs[i] = ntq_get_src(c, instr->src[i].src,
702 instr->src[i].swizzle[0]);
703 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
704 ntq_store_dest(c, &instr->dest.dest, i,
705 vir_MOV(c, srcs[i]));
706 return;
707 }
708
709 /* General case: We can just grab the one used channel per src. */
710 struct qreg src[nir_op_infos[instr->op].num_inputs];
711 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
712 src[i] = ntq_get_alu_src(c, instr, i);
713 }
714
715 struct qreg result;
716
717 switch (instr->op) {
718 case nir_op_fmov:
719 case nir_op_imov:
720 result = vir_MOV(c, src[0]);
721 break;
722
723 case nir_op_fneg:
724 result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
725 break;
726 case nir_op_ineg:
727 result = vir_NEG(c, src[0]);
728 break;
729
730 case nir_op_fmul:
731 result = vir_FMUL(c, src[0], src[1]);
732 break;
733 case nir_op_fadd:
734 result = vir_FADD(c, src[0], src[1]);
735 break;
736 case nir_op_fsub:
737 result = vir_FSUB(c, src[0], src[1]);
738 break;
739 case nir_op_fmin:
740 result = vir_FMIN(c, src[0], src[1]);
741 break;
742 case nir_op_fmax:
743 result = vir_FMAX(c, src[0], src[1]);
744 break;
745
746 case nir_op_f2i32:
747 result = vir_FTOIZ(c, src[0]);
748 break;
749 case nir_op_f2u32:
750 result = vir_FTOUZ(c, src[0]);
751 break;
752 case nir_op_i2f32:
753 result = vir_ITOF(c, src[0]);
754 break;
755 case nir_op_u2f32:
756 result = vir_UTOF(c, src[0]);
757 break;
758 case nir_op_b2f32:
759 result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
760 break;
761 case nir_op_b2i32:
762 result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
763 break;
764 case nir_op_i2b32:
765 case nir_op_f2b32:
766 vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
767 result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
768 vir_uniform_ui(c, ~0),
769 vir_uniform_ui(c, 0)));
770 break;
771
772 case nir_op_iadd:
773 result = vir_ADD(c, src[0], src[1]);
774 break;
775 case nir_op_ushr:
776 result = vir_SHR(c, src[0], src[1]);
777 break;
778 case nir_op_isub:
779 result = vir_SUB(c, src[0], src[1]);
780 break;
781 case nir_op_ishr:
782 result = vir_ASR(c, src[0], src[1]);
783 break;
784 case nir_op_ishl:
785 result = vir_SHL(c, src[0], src[1]);
786 break;
787 case nir_op_imin:
788 result = vir_MIN(c, src[0], src[1]);
789 break;
790 case nir_op_umin:
791 result = vir_UMIN(c, src[0], src[1]);
792 break;
793 case nir_op_imax:
794 result = vir_MAX(c, src[0], src[1]);
795 break;
796 case nir_op_umax:
797 result = vir_UMAX(c, src[0], src[1]);
798 break;
799 case nir_op_iand:
800 result = vir_AND(c, src[0], src[1]);
801 break;
802 case nir_op_ior:
803 result = vir_OR(c, src[0], src[1]);
804 break;
805 case nir_op_ixor:
806 result = vir_XOR(c, src[0], src[1]);
807 break;
808 case nir_op_inot:
809 result = vir_NOT(c, src[0]);
810 break;
811
812 case nir_op_ufind_msb:
813 result = vir_SUB(c, vir_uniform_ui(c, 31), vir_CLZ(c, src[0]));
814 break;
815
816 case nir_op_imul:
817 result = vir_UMUL(c, src[0], src[1]);
818 break;
819
820 case nir_op_seq:
821 case nir_op_sne:
822 case nir_op_sge:
823 case nir_op_slt: {
824 enum v3d_qpu_cond cond;
825 MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
826 assert(ok);
827 result = vir_MOV(c, vir_SEL(c, cond,
828 vir_uniform_f(c, 1.0),
829 vir_uniform_f(c, 0.0)));
830 break;
831 }
832
833 case nir_op_feq32:
834 case nir_op_fne32:
835 case nir_op_fge32:
836 case nir_op_flt32:
837 case nir_op_ieq32:
838 case nir_op_ine32:
839 case nir_op_ige32:
840 case nir_op_uge32:
841 case nir_op_ilt32:
842 case nir_op_ult32: {
843 enum v3d_qpu_cond cond;
844 MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
845 assert(ok);
846 result = vir_MOV(c, vir_SEL(c, cond,
847 vir_uniform_ui(c, ~0),
848 vir_uniform_ui(c, 0)));
849 break;
850 }
851
852 case nir_op_b32csel:
853 result = ntq_emit_bcsel(c, instr, src);
854 break;
855 case nir_op_fcsel:
856 vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
857 result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
858 src[1], src[2]));
859 break;
860
861 case nir_op_frcp:
862 result = vir_RECIP(c, src[0]);
863 break;
864 case nir_op_frsq:
865 result = vir_RSQRT(c, src[0]);
866 break;
867 case nir_op_fexp2:
868 result = vir_EXP(c, src[0]);
869 break;
870 case nir_op_flog2:
871 result = vir_LOG(c, src[0]);
872 break;
873
874 case nir_op_fceil:
875 result = vir_FCEIL(c, src[0]);
876 break;
877 case nir_op_ffloor:
878 result = vir_FFLOOR(c, src[0]);
879 break;
880 case nir_op_fround_even:
881 result = vir_FROUND(c, src[0]);
882 break;
883 case nir_op_ftrunc:
884 result = vir_FTRUNC(c, src[0]);
885 break;
886 case nir_op_ffract:
887 result = vir_FSUB(c, src[0], vir_FFLOOR(c, src[0]));
888 break;
889
890 case nir_op_fsin:
891 result = ntq_fsincos(c, src[0], false);
892 break;
893 case nir_op_fcos:
894 result = ntq_fsincos(c, src[0], true);
895 break;
896
897 case nir_op_fsign:
898 result = ntq_fsign(c, src[0]);
899 break;
900 case nir_op_isign:
901 result = ntq_isign(c, src[0]);
902 break;
903
904 case nir_op_fabs: {
905 result = vir_FMOV(c, src[0]);
906 vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
907 break;
908 }
909
910 case nir_op_iabs:
911 result = vir_MAX(c, src[0],
912 vir_SUB(c, vir_uniform_ui(c, 0), src[0]));
913 break;
914
915 case nir_op_fddx:
916 case nir_op_fddx_coarse:
917 case nir_op_fddx_fine:
918 result = vir_FDX(c, src[0]);
919 break;
920
921 case nir_op_fddy:
922 case nir_op_fddy_coarse:
923 case nir_op_fddy_fine:
924 result = vir_FDY(c, src[0]);
925 break;
926
927 case nir_op_uadd_carry:
928 vir_PF(c, vir_ADD(c, src[0], src[1]), V3D_QPU_PF_PUSHC);
929 result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
930 vir_uniform_ui(c, ~0),
931 vir_uniform_ui(c, 0)));
932 break;
933
934 case nir_op_pack_half_2x16_split:
935 result = vir_VFPACK(c, src[0], src[1]);
936 break;
937
938 case nir_op_unpack_half_2x16_split_x:
939 /* XXX perf: It would be good to be able to merge this unpack
940 * with whatever uses our result.
941 */
942 result = vir_FMOV(c, src[0]);
943 vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_L);
944 break;
945
946 case nir_op_unpack_half_2x16_split_y:
947 result = vir_FMOV(c, src[0]);
948 vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_H);
949 break;
950
951 default:
952 fprintf(stderr, "unknown NIR ALU inst: ");
953 nir_print_instr(&instr->instr, stderr);
954 fprintf(stderr, "\n");
955 abort();
956 }
957
958 /* We have a scalar result, so the instruction should only have a
959 * single channel written to.
960 */
961 assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
962 ntq_store_dest(c, &instr->dest.dest,
963 ffs(instr->dest.write_mask) - 1, result);
964 }
965
966 /* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
967 * specifier. They come from a register that's preloaded with 0xffffffff
968  * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
969 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
970 */
971 #define TLB_TYPE_F16_COLOR (3 << 6)
972 #define TLB_TYPE_I32_COLOR (1 << 6)
973 #define TLB_TYPE_F32_COLOR (0 << 6)
974 #define TLB_RENDER_TARGET_SHIFT 3 /* Reversed! 7 = RT 0, 0 = RT 7. */
975 #define TLB_SAMPLE_MODE_PER_SAMPLE (0 << 2)
976 #define TLB_SAMPLE_MODE_PER_PIXEL (1 << 2)
977 #define TLB_F16_SWAP_HI_LO (1 << 1)
978 #define TLB_VEC_SIZE_4_F16 (1 << 0)
979 #define TLB_VEC_SIZE_2_F16 (0 << 0)
980 #define TLB_VEC_SIZE_MINUS_1_SHIFT 0
981
982 /* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
983 * flag is set.
984 */
985 #define TLB_TYPE_DEPTH ((2 << 6) | (0 << 4))
986 #define TLB_DEPTH_TYPE_INVARIANT (0 << 2) /* Unmodified sideband input used */
987 #define TLB_DEPTH_TYPE_PER_PIXEL (1 << 2) /* QPU result used */
988 #define TLB_V42_DEPTH_TYPE_INVARIANT (0 << 3) /* Unmodified sideband input used */
989 #define TLB_V42_DEPTH_TYPE_PER_PIXEL (1 << 3) /* QPU result used */
990
991 /* Stencil is a single 32-bit write. */
992 #define TLB_TYPE_STENCIL_ALPHA ((2 << 6) | (1 << 4))
993
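/*
 * For example, emit_frag_end() below assembles a vec4 f16 color write to
 * render target rt as roughly:
 *
 *     conf = 0xffffff00 |
 *            TLB_SAMPLE_MODE_PER_PIXEL |
 *            ((7 - rt) << TLB_RENDER_TARGET_SHIFT) |
 *            TLB_TYPE_F16_COLOR |
 *            TLB_F16_SWAP_HI_LO |
 *            TLB_VEC_SIZE_4_F16;
 *
 * and passes it as the implicit uniform on the first TLBU write.
 */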
994 static void
995 emit_frag_end(struct v3d_compile *c)
996 {
997 /* XXX
998 if (c->output_sample_mask_index != -1) {
999 vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1000 }
1001 */
1002
1003 bool has_any_tlb_color_write = false;
1004 for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
1005 if (c->output_color_var[rt])
1006 has_any_tlb_color_write = true;
1007 }
1008
1009 if (c->fs_key->sample_alpha_to_coverage && c->output_color_var[0]) {
1010 struct nir_variable *var = c->output_color_var[0];
1011 struct qreg *color = &c->outputs[var->data.driver_location * 4];
1012
1013 vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
1014 vir_AND(c,
1015 vir_MSF(c),
1016 vir_FTOC(c, color[3])));
1017 }
1018
1019 if (c->output_position_index != -1) {
1020 struct qinst *inst = vir_MOV_dest(c,
1021 vir_reg(QFILE_TLBU, 0),
1022 c->outputs[c->output_position_index]);
1023 uint8_t tlb_specifier = TLB_TYPE_DEPTH;
1024
1025 if (c->devinfo->ver >= 42) {
1026 tlb_specifier |= (TLB_V42_DEPTH_TYPE_PER_PIXEL |
1027 TLB_SAMPLE_MODE_PER_PIXEL);
1028 } else
1029 tlb_specifier |= TLB_DEPTH_TYPE_PER_PIXEL;
1030
1031 inst->src[vir_get_implicit_uniform_src(inst)] =
1032 vir_uniform_ui(c, tlb_specifier | 0xffffff00);
1033 } else if (c->s->info.fs.uses_discard ||
1034 c->fs_key->sample_alpha_to_coverage ||
1035 !has_any_tlb_color_write) {
1036 /* Emit passthrough Z if it needed to be delayed until shader
1037 * end due to potential discards.
1038 *
1039 * Since (single-threaded) fragment shaders always need a TLB
1040                  * write, emit a passthrough Z write if we didn't have any
1041                  * color buffer writes, and flag the shader as potentially
1042                  * discarding so that we can use Z as the TLB write.
1043 */
1044 c->s->info.fs.uses_discard = true;
1045
1046 struct qinst *inst = vir_MOV_dest(c,
1047 vir_reg(QFILE_TLBU, 0),
1048 vir_reg(QFILE_NULL, 0));
1049 uint8_t tlb_specifier = TLB_TYPE_DEPTH;
1050
1051 if (c->devinfo->ver >= 42) {
1052 /* The spec says the PER_PIXEL flag is ignored for
1053 * invariant writes, but the simulator demands it.
1054 */
1055 tlb_specifier |= (TLB_V42_DEPTH_TYPE_INVARIANT |
1056 TLB_SAMPLE_MODE_PER_PIXEL);
1057 } else {
1058 tlb_specifier |= TLB_DEPTH_TYPE_INVARIANT;
1059 }
1060
1061 inst->src[vir_get_implicit_uniform_src(inst)] =
1062 vir_uniform_ui(c, tlb_specifier | 0xffffff00);
1063 }
1064
1065 /* XXX: Performance improvement: Merge Z write and color writes TLB
1066 * uniform setup
1067 */
1068
1069 for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
1070 if (!c->output_color_var[rt])
1071 continue;
1072
1073 nir_variable *var = c->output_color_var[rt];
1074 struct qreg *color = &c->outputs[var->data.driver_location * 4];
1075 int num_components = glsl_get_vector_elements(var->type);
1076 uint32_t conf = 0xffffff00;
1077 struct qinst *inst;
1078
1079 conf |= TLB_SAMPLE_MODE_PER_PIXEL;
1080 conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;
1081
1082 if (c->fs_key->swap_color_rb & (1 << rt))
1083 num_components = MAX2(num_components, 3);
1084
1085 assert(num_components != 0);
1086 switch (glsl_get_base_type(var->type)) {
1087 case GLSL_TYPE_UINT:
1088 case GLSL_TYPE_INT:
1089 /* The F32 vs I32 distinction was dropped in 4.2. */
1090 if (c->devinfo->ver < 42)
1091 conf |= TLB_TYPE_I32_COLOR;
1092 else
1093 conf |= TLB_TYPE_F32_COLOR;
1094 conf |= ((num_components - 1) <<
1095 TLB_VEC_SIZE_MINUS_1_SHIFT);
1096
1097 inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), color[0]);
1098 inst->src[vir_get_implicit_uniform_src(inst)] =
1099 vir_uniform_ui(c, conf);
1100
1101 for (int i = 1; i < num_components; i++) {
1102 inst = vir_MOV_dest(c, vir_reg(QFILE_TLB, 0),
1103 color[i]);
1104 }
1105 break;
1106
1107 default: {
1108 struct qreg r = color[0];
1109 struct qreg g = color[1];
1110 struct qreg b = color[2];
1111 struct qreg a = color[3];
1112
1113 if (c->fs_key->f32_color_rb & (1 << rt)) {
1114 conf |= TLB_TYPE_F32_COLOR;
1115 conf |= ((num_components - 1) <<
1116 TLB_VEC_SIZE_MINUS_1_SHIFT);
1117 } else {
1118 conf |= TLB_TYPE_F16_COLOR;
1119 conf |= TLB_F16_SWAP_HI_LO;
1120 if (num_components >= 3)
1121 conf |= TLB_VEC_SIZE_4_F16;
1122 else
1123 conf |= TLB_VEC_SIZE_2_F16;
1124 }
1125
1126 if (c->fs_key->swap_color_rb & (1 << rt)) {
1127 r = color[2];
1128 b = color[0];
1129 }
1130
1131 if (c->fs_key->sample_alpha_to_one)
1132 a = vir_uniform_f(c, 1.0);
1133
1134 if (c->fs_key->f32_color_rb & (1 << rt)) {
1135 inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), r);
1136 inst->src[vir_get_implicit_uniform_src(inst)] =
1137 vir_uniform_ui(c, conf);
1138
1139 if (num_components >= 2)
1140 vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), g);
1141 if (num_components >= 3)
1142 vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), b);
1143 if (num_components >= 4)
1144 vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), a);
1145 } else {
1146 inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), r, g);
1147 if (conf != ~0) {
1148 inst->dst.file = QFILE_TLBU;
1149 inst->src[vir_get_implicit_uniform_src(inst)] =
1150 vir_uniform_ui(c, conf);
1151 }
1152
1153 if (num_components >= 3)
1154 inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), b, a);
1155 }
1156 break;
1157 }
1158 }
1159 }
1160 }
1161
1162 static void
1163 vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t *vpm_index)
1164 {
1165 if (c->devinfo->ver >= 40) {
1166 vir_STVPMV(c, vir_uniform_ui(c, *vpm_index), val);
1167 *vpm_index = *vpm_index + 1;
1168 } else {
1169 vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
1170 }
1171
1172 c->num_vpm_writes++;
1173 }
1174
1175 static void
1176 emit_scaled_viewport_write(struct v3d_compile *c, struct qreg rcp_w,
1177 uint32_t *vpm_index)
1178 {
1179 for (int i = 0; i < 2; i++) {
1180 struct qreg coord = c->outputs[c->output_position_index + i];
1181 coord = vir_FMUL(c, coord,
1182 vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i,
1183 0));
1184 coord = vir_FMUL(c, coord, rcp_w);
1185 vir_VPM_WRITE(c, vir_FTOIN(c, coord), vpm_index);
1186 }
1187
1188 }
1189
1190 static void
1191 emit_zs_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
1192 {
1193 struct qreg zscale = vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
1194 struct qreg zoffset = vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
1195
1196 struct qreg z = c->outputs[c->output_position_index + 2];
1197 z = vir_FMUL(c, z, zscale);
1198 z = vir_FMUL(c, z, rcp_w);
1199 z = vir_FADD(c, z, zoffset);
1200 vir_VPM_WRITE(c, z, vpm_index);
1201 }
1202
1203 static void
1204 emit_rcp_wc_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
1205 {
1206 vir_VPM_WRITE(c, rcp_w, vpm_index);
1207 }
1208
1209 static void
1210 emit_point_size_write(struct v3d_compile *c, uint32_t *vpm_index)
1211 {
1212 struct qreg point_size;
1213
1214 if (c->output_point_size_index != -1)
1215 point_size = c->outputs[c->output_point_size_index];
1216 else
1217 point_size = vir_uniform_f(c, 1.0);
1218
1219 /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
1220 * BCM21553).
1221 */
1222 point_size = vir_FMAX(c, point_size, vir_uniform_f(c, .125));
1223
1224 vir_VPM_WRITE(c, point_size, vpm_index);
1225 }
1226
1227 static void
1228 emit_vpm_write_setup(struct v3d_compile *c)
1229 {
1230 if (c->devinfo->ver >= 40)
1231 return;
1232
1233 v3d33_vir_vpm_write_setup(c);
1234 }
1235
1236 /**
1237 * Sets up c->outputs[c->output_position_index] for the vertex shader
1238 * epilogue, if an output vertex position wasn't specified in the user's
1239 * shader. This may be the case for transform feedback with rasterizer
1240 * discard enabled.
1241 */
1242 static void
1243 setup_default_position(struct v3d_compile *c)
1244 {
1245 if (c->output_position_index != -1)
1246 return;
1247
1248 c->output_position_index = c->outputs_array_size;
1249 for (int i = 0; i < 4; i++) {
1250 add_output(c,
1251 c->output_position_index + i,
1252 VARYING_SLOT_POS, i);
1253 }
1254 }
1255
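/*
 * Emits the VS epilogue VPM writes in the layout the code below produces:
 * for coordinate shaders the raw clip-space position, the scaled viewport
 * X/Y, and optionally point size; for vertex shaders the scaled viewport
 * X/Y, the viewport-transformed Z, 1/Wc, and optionally point size; in both
 * cases followed by one VPM slot per FS input, padded with 0.0 when the VS
 * declares no matching output.
 */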
1256 static void
1257 emit_vert_end(struct v3d_compile *c)
1258 {
1259 setup_default_position(c);
1260
1261 uint32_t vpm_index = 0;
1262 struct qreg rcp_w = vir_RECIP(c,
1263 c->outputs[c->output_position_index + 3]);
1264
1265 emit_vpm_write_setup(c);
1266
1267 if (c->vs_key->is_coord) {
1268 for (int i = 0; i < 4; i++)
1269 vir_VPM_WRITE(c, c->outputs[c->output_position_index + i],
1270 &vpm_index);
1271 emit_scaled_viewport_write(c, rcp_w, &vpm_index);
1272 if (c->vs_key->per_vertex_point_size) {
1273 emit_point_size_write(c, &vpm_index);
1274 /* emit_rcp_wc_write(c, rcp_w); */
1275 }
1276 /* XXX: Z-only rendering */
1277 if (0)
1278 emit_zs_write(c, rcp_w, &vpm_index);
1279 } else {
1280 emit_scaled_viewport_write(c, rcp_w, &vpm_index);
1281 emit_zs_write(c, rcp_w, &vpm_index);
1282 emit_rcp_wc_write(c, rcp_w, &vpm_index);
1283 if (c->vs_key->per_vertex_point_size)
1284 emit_point_size_write(c, &vpm_index);
1285 }
1286
1287 for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
1288 struct v3d_varying_slot input = c->vs_key->fs_inputs[i];
1289 int j;
1290
1291 for (j = 0; j < c->num_outputs; j++) {
1292 struct v3d_varying_slot output = c->output_slots[j];
1293
1294 if (!memcmp(&input, &output, sizeof(input))) {
1295 vir_VPM_WRITE(c, c->outputs[j],
1296 &vpm_index);
1297 break;
1298 }
1299 }
1300 /* Emit padding if we didn't find a declared VS output for
1301 * this FS input.
1302 */
1303 if (j == c->num_outputs)
1304 vir_VPM_WRITE(c, vir_uniform_f(c, 0.0),
1305 &vpm_index);
1306 }
1307
1308 /* GFXH-1684: VPM writes need to be complete by the end of the shader.
1309 */
1310 if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
1311 vir_VPMWT(c);
1312 }
1313
1314 void
1315 v3d_optimize_nir(struct nir_shader *s)
1316 {
1317 bool progress;
1318
1319 do {
1320 progress = false;
1321
1322 NIR_PASS_V(s, nir_lower_vars_to_ssa);
1323 NIR_PASS(progress, s, nir_lower_alu_to_scalar);
1324 NIR_PASS(progress, s, nir_lower_phis_to_scalar);
1325 NIR_PASS(progress, s, nir_copy_prop);
1326 NIR_PASS(progress, s, nir_opt_remove_phis);
1327 NIR_PASS(progress, s, nir_opt_dce);
1328 NIR_PASS(progress, s, nir_opt_dead_cf);
1329 NIR_PASS(progress, s, nir_opt_cse);
1330 NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
1331 NIR_PASS(progress, s, nir_opt_algebraic);
1332 NIR_PASS(progress, s, nir_opt_constant_folding);
1333 NIR_PASS(progress, s, nir_opt_undef);
1334 } while (progress);
1335
1336 NIR_PASS(progress, s, nir_opt_move_load_ubo);
1337 }
1338
1339 static int
1340 driver_location_compare(const void *in_a, const void *in_b)
1341 {
1342 const nir_variable *const *a = in_a;
1343 const nir_variable *const *b = in_b;
1344
1345 return (*a)->data.driver_location - (*b)->data.driver_location;
1346 }
1347
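/*
 * Reads one input component from the VPM.  On v3d 4.x this is just an
 * LDVPMV with an incrementing address uniform, so only a running count is
 * kept.  On 3.3 the reads have to be declared in batches of up to 32
 * components via v3d33_vir_vpm_read_setup() and then consumed sequentially
 * from the VPM register; *num_components_queued and *remaining track that
 * batching.
 */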
1348 static struct qreg
1349 ntq_emit_vpm_read(struct v3d_compile *c,
1350 uint32_t *num_components_queued,
1351 uint32_t *remaining,
1352 uint32_t vpm_index)
1353 {
1354 struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);
1355
1356         if (c->devinfo->ver >= 40) {
1357 return vir_LDVPMV_IN(c,
1358 vir_uniform_ui(c,
1359 (*num_components_queued)++));
1360 }
1361
1362 if (*num_components_queued != 0) {
1363 (*num_components_queued)--;
1364 c->num_inputs++;
1365 return vir_MOV(c, vpm);
1366 }
1367
1368 uint32_t num_components = MIN2(*remaining, 32);
1369
1370 v3d33_vir_vpm_read_setup(c, num_components);
1371
1372 *num_components_queued = num_components - 1;
1373 *remaining -= num_components;
1374 c->num_inputs++;
1375
1376 return vir_MOV(c, vpm);
1377 }
1378
1379 static void
1380 ntq_setup_vpm_inputs(struct v3d_compile *c)
1381 {
1382 /* Figure out how many components of each vertex attribute the shader
1383 * uses. Each variable should have been split to individual
1384 * components and unused ones DCEed. The vertex fetcher will load
1385 * from the start of the attribute to the number of components we
1386 * declare we need in c->vattr_sizes[].
1387 */
1388 nir_foreach_variable(var, &c->s->inputs) {
1389 /* No VS attribute array support. */
1390 assert(MAX2(glsl_get_length(var->type), 1) == 1);
1391
1392 unsigned loc = var->data.driver_location;
1393 int start_component = var->data.location_frac;
1394 int num_components = glsl_get_components(var->type);
1395
1396 c->vattr_sizes[loc] = MAX2(c->vattr_sizes[loc],
1397 start_component + num_components);
1398 }
1399
1400 unsigned num_components = 0;
1401 uint32_t vpm_components_queued = 0;
1402 bool uses_iid = c->s->info.system_values_read &
1403 (1ull << SYSTEM_VALUE_INSTANCE_ID);
1404 bool uses_vid = c->s->info.system_values_read &
1405 (1ull << SYSTEM_VALUE_VERTEX_ID);
1406 num_components += uses_iid;
1407 num_components += uses_vid;
1408
1409 for (int i = 0; i < ARRAY_SIZE(c->vattr_sizes); i++)
1410 num_components += c->vattr_sizes[i];
1411
1412 if (uses_iid) {
1413 c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
1414 &num_components, ~0);
1415 }
1416
1417 if (uses_vid) {
1418 c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
1419 &num_components, ~0);
1420 }
1421
1422 for (int loc = 0; loc < ARRAY_SIZE(c->vattr_sizes); loc++) {
1423 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1424 (loc + 1) * 4);
1425
1426 for (int i = 0; i < c->vattr_sizes[loc]; i++) {
1427 c->inputs[loc * 4 + i] =
1428 ntq_emit_vpm_read(c,
1429 &vpm_components_queued,
1430 &num_components,
1431 loc * 4 + i);
1432
1433 }
1434 }
1435
1436 if (c->devinfo->ver >= 40) {
1437 assert(vpm_components_queued == num_components);
1438 } else {
1439 assert(vpm_components_queued == 0);
1440 assert(num_components == 0);
1441 }
1442 }
1443
1444 static void
1445 ntq_setup_fs_inputs(struct v3d_compile *c)
1446 {
1447 unsigned num_entries = 0;
1448 unsigned num_components = 0;
1449 nir_foreach_variable(var, &c->s->inputs) {
1450 num_entries++;
1451 num_components += glsl_get_components(var->type);
1452 }
1453
1454 nir_variable *vars[num_entries];
1455
1456 unsigned i = 0;
1457 nir_foreach_variable(var, &c->s->inputs)
1458 vars[i++] = var;
1459
1460 /* Sort the variables so that we emit the input setup in
1461 * driver_location order. This is required for VPM reads, whose data
1462 * is fetched into the VPM in driver_location (TGSI register index)
1463 * order.
1464 */
1465 qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1466
1467 for (unsigned i = 0; i < num_entries; i++) {
1468 nir_variable *var = vars[i];
1469 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1470 unsigned loc = var->data.driver_location;
1471
1472 assert(array_len == 1);
1473 (void)array_len;
1474 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1475 (loc + 1) * 4);
1476
1477 if (var->data.location == VARYING_SLOT_POS) {
1478 emit_fragcoord_input(c, loc);
1479 } else if (var->data.location == VARYING_SLOT_PNTC ||
1480 (var->data.location >= VARYING_SLOT_VAR0 &&
1481 (c->fs_key->point_sprite_mask &
1482 (1 << (var->data.location -
1483 VARYING_SLOT_VAR0))))) {
1484 c->inputs[loc * 4 + 0] = c->point_x;
1485 c->inputs[loc * 4 + 1] = c->point_y;
1486 } else {
1487 emit_fragment_input(c, loc, var);
1488 }
1489 }
1490 }
1491
1492 static void
1493 ntq_setup_outputs(struct v3d_compile *c)
1494 {
1495 nir_foreach_variable(var, &c->s->outputs) {
1496 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1497 unsigned loc = var->data.driver_location * 4;
1498
1499 assert(array_len == 1);
1500 (void)array_len;
1501
1502 for (int i = 0; i < 4 - var->data.location_frac; i++) {
1503 add_output(c, loc + var->data.location_frac + i,
1504 var->data.location,
1505 var->data.location_frac + i);
1506 }
1507
1508 if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
1509 switch (var->data.location) {
1510 case FRAG_RESULT_COLOR:
1511 c->output_color_var[0] = var;
1512 c->output_color_var[1] = var;
1513 c->output_color_var[2] = var;
1514 c->output_color_var[3] = var;
1515 break;
1516 case FRAG_RESULT_DATA0:
1517 case FRAG_RESULT_DATA1:
1518 case FRAG_RESULT_DATA2:
1519 case FRAG_RESULT_DATA3:
1520 c->output_color_var[var->data.location -
1521 FRAG_RESULT_DATA0] = var;
1522 break;
1523 case FRAG_RESULT_DEPTH:
1524 c->output_position_index = loc;
1525 break;
1526 case FRAG_RESULT_SAMPLE_MASK:
1527 c->output_sample_mask_index = loc;
1528 break;
1529 }
1530 } else {
1531 switch (var->data.location) {
1532 case VARYING_SLOT_POS:
1533 c->output_position_index = loc;
1534 break;
1535 case VARYING_SLOT_PSIZ:
1536 c->output_point_size_index = loc;
1537 break;
1538 }
1539 }
1540 }
1541 }
1542
1543 static void
1544 ntq_setup_uniforms(struct v3d_compile *c)
1545 {
1546 nir_foreach_variable(var, &c->s->uniforms) {
1547 uint32_t vec4_count = glsl_count_attribute_slots(var->type,
1548 false);
1549 unsigned vec4_size = 4 * sizeof(float);
1550
1551 declare_uniform_range(c, var->data.driver_location * vec4_size,
1552 vec4_count * vec4_size);
1553
1554 }
1555 }
1556
1557 /**
1558 * Sets up the mapping from nir_register to struct qreg *.
1559 *
1560 * Each nir_register gets a struct qreg per 32-bit component being stored.
1561 */
1562 static void
1563 ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
1564 {
1565 foreach_list_typed(nir_register, nir_reg, node, list) {
1566 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1567 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1568 array_len *
1569 nir_reg->num_components);
1570
1571 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1572
1573 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1574 qregs[i] = vir_get_temp(c);
1575 }
1576 }
1577
1578 static void
1579 ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
1580 {
1581 /* XXX perf: Experiment with using immediate loads to avoid having
1582 * these end up in the uniform stream. Watch out for breaking the
1583 * small immediates optimization in the process!
1584 */
1585 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1586 for (int i = 0; i < instr->def.num_components; i++)
1587 qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);
1588
1589 _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1590 }
1591
1592 static void
1593 ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
1594 {
1595 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1596
1597 /* VIR needs there to be *some* value, so pick 0 (same as for
1598          * ntq_setup_registers()).
1599 */
1600 for (int i = 0; i < instr->def.num_components; i++)
1601 qregs[i] = vir_uniform_ui(c, 0);
1602 }
1603
1604 static void
1605 ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
1606 {
1607 unsigned offset;
1608
1609 switch (instr->intrinsic) {
1610 case nir_intrinsic_load_uniform:
1611 if (nir_src_is_const(instr->src[0])) {
1612 int offset = (nir_intrinsic_base(instr) +
1613 nir_src_as_uint(instr->src[0]));
1614 assert(offset % 4 == 0);
1615 /* We need dwords */
1616 offset = offset / 4;
1617 for (int i = 0; i < instr->num_components; i++) {
1618 ntq_store_dest(c, &instr->dest, i,
1619 vir_uniform(c, QUNIFORM_UNIFORM,
1620 offset + i));
1621 }
1622 } else {
1623 ntq_emit_tmu_general(c, instr);
1624 }
1625 break;
1626
1627 case nir_intrinsic_load_ubo:
1628 ntq_emit_tmu_general(c, instr);
1629 break;
1630
1631 case nir_intrinsic_load_user_clip_plane:
1632 for (int i = 0; i < instr->num_components; i++) {
1633 ntq_store_dest(c, &instr->dest, i,
1634 vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1635 nir_intrinsic_ucp_id(instr) *
1636 4 + i));
1637 }
1638 break;
1639
1640 case nir_intrinsic_load_alpha_ref_float:
1641 ntq_store_dest(c, &instr->dest, 0,
1642 vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
1643 break;
1644
1645 case nir_intrinsic_load_sample_mask_in:
1646 ntq_store_dest(c, &instr->dest, 0, vir_MSF(c));
1647 break;
1648
1649 case nir_intrinsic_load_helper_invocation:
1650 vir_PF(c, vir_MSF(c), V3D_QPU_PF_PUSHZ);
1651 ntq_store_dest(c, &instr->dest, 0,
1652 vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
1653 vir_uniform_ui(c, ~0),
1654 vir_uniform_ui(c, 0))));
1655 break;
1656
1657 case nir_intrinsic_load_front_face:
1658 /* The register contains 0 (front) or 1 (back), and we need to
1659 * turn it into a NIR bool where true means front.
1660 */
1661 ntq_store_dest(c, &instr->dest, 0,
1662 vir_ADD(c,
1663 vir_uniform_ui(c, -1),
1664 vir_REVF(c)));
1665 break;
1666
1667 case nir_intrinsic_load_instance_id:
1668 ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
1669 break;
1670
1671 case nir_intrinsic_load_vertex_id:
1672 ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
1673 break;
1674
1675 case nir_intrinsic_load_input:
1676 for (int i = 0; i < instr->num_components; i++) {
1677 offset = (nir_intrinsic_base(instr) +
1678 nir_src_as_uint(instr->src[0]));
1679 int comp = nir_intrinsic_component(instr) + i;
1680 ntq_store_dest(c, &instr->dest, i,
1681 vir_MOV(c, c->inputs[offset * 4 + comp]));
1682 }
1683 break;
1684
1685 case nir_intrinsic_store_output:
1686 offset = ((nir_intrinsic_base(instr) +
1687 nir_src_as_uint(instr->src[1])) * 4 +
1688 nir_intrinsic_component(instr));
1689
1690 for (int i = 0; i < instr->num_components; i++) {
1691 c->outputs[offset + i] =
1692 vir_MOV(c, ntq_get_src(c, instr->src[0], i));
1693 }
1694 c->num_outputs = MAX2(c->num_outputs,
1695 offset + instr->num_components);
1696 break;
1697
1698 case nir_intrinsic_discard:
1699 if (c->execute.file != QFILE_NULL) {
1700 vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1701 vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
1702 vir_uniform_ui(c, 0)),
1703 V3D_QPU_COND_IFA);
1704 } else {
1705 vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
1706 vir_uniform_ui(c, 0));
1707 }
1708 break;
1709
1710 case nir_intrinsic_discard_if: {
1711 /* true (~0) if we're discarding */
1712 struct qreg cond = ntq_get_src(c, instr->src[0], 0);
1713
1714 if (c->execute.file != QFILE_NULL) {
1715 /* execute == 0 means the channel is active. Invert
1716 * the condition so that we can use zero as "executing
1717 * and discarding."
1718 */
1719 vir_PF(c, vir_OR(c, c->execute, vir_NOT(c, cond)),
1720 V3D_QPU_PF_PUSHZ);
1721 vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
1722 vir_uniform_ui(c, 0)),
1723 V3D_QPU_COND_IFA);
1724 } else {
1725 vir_PF(c, cond, V3D_QPU_PF_PUSHZ);
1726 vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
1727 vir_uniform_ui(c, 0)),
1728 V3D_QPU_COND_IFNA);
1729 }
1730
1731 break;
1732 }
1733
1734 default:
1735 fprintf(stderr, "Unknown intrinsic: ");
1736 nir_print_instr(&instr->instr, stderr);
1737 fprintf(stderr, "\n");
1738 break;
1739 }
1740 }
1741
1742 /* Clears (activates) the execute flags for any channels whose jump target
1743 * matches this block.
1744 *
1745 * XXX perf: Could we be using flpush/flpop somehow for our execution channel
1746 * enabling?
1747 *
1748 * XXX perf: For uniform control flow, we should be able to skip c->execute
1749 * handling entirely.
1750 */
1751 static void
1752 ntq_activate_execute_for_block(struct v3d_compile *c)
1753 {
1754 vir_set_pf(vir_XOR_dest(c, vir_reg(QFILE_NULL, 0),
1755 c->execute, vir_uniform_ui(c, c->cur_block->index)),
1756 V3D_QPU_PF_PUSHZ);
1757
1758 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
1759 }
1760
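/*
 * Emits an if statement whose condition is dynamically uniform across the
 * channels, using real branches: the flags are set once from the condition
 * and the jump to the ELSE block uses the ALLA/ALLNA branch conditions
 * rather than per-channel execute-mask updates.
 */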
1761 static void
1762 ntq_emit_uniform_if(struct v3d_compile *c, nir_if *if_stmt)
1763 {
1764 nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
1765 bool empty_else_block =
1766 (nir_else_block == nir_if_last_else_block(if_stmt) &&
1767 exec_list_is_empty(&nir_else_block->instr_list));
1768
1769 struct qblock *then_block = vir_new_block(c);
1770 struct qblock *after_block = vir_new_block(c);
1771 struct qblock *else_block;
1772 if (empty_else_block)
1773 else_block = after_block;
1774 else
1775 else_block = vir_new_block(c);
1776
1777 /* Set up the flags for the IF condition (taking the THEN branch). */
1778 nir_alu_instr *if_condition_alu = ntq_get_alu_parent(if_stmt->condition);
1779 enum v3d_qpu_cond cond;
1780 if (!if_condition_alu ||
1781 !ntq_emit_comparison(c, if_condition_alu, &cond)) {
1782 vir_PF(c, ntq_get_src(c, if_stmt->condition, 0),
1783 V3D_QPU_PF_PUSHZ);
1784 cond = V3D_QPU_COND_IFNA;
1785 }
1786
1787 /* Jump to ELSE. */
1788 vir_BRANCH(c, cond == V3D_QPU_COND_IFA ?
1789 V3D_QPU_BRANCH_COND_ALLNA :
1790 V3D_QPU_BRANCH_COND_ALLA);
1791 vir_link_blocks(c->cur_block, else_block);
1792 vir_link_blocks(c->cur_block, then_block);
1793
1794 /* Process the THEN block. */
1795 vir_set_emit_block(c, then_block);
1796 ntq_emit_cf_list(c, &if_stmt->then_list);
1797
1798 if (!empty_else_block) {
1799 /* At the end of the THEN block, jump to ENDIF */
1800 vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALWAYS);
1801 vir_link_blocks(c->cur_block, after_block);
1802
1803 /* Emit the else block. */
1804 vir_set_emit_block(c, else_block);
1805 ntq_activate_execute_for_block(c);
1806 ntq_emit_cf_list(c, &if_stmt->else_list);
1807 }
1808
1809 vir_link_blocks(c->cur_block, after_block);
1810
1811 vir_set_emit_block(c, after_block);
1812 }
1813
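/*
 * Emits an if statement with a possibly divergent condition.  c->execute
 * holds, per channel, the index of the block that channel is waiting to run
 * (zero meaning currently active); each side of the if retargets execute for
 * the channels that should skip it, and branches are only taken when every
 * channel agrees (ALLA/ALLNA), otherwise execution falls through under the
 * execute mask.
 */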
1814 static void
1815 ntq_emit_nonuniform_if(struct v3d_compile *c, nir_if *if_stmt)
1816 {
1817 nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
1818 bool empty_else_block =
1819 (nir_else_block == nir_if_last_else_block(if_stmt) &&
1820 exec_list_is_empty(&nir_else_block->instr_list));
1821
1822 struct qblock *then_block = vir_new_block(c);
1823 struct qblock *after_block = vir_new_block(c);
1824 struct qblock *else_block;
1825 if (empty_else_block)
1826 else_block = after_block;
1827 else
1828 else_block = vir_new_block(c);
1829
1830 bool was_top_level = false;
1831 if (c->execute.file == QFILE_NULL) {
1832 c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
1833 was_top_level = true;
1834 }
1835
1836 /* Set up the flags for the IF condition (taking the THEN branch). */
1837 nir_alu_instr *if_condition_alu = ntq_get_alu_parent(if_stmt->condition);
1838 enum v3d_qpu_cond cond;
1839 if (!if_condition_alu ||
1840 !ntq_emit_comparison(c, if_condition_alu, &cond)) {
1841 vir_PF(c, ntq_get_src(c, if_stmt->condition, 0),
1842 V3D_QPU_PF_PUSHZ);
1843 cond = V3D_QPU_COND_IFNA;
1844 }
1845
1846 /* Update the flags+cond to mean "Taking the ELSE branch (!cond) and
1847          * was previously active (execute Z)" for updating the exec flags.
1848 */
1849 if (was_top_level) {
1850 cond = v3d_qpu_cond_invert(cond);
1851 } else {
1852 struct qinst *inst = vir_MOV_dest(c, vir_reg(QFILE_NULL, 0),
1853 c->execute);
1854 if (cond == V3D_QPU_COND_IFA) {
1855 vir_set_uf(inst, V3D_QPU_UF_NORNZ);
1856 } else {
1857 vir_set_uf(inst, V3D_QPU_UF_ANDZ);
1858 cond = V3D_QPU_COND_IFA;
1859 }
1860 }
1861
1862 vir_MOV_cond(c, cond,
1863 c->execute,
1864 vir_uniform_ui(c, else_block->index));
1865
1866 /* Jump to ELSE if nothing is active for THEN, otherwise fall
1867 * through.
1868 */
1869 vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1870 vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
1871 vir_link_blocks(c->cur_block, else_block);
1872 vir_link_blocks(c->cur_block, then_block);
1873
1874 /* Process the THEN block. */
1875 vir_set_emit_block(c, then_block);
1876 ntq_emit_cf_list(c, &if_stmt->then_list);
1877
1878 if (!empty_else_block) {
1879 /* Handle the end of the THEN block. First, all currently
1880 * active channels update their execute flags to point to
1881 * ENDIF.
1882 */
1883 vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1884 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
1885 vir_uniform_ui(c, after_block->index));
1886
1887 /* If everything points at ENDIF, then jump there immediately. */
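/* The XOR is zero only for channels whose execute value is already the
 * ENDIF index, so ALLA on the pushed Z flag means no channel still
 * needs the ELSE block.
 */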
1888 vir_PF(c, vir_XOR(c, c->execute,
1889 vir_uniform_ui(c, after_block->index)),
1890 V3D_QPU_PF_PUSHZ);
1891 vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
1892 vir_link_blocks(c->cur_block, after_block);
1893 vir_link_blocks(c->cur_block, else_block);
1894
1895 vir_set_emit_block(c, else_block);
1896 ntq_activate_execute_for_block(c);
1897 ntq_emit_cf_list(c, &if_stmt->else_list);
1898 }
1899
1900 vir_link_blocks(c->cur_block, after_block);
1901
1902 vir_set_emit_block(c, after_block);
1903 if (was_top_level)
1904 c->execute = c->undef;
1905 else
1906 ntq_activate_execute_for_block(c);
1907 }
1908
1909 static void
1910 ntq_emit_if(struct v3d_compile *c, nir_if *nif)
1911 {
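/* A dynamically uniform condition at the top level (no execute mask in
 * place yet) can use real branches; everything else goes through the
 * execute-mask lowering.
 */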
1912 if (c->execute.file == QFILE_NULL &&
1913 nir_src_is_dynamically_uniform(nif->condition)) {
1914 ntq_emit_uniform_if(c, nif);
1915 } else {
1916 ntq_emit_nonuniform_if(c, nif);
1917 }
1918 }
1919
1920 static void
1921 ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
1922 {
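/* break/continue don't branch by themselves: they just park the
 * jumping channels by pointing their execute value at the target
 * block.  The actual branch happens at the bottom of the loop, once no
 * channels are left active.
 */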
1923 switch (jump->type) {
1924 case nir_jump_break:
1925 vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1926 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
1927 vir_uniform_ui(c, c->loop_break_block->index));
1928 break;
1929
1930 case nir_jump_continue:
1931 vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
1932 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
1933 vir_uniform_ui(c, c->loop_cont_block->index));
1934 break;
1935
1936 case nir_jump_return:
1937 unreachable("All returns should be lowered\n");
1938 }
1939 }
1940
1941 static void
1942 ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
1943 {
1944 switch (instr->type) {
1945 case nir_instr_type_alu:
1946 ntq_emit_alu(c, nir_instr_as_alu(instr));
1947 break;
1948
1949 case nir_instr_type_intrinsic:
1950 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
1951 break;
1952
1953 case nir_instr_type_load_const:
1954 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
1955 break;
1956
1957 case nir_instr_type_ssa_undef:
1958 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
1959 break;
1960
1961 case nir_instr_type_tex:
1962 ntq_emit_tex(c, nir_instr_as_tex(instr));
1963 break;
1964
1965 case nir_instr_type_jump:
1966 ntq_emit_jump(c, nir_instr_as_jump(instr));
1967 break;
1968
1969 default:
1970 fprintf(stderr, "Unknown NIR instr type: ");
1971 nir_print_instr(instr, stderr);
1972 fprintf(stderr, "\n");
1973 abort();
1974 }
1975 }
1976
1977 static void
1978 ntq_emit_block(struct v3d_compile *c, nir_block *block)
1979 {
1980 nir_foreach_instr(instr, block) {
1981 ntq_emit_instr(c, instr);
1982 }
1983 }
1984
1985 static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
1986
1987 static void
1988 ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
1989 {
1990 bool was_top_level = false;
1991 if (c->execute.file == QFILE_NULL) {
1992 c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
1993 was_top_level = true;
1994 }
1995
1996 struct qblock *save_loop_cont_block = c->loop_cont_block;
1997 struct qblock *save_loop_break_block = c->loop_break_block;
1998
1999 c->loop_cont_block = vir_new_block(c);
2000 c->loop_break_block = vir_new_block(c);
2001
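/* The continue block doubles as the loop header, so each iteration
 * starts by re-activating any channels whose execute value points at
 * it.
 */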
2002 vir_link_blocks(c->cur_block, c->loop_cont_block);
2003 vir_set_emit_block(c, c->loop_cont_block);
2004 ntq_activate_execute_for_block(c);
2005
2006 ntq_emit_cf_list(c, &loop->body);
2007
2008 /* Re-enable any channels that hit a continue (their execute value is
2009 * the continue block's index), so the ANYA check below sees them as looping.
2010 *
2011 * XXX: Use the .ORZ flags update, instead.
2012 */
2013 vir_PF(c, vir_XOR(c,
2014 c->execute,
2015 vir_uniform_ui(c, c->loop_cont_block->index)),
2016 V3D_QPU_PF_PUSHZ);
2017 vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
2018
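/* Loop again while any channel is still active (execute == 0), which
 * the ANYA branch condition tests against the Z flag pushed here.
 */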
2019 vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
2020
2021 struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
2022 /* Pixels that were not dispatched or have been discarded should not
2023 * contribute to looping again.
2024 */
2025 branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
2026 vir_link_blocks(c->cur_block, c->loop_cont_block);
2027 vir_link_blocks(c->cur_block, c->loop_break_block);
2028
2029 vir_set_emit_block(c, c->loop_break_block);
2030 if (was_top_level)
2031 c->execute = c->undef;
2032 else
2033 ntq_activate_execute_for_block(c);
2034
2035 c->loop_break_block = save_loop_break_block;
2036 c->loop_cont_block = save_loop_cont_block;
2037
2038 c->loops++;
2039 }
2040
2041 static void
2042 ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
2043 {
2044 fprintf(stderr, "FUNCTIONS not handled.\n");
2045 abort();
2046 }
2047
2048 static void
2049 ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
2050 {
2051 foreach_list_typed(nir_cf_node, node, node, list) {
2052 switch (node->type) {
2053 case nir_cf_node_block:
2054 ntq_emit_block(c, nir_cf_node_as_block(node));
2055 break;
2056
2057 case nir_cf_node_if:
2058 ntq_emit_if(c, nir_cf_node_as_if(node));
2059 break;
2060
2061 case nir_cf_node_loop:
2062 ntq_emit_loop(c, nir_cf_node_as_loop(node));
2063 break;
2064
2065 case nir_cf_node_function:
2066 ntq_emit_function(c, nir_cf_node_as_function(node));
2067 break;
2068
2069 default:
2070 fprintf(stderr, "Unknown NIR node type\n");
2071 abort();
2072 }
2073 }
2074 }
2075
2076 static void
2077 ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
2078 {
2079 ntq_setup_registers(c, &impl->registers);
2080 ntq_emit_cf_list(c, &impl->body);
2081 }
2082
2083 static void
2084 nir_to_vir(struct v3d_compile *c)
2085 {
2086 if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
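/* The fragment shader payload (W, centroid W, and Z) arrives in the
 * first physical registers; copy it into temporaries so the rest of
 * the compile can treat it like any other value.
 */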
2087 c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
2088 c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
2089 c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));
2090
2091 /* XXX perf: We could set the "disable implicit point/line
2092 * varyings" field in the shader record and not emit these, if
2093 * they're not going to be used.
2094 */
2095 if (c->fs_key->is_points) {
2096 c->point_x = emit_fragment_varying(c, NULL, 0);
2097 c->point_y = emit_fragment_varying(c, NULL, 0);
2098 } else if (c->fs_key->is_lines) {
2099 c->line_x = emit_fragment_varying(c, NULL, 0);
2100 }
2101 }
2102
2103 if (c->s->info.stage == MESA_SHADER_FRAGMENT)
2104 ntq_setup_fs_inputs(c);
2105 else
2106 ntq_setup_vpm_inputs(c);
2107
2108 ntq_setup_outputs(c);
2109 ntq_setup_uniforms(c);
2110 ntq_setup_registers(c, &c->s->registers);
2111
2112 /* Find the main function and emit the body. */
2113 nir_foreach_function(function, c->s) {
2114 assert(strcmp(function->name, "main") == 0);
2115 assert(function->impl);
2116 ntq_emit_impl(c, function->impl);
2117 }
2118 }
2119
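/* NIR compiler options: these mostly ask NIR to lower operations that
 * the V3D QPU can't do directly, so nir_to_vir only sees ops it knows
 * how to emit.
 */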
2120 const nir_shader_compiler_options v3d_nir_options = {
2121 .lower_all_io_to_temps = true,
2122 .lower_extract_byte = true,
2123 .lower_extract_word = true,
2124 .lower_bfm = true,
2125 .lower_bitfield_insert_to_shifts = true,
2126 .lower_bitfield_extract_to_shifts = true,
2127 .lower_bitfield_reverse = true,
2128 .lower_bit_count = true,
2129 .lower_pack_unorm_2x16 = true,
2130 .lower_pack_snorm_2x16 = true,
2131 .lower_pack_unorm_4x8 = true,
2132 .lower_pack_snorm_4x8 = true,
2133 .lower_unpack_unorm_4x8 = true,
2134 .lower_unpack_snorm_4x8 = true,
2135 .lower_pack_half_2x16 = true,
2136 .lower_unpack_half_2x16 = true,
2137 .lower_fdiv = true,
2138 .lower_find_lsb = true,
2139 .lower_ffma = true,
2140 .lower_flrp32 = true,
2141 .lower_fpow = true,
2142 .lower_fsat = true,
2143 .lower_fsqrt = true,
2144 .lower_ifind_msb = true,
2145 .lower_ldexp = true,
2146 .lower_mul_high = true,
2147 .lower_wpos_pntc = true,
2148 .native_integers = true,
2149 };
2150
2151 /**
2152 * Removes the THRSW instructions when demoting a shader down to
2153 * single-threaded (one THRSW will still be inserted at v3d_vir_to_qpu() for
2154 * the program end).
2155 */
2156 static void
2157 vir_remove_thrsw(struct v3d_compile *c)
2158 {
2159 vir_for_each_block(block, c) {
2160 vir_for_each_inst_safe(inst, block) {
2161 if (inst->qpu.sig.thrsw)
2162 vir_remove_instruction(c, inst);
2163 }
2164 }
2165
2166 c->last_thrsw = NULL;
2167 }
2168
2169 void
2170 vir_emit_last_thrsw(struct v3d_compile *c)
2171 {
2172 /* On V3D before 4.1, we need a TMU op to be outstanding when thread
2173 * switching, so disable threads if we didn't do any TMU ops (each of
2174 * which would have emitted a THRSW).
2175 */
2176 if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
2177 c->threads = 1;
2178 if (c->last_thrsw)
2179 vir_remove_thrsw(c);
2180 return;
2181 }
2182
2183 /* If we're threaded and the last THRSW was in conditional code, then
2184 * we need to emit another one so that we can flag it as the last
2185 * thrsw.
2186 */
2187 if (c->last_thrsw && !c->last_thrsw_at_top_level) {
2188 assert(c->devinfo->ver >= 41);
2189 vir_emit_thrsw(c);
2190 }
2191
2192 /* If we're threaded, then we need to mark the last THRSW instruction
2193 * so we can emit a pair of them at QPU emit time.
2194 *
2195 * For V3D 4.x, we can spawn the non-fragment shaders already in the
2196 * post-last-THRSW state, so we can skip this.
2197 */
2198 if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
2199 assert(c->devinfo->ver >= 41);
2200 vir_emit_thrsw(c);
2201 }
2202
2203 if (c->last_thrsw)
2204 c->last_thrsw->is_last_thrsw = true;
2205 }
2206
2207 /* There's a flag in the shader for "center W is needed for reasons other than
2208 * non-centroid varyings", so we just walk the program after VIR optimization
2209 * to see if it's used. It should be harmless to set even if we only use
2210 * center W for varyings.
2211 */
2212 static void
2213 vir_check_payload_w(struct v3d_compile *c)
2214 {
2215 if (c->s->info.stage != MESA_SHADER_FRAGMENT)
2216 return;
2217
2218 vir_for_each_inst_inorder(inst, c) {
2219 for (int i = 0; i < vir_get_nsrc(inst); i++) {
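/* payload_w was copied in from physical register 0 at the top of the
 * FS (see nir_to_vir()), so any remaining use of QFILE_REG 0 means
 * center W is needed.
 */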
2220 if (inst->src[i].file == QFILE_REG &&
2221 inst->src[i].index == 0) {
2222 c->uses_center_w = true;
2223 return;
2224 }
2225 }
2226 }
2227
2228 }
2229
2230 void
2231 v3d_nir_to_vir(struct v3d_compile *c)
2232 {
2233 if (V3D_DEBUG & (V3D_DEBUG_NIR |
2234 v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
2235 fprintf(stderr, "%s prog %d/%d NIR:\n",
2236 vir_get_stage_name(c),
2237 c->program_id, c->variant_id);
2238 nir_print_shader(c->s, stderr);
2239 }
2240
2241 nir_to_vir(c);
2242
2243 /* Emit the last THRSW before STVPM and TLB writes. */
2244 vir_emit_last_thrsw(c);
2245
2246 switch (c->s->info.stage) {
2247 case MESA_SHADER_FRAGMENT:
2248 emit_frag_end(c);
2249 break;
2250 case MESA_SHADER_VERTEX:
2251 emit_vert_end(c);
2252 break;
2253 default:
2254 unreachable("bad stage");
2255 }
2256
2257 if (V3D_DEBUG & (V3D_DEBUG_VIR |
2258 v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
2259 fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
2260 vir_get_stage_name(c),
2261 c->program_id, c->variant_id);
2262 vir_dump(c);
2263 fprintf(stderr, "\n");
2264 }
2265
2266 vir_optimize(c);
2267 vir_lower_uniforms(c);
2268
2269 vir_check_payload_w(c);
2270
2271 /* XXX perf: On VC4, we do VIR-level instruction scheduling here.
2272 * We used that on that platform to pipeline TMU writes and reduce the
2273 * number of thread switches, as well as try (mostly successfully) to
2274 * reduce maximum register pressure to allow more threads. We should
2275 * do something of that sort for V3D -- either instruction scheduling
2276 * here, or delay the THRSW and LDTMUs from our texture
2277 * instructions until the results are needed.
2278 */
2279
2280 if (V3D_DEBUG & (V3D_DEBUG_VIR |
2281 v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
2282 fprintf(stderr, "%s prog %d/%d VIR:\n",
2283 vir_get_stage_name(c),
2284 c->program_id, c->variant_id);
2285 vir_dump(c);
2286 fprintf(stderr, "\n");
2287 }
2288
2289 /* Attempt to allocate registers for the temporaries. If we fail,
2290 * reduce thread count and try again.
2291 */
2292 int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
2293 struct qpu_reg *temp_registers;
2294 while (true) {
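/* If the allocator spilled, it has added spill code, so just retry
 * at the same thread count.
 */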
2295 bool spilled;
2296 temp_registers = v3d_register_allocate(c, &spilled);
2297 if (spilled)
2298 continue;
2299
2300 if (temp_registers)
2301 break;
2302
2303 if (c->threads == min_threads) {
2304 fprintf(stderr, "Failed to register allocate at %d threads:\n",
2305 c->threads);
2306 vir_dump(c);
2307 c->failed = true;
2308 return;
2309 }
2310
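/* Drop to a lower thread count, which gives each thread a larger
 * share of the register file, and strip the THRSWs once we're down
 * to a single thread.
 */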
2311 c->threads /= 2;
2312
2313 if (c->threads == 1)
2314 vir_remove_thrsw(c);
2315 }
2316
2317 v3d_vir_to_qpu(c, temp_registers);
2318 }