broadcom/vc5: Fix discard_if during control flow.
[mesa.git] / src / broadcom / compiler / nir_to_vir.c
/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>
#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v33_pack.h"

static struct qreg
ntq_get_src(struct v3d_compile *c, nir_src src, int i);
static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
resize_qreg_array(struct v3d_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}

static struct qreg
vir_SFU(struct v3d_compile *c, int waddr, struct qreg src)
{
        vir_FMOV_dest(c, vir_reg(QFILE_MAGIC, waddr), src);
        return vir_FMOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4));
}

static struct qreg
vir_LDTMU(struct v3d_compile *c)
{
        vir_NOP(c)->qpu.sig.ldtmu = true;
        return vir_MOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4));
}

static struct qreg
indirect_uniform_load(struct v3d_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = nir_intrinsic_base(intr);
        struct v3d_ubo_range *range = NULL;
        unsigned i;

        for (i = 0; i < c->num_ubo_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }
        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(i != c->num_ubo_ranges);
        if (!c->ubo_range_used[i]) {
                c->ubo_range_used[i] = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
        }

        offset -= range->src_offset;

        if (range->dst_offset + offset != 0) {
                indirect_offset = vir_ADD(c, indirect_offset,
                                          vir_uniform_ui(c, range->dst_offset +
                                                         offset));
        }

        /* Adjust for where we stored the TGSI register base. */
        vir_ADD_dest(c,
                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                     vir_uniform(c, QUNIFORM_UBO_ADDR, 0),
                     indirect_offset);

        return vir_LDTMU(c);
}

static struct qreg *
ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}
/**
 * This function is responsible for getting VIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value. To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous VIR instruction in the block and rewritable by
 * this function. That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination.
 */
static void
ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        struct qinst *last_inst = NULL;
        if (!list_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        assert(result.file == QFILE_UNIF ||
               (result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* Insert a MOV if the source wasn't an SSA def in the
                 * previous instruction.
                 */
                if (result.file == QFILE_UNIF) {
                        result = vir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (c->execute.file != QFILE_NULL) {
                        last_inst->dst.index = qregs[chan].index;

                        /* Set the flags to the current exec mask. To insert
                         * the flags push, we temporarily remove our SSA
                         * instruction.
                         */
                        list_del(&last_inst->link);
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        list_addtail(&last_inst->link,
                                     &c->cur_block->instructions);

                        vir_set_cond(last_inst, V3D_QPU_COND_IFA);
                        last_inst->cond_is_exec_mask = true;
                }
        }
}
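
/* Worked example of the reg path above (an illustrative sketch, not
 * compiled): if VIR just emitted "add t3, ta, tb" and NIR stores that result
 * to register r2 (held in temp t7) under control flow, we rewrite t3 to t7,
 * insert a flags push of the execute mask before the add, and predicate it,
 * yielding roughly:
 *
 *         pushz  exec            ; Z set on active lanes (exec == 0)
 *         add.ifa t7, ta, tb     ; only active lanes update r2's temp
 */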

static struct qreg
ntq_get_src(struct v3d_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

static struct qreg
ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static inline struct qreg
vir_SAT(struct v3d_compile *c, struct qreg val)
{
        return vir_FMAX(c,
                        vir_FMIN(c, val, vir_uniform_f(c, 1.0)),
                        vir_uniform_f(c, 0.0));
}

static struct qreg
ntq_umul(struct v3d_compile *c, struct qreg src0, struct qreg src1)
{
        vir_MULTOP(c, src0, src1);
        return vir_UMUL24(c, src0, src1);
}

static struct qreg
ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
{
        return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
}

static void
ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
        int dest_size = nir_tex_instr_dest_size(instr);

        struct qreg lod = c->undef;
        if (lod_index != -1)
                lod = ntq_get_src(c, instr->src[lod_index].src, 0);

        for (int i = 0; i < dest_size; i++) {
                assert(i < 3);
                enum quniform_contents contents;

                if (instr->is_array && i == dest_size - 1)
                        contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
                else
                        contents = QUNIFORM_TEXTURE_WIDTH + i;

                struct qreg size = vir_uniform(c, contents, unit);

                switch (instr->sampler_dim) {
                case GLSL_SAMPLER_DIM_1D:
                case GLSL_SAMPLER_DIM_2D:
                case GLSL_SAMPLER_DIM_3D:
                case GLSL_SAMPLER_DIM_CUBE:
                        /* Don't minify the array size. */
                        if (!(instr->is_array && i == dest_size - 1)) {
                                size = ntq_minify(c, size, lod);
                        }
                        break;

                case GLSL_SAMPLER_DIM_RECT:
                        /* There's no LOD field for rects */
                        break;

                default:
                        unreachable("Bad sampler type");
                }

                ntq_store_dest(c, &instr->dest, i, size);
        }
}

static void
ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        /* Since each texture sampling op requires uploading uniforms to
         * reference the texture, there's no HW support for texture size and
         * you just upload uniforms containing the size.
         */
        switch (instr->op) {
        case nir_texop_query_levels:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
                return;
        case nir_texop_txs:
                ntq_emit_txs(c, instr);
                return;
        default:
                break;
        }

        struct V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1 p0_unpacked = {
                V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1_header,

                .fetch_sample_mode = instr->op == nir_texop_txf,
        };

        struct V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1 p1_unpacked = {
        };

        switch (instr->sampler_dim) {
        case GLSL_SAMPLER_DIM_1D:
                if (instr->is_array)
                        p0_unpacked.lookup_type = TEXTURE_1D_ARRAY;
                else
                        p0_unpacked.lookup_type = TEXTURE_1D;
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
                if (instr->is_array)
                        p0_unpacked.lookup_type = TEXTURE_2D_ARRAY;
                else
                        p0_unpacked.lookup_type = TEXTURE_2D;
                break;
        case GLSL_SAMPLER_DIM_3D:
                p0_unpacked.lookup_type = TEXTURE_3D;
                break;
        case GLSL_SAMPLER_DIM_CUBE:
                p0_unpacked.lookup_type = TEXTURE_CUBE_MAP;
                break;
        default:
                unreachable("Bad sampler type");
        }

        struct qreg coords[5];
        int next_coord = 0;
        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        for (int j = 0; j < instr->coord_components; j++) {
                                coords[next_coord++] =
                                        ntq_get_src(c, instr->src[i].src, j);
                        }
                        if (instr->coord_components < 2)
                                coords[next_coord++] = vir_uniform_f(c, 0.5);
                        break;
                case nir_tex_src_bias:
                        coords[next_coord++] =
                                ntq_get_src(c, instr->src[i].src, 0);

                        p0_unpacked.bias_supplied = true;
                        break;
                case nir_tex_src_lod:
                        coords[next_coord++] =
                                vir_FADD(c,
                                         ntq_get_src(c, instr->src[i].src, 0),
                                         vir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL,
                                                     unit));

                        if (instr->op != nir_texop_txf &&
                            instr->op != nir_texop_tg4) {
                                p0_unpacked.disable_autolod_use_bias_only = true;
                        }
                        break;
                case nir_tex_src_comparator:
                        coords[next_coord++] =
                                ntq_get_src(c, instr->src[i].src, 0);

                        p0_unpacked.shadow = true;
                        break;

                case nir_tex_src_offset: {
                        nir_const_value *offset =
                                nir_src_as_const_value(instr->src[i].src);
                        p0_unpacked.texel_offset_for_s_coordinate =
                                offset->i32[0];

                        if (instr->coord_components >= 2)
                                p0_unpacked.texel_offset_for_t_coordinate =
                                        offset->i32[1];

                        if (instr->coord_components >= 3)
                                p0_unpacked.texel_offset_for_r_coordinate =
                                        offset->i32[2];
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        bool return_16 = (c->key->tex[unit].return_size == 16 ||
                          p0_unpacked.shadow);

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_tex_instr_dest_size(instr);
        if (return_16)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p1_unpacked.return_words_of_texture_data =
                (1 << MIN2(instr_return_channels,
                           c->key->tex[unit].return_channels)) - 1;

        uint32_t p0_packed;
        V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1_pack(NULL,
                                                         (uint8_t *)&p0_packed,
                                                         &p0_unpacked);

        uint32_t p1_packed;
        V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1_pack(NULL,
                                                         (uint8_t *)&p1_packed,
                                                         &p1_unpacked);
        /* Load unit number into the address field, which will be used by
         * the driver to decide which texture to put in the actual address
         * field.
         */
        p1_packed |= unit << 5;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                coords[0] = vir_FMUL(c, coords[0],
                                     vir_uniform(c, QUNIFORM_TEXRECT_SCALE_X,
                                                 unit));
                coords[1] = vir_FMUL(c, coords[1],
                                     vir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y,
                                                 unit));
        }

        struct qreg texture_u[] = {
                vir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0_0 + unit, p0_packed),
                vir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, p1_packed),
        };
        uint32_t next_texture_u = 0;

        for (int i = 0; i < next_coord; i++) {
                struct qreg dst;

                if (i == next_coord - 1)
                        dst = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUL);
                else
                        dst = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMU);

                struct qinst *tmu = vir_MOV_dest(c, dst, coords[i]);

                if (i < 2) {
                        tmu->has_implicit_uniform = true;
                        tmu->src[vir_get_implicit_uniform_src(tmu)] =
                                texture_u[next_texture_u++];
                }
        }

        struct qreg return_values[4];
        for (int i = 0; i < 4; i++) {
                /* Swizzling .zw of an RG texture should give undefined
                 * results, not crash the compiler.
                 */
                if (p1_unpacked.return_words_of_texture_data & (1 << i))
                        return_values[i] = vir_LDTMU(c);
                else
                        return_values[i] = c->undef;
        }

        for (int i = 0; i < nir_tex_instr_dest_size(instr); i++) {
                struct qreg chan;

                if (return_16) {
                        STATIC_ASSERT(PIPE_SWIZZLE_X == 0);
                        chan = return_values[i / 2];

                        if (nir_alu_type_get_base_type(instr->dest_type) ==
                            nir_type_float) {
                                enum v3d_qpu_input_unpack unpack;
                                if (i & 1)
                                        unpack = V3D_QPU_UNPACK_H;
                                else
                                        unpack = V3D_QPU_UNPACK_L;

                                chan = vir_FMOV(c, chan);
                                vir_set_unpack(c->defs[chan.index], 0, unpack);
                        } else {
                                /* If we're unpacking the low field, shift it
                                 * up to the top first.
                                 */
                                if ((i & 1) == 0) {
                                        chan = vir_SHL(c, chan,
                                                       vir_uniform_ui(c, 16));
                                }

                                /* Do proper sign extension to a 32-bit int. */
                                if (nir_alu_type_get_base_type(instr->dest_type) ==
                                    nir_type_int) {
                                        chan = vir_ASR(c, chan,
                                                       vir_uniform_ui(c, 16));
                                } else {
                                        chan = vir_SHR(c, chan,
                                                       vir_uniform_ui(c, 16));
                                }
                        }
                } else {
                        chan = vir_MOV(c, return_values[i]);
                }
                ntq_store_dest(c, &instr->dest, i, chan);
        }
}

static struct qreg
ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
{
        struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
        if (is_cos)
                input = vir_FADD(c, input, vir_uniform_f(c, 0.5));

        struct qreg periods = vir_FROUND(c, input);
        struct qreg sin_output = vir_SFU(c, V3D_QPU_WADDR_SIN,
                                         vir_FSUB(c, input, periods));
        return vir_XOR(c, sin_output, vir_SHL(c,
                                              vir_FTOIN(c, periods),
                                              vir_uniform_ui(c, -1)));
}
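
/* Range-reduction note for the above (a sketch of the math implied by the
 * scaling here, not taken from the hardware docs): the SFU SIN unit appears
 * to evaluate sin(pi * x) for x near [-0.5, 0.5], so we reduce to that range
 * and fix up the sign.  For example, for src = 0.75 * pi: input = 0.75,
 * periods = round(0.75) = 1, we compute sin(pi * -0.25), then XOR in a sign
 * flip because the period count is odd -- the SHL by -1 acts as a shift by
 * 31 (QPU shift counts mask to 5 bits), moving the low bit of the period
 * count into the float sign bit.
 */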

static struct qreg
ntq_fsign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
        return vir_MOV(c, t);
}

static struct qreg
ntq_isign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_ui(c, 0));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_ui(c, 1));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_ui(c, -1));
        return vir_MOV(c, t);
}

static void
emit_fragcoord_input(struct v3d_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = vir_FXCD(c);
        c->inputs[attr * 4 + 1] = vir_FYCD(c);
        c->inputs[attr * 4 + 2] = c->payload_z;
        c->inputs[attr * 4 + 3] = vir_SFU(c, V3D_QPU_WADDR_RECIP,
                                          c->payload_w);
}

static struct qreg
emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
                      uint8_t swizzle)
{
        struct qreg vary = vir_reg(QFILE_VARY, ~0);
        struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);

        /* For gl_PointCoord input or distance along a line, we'll be called
         * with no nir_variable, and we don't count toward VPM size so we
         * don't track an input slot.
         */
        if (!var) {
                return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
        }

        int i = c->num_inputs++;
        c->input_slots[i] = v3d_slot_from_slot_and_component(var->data.location,
                                                             swizzle);

        switch (var->data.interpolation) {
        case INTERP_MODE_NONE:
                /* If a gl_FrontColor or gl_BackColor input has no interp
                 * qualifier, then if we're using glShadeModel(GL_FLAT) it
                 * needs to be flat shaded.
                 */
                switch (var->data.location) {
                case VARYING_SLOT_COL0:
                case VARYING_SLOT_COL1:
                case VARYING_SLOT_BFC0:
                case VARYING_SLOT_BFC1:
                        if (c->fs_key->shade_model_flat) {
                                BITSET_SET(c->flat_shade_flags, i);
                                vir_MOV_dest(c, c->undef, vary);
                                return vir_MOV(c, r5);
                        } else {
                                return vir_FADD(c, vir_FMUL(c, vary,
                                                            c->payload_w), r5);
                        }
                default:
                        break;
                }
                /* FALLTHROUGH */
        case INTERP_MODE_SMOOTH:
                if (var->data.centroid) {
                        return vir_FADD(c, vir_FMUL(c, vary,
                                                    c->payload_w_centroid), r5);
                } else {
                        return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
                }
        case INTERP_MODE_NOPERSPECTIVE:
                /* C appears after the mov from the varying.
                 * XXX: improve ldvary setup.
                 */
                return vir_FADD(c, vir_MOV(c, vary), r5);
        case INTERP_MODE_FLAT:
                BITSET_SET(c->flat_shade_flags, i);
                vir_MOV_dest(c, c->undef, vary);
                return vir_MOV(c, r5);
        default:
                unreachable("Bad interp mode");
        }
}
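
/* Illustrative note (our reading of the code above, not a statement from the
 * hardware docs): reading QFILE_VARY emits an ldvary that produces the
 * varying's A coefficient and leaves its C coefficient in r5, so the value is
 * A * W + C for smooth shading, A + C for noperspective, and just C for flat
 * shading (the dummy MOV of vary only advances the ldvary pipeline).
 */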

static void
emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var)
{
        for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                int chan = var->data.location_frac + i;
                c->inputs[attr * 4 + chan] =
                        emit_fragment_varying(c, var, chan);
        }
}

static void
add_output(struct v3d_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct v3d_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset] =
                v3d_slot_from_slot_and_component(slot, swizzle);
}

static void
declare_uniform_range(struct v3d_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_ubo_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct v3d_ubo_range,
                                         c->ubo_ranges_array_size);
                c->ubo_range_used = reralloc(c, c->ubo_range_used,
                                             bool,
                                             c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_range_used[array_id] = false;
}

/**
 * If compare_instr is a valid comparison instruction, emits the comparison
 * and writes sel_instr's result, selected based on the comparison's outcome,
 * to *dest.  Returns false if compare_instr is not a handled comparison.
 */
static bool
ntq_emit_comparison(struct v3d_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);
        bool cond_invert = false;

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_seq:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHZ);
                break;
        case nir_op_ieq:
                vir_PF(c, vir_XOR(c, src0, src1), V3D_QPU_PF_PUSHZ);
                break;

        case nir_op_fne:
        case nir_op_sne:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;
        case nir_op_ine:
                vir_PF(c, vir_XOR(c, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_fge:
        case nir_op_sge:
                vir_PF(c, vir_FCMP(c, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ige:
                vir_PF(c, vir_MIN(c, src1, src0), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;
        case nir_op_uge:
                vir_PF(c, vir_SUB(c, src0, src1), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;

        case nir_op_slt:
        case nir_op_flt:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHN);
                break;
        case nir_op_ilt:
                vir_PF(c, vir_MIN(c, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ult:
                vir_PF(c, vir_SUB(c, src0, src1), V3D_QPU_PF_PUSHC);
                break;

        default:
                return false;
        }

        enum v3d_qpu_cond cond = (cond_invert ?
                                  V3D_QPU_COND_IFNA :
                                  V3D_QPU_COND_IFA);

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = vir_SEL(c, cond,
                                vir_uniform_f(c, 1.0), vir_uniform_f(c, 0.0));
                break;

        case nir_op_bcsel:
                *dest = vir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = vir_SEL(c, cond,
                                vir_uniform_ui(c, ~0), vir_uniform_ui(c, 0));
                break;
        }
        /* Make the temporary for ntq_store_dest(). */
        *dest = vir_MOV(c, *dest);

        return true;
}

/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg ntq_emit_bcsel(struct v3d_compile *c, nir_alu_instr *instr,
                                  struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        if (!compare)
                goto out;

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
        return vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA, src[1], src[2]));
}


static void
ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
{
        /* This should always be lowered to ALU operations for V3D. */
        assert(!instr->dest.saturate);

        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        ntq_store_dest(c, &instr->dest.dest, i,
                                       vir_MOV(c, srcs[i]));
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        struct qreg result;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                result = vir_MOV(c, src[0]);
                break;

        case nir_op_fneg:
                result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
                break;
        case nir_op_ineg:
                result = vir_NEG(c, src[0]);
                break;

        case nir_op_fmul:
                result = vir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                result = vir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                result = vir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                result = vir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                result = vir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i32:
                result = vir_FTOIZ(c, src[0]);
                break;
        case nir_op_f2u32:
                result = vir_FTOUZ(c, src[0]);
                break;
        case nir_op_i2f32:
                result = vir_ITOF(c, src[0]);
                break;
        case nir_op_u2f32:
                result = vir_UTOF(c, src[0]);
                break;
        case nir_op_b2f:
                result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        case nir_op_iadd:
                result = vir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                result = vir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                result = vir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                result = vir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                result = vir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                result = vir_MIN(c, src[0], src[1]);
                break;
        case nir_op_umin:
                result = vir_UMIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                result = vir_MAX(c, src[0], src[1]);
                break;
        case nir_op_umax:
                result = vir_UMAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                result = vir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                result = vir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                result = vir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                result = vir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                result = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
        case nir_op_ult:
                if (!ntq_emit_comparison(c, &result, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;

        case nir_op_bcsel:
                result = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            src[1], src[2]));
                break;

        case nir_op_frcp:
                result = vir_SFU(c, V3D_QPU_WADDR_RECIP, src[0]);
                break;
        case nir_op_frsq:
                result = vir_SFU(c, V3D_QPU_WADDR_RSQRT, src[0]);
                break;
        case nir_op_fexp2:
                result = vir_SFU(c, V3D_QPU_WADDR_EXP, src[0]);
                break;
        case nir_op_flog2:
                result = vir_SFU(c, V3D_QPU_WADDR_LOG, src[0]);
                break;

        case nir_op_fceil:
                result = vir_FCEIL(c, src[0]);
                break;
        case nir_op_ffloor:
                result = vir_FFLOOR(c, src[0]);
                break;
        case nir_op_fround_even:
                result = vir_FROUND(c, src[0]);
                break;
        case nir_op_ftrunc:
                result = vir_FTRUNC(c, src[0]);
                break;
        case nir_op_ffract:
                result = vir_FSUB(c, src[0], vir_FFLOOR(c, src[0]));
                break;

        case nir_op_fsin:
                result = ntq_fsincos(c, src[0], false);
                break;
        case nir_op_fcos:
                result = ntq_fsincos(c, src[0], true);
                break;

        case nir_op_fsign:
                result = ntq_fsign(c, src[0]);
                break;
        case nir_op_isign:
                result = ntq_isign(c, src[0]);
                break;

        case nir_op_fabs: {
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
                break;
        }

        case nir_op_iabs:
                result = vir_MAX(c, src[0],
                                 vir_SUB(c, vir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_fddx:
        case nir_op_fddx_coarse:
        case nir_op_fddx_fine:
                result = vir_FDX(c, src[0]);
                break;

        case nir_op_fddy:
        case nir_op_fddy_coarse:
        case nir_op_fddy_fine:
                result = vir_FDY(c, src[0]);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }

        /* We have a scalar result, so the instruction should only have a
         * single channel written to.
         */
        assert(util_is_power_of_two(instr->dest.write_mask));
        ntq_store_dest(c, &instr->dest.dest,
                       ffs(instr->dest.write_mask) - 1, result);
}

/* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
 * specifier. They come from a register that's preloaded with 0xffffffff
 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
 */
#define TLB_TYPE_F16_COLOR           (3 << 6)
#define TLB_TYPE_I32_COLOR           (1 << 6)
#define TLB_TYPE_F32_COLOR           (0 << 6)
#define TLB_RENDER_TARGET_SHIFT      3 /* Reversed!  7 = RT 0, 0 = RT 7. */
#define TLB_SAMPLE_MODE_PER_SAMPLE   (0 << 2)
#define TLB_SAMPLE_MODE_PER_PIXEL    (1 << 2)
#define TLB_F16_SWAP_HI_LO           (1 << 1)
#define TLB_VEC_SIZE_4_F16           (1 << 0)
#define TLB_VEC_SIZE_2_F16           (0 << 0)
#define TLB_VEC_SIZE_MINUS_1_SHIFT   0

/* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
 * flag is set.
 */
#define TLB_TYPE_DEPTH               ((2 << 6) | (0 << 4))
#define TLB_DEPTH_TYPE_INVARIANT     (0 << 2) /* Unmodified sideband input used */
#define TLB_DEPTH_TYPE_PER_PIXEL     (1 << 2) /* QPU result used */

/* Stencil is a single 32-bit write. */
#define TLB_TYPE_STENCIL_ALPHA       ((2 << 6) | (1 << 4))
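
/* Worked example of the encoding above: the 0xff default specifier decodes
 * as TLB_TYPE_F16_COLOR (3 << 6) | RT 0 (7 << 3, reversed) |
 * TLB_SAMPLE_MODE_PER_PIXEL | TLB_F16_SWAP_HI_LO | TLB_VEC_SIZE_4_F16 =
 * 0xc0 | 0x38 | 0x04 | 0x02 | 0x01 = 0xff -- i.e. the "normal vec4 f16 RT0
 * write" mentioned in the comment before these defines.
 */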

static void
emit_frag_end(struct v3d_compile *c)
{
        /* XXX
        if (c->output_sample_mask_index != -1) {
                vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }
        */

        bool has_any_tlb_color_write = false;
        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (c->output_color_var[rt])
                        has_any_tlb_color_write = true;
        }

        if (c->output_position_index != -1) {
                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  c->outputs[c->output_position_index]);

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c,
                                       TLB_TYPE_DEPTH |
                                       TLB_DEPTH_TYPE_PER_PIXEL |
                                       0xffffff00);
        } else if (c->s->info.fs.uses_discard || !has_any_tlb_color_write) {
                /* Emit passthrough Z if it needed to be delayed until shader
                 * end due to potential discards.
                 *
                 * Since (single-threaded) fragment shaders always need a TLB
                 * write, emit passthrough Z if we didn't have any color
                 * buffers, and flag ourselves as potentially discarding so
                 * that we can use Z as the TLB write.
                 */
                c->s->info.fs.uses_discard = true;

                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  vir_reg(QFILE_NULL, 0));

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c,
                                       TLB_TYPE_DEPTH |
                                       TLB_DEPTH_TYPE_INVARIANT |
                                       0xffffff00);
        }

        /* XXX: Performance improvement: Merge Z write and color writes TLB
         * uniform setup
         */

        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (!c->output_color_var[rt])
                        continue;

                nir_variable *var = c->output_color_var[rt];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];
                int num_components = glsl_get_vector_elements(var->type);
                uint32_t conf = 0xffffff00;
                struct qinst *inst;

                conf |= TLB_SAMPLE_MODE_PER_PIXEL;
                conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;

                assert(num_components != 0);
                switch (glsl_get_base_type(var->type)) {
                case GLSL_TYPE_UINT:
                case GLSL_TYPE_INT:
                        conf |= TLB_TYPE_I32_COLOR;
                        conf |= ((num_components - 1) <<
                                 TLB_VEC_SIZE_MINUS_1_SHIFT);

                        inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), color[0]);
                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                vir_uniform_ui(c, conf);

                        for (int i = 1; i < num_components; i++) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLB, 0),
                                                    color[i]);
                        }
                        break;

                default: {
                        struct qreg r = color[0];
                        struct qreg g = color[1];
                        struct qreg b = color[2];
                        struct qreg a = color[3];

                        if (c->fs_key->f32_color_rb) {
                                conf |= TLB_TYPE_F32_COLOR;
                                conf |= ((num_components - 1) <<
                                         TLB_VEC_SIZE_MINUS_1_SHIFT);
                        } else {
                                conf |= TLB_TYPE_F16_COLOR;
                                conf |= TLB_F16_SWAP_HI_LO;
                                if (num_components >= 3)
                                        conf |= TLB_VEC_SIZE_4_F16;
                                else
                                        conf |= TLB_VEC_SIZE_2_F16;
                        }

                        if (c->fs_key->swap_color_rb & (1 << rt)) {
                                r = color[2];
                                b = color[0];
                        }

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), color[0]);
                                inst->src[vir_get_implicit_uniform_src(inst)] =
                                        vir_uniform_ui(c, conf);

                                for (int i = 1; i < num_components; i++) {
                                        inst = vir_MOV_dest(c, vir_reg(QFILE_TLB, 0),
                                                            color[i]);
                                }
                        } else {
                                inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), r, g);
                                if (conf != ~0) {
                                        inst->dst.file = QFILE_TLBU;
                                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                                vir_uniform_ui(c, conf);
                                }

                                if (num_components >= 3)
                                        inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), b, a);
                        }
                        break;
                }
                }
        }
}

static void
emit_scaled_viewport_write(struct v3d_compile *c, struct qreg rcp_w)
{
        for (int i = 0; i < 2; i++) {
                struct qreg coord = c->outputs[c->output_position_index + i];
                coord = vir_FMUL(c, coord,
                                 vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i,
                                             0));
                coord = vir_FMUL(c, coord, rcp_w);
                vir_FTOIN_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM),
                               coord);
        }
}

static void
emit_zs_write(struct v3d_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        vir_FADD_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM),
                      vir_FMUL(c, vir_FMUL(c,
                                           c->outputs[c->output_position_index + 2],
                                           zscale),
                               rcp_w),
                      zoffset);
}

static void
emit_rcp_wc_write(struct v3d_compile *c, struct qreg rcp_w)
{
        vir_VPM_WRITE(c, rcp_w);
}

static void
emit_point_size_write(struct v3d_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = vir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = vir_FMAX(c, point_size, vir_uniform_f(c, .125));

        vir_VPM_WRITE(c, point_size);
}

static void
emit_vpm_write_setup(struct v3d_compile *c)
{
        uint32_t packed;
        struct V3D33_VPM_GENERIC_BLOCK_WRITE_SETUP unpacked = {
                V3D33_VPM_GENERIC_BLOCK_WRITE_SETUP_header,

                .horiz = true,
                .laned = false,
                .segs = true,
                .stride = 1,
                .size = VPM_SETUP_SIZE_32_BIT,
                .addr = 0,
        };

        V3D33_VPM_GENERIC_BLOCK_WRITE_SETUP_pack(NULL,
                                                 (uint8_t *)&packed,
                                                 &unpacked);
        vir_VPMSETUP(c, vir_uniform_ui(c, packed));
}

static void
emit_vert_end(struct v3d_compile *c)
{
        struct qreg rcp_w = vir_SFU(c, V3D_QPU_WADDR_RECIP,
                                    c->outputs[c->output_position_index + 3]);

        emit_vpm_write_setup(c);

        if (c->vs_key->is_coord) {
                for (int i = 0; i < 4; i++)
                        vir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
                emit_scaled_viewport_write(c, rcp_w);
                if (c->vs_key->per_vertex_point_size) {
                        emit_point_size_write(c);
                        /* emit_rcp_wc_write(c, rcp_w); */
                }
                /* XXX: Z-only rendering */
                if (0)
                        emit_zs_write(c, rcp_w);
        } else {
                emit_scaled_viewport_write(c, rcp_w);
                emit_zs_write(c, rcp_w);
                emit_rcp_wc_write(c, rcp_w);
                if (c->vs_key->per_vertex_point_size)
                        emit_point_size_write(c);
        }

        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                struct v3d_varying_slot input = c->vs_key->fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct v3d_varying_slot output = c->output_slots[j];

                        if (!memcmp(&input, &output, sizeof(input))) {
                                vir_VPM_WRITE(c, c->outputs[j]);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        vir_VPM_WRITE(c, vir_uniform_f(c, 0.0));
        }
}

void
v3d_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);
}

static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}

static struct qreg
ntq_emit_vpm_read(struct v3d_compile *c,
                  uint32_t *num_components_queued,
                  uint32_t *remaining,
                  uint32_t vpm_index)
{
        struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);

        if (*num_components_queued != 0) {
                (*num_components_queued)--;
                c->num_inputs++;
                return vir_MOV(c, vpm);
        }

        uint32_t num_components = MIN2(*remaining, 32);

        struct V3D33_VPM_GENERIC_BLOCK_READ_SETUP unpacked = {
                V3D33_VPM_GENERIC_BLOCK_READ_SETUP_header,

                .horiz = true,
                .laned = false,
                /* If the field is 0, that means a read count of 32. */
                .num = num_components & 31,
                .segs = true,
                .stride = 1,
                .size = VPM_SETUP_SIZE_32_BIT,
                .addr = c->num_inputs,
        };

        uint32_t packed;
        V3D33_VPM_GENERIC_BLOCK_READ_SETUP_pack(NULL,
                                                (uint8_t *)&packed,
                                                &unpacked);
        vir_VPMSETUP(c, vir_uniform_ui(c, packed));

        *num_components_queued = num_components - 1;
        *remaining -= num_components;
        c->num_inputs++;

        return vir_MOV(c, vpm);
}

static void
ntq_setup_inputs(struct v3d_compile *c)
{
        unsigned num_entries = 0;
        unsigned num_components = 0;
        nir_foreach_variable(var, &c->s->inputs) {
                num_entries++;
                num_components += glsl_get_components(var->type);
        }

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order. This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        uint32_t vpm_components_queued = 0;
        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                bool uses_iid = c->s->info.system_values_read &
                        (1ull << SYSTEM_VALUE_INSTANCE_ID);
                bool uses_vid = c->s->info.system_values_read &
                        (1ull << SYSTEM_VALUE_VERTEX_ID);

                num_components += uses_iid;
                num_components += uses_vid;

                if (uses_iid) {
                        c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                                   &num_components, ~0);
                }

                if (uses_vid) {
                        c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                                   &num_components, ~0);
                }
        }

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_PNTC ||
                                   (var->data.location >= VARYING_SLOT_VAR0 &&
                                    (c->fs_key->point_sprite_mask &
                                     (1 << (var->data.location -
                                            VARYING_SLOT_VAR0))))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var);
                        }
                } else {
                        int var_components = glsl_get_components(var->type);

                        for (int i = 0; i < var_components; i++) {
                                c->inputs[loc * 4 + i] =
                                        ntq_emit_vpm_read(c,
                                                          &vpm_components_queued,
                                                          &num_components,
                                                          loc * 4 + i);

                        }
                        c->vattr_sizes[loc] = var_components;
                }
        }

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                assert(vpm_components_queued == 0);
                assert(num_components == 0);
        }
}

static void
ntq_setup_outputs(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                        add_output(c, loc + var->data.location_frac + i,
                                   var->data.location,
                                   var->data.location_frac + i);
                }

                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                                c->output_color_var[0] = var;
                                c->output_color_var[1] = var;
                                c->output_color_var[2] = var;
                                c->output_color_var[3] = var;
                                break;
                        case FRAG_RESULT_DATA0:
                        case FRAG_RESULT_DATA1:
                        case FRAG_RESULT_DATA2:
                        case FRAG_RESULT_DATA3:
                                c->output_color_var[var->data.location -
                                                    FRAG_RESULT_DATA0] = var;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}

static void
ntq_setup_uniforms(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                uint32_t vec4_count = glsl_count_attribute_slots(var->type,
                                                                 false);
                unsigned vec4_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * vec4_size,
                                      vec4_count * vec4_size);
        }
}

/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = vir_get_temp(c);
        }
}

static void
ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}

static void
ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        /* VIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers()).
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, 0);
}

static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        nir_const_value *const_offset;
        unsigned offset;

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                if (const_offset) {
                        offset = nir_intrinsic_base(instr) + const_offset->u32[0];
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        ntq_store_dest(c, &instr->dest, 0,
                                       vir_uniform(c, QUNIFORM_UNIFORM,
                                                   offset));
                } else {
                        ntq_store_dest(c, &instr->dest, 0,
                                       indirect_uniform_load(c, instr));
                }
                break;

        case nir_intrinsic_load_ubo:
                for (int i = 0; i < instr->num_components; i++) {
                        int ubo = nir_src_as_const_value(instr->src[0])->u32[0];

                        /* Adjust for where we stored the TGSI register base. */
                        vir_ADD_dest(c,
                                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                                     vir_uniform(c, QUNIFORM_UBO_ADDR, 1 + ubo),
                                     vir_ADD(c,
                                             ntq_get_src(c, instr->src[1], 0),
                                             vir_uniform_ui(c, i * 4)));

                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
                }
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                                   nir_intrinsic_ucp_id(instr) *
                                                   4 + i));
                }
                break;

        case nir_intrinsic_load_alpha_ref_float:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
                break;

        case nir_intrinsic_load_sample_mask_in:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
                break;

        case nir_intrinsic_load_front_face:
                /* The register contains 0 (front) or 1 (back), and we need to
                 * turn it into a NIR bool where true means front.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               vir_ADD(c,
                                       vir_uniform_ui(c, -1),
                                       vir_REVF(c)));
                break;

        case nir_intrinsic_load_instance_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
                break;

        case nir_intrinsic_load_vertex_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
                break;

        case nir_intrinsic_load_input:
                const_offset = nir_src_as_const_value(instr->src[0]);
                assert(const_offset && "v3d doesn't support indirect inputs");
                for (int i = 0; i < instr->num_components; i++) {
                        offset = nir_intrinsic_base(instr) + const_offset->u32[0];
                        int comp = nir_intrinsic_component(instr) + i;
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_MOV(c, c->inputs[offset * 4 + comp]));
                }
                break;

        case nir_intrinsic_store_output:
                const_offset = nir_src_as_const_value(instr->src[1]);
                assert(const_offset && "v3d doesn't support indirect outputs");
                offset = ((nir_intrinsic_base(instr) +
                           const_offset->u32[0]) * 4 +
                          nir_intrinsic_component(instr));

                for (int i = 0; i < instr->num_components; i++) {
                        c->outputs[offset + i] =
                                vir_MOV(c, ntq_get_src(c, instr->src[0], i));
                }
                c->num_outputs = MAX2(c->num_outputs,
                                      offset + instr->num_components);
                break;

        case nir_intrinsic_discard:
                if (c->execute.file != QFILE_NULL) {
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                        vir_uniform_ui(c, 0));
                }
                break;

        case nir_intrinsic_discard_if: {
                /* true (~0) if we're discarding */
                struct qreg cond = ntq_get_src(c, instr->src[0], 0);

                if (c->execute.file != QFILE_NULL) {
                        /* execute == 0 means the channel is active. Invert
                         * the condition so that we can use zero as "executing
                         * and discarding."
                         */
                        vir_PF(c, vir_OR(c, c->execute, vir_NOT(c, cond)),
                               V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_PF(c, cond, V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFNA);
                }

                break;
        }
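
        /* Worked example for the control-flow path of discard_if above
         * (illustrative, derived from the code rather than the HW docs):
         *
         *     execute       cond          OR(execute, ~cond)   discarded?
         *     0 (active)    ~0 (true)     0                    yes (Z set)
         *     0 (active)    0  (false)    ~0                   no
         *     N (inactive)  anything      nonzero              no
         *
         * so the SETMSF predicated on IFA zeroes the multisample flags
         * exactly for lanes that are both active and discarding.
         */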

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

/* Clears (activates) the execute flags for any channels whose jump target
 * matches this block.
 */
static void
ntq_activate_execute_for_block(struct v3d_compile *c)
{
        vir_PF(c, vir_SUB(c, c->execute, vir_uniform_ui(c, c->cur_block->index)),
               V3D_QPU_PF_PUSHZ);

        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
}

static void
ntq_emit_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_top_level = true;
        }

        /* Set A for executing (execute == 0) and jumping (if->condition ==
         * 0) channels, and then update execute flags for those to point to
         * the ELSE block.
         */
        vir_PF(c, vir_OR(c,
                         c->execute,
                         ntq_get_src(c, if_stmt->condition, 0)),
               V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA,
                     c->execute,
                     vir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
        vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block. First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF
                 */
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                vir_PF(c, vir_SUB(c, c->execute,
                                  vir_uniform_ui(c, after_block->index)),
                       V3D_QPU_PF_PUSHZ);
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
                vir_link_blocks(c->cur_block, after_block);
                vir_link_blocks(c->cur_block, else_block);

                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
        if (was_top_level)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);
}
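
/* Illustrative sketch of the execute-mask scheme used above (pseudocode, not
 * compiled): each lane's c->execute holds 0 while the lane is active, or the
 * index of the block it is waiting to resume at.  For
 *
 *     if (cond) { then } else { else }
 *
 * we emit roughly:
 *
 *     execute = (execute == 0 && !cond) ? else_block->index : execute
 *     branch to else_block if no lane has execute == 0
 *     then:  body; point finished lanes at after_block
 *     else:  reactivate lanes whose execute == else_block->index; body
 *     after: reactivate lanes, or clear execute entirely at top level
 */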

static void
ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
{
        switch (jump->type) {
        case nir_jump_break:
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_break_block->index));
                break;

        case nir_jump_continue:
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_cont_block->index));
                break;

        case nir_jump_return:
1885 unreachable("All returns shouold be lowered\n");
        }
}

static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                ntq_emit_jump(c, nir_instr_as_jump(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
ntq_emit_block(struct v3d_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}

static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
{
        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_top_level = true;
        }

        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = vir_new_block(c);
        c->loop_break_block = vir_new_block(c);

        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* Re-enable any previous continues now, so our ANYA check below
         * works.
         *
         * XXX: Use the .ORZ flags update, instead.
         */
        vir_PF(c, vir_SUB(c,
                          c->execute,
                          vir_uniform_ui(c, c->loop_cont_block->index)),
               V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));

        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);

        vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_link_blocks(c->cur_block, c->loop_break_block);

        vir_set_emit_block(c, c->loop_break_block);
        if (was_top_level)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;
}

static void
ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}

static void
ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}

static void
nir_to_vir(struct v3d_compile *c)
{
        if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
                c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));

                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, NULL, 0);
                        c->point_y = emit_fragment_varying(c, NULL, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, NULL, 0);
                }
        }

        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}

const nir_shader_compiler_options v3d_nir_options = {
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_bitfield_insert = true,
        .lower_bitfield_extract = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_unpack_snorm_4x8 = true,
        .lower_fdiv = true,
        .lower_ffma = true,
        .lower_flrp32 = true,
        .lower_fpow = true,
        .lower_fsat = true,
        .lower_fsqrt = true,
        .native_integers = true,
};


#if 0
static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_function(function, nir) {
                if (!function->impl)
                        continue;
                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr(instr, block)
                                count++;
                }
        }
        return count;
}
#endif

void
v3d_nir_to_vir(struct v3d_compile *c)
{
        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_vir(c);

        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                emit_frag_end(c);
                break;
        case MESA_SHADER_VERTEX:
                emit_vert_end(c);
                break;
        default:
                unreachable("bad stage");
        }

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        vir_optimize(c);
        vir_lower_uniforms(c);

        /* XXX: vir_schedule_instructions(c); */

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        v3d_vir_to_qpu(c);
}
2140 }