nir: rename nir_op_fne to nir_op_fneu
src/gallium/drivers/vc4/vc4_program.c (mesa.git)
1 /*
2 * Copyright (c) 2014 Scott Mansell
3 * Copyright © 2014 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <inttypes.h>
26 #include "util/format/u_format.h"
27 #include "util/crc32.h"
28 #include "util/u_helpers.h"
29 #include "util/u_math.h"
30 #include "util/u_memory.h"
31 #include "util/ralloc.h"
32 #include "util/hash_table.h"
33 #include "tgsi/tgsi_dump.h"
34 #include "tgsi/tgsi_parse.h"
35 #include "compiler/nir/nir.h"
36 #include "compiler/nir/nir_builder.h"
37 #include "compiler/nir_types.h"
38 #include "nir/tgsi_to_nir.h"
39 #include "vc4_context.h"
40 #include "vc4_qpu.h"
41 #include "vc4_qir.h"
42
43 static struct qreg
44 ntq_get_src(struct vc4_compile *c, nir_src src, int i);
45 static void
46 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
47
48 static int
49 type_size(const struct glsl_type *type, bool bindless)
50 {
51 return glsl_count_attribute_slots(type, false);
52 }
53
54 static void
55 resize_qreg_array(struct vc4_compile *c,
56 struct qreg **regs,
57 uint32_t *size,
58 uint32_t decl_size)
59 {
60 if (*size >= decl_size)
61 return;
62
63 uint32_t old_size = *size;
64 *size = MAX2(*size * 2, decl_size);
65 *regs = reralloc(c, *regs, struct qreg, *size);
66 if (!*regs) {
67 fprintf(stderr, "Malloc failure\n");
68 abort();
69 }
70
71 for (uint32_t i = old_size; i < *size; i++)
72 (*regs)[i] = c->undef;
73 }
74
75 static void
76 ntq_emit_thrsw(struct vc4_compile *c)
77 {
78 if (!c->fs_threaded)
79 return;
80
81 /* Always thread switch after each texture operation for now.
82 *
83 * We could do better by batching a bunch of texture fetches up and
84 * then doing one thread switch and collecting all their results
85 * afterward.
86 */
87 qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef,
88 c->undef, c->undef));
89 c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
90 }
91
92 static struct qreg
93 indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
94 {
95 struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
96
97 /* Clamp to [0, array size). Note that MIN/MAX are signed. */
98 uint32_t range = nir_intrinsic_range(intr);
99 indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
100 indirect_offset = qir_MIN_NOIMM(c, indirect_offset,
101 qir_uniform_ui(c, range - 4));
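/* The upper bound is range - 4 because the TMU fetch below returns a full
 * 32-bit word, so the highest byte offset that still reads in-bounds is
 * four below the end of the declared uniform range.
 */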
102
103 qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
104 indirect_offset,
105 qir_uniform(c, QUNIFORM_UBO0_ADDR,
106 nir_intrinsic_base(intr)));
107
108 c->num_texture_samples++;
109
110 ntq_emit_thrsw(c);
111
112 return qir_TEX_RESULT(c);
113 }
114
115 static struct qreg
116 vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
117 {
118 int buffer_index = nir_src_as_uint(intr->src[0]);
119 assert(buffer_index == 1);
120 assert(c->stage == QSTAGE_FRAG);
121
122 struct qreg offset = ntq_get_src(c, intr->src[1], 0);
123
124 /* Clamp to [0, array size). Note that MIN/MAX are signed. */
125 offset = qir_MAX(c, offset, qir_uniform_ui(c, 0));
126 offset = qir_MIN_NOIMM(c, offset,
127 qir_uniform_ui(c, c->fs_key->ubo_1_size - 4));
128
129 qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
130 offset,
131 qir_uniform(c, QUNIFORM_UBO1_ADDR, 0));
132
133 c->num_texture_samples++;
134
135 ntq_emit_thrsw(c);
136
137 return qir_TEX_RESULT(c);
138 }
139
140 nir_ssa_def *
141 vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
142 {
143 switch (swiz) {
144 default:
145 case PIPE_SWIZZLE_NONE:
146 fprintf(stderr, "warning: unknown swizzle\n");
147 /* FALLTHROUGH */
148 case PIPE_SWIZZLE_0:
149 return nir_imm_float(b, 0.0);
150 case PIPE_SWIZZLE_1:
151 return nir_imm_float(b, 1.0);
152 case PIPE_SWIZZLE_X:
153 case PIPE_SWIZZLE_Y:
154 case PIPE_SWIZZLE_Z:
155 case PIPE_SWIZZLE_W:
156 return srcs[swiz];
157 }
158 }
159
160 static struct qreg *
161 ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
162 {
163 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
164 def->num_components);
165 _mesa_hash_table_insert(c->def_ht, def, qregs);
166 return qregs;
167 }
168
169 /**
170 * This function is responsible for getting QIR results into the associated
171 * storage for a NIR instruction.
172 *
173 * If it's a NIR SSA def, then we just set the associated hash table entry to
174 * the new result.
175 *
176 * If it's a NIR reg, then we need to update the existing qreg assigned to the
177 * NIR destination with the incoming value. To do that without introducing
178 * new MOVs, we require that the incoming qreg either be a uniform, or be
179 * SSA-defined by the previous QIR instruction in the block and rewritable by
180 * this function. That lets us sneak ahead and insert the SF flag beforehand
181 * (knowing that the previous instruction doesn't depend on flags) and rewrite
182  * its destination to be the NIR reg's destination.
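 *
 * A hypothetical sketch (not literal QIR syntax): storing "temp = fadd a, b"
 * into a NIR reg "r" inside control flow becomes
 *
 *     sf   exec
 *     r    = fadd a, b        ; cond = ZS, cond_is_exec_mask
 *
 * i.e. the SF on the exec mask is slipped in ahead of the producing
 * instruction, and that instruction's destination is rewritten in place.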
183 */
184 static void
185 ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
186 struct qreg result)
187 {
188 struct qinst *last_inst = NULL;
189 if (!list_is_empty(&c->cur_block->instructions))
190 last_inst = (struct qinst *)c->cur_block->instructions.prev;
191
192 assert(result.file == QFILE_UNIF ||
193 (result.file == QFILE_TEMP &&
194 last_inst && last_inst == c->defs[result.index]));
195
196 if (dest->is_ssa) {
197 assert(chan < dest->ssa.num_components);
198
199 struct qreg *qregs;
200 struct hash_entry *entry =
201 _mesa_hash_table_search(c->def_ht, &dest->ssa);
202
203 if (entry)
204 qregs = entry->data;
205 else
206 qregs = ntq_init_ssa_def(c, &dest->ssa);
207
208 qregs[chan] = result;
209 } else {
210 nir_register *reg = dest->reg.reg;
211 assert(dest->reg.base_offset == 0);
212 assert(reg->num_array_elems == 0);
213 struct hash_entry *entry =
214 _mesa_hash_table_search(c->def_ht, reg);
215 struct qreg *qregs = entry->data;
216
217 /* Insert a MOV if the source wasn't an SSA def in the
218 * previous instruction.
219 */
220 if (result.file == QFILE_UNIF) {
221 result = qir_MOV(c, result);
222 last_inst = c->defs[result.index];
223 }
224
225 /* We know they're both temps, so just rewrite index. */
226 c->defs[last_inst->dst.index] = NULL;
227 last_inst->dst.index = qregs[chan].index;
228
229 /* If we're in control flow, then make this update of the reg
230 * conditional on the execution mask.
231 */
232 if (c->execute.file != QFILE_NULL) {
233 last_inst->dst.index = qregs[chan].index;
234
235 /* Set the flags to the current exec mask. To insert
236 * the SF, we temporarily remove our SSA instruction.
237 */
238 list_del(&last_inst->link);
239 qir_SF(c, c->execute);
240 list_addtail(&last_inst->link,
241 &c->cur_block->instructions);
242
243 last_inst->cond = QPU_COND_ZS;
244 last_inst->cond_is_exec_mask = true;
245 }
246 }
247 }
248
249 static struct qreg *
250 ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
251 {
252 if (dest->is_ssa) {
253 struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
254 for (int i = 0; i < dest->ssa.num_components; i++)
255 qregs[i] = c->undef;
256 return qregs;
257 } else {
258 nir_register *reg = dest->reg.reg;
259 assert(dest->reg.base_offset == 0);
260 assert(reg->num_array_elems == 0);
261 struct hash_entry *entry =
262 _mesa_hash_table_search(c->def_ht, reg);
263 return entry->data;
264 }
265 }
266
267 static struct qreg
268 ntq_get_src(struct vc4_compile *c, nir_src src, int i)
269 {
270 struct hash_entry *entry;
271 if (src.is_ssa) {
272 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
273 assert(i < src.ssa->num_components);
274 } else {
275 nir_register *reg = src.reg.reg;
276 entry = _mesa_hash_table_search(c->def_ht, reg);
277 assert(reg->num_array_elems == 0);
278 assert(src.reg.base_offset == 0);
279 assert(i < reg->num_components);
280 }
281
282 struct qreg *qregs = entry->data;
283 return qregs[i];
284 }
285
286 static struct qreg
287 ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
288 unsigned src)
289 {
290 assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
291 unsigned chan = ffs(instr->dest.write_mask) - 1;
292 struct qreg r = ntq_get_src(c, instr->src[src].src,
293 instr->src[src].swizzle[chan]);
294
295 assert(!instr->src[src].abs);
296 assert(!instr->src[src].negate);
297
298 return r;
299 }
300
301 static inline struct qreg
302 qir_SAT(struct vc4_compile *c, struct qreg val)
303 {
304 return qir_FMAX(c,
305 qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
306 qir_uniform_f(c, 0.0));
307 }
308
309 static struct qreg
310 ntq_rcp(struct vc4_compile *c, struct qreg x)
311 {
312 struct qreg r = qir_RCP(c, x);
313
314 /* Apply a Newton-Raphson step to improve the accuracy. */
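        /* A sketch of why this works: with r0 = RCP(x) written as
         * (1/x) * (1 + e), r1 = r0 * (2 - x * r0) = (1/x) * (1 - e^2), so one
         * step roughly squares (and therefore shrinks) the relative error of
         * the hardware estimate.
         */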
315 r = qir_FMUL(c, r, qir_FSUB(c,
316 qir_uniform_f(c, 2.0),
317 qir_FMUL(c, x, r)));
318
319 return r;
320 }
321
322 static struct qreg
323 ntq_rsq(struct vc4_compile *c, struct qreg x)
324 {
325 struct qreg r = qir_RSQ(c, x);
326
327 /* Apply a Newton-Raphson step to improve the accuracy. */
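        /* This is the usual rsqrt refinement r1 = r0 * (1.5 - 0.5 * x * r0 * r0)
         * with r0 = RSQ(x), spelled out with the FMULs below.
         */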
328 r = qir_FMUL(c, r, qir_FSUB(c,
329 qir_uniform_f(c, 1.5),
330 qir_FMUL(c,
331 qir_uniform_f(c, 0.5),
332 qir_FMUL(c, x,
333 qir_FMUL(c, r, r)))));
334
335 return r;
336 }
337
338 static struct qreg
339 ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
340 {
341 struct qreg src0_hi = qir_SHR(c, src0,
342 qir_uniform_ui(c, 24));
343 struct qreg src1_hi = qir_SHR(c, src1,
344 qir_uniform_ui(c, 24));
345
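        /* A sketch of the partial-product math: splitting a = ah * 2^24 + al
         * and b = bh * 2^24 + bl, the low 32 bits of a * b are
         * al*bl + ((ah*bl + al*bh) << 24); the ah*bh term falls entirely
         * outside the low word, so three MUL24s are enough.
         */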
346 struct qreg hilo = qir_MUL24(c, src0_hi, src1);
347 struct qreg lohi = qir_MUL24(c, src0, src1_hi);
348 struct qreg lolo = qir_MUL24(c, src0, src1);
349
350 return qir_ADD(c, lolo, qir_SHL(c,
351 qir_ADD(c, hilo, lohi),
352 qir_uniform_ui(c, 24)));
353 }
354
355 static struct qreg
356 ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
357 {
358 struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
359 qir_uniform_ui(c, 8)));
360 return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
361 }
362
363 /**
364 * Emits a lowered TXF_MS from an MSAA texture.
365 *
366 * The addressing math has been lowered in NIR, and now we just need to read
367 * it like a UBO.
368 */
369 static void
370 ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
371 {
372 uint32_t tile_width = 32;
373 uint32_t tile_height = 32;
374 uint32_t tile_size = (tile_height * tile_width *
375 VC4_MAX_SAMPLES * sizeof(uint32_t));
376
377 unsigned unit = instr->texture_index;
378 uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
379 uint32_t w_tiles = w / tile_width;
380 uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
381 uint32_t h_tiles = h / tile_height;
382 uint32_t size = w_tiles * h_tiles * tile_size;
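        /* For example (illustrative numbers only): a 70x34 MSAA surface pads
         * to 96x64 pixels, i.e. 3x2 tiles of 32*32*VC4_MAX_SAMPLES*4 bytes
         * each, and "size" is the total byte extent the address is clamped
         * against below.
         */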
383
384 struct qreg addr;
385 assert(instr->num_srcs == 1);
386 assert(instr->src[0].src_type == nir_tex_src_coord);
387 addr = ntq_get_src(c, instr->src[0].src, 0);
388
389 /* Perform the clamping required by kernel validation. */
390 addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
391 addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4));
392
393 qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
394 addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
395
396 ntq_emit_thrsw(c);
397
398 struct qreg tex = qir_TEX_RESULT(c);
399 c->num_texture_samples++;
400
401 enum pipe_format format = c->key->tex[unit].format;
402 if (util_format_is_depth_or_stencil(format)) {
403 struct qreg scaled = ntq_scale_depth_texture(c, tex);
404 for (int i = 0; i < 4; i++)
405 ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled));
406 } else {
407 for (int i = 0; i < 4; i++)
408 ntq_store_dest(c, &instr->dest, i,
409 qir_UNPACK_8_F(c, tex, i));
410 }
411 }
412
413 static void
414 ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
415 {
416 struct qreg s, t, r, lod, compare;
417 bool is_txb = false, is_txl = false;
418 unsigned unit = instr->texture_index;
419
420 if (instr->op == nir_texop_txf) {
421 ntq_emit_txf(c, instr);
422 return;
423 }
424
425 for (unsigned i = 0; i < instr->num_srcs; i++) {
426 switch (instr->src[i].src_type) {
427 case nir_tex_src_coord:
428 s = ntq_get_src(c, instr->src[i].src, 0);
429 if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
430 t = qir_uniform_f(c, 0.5);
431 else
432 t = ntq_get_src(c, instr->src[i].src, 1);
433 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
434 r = ntq_get_src(c, instr->src[i].src, 2);
435 break;
436 case nir_tex_src_bias:
437 lod = ntq_get_src(c, instr->src[i].src, 0);
438 is_txb = true;
439 break;
440 case nir_tex_src_lod:
441 lod = ntq_get_src(c, instr->src[i].src, 0);
442 is_txl = true;
443 break;
444 case nir_tex_src_comparator:
445 compare = ntq_get_src(c, instr->src[i].src, 0);
446 break;
447 default:
448 unreachable("unknown texture source");
449 }
450 }
451
452 if (c->stage != QSTAGE_FRAG && !is_txl) {
453 /* From the GLSL 1.20 spec:
454 *
455 * "If it is mip-mapped and running on the vertex shader,
456 * then the base texture is used."
457 */
458 is_txl = true;
459 lod = qir_uniform_ui(c, 0);
460 }
461
462 if (c->key->tex[unit].force_first_level) {
463 lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit);
464 is_txl = true;
465 is_txb = false;
466 }
467
468 struct qreg texture_u[] = {
469 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
470 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
471 qir_uniform(c, QUNIFORM_CONSTANT, 0),
472 qir_uniform(c, QUNIFORM_CONSTANT, 0),
473 };
474 uint32_t next_texture_u = 0;
475
476 /* There is no native support for GL texture rectangle coordinates, so
477 * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
478 * 1]).
479 */
480 if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
481 s = qir_FMUL(c, s,
482 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
483 t = qir_FMUL(c, t,
484 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
485 }
486
487 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
488 texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
489 unit | (is_txl << 16));
490 }
491
492 struct qinst *tmu;
493 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
494 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r);
495 tmu->src[qir_get_tex_uniform_src(tmu)] =
496 texture_u[next_texture_u++];
497 } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
498 c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
499 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
500 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
501 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0),
502 qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR,
503 unit));
504 tmu->src[qir_get_tex_uniform_src(tmu)] =
505 texture_u[next_texture_u++];
506 }
507
508 if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
509 s = qir_SAT(c, s);
510 }
511
512 if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
513 t = qir_SAT(c, t);
514 }
515
516 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t);
517 tmu->src[qir_get_tex_uniform_src(tmu)] =
518 texture_u[next_texture_u++];
519
520 if (is_txl || is_txb) {
521 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod);
522 tmu->src[qir_get_tex_uniform_src(tmu)] =
523 texture_u[next_texture_u++];
524 }
525
526 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s);
527 tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++];
528
529 c->num_texture_samples++;
530
531 ntq_emit_thrsw(c);
532
533 struct qreg tex = qir_TEX_RESULT(c);
534
535 enum pipe_format format = c->key->tex[unit].format;
536
537 struct qreg *dest = ntq_get_dest(c, &instr->dest);
538 if (util_format_is_depth_or_stencil(format)) {
539 struct qreg normalized = ntq_scale_depth_texture(c, tex);
540 struct qreg depth_output;
541
542 struct qreg u0 = qir_uniform_f(c, 0.0f);
543 struct qreg u1 = qir_uniform_f(c, 1.0f);
544 if (c->key->tex[unit].compare_mode) {
545 /* From the GL_ARB_shadow spec:
546 *
547 * "Let Dt (D subscript t) be the depth texture
548 * value, in the range [0, 1]. Let R be the
549 * interpolated texture coordinate clamped to the
550 * range [0, 1]."
551 */
552 compare = qir_SAT(c, compare);
553
554 switch (c->key->tex[unit].compare_func) {
555 case PIPE_FUNC_NEVER:
556 depth_output = qir_uniform_f(c, 0.0f);
557 break;
558 case PIPE_FUNC_ALWAYS:
559 depth_output = u1;
560 break;
561 case PIPE_FUNC_EQUAL:
562 qir_SF(c, qir_FSUB(c, compare, normalized));
563 depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
564 break;
565 case PIPE_FUNC_NOTEQUAL:
566 qir_SF(c, qir_FSUB(c, compare, normalized));
567 depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
568 break;
569 case PIPE_FUNC_GREATER:
570 qir_SF(c, qir_FSUB(c, compare, normalized));
571 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
572 break;
573 case PIPE_FUNC_GEQUAL:
574 qir_SF(c, qir_FSUB(c, normalized, compare));
575 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
576 break;
577 case PIPE_FUNC_LESS:
578 qir_SF(c, qir_FSUB(c, compare, normalized));
579 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
580 break;
581 case PIPE_FUNC_LEQUAL:
582 qir_SF(c, qir_FSUB(c, normalized, compare));
583 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
584 break;
585 }
586 } else {
587 depth_output = normalized;
588 }
589
590 for (int i = 0; i < 4; i++)
591 dest[i] = depth_output;
592 } else {
593 for (int i = 0; i < 4; i++)
594 dest[i] = qir_UNPACK_8_F(c, tex, i);
595 }
596 }
597
598 /**
599 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
600 * to zero).
601 */
602 static struct qreg
603 ntq_ffract(struct vc4_compile *c, struct qreg src)
604 {
605 struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
606 struct qreg diff = qir_FSUB(c, src, trunc);
607 qir_SF(c, diff);
608
609 qir_FADD_dest(c, diff,
610 diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
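        /* Worked example: src = -1.25 gives trunc = -1.0 and diff = -0.25;
         * the N flag is set, the conditional add fires, and the result is
         * 0.75, which matches fract(-1.25).
         */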
611
612 return qir_MOV(c, diff);
613 }
614
615 /**
616 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
617 * zero).
618 */
619 static struct qreg
620 ntq_ffloor(struct vc4_compile *c, struct qreg src)
621 {
622 struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
623
624 /* This will be < 0 if we truncated and the truncation was of a value
625 * that was < 0 in the first place.
626 */
627 qir_SF(c, qir_FSUB(c, src, result));
628
629 struct qinst *sub = qir_FSUB_dest(c, result,
630 result, qir_uniform_f(c, 1.0));
631 sub->cond = QPU_COND_NS;
632
633 return qir_MOV(c, result);
634 }
635
636 /**
637 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
638 * zero).
639 */
640 static struct qreg
641 ntq_fceil(struct vc4_compile *c, struct qreg src)
642 {
643 struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
644
645 /* This will be < 0 if we truncated and the truncation was of a value
646 * that was > 0 in the first place.
647 */
648 qir_SF(c, qir_FSUB(c, result, src));
649
650 qir_FADD_dest(c, result,
651 result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
652
653 return qir_MOV(c, result);
654 }
655
656 static struct qreg
657 ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x)
658 {
659 /* Since we're using a Taylor approximation, we want to have a small
660 * number of coefficients and take advantage of sin/cos repeating
661 * every 2pi. We keep our x as close to 0 as we can, since the series
662          * will be less accurate as |x| increases. (Also, be careful about
663          * trying to be clever and shifting the input x value using sin/cos
664          * identities, because getting accurate values for x==0 is very
665          * important for SDL rendering.)
666 */
667 struct qreg scaled_x =
668 qir_FMUL(c, x,
669 qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
670 /* Note: FTOI truncates toward 0. */
671 struct qreg x_frac = qir_FSUB(c, scaled_x,
672 qir_ITOF(c, qir_FTOI(c, scaled_x)));
673 /* Map [0.5, 1] to [-0.5, 0] */
674 qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5)));
675 qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC;
676 /* Map [-1, -0.5] to [0, 0.5] */
677 qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5)));
678 qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
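        /* Worked example of the reduction (illustrative only): x = 7*pi/2
         * gives scaled_x = 1.75, truncation leaves x_frac = 0.75, and the
         * [0.5, 1] -> [-0.5, 0] step yields -0.25, i.e. the same angle
         * modulo 2*pi (-pi/2).
         */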
679
680 return x_frac;
681 }
682
683 static struct qreg
684 ntq_fsin(struct vc4_compile *c, struct qreg src)
685 {
686 float coeff[] = {
687 2.0 * M_PI,
688 -pow(2.0 * M_PI, 3) / (3 * 2 * 1),
689 pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
690 -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
691 pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
692 };
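        /* These are the odd-order Taylor coefficients of sin(2*pi*x) about 0:
         * sin(2*pi*x) = sum_n (-1)^n * (2*pi)^(2n+1) * x^(2n+1) / (2n+1)!,
         * truncated after the x^9 term.
         */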
693
694 struct qreg x = ntq_shrink_sincos_input_range(c, src);
695 struct qreg x2 = qir_FMUL(c, x, x);
696 struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
697 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
698 x = qir_FMUL(c, x, x2);
699 sum = qir_FADD(c,
700 sum,
701 qir_FMUL(c,
702 x,
703 qir_uniform_f(c, coeff[i])));
704 }
705 return sum;
706 }
707
708 static struct qreg
709 ntq_fcos(struct vc4_compile *c, struct qreg src)
710 {
711 float coeff[] = {
712 1.0f,
713 -pow(2.0 * M_PI, 2) / (2 * 1),
714 pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
715 -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
716 pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
717 -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
718 };
719
720 struct qreg x_frac = ntq_shrink_sincos_input_range(c, src);
721 struct qreg sum = qir_uniform_f(c, coeff[0]);
722 struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
723 struct qreg x = x2; /* Current x^2, x^4, or x^6 */
724 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
725 if (i != 1)
726 x = qir_FMUL(c, x, x2);
727
728 sum = qir_FADD(c, qir_FMUL(c,
729 x,
730 qir_uniform_f(c, coeff[i])),
731 sum);
732 }
733 return sum;
734 }
735
736 static struct qreg
737 ntq_fsign(struct vc4_compile *c, struct qreg src)
738 {
739 struct qreg t = qir_get_temp(c);
740
741 qir_SF(c, src);
742 qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
743 qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
744 qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
745 return qir_MOV(c, t);
746 }
747
748 static void
749 emit_vertex_input(struct vc4_compile *c, int attr)
750 {
751 enum pipe_format format = c->vs_key->attr_formats[attr];
752 uint32_t attr_size = util_format_get_blocksize(format);
753
754 c->vattr_sizes[attr] = align(attr_size, 4);
755 for (int i = 0; i < align(attr_size, 4) / 4; i++) {
756 c->inputs[attr * 4 + i] =
757 qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
758 c->num_inputs++;
759 }
760 }
761
762 static void
763 emit_fragcoord_input(struct vc4_compile *c, int attr)
764 {
765 c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
766 c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
767 c->inputs[attr * 4 + 2] =
768 qir_FMUL(c,
769 qir_ITOF(c, qir_FRAG_Z(c)),
770 qir_uniform_f(c, 1.0 / 0xffffff));
771 c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
772 }
773
774 static struct qreg
775 emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
776 uint8_t swizzle)
777 {
778 uint32_t i = c->num_input_slots++;
779 struct qreg vary = {
780 QFILE_VARY,
781 i
782 };
783
784 if (c->num_input_slots >= c->input_slots_array_size) {
785 c->input_slots_array_size =
786 MAX2(4, c->input_slots_array_size * 2);
787
788 c->input_slots = reralloc(c, c->input_slots,
789 struct vc4_varying_slot,
790 c->input_slots_array_size);
791 }
792
793 c->input_slots[i].slot = slot;
794 c->input_slots[i].swizzle = swizzle;
795
796 return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
797 }
798
799 static void
800 emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
801 {
802 for (int i = 0; i < 4; i++) {
803 c->inputs[attr * 4 + i] =
804 emit_fragment_varying(c, slot, i);
805 c->num_inputs++;
806 }
807 }
808
809 static void
810 add_output(struct vc4_compile *c,
811 uint32_t decl_offset,
812 uint8_t slot,
813 uint8_t swizzle)
814 {
815 uint32_t old_array_size = c->outputs_array_size;
816 resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
817 decl_offset + 1);
818
819 if (old_array_size != c->outputs_array_size) {
820 c->output_slots = reralloc(c,
821 c->output_slots,
822 struct vc4_varying_slot,
823 c->outputs_array_size);
824 }
825
826 c->output_slots[decl_offset].slot = slot;
827 c->output_slots[decl_offset].swizzle = swizzle;
828 }
829
830 static bool
831 ntq_src_is_only_ssa_def_user(nir_src *src)
832 {
833 if (!src->is_ssa)
834 return false;
835
836 if (!list_is_empty(&src->ssa->if_uses))
837 return false;
838
839 return (src->ssa->uses.next == &src->use_link &&
840 src->ssa->uses.next->next == &src->ssa->uses);
841 }
842
843 /**
844 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
845 * bit set.
846 *
847 * However, as an optimization, it tries to find the instructions generating
848 * the sources to be packed and just emit the pack flag there, if possible.
849 */
850 static void
851 ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
852 {
853 struct qreg result = qir_get_temp(c);
854 struct nir_alu_instr *vec4 = NULL;
855
856 /* If packing from a vec4 op (as expected), identify it so that we can
857 * peek back at what generated its sources.
858 */
859 if (instr->src[0].src.is_ssa &&
860 instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
861 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
862 nir_op_vec4) {
863 vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
864 }
865
866 /* If the pack is replicating the same channel 4 times, use the 8888
867 * pack flag. This is common for blending using the alpha
868 * channel.
869 */
870 if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
871 instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
872 instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
873 struct qreg rep = ntq_get_src(c,
874 instr->src[0].src,
875 instr->src[0].swizzle[0]);
876 ntq_store_dest(c, &instr->dest.dest, 0, qir_PACK_8888_F(c, rep));
877 return;
878 }
879
880 for (int i = 0; i < 4; i++) {
881 int swiz = instr->src[0].swizzle[i];
882 struct qreg src;
883 if (vec4) {
884 src = ntq_get_src(c, vec4->src[swiz].src,
885 vec4->src[swiz].swizzle[0]);
886 } else {
887 src = ntq_get_src(c, instr->src[0].src, swiz);
888 }
889
890 if (vec4 &&
891 ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
892 src.file == QFILE_TEMP &&
893 c->defs[src.index] &&
894 qir_is_mul(c->defs[src.index]) &&
895 !c->defs[src.index]->dst.pack) {
896 struct qinst *rewrite = c->defs[src.index];
897 c->defs[src.index] = NULL;
898 rewrite->dst = result;
899 rewrite->dst.pack = QPU_PACK_MUL_8A + i;
900 continue;
901 }
902
903 qir_PACK_8_F(c, result, src, i);
904 }
905
906 ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result));
907 }
908
909 /** Handles sign-extended bitfield extracts for 16 bits. */
910 static struct qreg
911 ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
912 struct qreg bits)
913 {
914 assert(bits.file == QFILE_UNIF &&
915 c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
916 c->uniform_data[bits.index] == 16);
917
918 assert(offset.file == QFILE_UNIF &&
919 c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
920 int offset_bit = c->uniform_data[offset.index];
921 assert(offset_bit % 16 == 0);
922
923 return qir_UNPACK_16_I(c, base, offset_bit / 16);
924 }
925
926 /** Handles unsigned bitfield extracts for 8 bits. */
927 static struct qreg
928 ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
929 struct qreg bits)
930 {
931 assert(bits.file == QFILE_UNIF &&
932 c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
933 c->uniform_data[bits.index] == 8);
934
935 assert(offset.file == QFILE_UNIF &&
936 c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
937 int offset_bit = c->uniform_data[offset.index];
938 assert(offset_bit % 8 == 0);
939
940 return qir_UNPACK_8_I(c, base, offset_bit / 8);
941 }
942
943 /**
944  * If compare_instr is a supported comparison, emits the comparison, writes
945  * sel_instr's result (selected according to the comparison outcome) into
946  * *dest, and returns true; otherwise returns false without emitting code.
947 */
948 static bool
949 ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
950 nir_alu_instr *compare_instr,
951 nir_alu_instr *sel_instr)
952 {
953 enum qpu_cond cond;
954
955 switch (compare_instr->op) {
956 case nir_op_feq32:
957 case nir_op_ieq32:
958 case nir_op_seq:
959 cond = QPU_COND_ZS;
960 break;
961 case nir_op_fneu32:
962 case nir_op_ine32:
963 case nir_op_sne:
964 cond = QPU_COND_ZC;
965 break;
966 case nir_op_fge32:
967 case nir_op_ige32:
968 case nir_op_uge32:
969 case nir_op_sge:
970 cond = QPU_COND_NC;
971 break;
972 case nir_op_flt32:
973 case nir_op_ilt32:
974 case nir_op_slt:
975 cond = QPU_COND_NS;
976 break;
977 default:
978 return false;
979 }
980
981 struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
982 struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);
983
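        /* All of the condition codes above read the flags of (src0 - src1),
         * emitted just below: ZS/ZC test equality, NS/NC test the sign of
         * the difference.
         */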
984 unsigned unsized_type =
985 nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
986 if (unsized_type == nir_type_float)
987 qir_SF(c, qir_FSUB(c, src0, src1));
988 else
989 qir_SF(c, qir_SUB(c, src0, src1));
990
991 switch (sel_instr->op) {
992 case nir_op_seq:
993 case nir_op_sne:
994 case nir_op_sge:
995 case nir_op_slt:
996 *dest = qir_SEL(c, cond,
997 qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
998 break;
999
1000 case nir_op_b32csel:
1001 *dest = qir_SEL(c, cond,
1002 ntq_get_alu_src(c, sel_instr, 1),
1003 ntq_get_alu_src(c, sel_instr, 2));
1004 break;
1005
1006 default:
1007 *dest = qir_SEL(c, cond,
1008 qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
1009 break;
1010 }
1011
1012         /* Make the temporary for ntq_store_dest(). */
1013 *dest = qir_MOV(c, *dest);
1014
1015 return true;
1016 }
1017
1018 /**
1019 * Attempts to fold a comparison generating a boolean result into the
1020 * condition code for selecting between two values, instead of comparing the
1021 * boolean result against 0 to generate the condition code.
1022 */
1023 static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
1024 struct qreg *src)
1025 {
1026 if (!instr->src[0].src.is_ssa)
1027 goto out;
1028 if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
1029 goto out;
1030 nir_alu_instr *compare =
1031 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
1032 if (!compare)
1033 goto out;
1034
1035 struct qreg dest;
1036 if (ntq_emit_comparison(c, &dest, compare, instr))
1037 return dest;
1038
1039 out:
1040 qir_SF(c, src[0]);
1041 return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2]));
1042 }
1043
1044 static struct qreg
1045 ntq_fddx(struct vc4_compile *c, struct qreg src)
1046 {
1047 /* Make sure that we have a bare temp to use for MUL rotation, so it
1048 * can be allocated to an accumulator.
1049 */
1050 if (src.pack || src.file != QFILE_TEMP)
1051 src = qir_MOV(c, src);
1052
1053 struct qreg from_left = qir_ROT_MUL(c, src, 1);
1054 struct qreg from_right = qir_ROT_MUL(c, src, 15);
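        /* The MUL rotations shift values between neighbouring QPU elements,
         * so each pixel can read the other column of its 2x2 quad; bit 0 of
         * the element index (tested below) says which column this pixel is
         * in.
         */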
1055
1056 /* Distinguish left/right pixels of the quad. */
1057 qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0),
1058 qir_uniform_ui(c, 1)));
1059
1060 return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1061 qir_FSUB(c, from_right, src),
1062 qir_FSUB(c, src, from_left)));
1063 }
1064
1065 static struct qreg
1066 ntq_fddy(struct vc4_compile *c, struct qreg src)
1067 {
1068 if (src.pack || src.file != QFILE_TEMP)
1069 src = qir_MOV(c, src);
1070
1071 struct qreg from_bottom = qir_ROT_MUL(c, src, 2);
1072 struct qreg from_top = qir_ROT_MUL(c, src, 14);
1073
1074 /* Distinguish top/bottom pixels of the quad. */
1075 qir_SF(c, qir_AND(c,
1076 qir_reg(QFILE_QPU_ELEMENT, 0),
1077 qir_uniform_ui(c, 2)));
1078
1079 return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1080 qir_FSUB(c, from_top, src),
1081 qir_FSUB(c, src, from_bottom)));
1082 }
1083
1084 static void
1085 ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
1086 {
1087 /* This should always be lowered to ALU operations for VC4. */
1088 assert(!instr->dest.saturate);
1089
1090         /* Vectors are special in that they have non-scalarized writemasks:
1091          * each source argument's first swizzle channel is written, in order,
1092          * to the corresponding writemask channel.
1093 */
1094 if (instr->op == nir_op_vec2 ||
1095 instr->op == nir_op_vec3 ||
1096 instr->op == nir_op_vec4) {
1097 struct qreg srcs[4];
1098 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1099 srcs[i] = ntq_get_src(c, instr->src[i].src,
1100 instr->src[i].swizzle[0]);
1101 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1102 ntq_store_dest(c, &instr->dest.dest, i,
1103 qir_MOV(c, srcs[i]));
1104 return;
1105 }
1106
1107 if (instr->op == nir_op_pack_unorm_4x8) {
1108 ntq_emit_pack_unorm_4x8(c, instr);
1109 return;
1110 }
1111
1112 if (instr->op == nir_op_unpack_unorm_4x8) {
1113 struct qreg src = ntq_get_src(c, instr->src[0].src,
1114 instr->src[0].swizzle[0]);
1115 for (int i = 0; i < 4; i++) {
1116 if (instr->dest.write_mask & (1 << i))
1117 ntq_store_dest(c, &instr->dest.dest, i,
1118 qir_UNPACK_8_F(c, src, i));
1119 }
1120 return;
1121 }
1122
1123 /* General case: We can just grab the one used channel per src. */
1124 struct qreg src[nir_op_infos[instr->op].num_inputs];
1125 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
1126 src[i] = ntq_get_alu_src(c, instr, i);
1127 }
1128
1129 struct qreg result;
1130
1131 switch (instr->op) {
1132 case nir_op_mov:
1133 result = qir_MOV(c, src[0]);
1134 break;
1135 case nir_op_fmul:
1136 result = qir_FMUL(c, src[0], src[1]);
1137 break;
1138 case nir_op_fadd:
1139 result = qir_FADD(c, src[0], src[1]);
1140 break;
1141 case nir_op_fsub:
1142 result = qir_FSUB(c, src[0], src[1]);
1143 break;
1144 case nir_op_fmin:
1145 result = qir_FMIN(c, src[0], src[1]);
1146 break;
1147 case nir_op_fmax:
1148 result = qir_FMAX(c, src[0], src[1]);
1149 break;
1150
1151 case nir_op_f2i32:
1152 case nir_op_f2u32:
1153 result = qir_FTOI(c, src[0]);
1154 break;
1155 case nir_op_i2f32:
1156 case nir_op_u2f32:
1157 result = qir_ITOF(c, src[0]);
1158 break;
1159 case nir_op_b2f32:
1160 result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
1161 break;
1162 case nir_op_b2i32:
1163 result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
1164 break;
1165 case nir_op_i2b32:
1166 case nir_op_f2b32:
1167 qir_SF(c, src[0]);
1168 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
1169 qir_uniform_ui(c, ~0),
1170 qir_uniform_ui(c, 0)));
1171 break;
1172
1173 case nir_op_iadd:
1174 result = qir_ADD(c, src[0], src[1]);
1175 break;
1176 case nir_op_ushr:
1177 result = qir_SHR(c, src[0], src[1]);
1178 break;
1179 case nir_op_isub:
1180 result = qir_SUB(c, src[0], src[1]);
1181 break;
1182 case nir_op_ishr:
1183 result = qir_ASR(c, src[0], src[1]);
1184 break;
1185 case nir_op_ishl:
1186 result = qir_SHL(c, src[0], src[1]);
1187 break;
1188 case nir_op_imin:
1189 result = qir_MIN(c, src[0], src[1]);
1190 break;
1191 case nir_op_imax:
1192 result = qir_MAX(c, src[0], src[1]);
1193 break;
1194 case nir_op_iand:
1195 result = qir_AND(c, src[0], src[1]);
1196 break;
1197 case nir_op_ior:
1198 result = qir_OR(c, src[0], src[1]);
1199 break;
1200 case nir_op_ixor:
1201 result = qir_XOR(c, src[0], src[1]);
1202 break;
1203 case nir_op_inot:
1204 result = qir_NOT(c, src[0]);
1205 break;
1206
1207 case nir_op_imul:
1208 result = ntq_umul(c, src[0], src[1]);
1209 break;
1210
1211 case nir_op_seq:
1212 case nir_op_sne:
1213 case nir_op_sge:
1214 case nir_op_slt:
1215 case nir_op_feq32:
1216 case nir_op_fneu32:
1217 case nir_op_fge32:
1218 case nir_op_flt32:
1219 case nir_op_ieq32:
1220 case nir_op_ine32:
1221 case nir_op_ige32:
1222 case nir_op_uge32:
1223 case nir_op_ilt32:
1224 if (!ntq_emit_comparison(c, &result, instr, instr)) {
1225 fprintf(stderr, "Bad comparison instruction\n");
1226 }
1227 break;
1228
1229 case nir_op_b32csel:
1230 result = ntq_emit_bcsel(c, instr, src);
1231 break;
1232 case nir_op_fcsel:
1233 qir_SF(c, src[0]);
1234 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2]));
1235 break;
1236
1237 case nir_op_frcp:
1238 result = ntq_rcp(c, src[0]);
1239 break;
1240 case nir_op_frsq:
1241 result = ntq_rsq(c, src[0]);
1242 break;
1243 case nir_op_fexp2:
1244 result = qir_EXP2(c, src[0]);
1245 break;
1246 case nir_op_flog2:
1247 result = qir_LOG2(c, src[0]);
1248 break;
1249
1250 case nir_op_ftrunc:
1251 result = qir_ITOF(c, qir_FTOI(c, src[0]));
1252 break;
1253 case nir_op_fceil:
1254 result = ntq_fceil(c, src[0]);
1255 break;
1256 case nir_op_ffract:
1257 result = ntq_ffract(c, src[0]);
1258 break;
1259 case nir_op_ffloor:
1260 result = ntq_ffloor(c, src[0]);
1261 break;
1262
1263 case nir_op_fsin:
1264 result = ntq_fsin(c, src[0]);
1265 break;
1266 case nir_op_fcos:
1267 result = ntq_fcos(c, src[0]);
1268 break;
1269
1270 case nir_op_fsign:
1271 result = ntq_fsign(c, src[0]);
1272 break;
1273
1274 case nir_op_fabs:
1275 result = qir_FMAXABS(c, src[0], src[0]);
1276 break;
1277 case nir_op_iabs:
1278 result = qir_MAX(c, src[0],
1279 qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
1280 break;
1281
1282 case nir_op_ibitfield_extract:
1283 result = ntq_emit_ibfe(c, src[0], src[1], src[2]);
1284 break;
1285
1286 case nir_op_ubitfield_extract:
1287 result = ntq_emit_ubfe(c, src[0], src[1], src[2]);
1288 break;
1289
1290 case nir_op_usadd_4x8:
1291 result = qir_V8ADDS(c, src[0], src[1]);
1292 break;
1293
1294 case nir_op_ussub_4x8:
1295 result = qir_V8SUBS(c, src[0], src[1]);
1296 break;
1297
1298 case nir_op_umin_4x8:
1299 result = qir_V8MIN(c, src[0], src[1]);
1300 break;
1301
1302 case nir_op_umax_4x8:
1303 result = qir_V8MAX(c, src[0], src[1]);
1304 break;
1305
1306 case nir_op_umul_unorm_4x8:
1307 result = qir_V8MULD(c, src[0], src[1]);
1308 break;
1309
1310 case nir_op_fddx:
1311 case nir_op_fddx_coarse:
1312 case nir_op_fddx_fine:
1313 result = ntq_fddx(c, src[0]);
1314 break;
1315
1316 case nir_op_fddy:
1317 case nir_op_fddy_coarse:
1318 case nir_op_fddy_fine:
1319 result = ntq_fddy(c, src[0]);
1320 break;
1321
1322 default:
1323 fprintf(stderr, "unknown NIR ALU inst: ");
1324 nir_print_instr(&instr->instr, stderr);
1325 fprintf(stderr, "\n");
1326 abort();
1327 }
1328
1329 /* We have a scalar result, so the instruction should only have a
1330 * single channel written to.
1331 */
1332 assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
1333 ntq_store_dest(c, &instr->dest.dest,
1334 ffs(instr->dest.write_mask) - 1, result);
1335 }
1336
1337 static void
1338 emit_frag_end(struct vc4_compile *c)
1339 {
1340 struct qreg color;
1341 if (c->output_color_index != -1) {
1342 color = c->outputs[c->output_color_index];
1343 } else {
1344 color = qir_uniform_ui(c, 0);
1345 }
1346
1347 uint32_t discard_cond = QPU_COND_ALWAYS;
1348 if (c->s->info.fs.uses_discard) {
1349 qir_SF(c, c->discard);
1350 discard_cond = QPU_COND_ZS;
1351 }
1352
1353 if (c->fs_key->stencil_enabled) {
1354 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1355 qir_uniform(c, QUNIFORM_STENCIL, 0));
1356 if (c->fs_key->stencil_twoside) {
1357 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1358 qir_uniform(c, QUNIFORM_STENCIL, 1));
1359 }
1360 if (c->fs_key->stencil_full_writemasks) {
1361 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1362 qir_uniform(c, QUNIFORM_STENCIL, 2));
1363 }
1364 }
1365
1366 if (c->output_sample_mask_index != -1) {
1367 qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1368 }
1369
1370 if (c->fs_key->depth_enabled) {
1371 if (c->output_position_index != -1) {
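                        /* gl_FragDepth is a [0, 1] float; scaling by 0xffffff
                         * converts it to the 24-bit integer value the TLB Z
                         * write expects (the same 24-bit scale used by
                         * ntq_scale_depth_texture()).
                         */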
1372 qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
1373 qir_FMUL(c,
1374 c->outputs[c->output_position_index],
1375 qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
1376 } else {
1377 qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
1378 qir_FRAG_Z(c))->cond = discard_cond;
1379 }
1380 }
1381
1382 if (!c->msaa_per_sample_output) {
1383 qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
1384 color)->cond = discard_cond;
1385 } else {
1386 for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
1387 qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
1388 c->sample_colors[i])->cond = discard_cond;
1389 }
1390 }
1391 }
1392
1393 static void
1394 emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
1395 {
1396 struct qreg packed = qir_get_temp(c);
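        /* Screen-space X and Y are converted to the hardware's 16-bit
         * fixed-point vertex format and packed into the low and high halves
         * of a single VPM word via the A_16A/A_16B pack modes, presumably
         * with the fixed-point scale already folded into the viewport scale
         * uniforms.
         */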
1397
1398 for (int i = 0; i < 2; i++) {
1399 struct qreg scale =
1400 qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
1401
1402 struct qreg packed_chan = packed;
1403 packed_chan.pack = QPU_PACK_A_16A + i;
1404
1405 qir_FTOI_dest(c, packed_chan,
1406 qir_FMUL(c,
1407 qir_FMUL(c,
1408 c->outputs[c->output_position_index + i],
1409 scale),
1410 rcp_w));
1411 }
1412
1413 qir_VPM_WRITE(c, packed);
1414 }
1415
1416 static void
1417 emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
1418 {
1419 struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
1420 struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
1421
1422 qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
1423 c->outputs[c->output_position_index + 2],
1424 zscale),
1425 rcp_w),
1426 zoffset));
1427 }
1428
1429 static void
1430 emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
1431 {
1432 qir_VPM_WRITE(c, rcp_w);
1433 }
1434
1435 static void
1436 emit_point_size_write(struct vc4_compile *c)
1437 {
1438 struct qreg point_size;
1439
1440 if (c->output_point_size_index != -1)
1441 point_size = c->outputs[c->output_point_size_index];
1442 else
1443 point_size = qir_uniform_f(c, 1.0);
1444
1445 qir_VPM_WRITE(c, point_size);
1446 }
1447
1448 /**
1449 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1450 *
1451 * The simulator insists that there be at least one vertex attribute, so
1452 * vc4_draw.c will emit one if it wouldn't have otherwise. The simulator also
1453 * insists that all vertex attributes loaded get read by the VS/CS, so we have
1454 * to consume it here.
1455 */
1456 static void
1457 emit_stub_vpm_read(struct vc4_compile *c)
1458 {
1459 if (c->num_inputs)
1460 return;
1461
1462 c->vattr_sizes[0] = 4;
1463 (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
1464 c->num_inputs++;
1465 }
1466
1467 static void
1468 emit_vert_end(struct vc4_compile *c,
1469 struct vc4_varying_slot *fs_inputs,
1470 uint32_t num_fs_inputs)
1471 {
1472 struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1473
1474 emit_stub_vpm_read(c);
1475
1476 emit_scaled_viewport_write(c, rcp_w);
1477 emit_zs_write(c, rcp_w);
1478 emit_rcp_wc_write(c, rcp_w);
1479 if (c->vs_key->per_vertex_point_size)
1480 emit_point_size_write(c);
1481
1482 for (int i = 0; i < num_fs_inputs; i++) {
1483 struct vc4_varying_slot *input = &fs_inputs[i];
1484 int j;
1485
1486 for (j = 0; j < c->num_outputs; j++) {
1487 struct vc4_varying_slot *output =
1488 &c->output_slots[j];
1489
1490 if (input->slot == output->slot &&
1491 input->swizzle == output->swizzle) {
1492 qir_VPM_WRITE(c, c->outputs[j]);
1493 break;
1494 }
1495 }
1496 /* Emit padding if we didn't find a declared VS output for
1497 * this FS input.
1498 */
1499 if (j == c->num_outputs)
1500 qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
1501 }
1502 }
1503
1504 static void
1505 emit_coord_end(struct vc4_compile *c)
1506 {
1507 struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1508
1509 emit_stub_vpm_read(c);
1510
1511 for (int i = 0; i < 4; i++)
1512 qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
1513
1514 emit_scaled_viewport_write(c, rcp_w);
1515 emit_zs_write(c, rcp_w);
1516 emit_rcp_wc_write(c, rcp_w);
1517 if (c->vs_key->per_vertex_point_size)
1518 emit_point_size_write(c);
1519 }
1520
1521 static void
1522 vc4_optimize_nir(struct nir_shader *s)
1523 {
1524 bool progress;
1525 unsigned lower_flrp =
1526 (s->options->lower_flrp16 ? 16 : 0) |
1527 (s->options->lower_flrp32 ? 32 : 0) |
1528 (s->options->lower_flrp64 ? 64 : 0);
1529
1530 do {
1531 progress = false;
1532
1533 NIR_PASS_V(s, nir_lower_vars_to_ssa);
1534 NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
1535 NIR_PASS(progress, s, nir_lower_phis_to_scalar);
1536 NIR_PASS(progress, s, nir_copy_prop);
1537 NIR_PASS(progress, s, nir_opt_remove_phis);
1538 NIR_PASS(progress, s, nir_opt_dce);
1539 NIR_PASS(progress, s, nir_opt_dead_cf);
1540 NIR_PASS(progress, s, nir_opt_cse);
1541 NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
1542 NIR_PASS(progress, s, nir_opt_algebraic);
1543 NIR_PASS(progress, s, nir_opt_constant_folding);
1544 if (lower_flrp != 0) {
1545 bool lower_flrp_progress = false;
1546
1547 NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
1548 lower_flrp,
1549 false /* always_precise */,
1550 s->options->lower_ffma);
1551 if (lower_flrp_progress) {
1552 NIR_PASS(progress, s, nir_opt_constant_folding);
1553 progress = true;
1554 }
1555
1556 /* Nothing should rematerialize any flrps, so we only
1557 * need to do this lowering once.
1558 */
1559 lower_flrp = 0;
1560 }
1561
1562 NIR_PASS(progress, s, nir_opt_undef);
1563 NIR_PASS(progress, s, nir_opt_loop_unroll,
1564 nir_var_shader_in |
1565 nir_var_shader_out |
1566 nir_var_function_temp);
1567 } while (progress);
1568 }
1569
1570 static int
1571 driver_location_compare(const void *in_a, const void *in_b)
1572 {
1573 const nir_variable *const *a = in_a;
1574 const nir_variable *const *b = in_b;
1575
1576 return (*a)->data.driver_location - (*b)->data.driver_location;
1577 }
1578
1579 static void
1580 ntq_setup_inputs(struct vc4_compile *c)
1581 {
1582 unsigned num_entries = 0;
1583 nir_foreach_shader_in_variable(var, c->s)
1584 num_entries++;
1585
1586 nir_variable *vars[num_entries];
1587
1588 unsigned i = 0;
1589 nir_foreach_shader_in_variable(var, c->s)
1590 vars[i++] = var;
1591
1592 /* Sort the variables so that we emit the input setup in
1593 * driver_location order. This is required for VPM reads, whose data
1594 * is fetched into the VPM in driver_location (TGSI register index)
1595 * order.
1596 */
1597 qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1598
1599 for (unsigned i = 0; i < num_entries; i++) {
1600 nir_variable *var = vars[i];
1601 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1602 unsigned loc = var->data.driver_location;
1603
1604 assert(array_len == 1);
1605 (void)array_len;
1606 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1607 (loc + 1) * 4);
1608
1609 if (c->stage == QSTAGE_FRAG) {
1610 if (var->data.location == VARYING_SLOT_POS) {
1611 emit_fragcoord_input(c, loc);
1612 } else if (util_varying_is_point_coord(var->data.location,
1613 c->fs_key->point_sprite_mask)) {
1614 c->inputs[loc * 4 + 0] = c->point_x;
1615 c->inputs[loc * 4 + 1] = c->point_y;
1616 } else {
1617 emit_fragment_input(c, loc, var->data.location);
1618 }
1619 } else {
1620 emit_vertex_input(c, loc);
1621 }
1622 }
1623 }
1624
1625 static void
1626 ntq_setup_outputs(struct vc4_compile *c)
1627 {
1628 nir_foreach_shader_out_variable(var, c->s) {
1629 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1630 unsigned loc = var->data.driver_location * 4;
1631
1632 assert(array_len == 1);
1633 (void)array_len;
1634
1635 for (int i = 0; i < 4; i++)
1636 add_output(c, loc + i, var->data.location, i);
1637
1638 if (c->stage == QSTAGE_FRAG) {
1639 switch (var->data.location) {
1640 case FRAG_RESULT_COLOR:
1641 case FRAG_RESULT_DATA0:
1642 c->output_color_index = loc;
1643 break;
1644 case FRAG_RESULT_DEPTH:
1645 c->output_position_index = loc;
1646 break;
1647 case FRAG_RESULT_SAMPLE_MASK:
1648 c->output_sample_mask_index = loc;
1649 break;
1650 }
1651 } else {
1652 switch (var->data.location) {
1653 case VARYING_SLOT_POS:
1654 c->output_position_index = loc;
1655 break;
1656 case VARYING_SLOT_PSIZ:
1657 c->output_point_size_index = loc;
1658 break;
1659 }
1660 }
1661 }
1662 }
1663
1664 /**
1665 * Sets up the mapping from nir_register to struct qreg *.
1666 *
1667 * Each nir_register gets a struct qreg per 32-bit component being stored.
1668 */
1669 static void
1670 ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
1671 {
1672 foreach_list_typed(nir_register, nir_reg, node, list) {
1673 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1674 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1675 array_len *
1676 nir_reg->num_components);
1677
1678 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1679
1680 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1681 qregs[i] = qir_get_temp(c);
1682 }
1683 }
1684
1685 static void
1686 ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
1687 {
1688 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1689 for (int i = 0; i < instr->def.num_components; i++)
1690 qregs[i] = qir_uniform_ui(c, instr->value[i].u32);
1691
1692 _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1693 }
1694
1695 static void
1696 ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
1697 {
1698 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1699
1700 /* QIR needs there to be *some* value, so pick 0 (same as for
1701          * ntq_setup_registers()).
1702 */
1703 for (int i = 0; i < instr->def.num_components; i++)
1704 qregs[i] = qir_uniform_ui(c, 0);
1705 }
1706
1707 static void
1708 ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr)
1709 {
1710 assert(nir_src_as_uint(instr->src[0]) == 0);
1711
1712 /* Reads of the per-sample color need to be done in
1713 * order.
1714 */
1715 int sample_index = (nir_intrinsic_base(instr) -
1716 VC4_NIR_TLB_COLOR_READ_INPUT);
1717 for (int i = 0; i <= sample_index; i++) {
1718 if (c->color_reads[i].file == QFILE_NULL) {
1719 c->color_reads[i] =
1720 qir_TLB_COLOR_READ(c);
1721 }
1722 }
1723 ntq_store_dest(c, &instr->dest, 0,
1724 qir_MOV(c, c->color_reads[sample_index]));
1725 }
1726
1727 static void
1728 ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr)
1729 {
1730 assert(instr->num_components == 1);
1731 assert(nir_src_is_const(instr->src[0]) &&
1732 "vc4 doesn't support indirect inputs");
1733
1734 if (c->stage == QSTAGE_FRAG &&
1735 nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
1736 ntq_emit_color_read(c, instr);
1737 return;
1738 }
1739
1740 uint32_t offset = nir_intrinsic_base(instr) +
1741 nir_src_as_uint(instr->src[0]);
1742 int comp = nir_intrinsic_component(instr);
1743 ntq_store_dest(c, &instr->dest, 0,
1744 qir_MOV(c, c->inputs[offset * 4 + comp]));
1745 }
1746
1747 static void
1748 ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
1749 {
1750 unsigned offset;
1751
1752 switch (instr->intrinsic) {
1753 case nir_intrinsic_load_uniform:
1754 assert(instr->num_components == 1);
1755 if (nir_src_is_const(instr->src[0])) {
1756 offset = nir_intrinsic_base(instr) +
1757 nir_src_as_uint(instr->src[0]);
1758 assert(offset % 4 == 0);
1759                         /* The uniform index below is in dwords, so convert the byte offset. */
1760 offset = offset / 4;
1761 ntq_store_dest(c, &instr->dest, 0,
1762 qir_uniform(c, QUNIFORM_UNIFORM,
1763 offset));
1764 } else {
1765 ntq_store_dest(c, &instr->dest, 0,
1766 indirect_uniform_load(c, instr));
1767 }
1768 break;
1769
1770 case nir_intrinsic_load_ubo:
1771 assert(instr->num_components == 1);
1772 ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr));
1773 break;
1774
1775 case nir_intrinsic_load_user_clip_plane:
1776 for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) {
1777 ntq_store_dest(c, &instr->dest, i,
1778 qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1779 nir_intrinsic_ucp_id(instr) *
1780 4 + i));
1781 }
1782 break;
1783
1784 case nir_intrinsic_load_blend_const_color_r_float:
1785 case nir_intrinsic_load_blend_const_color_g_float:
1786 case nir_intrinsic_load_blend_const_color_b_float:
1787 case nir_intrinsic_load_blend_const_color_a_float:
1788 ntq_store_dest(c, &instr->dest, 0,
1789 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X +
1790 (instr->intrinsic -
1791 nir_intrinsic_load_blend_const_color_r_float),
1792 0));
1793 break;
1794
1795 case nir_intrinsic_load_blend_const_color_rgba8888_unorm:
1796 ntq_store_dest(c, &instr->dest, 0,
1797 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA,
1798 0));
1799 break;
1800
1801 case nir_intrinsic_load_blend_const_color_aaaa8888_unorm:
1802 ntq_store_dest(c, &instr->dest, 0,
1803 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA,
1804 0));
1805 break;
1806
1807 case nir_intrinsic_load_alpha_ref_float:
1808 ntq_store_dest(c, &instr->dest, 0,
1809 qir_uniform(c, QUNIFORM_ALPHA_REF, 0));
1810 break;
1811
1812 case nir_intrinsic_load_sample_mask_in:
1813 ntq_store_dest(c, &instr->dest, 0,
1814 qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
1815 break;
1816
1817 case nir_intrinsic_load_front_face:
1818 /* The register contains 0 (front) or 1 (back), and we need to
1819 * turn it into a NIR bool where true means front.
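                 * Adding -1 does that: 0 wraps to ~0 (true) and 1 becomes 0
                 * (false), without needing an explicit comparison.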
1820 */
1821 ntq_store_dest(c, &instr->dest, 0,
1822 qir_ADD(c,
1823 qir_uniform_ui(c, -1),
1824 qir_reg(QFILE_FRAG_REV_FLAG, 0)));
1825 break;
1826
1827 case nir_intrinsic_load_input:
1828 ntq_emit_load_input(c, instr);
1829 break;
1830
1831 case nir_intrinsic_store_output:
1832 assert(nir_src_is_const(instr->src[1]) &&
1833 "vc4 doesn't support indirect outputs");
1834 offset = nir_intrinsic_base(instr) +
1835 nir_src_as_uint(instr->src[1]);
1836
1837 /* MSAA color outputs are the only case where we have an
1838 * output that's not lowered to being a store of a single 32
1839 * bit value.
1840 */
1841 if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
1842 assert(offset == c->output_color_index);
1843 for (int i = 0; i < 4; i++) {
1844 c->sample_colors[i] =
1845 qir_MOV(c, ntq_get_src(c, instr->src[0],
1846 i));
1847 }
1848 } else {
1849 offset = offset * 4 + nir_intrinsic_component(instr);
1850 assert(instr->num_components == 1);
1851 c->outputs[offset] =
1852 qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
1853 c->num_outputs = MAX2(c->num_outputs, offset + 1);
1854 }
1855 break;
1856
1857 case nir_intrinsic_discard:
1858 if (c->execute.file != QFILE_NULL) {
1859 qir_SF(c, c->execute);
1860 qir_MOV_cond(c, QPU_COND_ZS, c->discard,
1861 qir_uniform_ui(c, ~0));
1862 } else {
1863 qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0));
1864 }
1865 break;
1866
1867 case nir_intrinsic_discard_if: {
1868 /* true (~0) if we're discarding */
1869 struct qreg cond = ntq_get_src(c, instr->src[0], 0);
1870
1871 if (c->execute.file != QFILE_NULL) {
1872 /* execute == 0 means the channel is active. Invert
1873 * the condition so that we can use zero as "executing
1874 * and discarding."
1875 */
1876 qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond)));
1877 qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond);
1878 } else {
1879 qir_OR_dest(c, c->discard, c->discard,
1880 ntq_get_src(c, instr->src[0], 0));
1881 }
1882
1883 break;
1884 }
1885
1886 default:
1887 fprintf(stderr, "Unknown intrinsic: ");
1888 nir_print_instr(&instr->instr, stderr);
1889 fprintf(stderr, "\n");
1890 break;
1891 }
1892 }
1893
1894 /* Clears (activates) the execute flags for any channels whose jump target
1895 * matches this block.
1896 */
1897 static void
1898 ntq_activate_execute_for_block(struct vc4_compile *c)
1899 {
1900 qir_SF(c, qir_SUB(c,
1901 c->execute,
1902 qir_uniform_ui(c, c->cur_block->index)));
1903 qir_MOV_cond(c, QPU_COND_ZS, c->execute, qir_uniform_ui(c, 0));
1904 }
1905
1906 static void
1907 ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
1908 {
1909 if (!c->vc4->screen->has_control_flow) {
1910 fprintf(stderr,
1911 "IF statement support requires updated kernel.\n");
1912 return;
1913 }
1914
1915 nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
1916 bool empty_else_block =
1917 (nir_else_block == nir_if_last_else_block(if_stmt) &&
1918 exec_list_is_empty(&nir_else_block->instr_list));
1919
1920 struct qblock *then_block = qir_new_block(c);
1921 struct qblock *after_block = qir_new_block(c);
1922 struct qblock *else_block;
1923 if (empty_else_block)
1924 else_block = after_block;
1925 else
1926 else_block = qir_new_block(c);
1927
1928 bool was_top_level = false;
1929 if (c->execute.file == QFILE_NULL) {
1930 c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
1931 was_top_level = true;
1932 }
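        /* Convention: execute == 0 means the channel is active in the
         * current block; any other value is the index of the block the
         * channel is waiting to resume at.
         */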
1933
1934 /* Set ZS for the channels that are executing (execute == 0) but
1935 * failed the condition (if->condition == 0), then point their
1936 * execute flags at the ELSE block.
1937 */
1938 qir_SF(c, qir_OR(c,
1939 c->execute,
1940 ntq_get_src(c, if_stmt->condition, 0)));
1941 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1942 qir_uniform_ui(c, else_block->index));
1943
1944 /* Jump to ELSE if nothing is active for THEN, otherwise fall
1945 * through.
1946 */
1947 qir_SF(c, c->execute);
1948 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZC);
1949 qir_link_blocks(c->cur_block, else_block);
1950 qir_link_blocks(c->cur_block, then_block);
1951
1952 /* Process the THEN block. */
1953 qir_set_emit_block(c, then_block);
1954 ntq_emit_cf_list(c, &if_stmt->then_list);
1955
1956 if (!empty_else_block) {
1957 /* Handle the end of the THEN block. First, all currently
1958 * active channels update their execute flags to point to
1959 * ENDIF
1960 */
1961 qir_SF(c, c->execute);
1962 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1963 qir_uniform_ui(c, after_block->index));
1964
1965 /* If everything points at ENDIF, then jump there immediately. */
1966 qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, after_block->index)));
1967 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
1968 qir_link_blocks(c->cur_block, after_block);
1969 qir_link_blocks(c->cur_block, else_block);
1970
1971 qir_set_emit_block(c, else_block);
1972 ntq_activate_execute_for_block(c);
1973 ntq_emit_cf_list(c, &if_stmt->else_list);
1974 }
1975
1976 qir_link_blocks(c->cur_block, after_block);
1977
1978 qir_set_emit_block(c, after_block);
1979 if (was_top_level) {
1980 c->execute = c->undef;
1981 c->last_top_block = c->cur_block;
1982 } else {
1983 ntq_activate_execute_for_block(c);
1984 }
1985 }
1986
1987 static void
1988 ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
1989 {
1990 struct qblock *jump_block;
1991 switch (jump->type) {
1992 case nir_jump_break:
1993 jump_block = c->loop_break_block;
1994 break;
1995 case nir_jump_continue:
1996 jump_block = c->loop_cont_block;
1997 break;
1998 default:
1999 unreachable("Unsupported jump type\n");
2000 }
2001
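/* Deactivate the currently active channels (execute == 0) by pointing
 * their execute flags at the break/continue target block.
 */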
2002 qir_SF(c, c->execute);
2003 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
2004 qir_uniform_ui(c, jump_block->index));
2005
2006 /* Jump to the destination block if everyone has taken the jump. */
2007 qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index)));
2008 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
2009 struct qblock *new_block = qir_new_block(c);
2010 qir_link_blocks(c->cur_block, jump_block);
2011 qir_link_blocks(c->cur_block, new_block);
2012 qir_set_emit_block(c, new_block);
2013 }
2014
2015 static void
2016 ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
2017 {
2018 switch (instr->type) {
2019 case nir_instr_type_alu:
2020 ntq_emit_alu(c, nir_instr_as_alu(instr));
2021 break;
2022
2023 case nir_instr_type_intrinsic:
2024 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
2025 break;
2026
2027 case nir_instr_type_load_const:
2028 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
2029 break;
2030
2031 case nir_instr_type_ssa_undef:
2032 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
2033 break;
2034
2035 case nir_instr_type_tex:
2036 ntq_emit_tex(c, nir_instr_as_tex(instr));
2037 break;
2038
2039 case nir_instr_type_jump:
2040 ntq_emit_jump(c, nir_instr_as_jump(instr));
2041 break;
2042
2043 default:
2044 fprintf(stderr, "Unknown NIR instr type: ");
2045 nir_print_instr(instr, stderr);
2046 fprintf(stderr, "\n");
2047 abort();
2048 }
2049 }
2050
2051 static void
2052 ntq_emit_block(struct vc4_compile *c, nir_block *block)
2053 {
2054 nir_foreach_instr(instr, block) {
2055 ntq_emit_instr(c, instr);
2056 }
2057 }
2058
2059 static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
2060
2061 static void
2062 ntq_emit_loop(struct vc4_compile *c, nir_loop *loop)
2063 {
2064 if (!c->vc4->screen->has_control_flow) {
2065 fprintf(stderr,
2066 "loop support requires updated kernel.\n");
2067 ntq_emit_cf_list(c, &loop->body);
2068 return;
2069 }
2070
2071 bool was_top_level = false;
2072 if (c->execute.file == QFILE_NULL) {
2073 c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
2074 was_top_level = true;
2075 }
2076
2077 struct qblock *save_loop_cont_block = c->loop_cont_block;
2078 struct qblock *save_loop_break_block = c->loop_break_block;
2079
2080 c->loop_cont_block = qir_new_block(c);
2081 c->loop_break_block = qir_new_block(c);
2082
2083 qir_link_blocks(c->cur_block, c->loop_cont_block);
2084 qir_set_emit_block(c, c->loop_cont_block);
2085 ntq_activate_execute_for_block(c);
2086
2087 ntq_emit_cf_list(c, &loop->body);
2088
2089 /* If anything had explicitly continued, or is here at the end of the
2090 * loop, then we need to loop again. SF updates are masked by the
2091 * instruction's condition, so we can do the OR of the two conditions
2092 * within SF.
2093 */
2094 qir_SF(c, c->execute);
2095 struct qinst *cont_check =
2096 qir_SUB_dest(c,
2097 c->undef,
2098 c->execute,
2099 qir_uniform_ui(c, c->loop_cont_block->index));
2100 cont_check->cond = QPU_COND_ZC;
2101 cont_check->sf = true;
2102
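/* Loop again if any channel is still active at the end of the loop or
 * is waiting on the continue block (either case leaves ZS set).
 */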
2103 qir_BRANCH(c, QPU_COND_BRANCH_ANY_ZS);
2104 qir_link_blocks(c->cur_block, c->loop_cont_block);
2105 qir_link_blocks(c->cur_block, c->loop_break_block);
2106
2107 qir_set_emit_block(c, c->loop_break_block);
2108 if (was_top_level) {
2109 c->execute = c->undef;
2110 c->last_top_block = c->cur_block;
2111 } else {
2112 ntq_activate_execute_for_block(c);
2113 }
2114
2115 c->loop_break_block = save_loop_break_block;
2116 c->loop_cont_block = save_loop_cont_block;
2117 }
2118
2119 static void
2120 ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
2121 {
2122 fprintf(stderr, "FUNCTIONS not handled.\n");
2123 abort();
2124 }
2125
2126 static void
2127 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
2128 {
2129 foreach_list_typed(nir_cf_node, node, node, list) {
2130 switch (node->type) {
2131 case nir_cf_node_block:
2132 ntq_emit_block(c, nir_cf_node_as_block(node));
2133 break;
2134
2135 case nir_cf_node_if:
2136 ntq_emit_if(c, nir_cf_node_as_if(node));
2137 break;
2138
2139 case nir_cf_node_loop:
2140 ntq_emit_loop(c, nir_cf_node_as_loop(node));
2141 break;
2142
2143 case nir_cf_node_function:
2144 ntq_emit_function(c, nir_cf_node_as_function(node));
2145 break;
2146
2147 default:
2148 fprintf(stderr, "Unknown NIR node type\n");
2149 abort();
2150 }
2151 }
2152 }
2153
2154 static void
2155 ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
2156 {
2157 ntq_setup_registers(c, &impl->registers);
2158 ntq_emit_cf_list(c, &impl->body);
2159 }
2160
2161 static void
2162 nir_to_qir(struct vc4_compile *c)
2163 {
2164 if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard)
2165 c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
2166
2167 ntq_setup_inputs(c);
2168 ntq_setup_outputs(c);
2169
2170 /* Find the main function and emit the body. */
2171 nir_foreach_function(function, c->s) {
2172 assert(strcmp(function->name, "main") == 0);
2173 assert(function->impl);
2174 ntq_emit_impl(c, function->impl);
2175 }
2176 }
2177
2178 static const nir_shader_compiler_options nir_options = {
2179 .lower_all_io_to_temps = true,
2180 .lower_extract_byte = true,
2181 .lower_extract_word = true,
2182 .lower_fdiv = true,
2183 .lower_ffma = true,
2184 .lower_flrp32 = true,
2185 .lower_fmod = true,
2186 .lower_fpow = true,
2187 .lower_fsat = true,
2188 .lower_fsqrt = true,
2189 .lower_ldexp = true,
2190 .lower_negate = true,
2191 .lower_rotate = true,
2192 .lower_to_scalar = true,
2193 .max_unroll_iterations = 32,
2194 };
2195
2196 const void *
2197 vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
2198 enum pipe_shader_ir ir,
2199 enum pipe_shader_type shader)
2200 {
2201 return &nir_options;
2202 }
2203
2204 static int
2205 count_nir_instrs(nir_shader *nir)
2206 {
2207 int count = 0;
2208 nir_foreach_function(function, nir) {
2209 if (!function->impl)
2210 continue;
2211 nir_foreach_block(block, function->impl) {
2212 nir_foreach_instr(instr, block)
2213 count++;
2214 }
2215 }
2216 return count;
2217 }
2218
2219 static struct vc4_compile *
2220 vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
2221 struct vc4_key *key, bool fs_threaded)
2222 {
2223 struct vc4_compile *c = qir_compile_init();
2224
2225 c->vc4 = vc4;
2226 c->stage = stage;
2227 c->shader_state = &key->shader_state->base;
2228 c->program_id = key->shader_state->program_id;
2229 c->variant_id =
2230 p_atomic_inc_return(&key->shader_state->compiled_variant_count);
2231 c->fs_threaded = fs_threaded;
2232
2233 c->key = key;
2234 switch (stage) {
2235 case QSTAGE_FRAG:
2236 c->fs_key = (struct vc4_fs_key *)key;
2237 if (c->fs_key->is_points) {
2238 c->point_x = emit_fragment_varying(c, ~0, 0);
2239 c->point_y = emit_fragment_varying(c, ~0, 0);
2240 } else if (c->fs_key->is_lines) {
2241 c->line_x = emit_fragment_varying(c, ~0, 0);
2242 }
2243 break;
2244 case QSTAGE_VERT:
2245 c->vs_key = (struct vc4_vs_key *)key;
2246 break;
2247 case QSTAGE_COORD:
2248 c->vs_key = (struct vc4_vs_key *)key;
2249 break;
2250 }
2251
2252 c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
2253
2254 if (stage == QSTAGE_FRAG) {
2255 if (c->fs_key->alpha_test_func != COMPARE_FUNC_ALWAYS) {
2256 NIR_PASS_V(c->s, nir_lower_alpha_test,
2257 c->fs_key->alpha_test_func,
2258 c->fs_key->sample_alpha_to_one &&
2259 c->fs_key->msaa,
2260 NULL);
2261 }
2262 NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
2263 }
2264
2265 struct nir_lower_tex_options tex_options = {
2266 /* Leave RECT textures alone: lowering them would require
2267 * implementing txs, plus int/float conversions we don't want.
2268 */
2269 .lower_rect = false,
2270
2271 .lower_txp = ~0,
2272
2273 /* Apply swizzles to all samplers. */
2274 .swizzle_result = ~0,
2275 };
2276
2277 /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
2278 * The format swizzling applies before sRGB decode, and
2279 * ARB_texture_swizzle is the last thing before returning the sample.
2280 */
2281 for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
2282 enum pipe_format format = c->key->tex[i].format;
2283
2284 if (!format)
2285 continue;
2286
2287 const uint8_t *format_swizzle = vc4_get_format_swizzle(format);
2288
2289 for (int j = 0; j < 4; j++) {
2290 uint8_t arb_swiz = c->key->tex[i].swizzle[j];
2291
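/* Swizzles 0-3 select a channel and are composed with the format
 * swizzle; the constant 0/1 swizzles pass through unchanged.
 */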
2292 if (arb_swiz <= 3) {
2293 tex_options.swizzles[i][j] =
2294 format_swizzle[arb_swiz];
2295 } else {
2296 tex_options.swizzles[i][j] = arb_swiz;
2297 }
2298 }
2299
2300 if (util_format_is_srgb(format))
2301 tex_options.lower_srgb |= (1 << i);
2302 }
2303
2304 NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
2305
2306 if (c->fs_key && c->fs_key->light_twoside)
2307 NIR_PASS_V(c->s, nir_lower_two_sided_color, true);
2308
2309 if (c->vs_key && c->vs_key->clamp_color)
2310 NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
2311
2312 if (c->key->ucp_enables) {
2313 if (stage == QSTAGE_FRAG) {
2314 NIR_PASS_V(c->s, nir_lower_clip_fs,
2315 c->key->ucp_enables, false);
2316 } else {
2317 NIR_PASS_V(c->s, nir_lower_clip_vs,
2318 c->key->ucp_enables, false, false, NULL);
2319 NIR_PASS_V(c->s, nir_lower_io_to_scalar,
2320 nir_var_shader_out);
2321 }
2322 }
2323
2324 /* FS input scalarizing must happen after nir_lower_two_sided_color,
2325 * which only handles a vec4 at a time. Similarly, VS output
2326 * scalarizing must happen after nir_lower_clip_vs.
2327 */
2328 if (c->stage == QSTAGE_FRAG)
2329 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
2330 else
2331 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
2332
2333 NIR_PASS_V(c->s, vc4_nir_lower_io, c);
2334 NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
2335 NIR_PASS_V(c->s, nir_lower_idiv, nir_lower_idiv_fast);
2336
2337 vc4_optimize_nir(c->s);
2338
2339 /* Do late algebraic optimization to turn add(a, neg(b)) back into
2340 * subs, then the mandatory cleanup after algebraic. Note that it may
2341 * produce fnegs, and if so then we need to keep running to squash
2342 * fneg(fneg(a)).
2343 */
2344 bool more_late_algebraic = true;
2345 while (more_late_algebraic) {
2346 more_late_algebraic = false;
2347 NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
2348 NIR_PASS_V(c->s, nir_opt_constant_folding);
2349 NIR_PASS_V(c->s, nir_copy_prop);
2350 NIR_PASS_V(c->s, nir_opt_dce);
2351 NIR_PASS_V(c->s, nir_opt_cse);
2352 }
2353
2354 NIR_PASS_V(c->s, nir_lower_bool_to_int32);
2355
2356 NIR_PASS_V(c->s, nir_convert_from_ssa, true);
2357
2358 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2359 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
2360 qir_get_stage_name(c->stage),
2361 c->program_id, c->variant_id,
2362 count_nir_instrs(c->s));
2363 }
2364
2365 if (vc4_debug & VC4_DEBUG_NIR) {
2366 fprintf(stderr, "%s prog %d/%d NIR:\n",
2367 qir_get_stage_name(c->stage),
2368 c->program_id, c->variant_id);
2369 nir_print_shader(c->s, stderr);
2370 }
2371
2372 nir_to_qir(c);
2373
2374 switch (stage) {
2375 case QSTAGE_FRAG:
2376 /* FS threading requires that the thread execute
2377 * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating
2378 * (with no other THRSW afterwards, obviously). If we didn't
2379 * fetch a texture in a top-level block, this wouldn't be
2380 * true.
2381 */
2382 if (c->fs_threaded && !c->last_thrsw_at_top_level) {
2383 c->failed = true;
2384 return c;
2385 }
2386
2387 emit_frag_end(c);
2388 break;
2389 case QSTAGE_VERT:
2390 emit_vert_end(c,
2391 c->vs_key->fs_inputs->input_slots,
2392 c->vs_key->fs_inputs->num_inputs);
2393 break;
2394 case QSTAGE_COORD:
2395 emit_coord_end(c);
2396 break;
2397 }
2398
2399 if (vc4_debug & VC4_DEBUG_QIR) {
2400 fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
2401 qir_get_stage_name(c->stage),
2402 c->program_id, c->variant_id);
2403 qir_dump(c);
2404 fprintf(stderr, "\n");
2405 }
2406
2407 qir_optimize(c);
2408 qir_lower_uniforms(c);
2409
2410 qir_schedule_instructions(c);
2411 qir_emit_uniform_stream_resets(c);
2412
2413 if (vc4_debug & VC4_DEBUG_QIR) {
2414 fprintf(stderr, "%s prog %d/%d QIR:\n",
2415 qir_get_stage_name(c->stage),
2416 c->program_id, c->variant_id);
2417 qir_dump(c);
2418 fprintf(stderr, "\n");
2419 }
2420
2421 qir_reorder_uniforms(c);
2422 vc4_generate_code(vc4, c);
2423
2424 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2425 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
2426 qir_get_stage_name(c->stage),
2427 c->program_id, c->variant_id,
2428 c->qpu_inst_count);
2429 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
2430 qir_get_stage_name(c->stage),
2431 c->program_id, c->variant_id,
2432 c->num_uniforms);
2433 }
2434
2435 ralloc_free(c->s);
2436
2437 return c;
2438 }
2439
2440 static void *
2441 vc4_shader_state_create(struct pipe_context *pctx,
2442 const struct pipe_shader_state *cso)
2443 {
2444 struct vc4_context *vc4 = vc4_context(pctx);
2445 struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
2446 if (!so)
2447 return NULL;
2448
2449 so->program_id = vc4->next_uncompiled_program_id++;
2450
2451 nir_shader *s;
2452
2453 if (cso->type == PIPE_SHADER_IR_NIR) {
2454 /* The backend takes ownership of the NIR shader on state
2455 * creation.
2456 */
2457 s = cso->ir.nir;
2458 } else {
2459 assert(cso->type == PIPE_SHADER_IR_TGSI);
2460
2461 if (vc4_debug & VC4_DEBUG_TGSI) {
2462 fprintf(stderr, "prog %d TGSI:\n",
2463 so->program_id);
2464 tgsi_dump(cso->tokens, 0);
2465 fprintf(stderr, "\n");
2466 }
2467 s = tgsi_to_nir(cso->tokens, pctx->screen, false);
2468 }
2469
2470 if (s->info.stage == MESA_SHADER_VERTEX)
2471 NIR_PASS_V(s, nir_lower_point_size, 1.0f, 0.0f);
2472
2473 NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
2474 type_size, (nir_lower_io_options)0);
2475
2476 NIR_PASS_V(s, nir_lower_regs_to_ssa);
2477 NIR_PASS_V(s, nir_normalize_cubemap_coords);
2478
2479 NIR_PASS_V(s, nir_lower_load_const_to_scalar);
2480
2481 vc4_optimize_nir(s);
2482
2483 NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
2484
2485 /* Garbage collect dead instructions */
2486 nir_sweep(s);
2487
2488 so->base.type = PIPE_SHADER_IR_NIR;
2489 so->base.ir.nir = s;
2490
2491 if (vc4_debug & VC4_DEBUG_NIR) {
2492 fprintf(stderr, "%s prog %d NIR:\n",
2493 gl_shader_stage_name(s->info.stage),
2494 so->program_id);
2495 nir_print_shader(s, stderr);
2496 fprintf(stderr, "\n");
2497 }
2498
2499 return so;
2500 }
2501
2502 static void
2503 copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
2504 struct vc4_compile *c)
2505 {
2506 int count = c->num_uniforms;
2507 struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
2508
2509 uinfo->count = count;
2510 uinfo->data = ralloc_array(shader, uint32_t, count);
2511 memcpy(uinfo->data, c->uniform_data,
2512 count * sizeof(*uinfo->data));
2513 uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
2514 memcpy(uinfo->contents, c->uniform_contents,
2515 count * sizeof(*uinfo->contents));
2516 uinfo->num_texture_samples = c->num_texture_samples;
2517
2518 vc4_set_shader_uniform_dirty_flags(shader);
2519 }
2520
2521 static void
2522 vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c,
2523 struct vc4_compiled_shader *shader)
2524 {
2525 struct vc4_fs_inputs inputs;
2526
2527 memset(&inputs, 0, sizeof(inputs));
2528 inputs.input_slots = ralloc_array(shader,
2529 struct vc4_varying_slot,
2530 c->num_input_slots);
2531
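/* Scan the QIR for which varying slots are actually read, so unused
 * varyings don't end up in the compiled FS input list.
 */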
2532 bool input_live[c->num_input_slots];
2533
2534 memset(input_live, 0, sizeof(input_live));
2535 qir_for_each_inst_inorder(inst, c) {
2536 for (int i = 0; i < qir_get_nsrc(inst); i++) {
2537 if (inst->src[i].file == QFILE_VARY)
2538 input_live[inst->src[i].index] = true;
2539 }
2540 }
2541
2542 for (int i = 0; i < c->num_input_slots; i++) {
2543 struct vc4_varying_slot *slot = &c->input_slots[i];
2544
2545 if (!input_live[i])
2546 continue;
2547
2548 /* Skip non-VS-output inputs. */
2549 if (slot->slot == (uint8_t)~0)
2550 continue;
2551
2552 if (slot->slot == VARYING_SLOT_COL0 ||
2553 slot->slot == VARYING_SLOT_COL1 ||
2554 slot->slot == VARYING_SLOT_BFC0 ||
2555 slot->slot == VARYING_SLOT_BFC1) {
2556 shader->color_inputs |= (1 << inputs.num_inputs);
2557 }
2558
2559 inputs.input_slots[inputs.num_inputs] = *slot;
2560 inputs.num_inputs++;
2561 }
2562 shader->num_inputs = inputs.num_inputs;
2563
2564 /* Add our set of inputs to the set of all inputs seen. This way, a
2565 * single pointer identifies a given set of FS inputs, so the VS
2566 * doesn't have to be recompiled when the FS is recompiled (or a new
2567 * one is bound using separate shader objects) but its inputs don't
2568 * change.
2569 */
2570 struct set_entry *entry = _mesa_set_search(vc4->fs_inputs_set, &inputs);
2571 if (entry) {
2572 shader->fs_inputs = entry->key;
2573 ralloc_free(inputs.input_slots);
2574 } else {
2575 struct vc4_fs_inputs *alloc_inputs;
2576
2577 alloc_inputs = rzalloc(vc4->fs_inputs_set, struct vc4_fs_inputs);
2578 memcpy(alloc_inputs, &inputs, sizeof(inputs));
2579 ralloc_steal(alloc_inputs, inputs.input_slots);
2580 _mesa_set_add(vc4->fs_inputs_set, alloc_inputs);
2581
2582 shader->fs_inputs = alloc_inputs;
2583 }
2584 }
2585
2586 static struct vc4_compiled_shader *
2587 vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
2588 struct vc4_key *key)
2589 {
2590 struct hash_table *ht;
2591 uint32_t key_size;
2592 bool try_threading;
2593
2594 if (stage == QSTAGE_FRAG) {
2595 ht = vc4->fs_cache;
2596 key_size = sizeof(struct vc4_fs_key);
2597 try_threading = vc4->screen->has_threaded_fs;
2598 } else {
2599 ht = vc4->vs_cache;
2600 key_size = sizeof(struct vc4_vs_key);
2601 try_threading = false;
2602 }
2603
2604 struct vc4_compiled_shader *shader;
2605 struct hash_entry *entry = _mesa_hash_table_search(ht, key);
2606 if (entry)
2607 return entry->data;
2608
2609 struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading);
2610 /* If the FS failed to compile threaded, fall back to single threaded. */
2611 if (try_threading && c->failed) {
2612 qir_compile_destroy(c);
2613 c = vc4_shader_ntq(vc4, stage, key, false);
2614 }
2615
2616 shader = rzalloc(NULL, struct vc4_compiled_shader);
2617
2618 shader->program_id = vc4->next_compiled_program_id++;
2619 if (stage == QSTAGE_FRAG) {
2620 vc4_setup_compiled_fs_inputs(vc4, c, shader);
2621
2622 /* Note: the temporary clone in c->s has been freed. */
2623 nir_shader *orig_shader = key->shader_state->base.ir.nir;
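/* If the FS writes gl_FragDepth, early Z would test against the wrong
 * depth value, so it has to be disabled.
 */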
2624 if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH))
2625 shader->disable_early_z = true;
2626 } else {
2627 shader->num_inputs = c->num_inputs;
2628
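/* vattr_offsets is a running sum of the attribute sizes, giving each
 * vertex attribute's offset into the fetched vertex data.
 */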
2629 shader->vattr_offsets[0] = 0;
2630 for (int i = 0; i < 8; i++) {
2631 shader->vattr_offsets[i + 1] =
2632 shader->vattr_offsets[i] + c->vattr_sizes[i];
2633
2634 if (c->vattr_sizes[i])
2635 shader->vattrs_live |= (1 << i);
2636 }
2637 }
2638
2639 shader->failed = c->failed;
2640 if (c->failed) {
2641 shader->failed = true;
2642 } else {
2643 copy_uniform_state_to_shader(shader, c);
2644 shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
2645 c->qpu_inst_count *
2646 sizeof(uint64_t));
2647 }
2648
2649 shader->fs_threaded = c->fs_threaded;
2650
2651 if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) {
2652 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n",
2653 qir_get_stage_name(c->stage),
2654 c->program_id, c->variant_id,
2655 1 + shader->fs_threaded);
2656 }
2657
2658 qir_compile_destroy(c);
2659
2660 struct vc4_key *dup_key;
2661 dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */
2662 memcpy(dup_key, key, key_size);
2663 _mesa_hash_table_insert(ht, dup_key, shader);
2664
2665 return shader;
2666 }
2667
2668 static void
2669 vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
2670 struct vc4_texture_stateobj *texstate)
2671 {
2672 for (int i = 0; i < texstate->num_textures; i++) {
2673 struct pipe_sampler_view *sampler = texstate->textures[i];
2674 struct vc4_sampler_view *vc4_sampler = vc4_sampler_view(sampler);
2675 struct pipe_sampler_state *sampler_state =
2676 texstate->samplers[i];
2677
2678 if (!sampler)
2679 continue;
2680
2681 key->tex[i].format = sampler->format;
2682 key->tex[i].swizzle[0] = sampler->swizzle_r;
2683 key->tex[i].swizzle[1] = sampler->swizzle_g;
2684 key->tex[i].swizzle[2] = sampler->swizzle_b;
2685 key->tex[i].swizzle[3] = sampler->swizzle_a;
2686
2687 if (sampler->texture->nr_samples > 1) {
2688 key->tex[i].msaa_width = sampler->texture->width0;
2689 key->tex[i].msaa_height = sampler->texture->height0;
2690 } else if (sampler) {
2691 key->tex[i].compare_mode = sampler_state->compare_mode;
2692 key->tex[i].compare_func = sampler_state->compare_func;
2693 key->tex[i].wrap_s = sampler_state->wrap_s;
2694 key->tex[i].wrap_t = sampler_state->wrap_t;
2695 key->tex[i].force_first_level =
2696 vc4_sampler->force_first_level;
2697 }
2698 }
2699
2700 key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
2701 }
2702
2703 static void
2704 vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
2705 {
2706 struct vc4_job *job = vc4->job;
2707 struct vc4_fs_key local_key;
2708 struct vc4_fs_key *key = &local_key;
2709
2710 if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2711 VC4_DIRTY_BLEND |
2712 VC4_DIRTY_FRAMEBUFFER |
2713 VC4_DIRTY_ZSA |
2714 VC4_DIRTY_RASTERIZER |
2715 VC4_DIRTY_SAMPLE_MASK |
2716 VC4_DIRTY_FRAGTEX |
2717 VC4_DIRTY_UNCOMPILED_FS |
2718 VC4_DIRTY_UBO_1_SIZE))) {
2719 return;
2720 }
2721
2722 memset(key, 0, sizeof(*key));
2723 vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
2724 key->base.shader_state = vc4->prog.bind_fs;
2725 key->is_points = (prim_mode == PIPE_PRIM_POINTS);
2726 key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
2727 prim_mode <= PIPE_PRIM_LINE_STRIP);
2728 key->blend = vc4->blend->rt[0];
2729 if (vc4->blend->logicop_enable) {
2730 key->logicop_func = vc4->blend->logicop_func;
2731 } else {
2732 key->logicop_func = PIPE_LOGICOP_COPY;
2733 }
2734 if (job->msaa) {
2735 key->msaa = vc4->rasterizer->base.multisample;
2736 key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
2737 key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
2738 key->sample_alpha_to_one = vc4->blend->alpha_to_one;
2739 }
2740
2741 if (vc4->framebuffer.cbufs[0])
2742 key->color_format = vc4->framebuffer.cbufs[0]->format;
2743
2744 key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
2745 key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
2746 key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
2747 key->depth_enabled = (vc4->zsa->base.depth.enabled ||
2748 key->stencil_enabled);
2749 if (vc4->zsa->base.alpha.enabled)
2750 key->alpha_test_func = vc4->zsa->base.alpha.func;
2751 else
2752 key->alpha_test_func = COMPARE_FUNC_ALWAYS;
2753
2754 if (key->is_points) {
2755 key->point_sprite_mask =
2756 vc4->rasterizer->base.sprite_coord_enable;
2757 key->point_coord_upper_left =
2758 (vc4->rasterizer->base.sprite_coord_mode ==
2759 PIPE_SPRITE_COORD_UPPER_LEFT);
2760 }
2761
2762 key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size;
2763 key->light_twoside = vc4->rasterizer->base.light_twoside;
2764
2765 struct vc4_compiled_shader *old_fs = vc4->prog.fs;
2766 vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
2767 if (vc4->prog.fs == old_fs)
2768 return;
2769
2770 vc4->dirty |= VC4_DIRTY_COMPILED_FS;
2771
2772 if (vc4->rasterizer->base.flatshade &&
2773 (!old_fs || vc4->prog.fs->color_inputs != old_fs->color_inputs)) {
2774 vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
2775 }
2776
2777 if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
2778 vc4->dirty |= VC4_DIRTY_FS_INPUTS;
2779 }
2780
2781 static void
2782 vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
2783 {
2784 struct vc4_vs_key local_key;
2785 struct vc4_vs_key *key = &local_key;
2786
2787 if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2788 VC4_DIRTY_RASTERIZER |
2789 VC4_DIRTY_VERTTEX |
2790 VC4_DIRTY_VTXSTATE |
2791 VC4_DIRTY_UNCOMPILED_VS |
2792 VC4_DIRTY_FS_INPUTS))) {
2793 return;
2794 }
2795
2796 memset(key, 0, sizeof(*key));
2797 vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
2798 key->base.shader_state = vc4->prog.bind_vs;
2799 key->fs_inputs = vc4->prog.fs->fs_inputs;
2800 key->clamp_color = vc4->rasterizer->base.clamp_vertex_color;
2801
2802 for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
2803 key->attr_formats[i] = vc4->vtx->pipe[i].src_format;
2804
2805 key->per_vertex_point_size =
2806 (prim_mode == PIPE_PRIM_POINTS &&
2807 vc4->rasterizer->base.point_size_per_vertex);
2808
2809 struct vc4_compiled_shader *vs =
2810 vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
2811 if (vs != vc4->prog.vs) {
2812 vc4->prog.vs = vs;
2813 vc4->dirty |= VC4_DIRTY_COMPILED_VS;
2814 }
2815
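/* Reuse the same key for the coord (binning-mode) shader, with
 * fs_inputs cleared so a single coord variant is shared across FS
 * changes.
 */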
2816 key->is_coord = true;
2817 /* Coord shaders don't care what the FS inputs are. */
2818 key->fs_inputs = NULL;
2819 struct vc4_compiled_shader *cs =
2820 vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
2821 if (cs != vc4->prog.cs) {
2822 vc4->prog.cs = cs;
2823 vc4->dirty |= VC4_DIRTY_COMPILED_CS;
2824 }
2825 }
2826
2827 bool
2828 vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
2829 {
2830 vc4_update_compiled_fs(vc4, prim_mode);
2831 vc4_update_compiled_vs(vc4, prim_mode);
2832
2833 return !(vc4->prog.cs->failed ||
2834 vc4->prog.vs->failed ||
2835 vc4->prog.fs->failed);
2836 }
2837
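/* The shader keys are hashed and compared as raw bytes, which is why
 * they are always fully zeroed (memset/rzalloc) before being filled
 * in.
 */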
2838 static uint32_t
2839 fs_cache_hash(const void *key)
2840 {
2841 return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
2842 }
2843
2844 static uint32_t
2845 vs_cache_hash(const void *key)
2846 {
2847 return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
2848 }
2849
2850 static bool
2851 fs_cache_compare(const void *key1, const void *key2)
2852 {
2853 return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
2854 }
2855
2856 static bool
2857 vs_cache_compare(const void *key1, const void *key2)
2858 {
2859 return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
2860 }
2861
2862 static uint32_t
2863 fs_inputs_hash(const void *key)
2864 {
2865 const struct vc4_fs_inputs *inputs = key;
2866
2867 return _mesa_hash_data(inputs->input_slots,
2868 sizeof(*inputs->input_slots) *
2869 inputs->num_inputs);
2870 }
2871
2872 static bool
2873 fs_inputs_compare(const void *key1, const void *key2)
2874 {
2875 const struct vc4_fs_inputs *inputs1 = key1;
2876 const struct vc4_fs_inputs *inputs2 = key2;
2877
2878 return (inputs1->num_inputs == inputs2->num_inputs &&
2879 memcmp(inputs1->input_slots,
2880 inputs2->input_slots,
2881 sizeof(*inputs1->input_slots) *
2882 inputs1->num_inputs) == 0);
2883 }
2884
2885 static void
2886 delete_from_cache_if_matches(struct hash_table *ht,
2887 struct vc4_compiled_shader **last_compile,
2888 struct hash_entry *entry,
2889 struct vc4_uncompiled_shader *so)
2890 {
2891 const struct vc4_key *key = entry->key;
2892
2893 if (key->shader_state == so) {
2894 struct vc4_compiled_shader *shader = entry->data;
2895 _mesa_hash_table_remove(ht, entry);
2896 vc4_bo_unreference(&shader->bo);
2897
2898 if (shader == *last_compile)
2899 *last_compile = NULL;
2900
2901 ralloc_free(shader);
2902 }
2903 }
2904
2905 static void
2906 vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
2907 {
2908 struct vc4_context *vc4 = vc4_context(pctx);
2909 struct vc4_uncompiled_shader *so = hwcso;
2910
2911 hash_table_foreach(vc4->fs_cache, entry) {
2912 delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs,
2913 entry, so);
2914 }
2915 hash_table_foreach(vc4->vs_cache, entry) {
2916 delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs,
2917 entry, so);
2918 }
2919
2920 ralloc_free(so->base.ir.nir);
2921 free(so);
2922 }
2923
2924 static void
2925 vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
2926 {
2927 struct vc4_context *vc4 = vc4_context(pctx);
2928 vc4->prog.bind_fs = hwcso;
2929 vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
2930 }
2931
2932 static void
2933 vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
2934 {
2935 struct vc4_context *vc4 = vc4_context(pctx);
2936 vc4->prog.bind_vs = hwcso;
2937 vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
2938 }
2939
2940 void
2941 vc4_program_init(struct pipe_context *pctx)
2942 {
2943 struct vc4_context *vc4 = vc4_context(pctx);
2944
2945 pctx->create_vs_state = vc4_shader_state_create;
2946 pctx->delete_vs_state = vc4_shader_state_delete;
2947
2948 pctx->create_fs_state = vc4_shader_state_create;
2949 pctx->delete_fs_state = vc4_shader_state_delete;
2950
2951 pctx->bind_fs_state = vc4_fp_state_bind;
2952 pctx->bind_vs_state = vc4_vp_state_bind;
2953
2954 vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
2955 fs_cache_compare);
2956 vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
2957 vs_cache_compare);
2958 vc4->fs_inputs_set = _mesa_set_create(pctx, fs_inputs_hash,
2959 fs_inputs_compare);
2960 }
2961
2962 void
2963 vc4_program_fini(struct pipe_context *pctx)
2964 {
2965 struct vc4_context *vc4 = vc4_context(pctx);
2966
2967 hash_table_foreach(vc4->fs_cache, entry) {
2968 struct vc4_compiled_shader *shader = entry->data;
2969 vc4_bo_unreference(&shader->bo);
2970 ralloc_free(shader);
2971 _mesa_hash_table_remove(vc4->fs_cache, entry);
2972 }
2973
2974 hash_table_foreach(vc4->vs_cache, entry) {
2975 struct vc4_compiled_shader *shader = entry->data;
2976 vc4_bo_unreference(&shader->bo);
2977 ralloc_free(shader);
2978 _mesa_hash_table_remove(vc4->vs_cache, entry);
2979 }
2980 }