vc4: Avoid emitting small immediates for UBO indirect load address guards.
[mesa.git] src/gallium/drivers/vc4/vc4_program.c
1 /*
2 * Copyright (c) 2014 Scott Mansell
3 * Copyright © 2014 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <inttypes.h>
26 #include "util/u_format.h"
27 #include "util/crc32.h"
28 #include "util/u_math.h"
29 #include "util/u_memory.h"
30 #include "util/ralloc.h"
31 #include "util/hash_table.h"
32 #include "tgsi/tgsi_dump.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "compiler/nir/nir.h"
35 #include "compiler/nir/nir_builder.h"
36 #include "nir/tgsi_to_nir.h"
37 #include "vc4_context.h"
38 #include "vc4_qpu.h"
39 #include "vc4_qir.h"
40 #include "mesa/state_tracker/st_glsl_types.h"
41
42 static struct qreg
43 ntq_get_src(struct vc4_compile *c, nir_src src, int i);
44 static void
45 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
46
47 static void
48 resize_qreg_array(struct vc4_compile *c,
49 struct qreg **regs,
50 uint32_t *size,
51 uint32_t decl_size)
52 {
53 if (*size >= decl_size)
54 return;
55
56 uint32_t old_size = *size;
57 *size = MAX2(*size * 2, decl_size);
58 *regs = reralloc(c, *regs, struct qreg, *size);
59 if (!*regs) {
60 fprintf(stderr, "Malloc failure\n");
61 abort();
62 }
63
64 for (uint32_t i = old_size; i < *size; i++)
65 (*regs)[i] = c->undef;
66 }
67
68 static void
69 ntq_emit_thrsw(struct vc4_compile *c)
70 {
71 if (!c->fs_threaded)
72 return;
73
74 /* Always thread switch after each texture operation for now.
75 *
76 * We could do better by batching a bunch of texture fetches up and
77 * then doing one thread switch and collecting all their results
78 * afterward.
79 */
80 qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef,
81 c->undef, c->undef));
82 c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
83 }
84
85 static struct qreg
86 indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
87 {
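/* Indirectly-addressed uniforms can't use the normal uniform stream.  The
 * declared ranges that turn out to need indirect access get remapped to a
 * packed destination offset here, and (on the driver side) those ranges are
 * uploaded as a separate buffer; the load itself is then a clamped direct
 * TMU read based at QUNIFORM_UBO_ADDR below.
 */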
88 struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
89 uint32_t offset = nir_intrinsic_base(intr);
90 struct vc4_compiler_ubo_range *range = NULL;
91 unsigned i;
92 for (i = 0; i < c->num_uniform_ranges; i++) {
93 range = &c->ubo_ranges[i];
94 if (offset >= range->src_offset &&
95 offset < range->src_offset + range->size) {
96 break;
97 }
98 }
99 /* The driver-location-based offset always has to be within a declared
100 * uniform range.
101 */
102 assert(range);
103 if (!range->used) {
104 range->used = true;
105 range->dst_offset = c->next_ubo_dst_offset;
106 c->next_ubo_dst_offset += range->size;
107 c->num_ubo_ranges++;
108 }
109
110 offset -= range->src_offset;
111
112 /* Adjust for where we stored the TGSI register base. */
113 indirect_offset = qir_ADD(c, indirect_offset,
114 qir_uniform_ui(c, (range->dst_offset +
115 offset)));
116
117 /* Clamp to [0, array size). Note that MIN/MAX are signed. */
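/* qir_MIN_NOIMM (rather than plain qir_MIN) keeps the bound from being
 * encoded as a QPU small immediate -- this is the "address guard" from the
 * commit title above; the kernel's validation of the direct TMU read
 * presumably needs to see the clamp against a uniform.
 */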
118 indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
119 indirect_offset = qir_MIN_NOIMM(c, indirect_offset,
120 qir_uniform_ui(c, (range->dst_offset +
121 range->size - 4)));
122
123 qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
124 indirect_offset,
125 qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
126
127 c->num_texture_samples++;
128
129 ntq_emit_thrsw(c);
130
131 return qir_TEX_RESULT(c);
132 }
133
134 nir_ssa_def *
135 vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
136 {
137 switch (swiz) {
138 default:
139 case PIPE_SWIZZLE_NONE:
140 fprintf(stderr, "warning: unknown swizzle\n");
141 /* FALLTHROUGH */
142 case PIPE_SWIZZLE_0:
143 return nir_imm_float(b, 0.0);
144 case PIPE_SWIZZLE_1:
145 return nir_imm_float(b, 1.0);
146 case PIPE_SWIZZLE_X:
147 case PIPE_SWIZZLE_Y:
148 case PIPE_SWIZZLE_Z:
149 case PIPE_SWIZZLE_W:
150 return srcs[swiz];
151 }
152 }
153
154 static struct qreg *
155 ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
156 {
157 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
158 def->num_components);
159 _mesa_hash_table_insert(c->def_ht, def, qregs);
160 return qregs;
161 }
162
163 /**
164 * This function is responsible for getting QIR results into the associated
165 * storage for a NIR instruction.
166 *
167 * If it's a NIR SSA def, then we just set the associated hash table entry to
168 * the new result.
169 *
170 * If it's a NIR reg, then we need to update the existing qreg assigned to the
171 * NIR destination with the incoming value. To do that without introducing
172 * new MOVs, we require that the incoming qreg either be a uniform, or be
173 * SSA-defined by the previous QIR instruction in the block and rewritable by
174 * this function. That lets us sneak ahead and insert the SF flag beforehand
175 * (knowing that the previous instruction doesn't depend on flags) and rewrite
176 * its destination to be the NIR reg's destination.
177 */
178 static void
179 ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
180 struct qreg result)
181 {
182 struct qinst *last_inst = NULL;
183 if (!list_empty(&c->cur_block->instructions))
184 last_inst = (struct qinst *)c->cur_block->instructions.prev;
185
186 assert(result.file == QFILE_UNIF ||
187 (result.file == QFILE_TEMP &&
188 last_inst && last_inst == c->defs[result.index]));
189
190 if (dest->is_ssa) {
191 assert(chan < dest->ssa.num_components);
192
193 struct qreg *qregs;
194 struct hash_entry *entry =
195 _mesa_hash_table_search(c->def_ht, &dest->ssa);
196
197 if (entry)
198 qregs = entry->data;
199 else
200 qregs = ntq_init_ssa_def(c, &dest->ssa);
201
202 qregs[chan] = result;
203 } else {
204 nir_register *reg = dest->reg.reg;
205 assert(dest->reg.base_offset == 0);
206 assert(reg->num_array_elems == 0);
207 struct hash_entry *entry =
208 _mesa_hash_table_search(c->def_ht, reg);
209 struct qreg *qregs = entry->data;
210
211 /* Insert a MOV if the source wasn't an SSA def in the
212 * previous instruction.
213 */
214 if (result.file == QFILE_UNIF) {
215 result = qir_MOV(c, result);
216 last_inst = c->defs[result.index];
217 }
218
219 /* We know they're both temps, so just rewrite index. */
220 c->defs[last_inst->dst.index] = NULL;
221 last_inst->dst.index = qregs[chan].index;
222
223 /* If we're in control flow, then make this update of the reg
224 * conditional on the execution mask.
225 */
226 if (c->execute.file != QFILE_NULL) {
227 last_inst->dst.index = qregs[chan].index;
228
229 /* Set the flags to the current exec mask. To insert
230 * the SF, we temporarily remove our SSA instruction.
231 */
232 list_del(&last_inst->link);
233 qir_SF(c, c->execute);
234 list_addtail(&last_inst->link,
235 &c->cur_block->instructions);
236
237 last_inst->cond = QPU_COND_ZS;
238 last_inst->cond_is_exec_mask = true;
239 }
240 }
241 }
242
243 static struct qreg *
244 ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
245 {
246 if (dest->is_ssa) {
247 struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
248 for (int i = 0; i < dest->ssa.num_components; i++)
249 qregs[i] = c->undef;
250 return qregs;
251 } else {
252 nir_register *reg = dest->reg.reg;
253 assert(dest->reg.base_offset == 0);
254 assert(reg->num_array_elems == 0);
255 struct hash_entry *entry =
256 _mesa_hash_table_search(c->def_ht, reg);
257 return entry->data;
258 }
259 }
260
261 static struct qreg
262 ntq_get_src(struct vc4_compile *c, nir_src src, int i)
263 {
264 struct hash_entry *entry;
265 if (src.is_ssa) {
266 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
267 assert(i < src.ssa->num_components);
268 } else {
269 nir_register *reg = src.reg.reg;
270 entry = _mesa_hash_table_search(c->def_ht, reg);
271 assert(reg->num_array_elems == 0);
272 assert(src.reg.base_offset == 0);
273 assert(i < reg->num_components);
274 }
275
276 struct qreg *qregs = entry->data;
277 return qregs[i];
278 }
279
280 static struct qreg
281 ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
282 unsigned src)
283 {
284 assert(util_is_power_of_two(instr->dest.write_mask));
285 unsigned chan = ffs(instr->dest.write_mask) - 1;
286 struct qreg r = ntq_get_src(c, instr->src[src].src,
287 instr->src[src].swizzle[chan]);
288
289 assert(!instr->src[src].abs);
290 assert(!instr->src[src].negate);
291
292 return r;
293 };
294
295 static inline struct qreg
296 qir_SAT(struct vc4_compile *c, struct qreg val)
297 {
298 return qir_FMAX(c,
299 qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
300 qir_uniform_f(c, 0.0));
301 }
302
303 static struct qreg
304 ntq_rcp(struct vc4_compile *c, struct qreg x)
305 {
306 struct qreg r = qir_RCP(c, x);
307
308 /* Apply a Newton-Raphson step to improve the accuracy. */
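/* (r1 = r0 * (2 - x*r0); one iteration roughly doubles the number of
 * correct bits in the hardware's RCP estimate.)
 */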
309 r = qir_FMUL(c, r, qir_FSUB(c,
310 qir_uniform_f(c, 2.0),
311 qir_FMUL(c, x, r)));
312
313 return r;
314 }
315
316 static struct qreg
317 ntq_rsq(struct vc4_compile *c, struct qreg x)
318 {
319 struct qreg r = qir_RSQ(c, x);
320
321 /* Apply a Newton-Raphson step to improve the accuracy. */
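/* (r1 = r0 * (1.5 - 0.5*x*r0*r0), the standard Newton-Raphson refinement
 * for 1/sqrt(x); this matches the chain of FMULs below.)
 */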
322 r = qir_FMUL(c, r, qir_FSUB(c,
323 qir_uniform_f(c, 1.5),
324 qir_FMUL(c,
325 qir_uniform_f(c, 0.5),
326 qir_FMUL(c, x,
327 qir_FMUL(c, r, r)))));
328
329 return r;
330 }
331
332 static struct qreg
333 ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
334 {
335 struct qreg src0_hi = qir_SHR(c, src0,
336 qir_uniform_ui(c, 24));
337 struct qreg src1_hi = qir_SHR(c, src1,
338 qir_uniform_ui(c, 24));
339
340 struct qreg hilo = qir_MUL24(c, src0_hi, src1);
341 struct qreg lohi = qir_MUL24(c, src0, src1_hi);
342 struct qreg lolo = qir_MUL24(c, src0, src1);
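/* MUL24 only multiplies the low 24 bits of each operand, so with
 * src = lo + hi*2^24 the low 32 bits of the product are reassembled as
 * lolo + ((hilo + lohi) << 24); the hi*hi term only affects bits 48 and up
 * and drops out.  The result is also the same for signed and unsigned
 * operands, which is why nir_op_imul can use this helper.
 */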
343
344 return qir_ADD(c, lolo, qir_SHL(c,
345 qir_ADD(c, hilo, lohi),
346 qir_uniform_ui(c, 24)));
347 }
348
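/* Converts the raw texel returned for a depth/stencil texture (24 bits of
 * depth above the low byte) into a normalized float in [0, 1].
 */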
349 static struct qreg
350 ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
351 {
352 struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
353 qir_uniform_ui(c, 8)));
354 return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
355 }
356
357 /**
358 * Emits a lowered TXF_MS from an MSAA texture.
359 *
360 * The addressing math has been lowered in NIR, and now we just need to read
361 * it like a UBO.
362 */
363 static void
364 ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
365 {
366 uint32_t tile_width = 32;
367 uint32_t tile_height = 32;
368 uint32_t tile_size = (tile_height * tile_width *
369 VC4_MAX_SAMPLES * sizeof(uint32_t));
370
371 unsigned unit = instr->texture_index;
372 uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
373 uint32_t w_tiles = w / tile_width;
374 uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
375 uint32_t h_tiles = h / tile_height;
376 uint32_t size = w_tiles * h_tiles * tile_size;
377
378 struct qreg addr;
379 assert(instr->num_srcs == 1);
380 assert(instr->src[0].src_type == nir_tex_src_coord);
381 addr = ntq_get_src(c, instr->src[0].src, 0);
382
383 /* Perform the clamping required by kernel validation. */
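/* As in indirect_uniform_load(), MIN_NOIMM keeps the bound as a uniform
 * instead of a small immediate so the kernel can still recognize the clamp.
 */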
384 addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
385 addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4));
386
387 qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
388 addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
389
390 ntq_emit_thrsw(c);
391
392 struct qreg tex = qir_TEX_RESULT(c);
393 c->num_texture_samples++;
394
395 enum pipe_format format = c->key->tex[unit].format;
396 if (util_format_is_depth_or_stencil(format)) {
397 struct qreg scaled = ntq_scale_depth_texture(c, tex);
398 for (int i = 0; i < 4; i++)
399 ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled));
400 } else {
401 for (int i = 0; i < 4; i++)
402 ntq_store_dest(c, &instr->dest, i,
403 qir_UNPACK_8_F(c, tex, i));
404 }
405 }
406
407 static void
408 ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
409 {
410 struct qreg s, t, r, lod, compare;
411 bool is_txb = false, is_txl = false;
412 unsigned unit = instr->texture_index;
413
414 if (instr->op == nir_texop_txf) {
415 ntq_emit_txf(c, instr);
416 return;
417 }
418
419 for (unsigned i = 0; i < instr->num_srcs; i++) {
420 switch (instr->src[i].src_type) {
421 case nir_tex_src_coord:
422 s = ntq_get_src(c, instr->src[i].src, 0);
423 if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
424 t = qir_uniform_f(c, 0.5);
425 else
426 t = ntq_get_src(c, instr->src[i].src, 1);
427 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
428 r = ntq_get_src(c, instr->src[i].src, 2);
429 break;
430 case nir_tex_src_bias:
431 lod = ntq_get_src(c, instr->src[i].src, 0);
432 is_txb = true;
433 break;
434 case nir_tex_src_lod:
435 lod = ntq_get_src(c, instr->src[i].src, 0);
436 is_txl = true;
437 break;
438 case nir_tex_src_comparator:
439 compare = ntq_get_src(c, instr->src[i].src, 0);
440 break;
441 default:
442 unreachable("unknown texture source");
443 }
444 }
445
446 if (c->stage != QSTAGE_FRAG && !is_txl) {
447 /* From the GLSL 1.20 spec:
448 *
449 * "If it is mip-mapped and running on the vertex shader,
450 * then the base texture is used."
451 */
452 is_txl = true;
453 lod = qir_uniform_ui(c, 0);
454 }
455
456 if (c->key->tex[unit].force_first_level) {
457 lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit);
458 is_txl = true;
459 is_txb = false;
460 }
461
462 struct qreg texture_u[] = {
463 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
464 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
465 qir_uniform(c, QUNIFORM_CONSTANT, 0),
466 qir_uniform(c, QUNIFORM_CONSTANT, 0),
467 };
468 uint32_t next_texture_u = 0;
469
470 /* There is no native support for GL texture rectangle coordinates, so
471 * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
472 * 1]).
473 */
474 if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
475 s = qir_FMUL(c, s,
476 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
477 t = qir_FMUL(c, t,
478 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
479 }
480
481 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
482 texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
483 unit | (is_txl << 16));
484 }
485
486 struct qinst *tmu;
487 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
488 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r);
489 tmu->src[qir_get_tex_uniform_src(tmu)] =
490 texture_u[next_texture_u++];
491 } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
492 c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
493 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
494 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
495 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0),
496 qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR,
497 unit));
498 tmu->src[qir_get_tex_uniform_src(tmu)] =
499 texture_u[next_texture_u++];
500 }
501
502 if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
503 s = qir_SAT(c, s);
504 }
505
506 if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
507 t = qir_SAT(c, t);
508 }
509
510 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t);
511 tmu->src[qir_get_tex_uniform_src(tmu)] =
512 texture_u[next_texture_u++];
513
514 if (is_txl || is_txb) {
515 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod);
516 tmu->src[qir_get_tex_uniform_src(tmu)] =
517 texture_u[next_texture_u++];
518 }
519
520 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s);
521 tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++];
522
523 c->num_texture_samples++;
524
525 ntq_emit_thrsw(c);
526
527 struct qreg tex = qir_TEX_RESULT(c);
528
529 enum pipe_format format = c->key->tex[unit].format;
530
531 struct qreg *dest = ntq_get_dest(c, &instr->dest);
532 if (util_format_is_depth_or_stencil(format)) {
533 struct qreg normalized = ntq_scale_depth_texture(c, tex);
534 struct qreg depth_output;
535
536 struct qreg u0 = qir_uniform_f(c, 0.0f);
537 struct qreg u1 = qir_uniform_f(c, 1.0f);
538 if (c->key->tex[unit].compare_mode) {
539 /* From the GL_ARB_shadow spec:
540 *
541 * "Let Dt (D subscript t) be the depth texture
542 * value, in the range [0, 1]. Let R be the
543 * interpolated texture coordinate clamped to the
544 * range [0, 1]."
545 */
546 compare = qir_SAT(c, compare);
547
548 switch (c->key->tex[unit].compare_func) {
549 case PIPE_FUNC_NEVER:
550 depth_output = qir_uniform_f(c, 0.0f);
551 break;
552 case PIPE_FUNC_ALWAYS:
553 depth_output = u1;
554 break;
555 case PIPE_FUNC_EQUAL:
556 qir_SF(c, qir_FSUB(c, compare, normalized));
557 depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
558 break;
559 case PIPE_FUNC_NOTEQUAL:
560 qir_SF(c, qir_FSUB(c, compare, normalized));
561 depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
562 break;
563 case PIPE_FUNC_GREATER:
564 qir_SF(c, qir_FSUB(c, compare, normalized));
565 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
566 break;
567 case PIPE_FUNC_GEQUAL:
568 qir_SF(c, qir_FSUB(c, normalized, compare));
569 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
570 break;
571 case PIPE_FUNC_LESS:
572 qir_SF(c, qir_FSUB(c, compare, normalized));
573 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
574 break;
575 case PIPE_FUNC_LEQUAL:
576 qir_SF(c, qir_FSUB(c, normalized, compare));
577 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
578 break;
579 }
580 } else {
581 depth_output = normalized;
582 }
583
584 for (int i = 0; i < 4; i++)
585 dest[i] = depth_output;
586 } else {
587 for (int i = 0; i < 4; i++)
588 dest[i] = qir_UNPACK_8_F(c, tex, i);
589 }
590 }
591
592 /**
593 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
594 * to zero).
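 * For example, with src = -1.25: trunc(src) = -1.0, so diff = -0.25; the
 * negative flag then makes the conditional FADD add 1.0, giving
 * 0.75 == -1.25 - floor(-1.25).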
595 */
596 static struct qreg
597 ntq_ffract(struct vc4_compile *c, struct qreg src)
598 {
599 struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
600 struct qreg diff = qir_FSUB(c, src, trunc);
601 qir_SF(c, diff);
602
603 qir_FADD_dest(c, diff,
604 diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
605
606 return qir_MOV(c, diff);
607 }
608
609 /**
610 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
611 * zero).
612 */
613 static struct qreg
614 ntq_ffloor(struct vc4_compile *c, struct qreg src)
615 {
616 struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
617
618 /* This will be < 0 if we truncated and the truncation was of a value
619 * that was < 0 in the first place.
620 */
621 qir_SF(c, qir_FSUB(c, src, result));
622
623 struct qinst *sub = qir_FSUB_dest(c, result,
624 result, qir_uniform_f(c, 1.0));
625 sub->cond = QPU_COND_NS;
626
627 return qir_MOV(c, result);
628 }
629
630 /**
631 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
632 * zero).
633 */
634 static struct qreg
635 ntq_fceil(struct vc4_compile *c, struct qreg src)
636 {
637 struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
638
639 /* This will be < 0 if we truncated and the truncation was of a value
640 * that was > 0 in the first place.
641 */
642 qir_SF(c, qir_FSUB(c, result, src));
643
644 qir_FADD_dest(c, result,
645 result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
646
647 return qir_MOV(c, result);
648 }
649
650 static struct qreg
651 ntq_fsin(struct vc4_compile *c, struct qreg src)
652 {
653 float coeff[] = {
654 -2.0 * M_PI,
655 pow(2.0 * M_PI, 3) / (3 * 2 * 1),
656 -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
657 pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
658 -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
659 };
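/* These are the Taylor coefficients of -sin(2*pi*x): the range reduction
 * below maps the angle to x = fract(src/(2*pi)) - 0.5 in [-0.5, 0.5), and
 * sin(src) = sin(2*pi*x + pi) = -sin(2*pi*x), which the odd polynomial in x
 * computes.
 */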
660
661 struct qreg scaled_x =
662 qir_FMUL(c,
663 src,
664 qir_uniform_f(c, 1.0 / (M_PI * 2.0)));
665
666 struct qreg x = qir_FADD(c,
667 ntq_ffract(c, scaled_x),
668 qir_uniform_f(c, -0.5));
669 struct qreg x2 = qir_FMUL(c, x, x);
670 struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
671 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
672 x = qir_FMUL(c, x, x2);
673 sum = qir_FADD(c,
674 sum,
675 qir_FMUL(c,
676 x,
677 qir_uniform_f(c, coeff[i])));
678 }
679 return sum;
680 }
681
682 static struct qreg
683 ntq_fcos(struct vc4_compile *c, struct qreg src)
684 {
685 float coeff[] = {
686 -1.0f,
687 pow(2.0 * M_PI, 2) / (2 * 1),
688 -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
689 pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
690 -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
691 pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
692 };
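/* Same range reduction as ntq_fsin: with x = fract(src/(2*pi)) - 0.5,
 * cos(src) = -cos(2*pi*x), and these are that function's even-power Taylor
 * coefficients.
 */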
693
694 struct qreg scaled_x =
695 qir_FMUL(c, src,
696 qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
697 struct qreg x_frac = qir_FADD(c,
698 ntq_ffract(c, scaled_x),
699 qir_uniform_f(c, -0.5));
700
701 struct qreg sum = qir_uniform_f(c, coeff[0]);
702 struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
703 struct qreg x = x2; /* Current even power of x: x^2, x^4, ... */
704 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
705 if (i != 1)
706 x = qir_FMUL(c, x, x2);
707
708 struct qreg mul = qir_FMUL(c,
709 x,
710 qir_uniform_f(c, coeff[i]));
711 if (i == 0)
712 sum = mul;
713 else
714 sum = qir_FADD(c, sum, mul);
715 }
716 return sum;
717 }
718
719 static struct qreg
720 ntq_fsign(struct vc4_compile *c, struct qreg src)
721 {
722 struct qreg t = qir_get_temp(c);
723
724 qir_SF(c, src);
725 qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
726 qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
727 qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
728 return qir_MOV(c, t);
729 }
730
731 static void
732 emit_vertex_input(struct vc4_compile *c, int attr)
733 {
734 enum pipe_format format = c->vs_key->attr_formats[attr];
735 uint32_t attr_size = util_format_get_blocksize(format);
736
737 c->vattr_sizes[attr] = align(attr_size, 4);
738 for (int i = 0; i < align(attr_size, 4) / 4; i++) {
739 c->inputs[attr * 4 + i] =
740 qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
741 c->num_inputs++;
742 }
743 }
744
745 static void
746 emit_fragcoord_input(struct vc4_compile *c, int attr)
747 {
748 c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
749 c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
750 c->inputs[attr * 4 + 2] =
751 qir_FMUL(c,
752 qir_ITOF(c, qir_FRAG_Z(c)),
753 qir_uniform_f(c, 1.0 / 0xffffff));
754 c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
755 }
756
757 static struct qreg
758 emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
759 uint8_t swizzle)
760 {
761 uint32_t i = c->num_input_slots++;
762 struct qreg vary = {
763 QFILE_VARY,
764 i
765 };
766
767 if (c->num_input_slots >= c->input_slots_array_size) {
768 c->input_slots_array_size =
769 MAX2(4, c->input_slots_array_size * 2);
770
771 c->input_slots = reralloc(c, c->input_slots,
772 struct vc4_varying_slot,
773 c->input_slots_array_size);
774 }
775
776 c->input_slots[i].slot = slot;
777 c->input_slots[i].swizzle = swizzle;
778
779 return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
780 }
781
782 static void
783 emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
784 {
785 for (int i = 0; i < 4; i++) {
786 c->inputs[attr * 4 + i] =
787 emit_fragment_varying(c, slot, i);
788 c->num_inputs++;
789 }
790 }
791
792 static void
793 add_output(struct vc4_compile *c,
794 uint32_t decl_offset,
795 uint8_t slot,
796 uint8_t swizzle)
797 {
798 uint32_t old_array_size = c->outputs_array_size;
799 resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
800 decl_offset + 1);
801
802 if (old_array_size != c->outputs_array_size) {
803 c->output_slots = reralloc(c,
804 c->output_slots,
805 struct vc4_varying_slot,
806 c->outputs_array_size);
807 }
808
809 c->output_slots[decl_offset].slot = slot;
810 c->output_slots[decl_offset].swizzle = swizzle;
811 }
812
813 static void
814 declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
815 {
816 unsigned array_id = c->num_uniform_ranges++;
817 if (array_id >= c->ubo_ranges_array_size) {
818 c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
819 array_id + 1);
820 c->ubo_ranges = reralloc(c, c->ubo_ranges,
821 struct vc4_compiler_ubo_range,
822 c->ubo_ranges_array_size);
823 }
824
825 c->ubo_ranges[array_id].dst_offset = 0;
826 c->ubo_ranges[array_id].src_offset = start;
827 c->ubo_ranges[array_id].size = size;
828 c->ubo_ranges[array_id].used = false;
829 }
830
831 static bool
832 ntq_src_is_only_ssa_def_user(nir_src *src)
833 {
834 if (!src->is_ssa)
835 return false;
836
837 if (!list_empty(&src->ssa->if_uses))
838 return false;
839
840 return (src->ssa->uses.next == &src->use_link &&
841 src->ssa->uses.next->next == &src->ssa->uses);
842 }
843
844 /**
845 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
846 * bit set.
847 *
848 * However, as an optimization, it tries to find the instructions generating
849 * the sources to be packed and just emit the pack flag there, if possible.
850 */
851 static void
852 ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
853 {
854 struct qreg result = qir_get_temp(c);
855 struct nir_alu_instr *vec4 = NULL;
856
857 /* If packing from a vec4 op (as expected), identify it so that we can
858 * peek back at what generated its sources.
859 */
860 if (instr->src[0].src.is_ssa &&
861 instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
862 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
863 nir_op_vec4) {
864 vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
865 }
866
867 /* If the pack is replicating the same channel 4 times, use the 8888
868 * pack flag. This is common for blending using the alpha
869 * channel.
870 */
871 if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
872 instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
873 instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
874 struct qreg rep = ntq_get_src(c,
875 instr->src[0].src,
876 instr->src[0].swizzle[0]);
877 ntq_store_dest(c, &instr->dest.dest, 0, qir_PACK_8888_F(c, rep));
878 return;
879 }
880
881 for (int i = 0; i < 4; i++) {
882 int swiz = instr->src[0].swizzle[i];
883 struct qreg src;
884 if (vec4) {
885 src = ntq_get_src(c, vec4->src[swiz].src,
886 vec4->src[swiz].swizzle[0]);
887 } else {
888 src = ntq_get_src(c, instr->src[0].src, swiz);
889 }
890
891 if (vec4 &&
892 ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
893 src.file == QFILE_TEMP &&
894 c->defs[src.index] &&
895 qir_is_mul(c->defs[src.index]) &&
896 !c->defs[src.index]->dst.pack) {
897 struct qinst *rewrite = c->defs[src.index];
898 c->defs[src.index] = NULL;
899 rewrite->dst = result;
900 rewrite->dst.pack = QPU_PACK_MUL_8A + i;
901 continue;
902 }
903
904 qir_PACK_8_F(c, result, src, i);
905 }
906
907 ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result));
908 }
909
910 /** Handles sign-extended bitfield extracts for 16 bits. */
911 static struct qreg
912 ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
913 struct qreg bits)
914 {
915 assert(bits.file == QFILE_UNIF &&
916 c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
917 c->uniform_data[bits.index] == 16);
918
919 assert(offset.file == QFILE_UNIF &&
920 c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
921 int offset_bit = c->uniform_data[offset.index];
922 assert(offset_bit % 16 == 0);
923
924 return qir_UNPACK_16_I(c, base, offset_bit / 16);
925 }
926
927 /** Handles unsigned bitfield extracts for 8 bits. */
928 static struct qreg
929 ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
930 struct qreg bits)
931 {
932 assert(bits.file == QFILE_UNIF &&
933 c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
934 c->uniform_data[bits.index] == 8);
935
936 assert(offset.file == QFILE_UNIF &&
937 c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
938 int offset_bit = c->uniform_data[offset.index];
939 assert(offset_bit % 8 == 0);
940
941 return qir_UNPACK_8_I(c, base, offset_bit / 8);
942 }
943
944 /**
945 * If compare_instr is a valid comparison instruction, emits the
946 * compare_instr's comparison and returns the sel_instr's return value based
947 * on the compare_instr's result.
948 */
949 static bool
950 ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
951 nir_alu_instr *compare_instr,
952 nir_alu_instr *sel_instr)
953 {
954 enum qpu_cond cond;
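/* The comparison is implemented by setting the condition flags on
 * src0 - src1 (the FSUB or SUB below) and then doing a conditional select:
 * Z set/clear gives ==/!=, and N clear/set gives >=/< for the cases above.
 */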
955
956 switch (compare_instr->op) {
957 case nir_op_feq:
958 case nir_op_ieq:
959 case nir_op_seq:
960 cond = QPU_COND_ZS;
961 break;
962 case nir_op_fne:
963 case nir_op_ine:
964 case nir_op_sne:
965 cond = QPU_COND_ZC;
966 break;
967 case nir_op_fge:
968 case nir_op_ige:
969 case nir_op_uge:
970 case nir_op_sge:
971 cond = QPU_COND_NC;
972 break;
973 case nir_op_flt:
974 case nir_op_ilt:
975 case nir_op_slt:
976 cond = QPU_COND_NS;
977 break;
978 default:
979 return false;
980 }
981
982 struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
983 struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);
984
985 unsigned unsized_type =
986 nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
987 if (unsized_type == nir_type_float)
988 qir_SF(c, qir_FSUB(c, src0, src1));
989 else
990 qir_SF(c, qir_SUB(c, src0, src1));
991
992 switch (sel_instr->op) {
993 case nir_op_seq:
994 case nir_op_sne:
995 case nir_op_sge:
996 case nir_op_slt:
997 *dest = qir_SEL(c, cond,
998 qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
999 break;
1000
1001 case nir_op_bcsel:
1002 *dest = qir_SEL(c, cond,
1003 ntq_get_alu_src(c, sel_instr, 1),
1004 ntq_get_alu_src(c, sel_instr, 2));
1005 break;
1006
1007 default:
1008 *dest = qir_SEL(c, cond,
1009 qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
1010 break;
1011 }
1012
1013 /* Make the temporary for nir_store_dest(). */
1014 *dest = qir_MOV(c, *dest);
1015
1016 return true;
1017 }
1018
1019 /**
1020 * Attempts to fold a comparison generating a boolean result into the
1021 * condition code for selecting between two values, instead of comparing the
1022 * boolean result against 0 to generate the condition code.
1023 */
1024 static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
1025 struct qreg *src)
1026 {
1027 if (!instr->src[0].src.is_ssa)
1028 goto out;
1029 if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
1030 goto out;
1031 nir_alu_instr *compare =
1032 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
1033 if (!compare)
1034 goto out;
1035
1036 struct qreg dest;
1037 if (ntq_emit_comparison(c, &dest, compare, instr))
1038 return dest;
1039
1040 out:
1041 qir_SF(c, src[0]);
1042 return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2]));
1043 }
1044
1045 static struct qreg
1046 ntq_fddx(struct vc4_compile *c, struct qreg src)
1047 {
1048 /* Make sure that we have a bare temp to use for MUL rotation, so it
1049 * can be allocated to an accumulator.
1050 */
1051 if (src.pack || src.file != QFILE_TEMP)
1052 src = qir_MOV(c, src);
1053
1054 struct qreg from_left = qir_ROT_MUL(c, src, 1);
1055 struct qreg from_right = qir_ROT_MUL(c, src, 15);
1056
1057 /* Distinguish left/right pixels of the quad. */
1058 qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0),
1059 qir_uniform_ui(c, 1)));
1060
1061 return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1062 qir_FSUB(c, from_right, src),
1063 qir_FSUB(c, src, from_left)));
1064 }
1065
1066 static struct qreg
1067 ntq_fddy(struct vc4_compile *c, struct qreg src)
1068 {
1069 if (src.pack || src.file != QFILE_TEMP)
1070 src = qir_MOV(c, src);
1071
1072 struct qreg from_bottom = qir_ROT_MUL(c, src, 2);
1073 struct qreg from_top = qir_ROT_MUL(c, src, 14);
1074
1075 /* Distinguish top/bottom pixels of the quad. */
1076 qir_SF(c, qir_AND(c,
1077 qir_reg(QFILE_QPU_ELEMENT, 0),
1078 qir_uniform_ui(c, 2)));
1079
1080 return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1081 qir_FSUB(c, from_top, src),
1082 qir_FSUB(c, src, from_bottom)));
1083 }
1084
1085 static void
1086 ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
1087 {
1088 /* This should always be lowered to ALU operations for VC4. */
1089 assert(!instr->dest.saturate);
1090
1091 /* Vectors are special in that they have non-scalarized writemasks,
1092 * and just take the first swizzle channel for each argument in order
1093 * into each writemask channel.
1094 */
1095 if (instr->op == nir_op_vec2 ||
1096 instr->op == nir_op_vec3 ||
1097 instr->op == nir_op_vec4) {
1098 struct qreg srcs[4];
1099 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1100 srcs[i] = ntq_get_src(c, instr->src[i].src,
1101 instr->src[i].swizzle[0]);
1102 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1103 ntq_store_dest(c, &instr->dest.dest, i,
1104 qir_MOV(c, srcs[i]));
1105 return;
1106 }
1107
1108 if (instr->op == nir_op_pack_unorm_4x8) {
1109 ntq_emit_pack_unorm_4x8(c, instr);
1110 return;
1111 }
1112
1113 if (instr->op == nir_op_unpack_unorm_4x8) {
1114 struct qreg src = ntq_get_src(c, instr->src[0].src,
1115 instr->src[0].swizzle[0]);
1116 for (int i = 0; i < 4; i++) {
1117 if (instr->dest.write_mask & (1 << i))
1118 ntq_store_dest(c, &instr->dest.dest, i,
1119 qir_UNPACK_8_F(c, src, i));
1120 }
1121 return;
1122 }
1123
1124 /* General case: We can just grab the one used channel per src. */
1125 struct qreg src[nir_op_infos[instr->op].num_inputs];
1126 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
1127 src[i] = ntq_get_alu_src(c, instr, i);
1128 }
1129
1130 struct qreg result;
1131
1132 switch (instr->op) {
1133 case nir_op_fmov:
1134 case nir_op_imov:
1135 result = qir_MOV(c, src[0]);
1136 break;
1137 case nir_op_fmul:
1138 result = qir_FMUL(c, src[0], src[1]);
1139 break;
1140 case nir_op_fadd:
1141 result = qir_FADD(c, src[0], src[1]);
1142 break;
1143 case nir_op_fsub:
1144 result = qir_FSUB(c, src[0], src[1]);
1145 break;
1146 case nir_op_fmin:
1147 result = qir_FMIN(c, src[0], src[1]);
1148 break;
1149 case nir_op_fmax:
1150 result = qir_FMAX(c, src[0], src[1]);
1151 break;
1152
1153 case nir_op_f2i:
1154 case nir_op_f2u:
1155 result = qir_FTOI(c, src[0]);
1156 break;
1157 case nir_op_i2f:
1158 case nir_op_u2f:
1159 result = qir_ITOF(c, src[0]);
1160 break;
1161 case nir_op_b2f:
1162 result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
1163 break;
1164 case nir_op_b2i:
1165 result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
1166 break;
1167 case nir_op_i2b:
1168 case nir_op_f2b:
1169 qir_SF(c, src[0]);
1170 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
1171 qir_uniform_ui(c, ~0),
1172 qir_uniform_ui(c, 0)));
1173 break;
1174
1175 case nir_op_iadd:
1176 result = qir_ADD(c, src[0], src[1]);
1177 break;
1178 case nir_op_ushr:
1179 result = qir_SHR(c, src[0], src[1]);
1180 break;
1181 case nir_op_isub:
1182 result = qir_SUB(c, src[0], src[1]);
1183 break;
1184 case nir_op_ishr:
1185 result = qir_ASR(c, src[0], src[1]);
1186 break;
1187 case nir_op_ishl:
1188 result = qir_SHL(c, src[0], src[1]);
1189 break;
1190 case nir_op_imin:
1191 result = qir_MIN(c, src[0], src[1]);
1192 break;
1193 case nir_op_imax:
1194 result = qir_MAX(c, src[0], src[1]);
1195 break;
1196 case nir_op_iand:
1197 result = qir_AND(c, src[0], src[1]);
1198 break;
1199 case nir_op_ior:
1200 result = qir_OR(c, src[0], src[1]);
1201 break;
1202 case nir_op_ixor:
1203 result = qir_XOR(c, src[0], src[1]);
1204 break;
1205 case nir_op_inot:
1206 result = qir_NOT(c, src[0]);
1207 break;
1208
1209 case nir_op_imul:
1210 result = ntq_umul(c, src[0], src[1]);
1211 break;
1212
1213 case nir_op_seq:
1214 case nir_op_sne:
1215 case nir_op_sge:
1216 case nir_op_slt:
1217 case nir_op_feq:
1218 case nir_op_fne:
1219 case nir_op_fge:
1220 case nir_op_flt:
1221 case nir_op_ieq:
1222 case nir_op_ine:
1223 case nir_op_ige:
1224 case nir_op_uge:
1225 case nir_op_ilt:
1226 if (!ntq_emit_comparison(c, &result, instr, instr)) {
1227 fprintf(stderr, "Bad comparison instruction\n");
1228 }
1229 break;
1230
1231 case nir_op_bcsel:
1232 result = ntq_emit_bcsel(c, instr, src);
1233 break;
1234 case nir_op_fcsel:
1235 qir_SF(c, src[0]);
1236 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2]));
1237 break;
1238
1239 case nir_op_frcp:
1240 result = ntq_rcp(c, src[0]);
1241 break;
1242 case nir_op_frsq:
1243 result = ntq_rsq(c, src[0]);
1244 break;
1245 case nir_op_fexp2:
1246 result = qir_EXP2(c, src[0]);
1247 break;
1248 case nir_op_flog2:
1249 result = qir_LOG2(c, src[0]);
1250 break;
1251
1252 case nir_op_ftrunc:
1253 result = qir_ITOF(c, qir_FTOI(c, src[0]));
1254 break;
1255 case nir_op_fceil:
1256 result = ntq_fceil(c, src[0]);
1257 break;
1258 case nir_op_ffract:
1259 result = ntq_ffract(c, src[0]);
1260 break;
1261 case nir_op_ffloor:
1262 result = ntq_ffloor(c, src[0]);
1263 break;
1264
1265 case nir_op_fsin:
1266 result = ntq_fsin(c, src[0]);
1267 break;
1268 case nir_op_fcos:
1269 result = ntq_fcos(c, src[0]);
1270 break;
1271
1272 case nir_op_fsign:
1273 result = ntq_fsign(c, src[0]);
1274 break;
1275
1276 case nir_op_fabs:
1277 result = qir_FMAXABS(c, src[0], src[0]);
1278 break;
1279 case nir_op_iabs:
1280 result = qir_MAX(c, src[0],
1281 qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
1282 break;
1283
1284 case nir_op_ibitfield_extract:
1285 result = ntq_emit_ibfe(c, src[0], src[1], src[2]);
1286 break;
1287
1288 case nir_op_ubitfield_extract:
1289 result = ntq_emit_ubfe(c, src[0], src[1], src[2]);
1290 break;
1291
1292 case nir_op_usadd_4x8:
1293 result = qir_V8ADDS(c, src[0], src[1]);
1294 break;
1295
1296 case nir_op_ussub_4x8:
1297 result = qir_V8SUBS(c, src[0], src[1]);
1298 break;
1299
1300 case nir_op_umin_4x8:
1301 result = qir_V8MIN(c, src[0], src[1]);
1302 break;
1303
1304 case nir_op_umax_4x8:
1305 result = qir_V8MAX(c, src[0], src[1]);
1306 break;
1307
1308 case nir_op_umul_unorm_4x8:
1309 result = qir_V8MULD(c, src[0], src[1]);
1310 break;
1311
1312 case nir_op_fddx:
1313 case nir_op_fddx_coarse:
1314 case nir_op_fddx_fine:
1315 result = ntq_fddx(c, src[0]);
1316 break;
1317
1318 case nir_op_fddy:
1319 case nir_op_fddy_coarse:
1320 case nir_op_fddy_fine:
1321 result = ntq_fddy(c, src[0]);
1322 break;
1323
1324 default:
1325 fprintf(stderr, "unknown NIR ALU inst: ");
1326 nir_print_instr(&instr->instr, stderr);
1327 fprintf(stderr, "\n");
1328 abort();
1329 }
1330
1331 /* We have a scalar result, so the instruction should only have a
1332 * single channel written to.
1333 */
1334 assert(util_is_power_of_two(instr->dest.write_mask));
1335 ntq_store_dest(c, &instr->dest.dest,
1336 ffs(instr->dest.write_mask) - 1, result);
1337 }
1338
1339 static void
1340 emit_frag_end(struct vc4_compile *c)
1341 {
1342 struct qreg color;
1343 if (c->output_color_index != -1) {
1344 color = c->outputs[c->output_color_index];
1345 } else {
1346 color = qir_uniform_ui(c, 0);
1347 }
1348
1349 uint32_t discard_cond = QPU_COND_ALWAYS;
1350 if (c->s->info->fs.uses_discard) {
1351 qir_SF(c, c->discard);
1352 discard_cond = QPU_COND_ZS;
1353 }
1354
1355 if (c->fs_key->stencil_enabled) {
1356 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1357 qir_uniform(c, QUNIFORM_STENCIL, 0));
1358 if (c->fs_key->stencil_twoside) {
1359 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1360 qir_uniform(c, QUNIFORM_STENCIL, 1));
1361 }
1362 if (c->fs_key->stencil_full_writemasks) {
1363 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1364 qir_uniform(c, QUNIFORM_STENCIL, 2));
1365 }
1366 }
1367
1368 if (c->output_sample_mask_index != -1) {
1369 qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1370 }
1371
1372 if (c->fs_key->depth_enabled) {
1373 if (c->output_position_index != -1) {
1374 qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
1375 qir_FMUL(c,
1376 c->outputs[c->output_position_index],
1377 qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
1378 } else {
1379 qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
1380 qir_FRAG_Z(c))->cond = discard_cond;
1381 }
1382 }
1383
1384 if (!c->msaa_per_sample_output) {
1385 qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
1386 color)->cond = discard_cond;
1387 } else {
1388 for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
1389 qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
1390 c->sample_colors[i])->cond = discard_cond;
1391 }
1392 }
1393 }
1394
1395 static void
1396 emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
1397 {
1398 struct qreg packed = qir_get_temp(c);
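/* X and Y go out as one VPM word holding two packed 16-bit values; the
 * viewport scale uniforms are assumed to already include the hardware's
 * subpixel (12.4 fixed-point) scale factor.
 */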
1399
1400 for (int i = 0; i < 2; i++) {
1401 struct qreg scale =
1402 qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
1403
1404 struct qreg packed_chan = packed;
1405 packed_chan.pack = QPU_PACK_A_16A + i;
1406
1407 qir_FTOI_dest(c, packed_chan,
1408 qir_FMUL(c,
1409 qir_FMUL(c,
1410 c->outputs[c->output_position_index + i],
1411 scale),
1412 rcp_w));
1413 }
1414
1415 qir_VPM_WRITE(c, packed);
1416 }
1417
1418 static void
1419 emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
1420 {
1421 struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
1422 struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
1423
1424 qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
1425 c->outputs[c->output_position_index + 2],
1426 zscale),
1427 rcp_w),
1428 zoffset));
1429 }
1430
1431 static void
1432 emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
1433 {
1434 qir_VPM_WRITE(c, rcp_w);
1435 }
1436
1437 static void
1438 emit_point_size_write(struct vc4_compile *c)
1439 {
1440 struct qreg point_size;
1441
1442 if (c->output_point_size_index != -1)
1443 point_size = c->outputs[c->output_point_size_index];
1444 else
1445 point_size = qir_uniform_f(c, 1.0);
1446
1447 /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
1448 * BCM21553).
1449 */
1450 point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
1451
1452 qir_VPM_WRITE(c, point_size);
1453 }
1454
1455 /**
1456 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1457 *
1458 * The simulator insists that there be at least one vertex attribute, so
1459 * vc4_draw.c will emit one if it wouldn't have otherwise. The simulator also
1460 * insists that all vertex attributes loaded get read by the VS/CS, so we have
1461 * to consume it here.
1462 */
1463 static void
1464 emit_stub_vpm_read(struct vc4_compile *c)
1465 {
1466 if (c->num_inputs)
1467 return;
1468
1469 c->vattr_sizes[0] = 4;
1470 (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
1471 c->num_inputs++;
1472 }
1473
1474 static void
1475 emit_vert_end(struct vc4_compile *c,
1476 struct vc4_varying_slot *fs_inputs,
1477 uint32_t num_fs_inputs)
1478 {
1479 struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1480
1481 emit_stub_vpm_read(c);
1482
1483 emit_scaled_viewport_write(c, rcp_w);
1484 emit_zs_write(c, rcp_w);
1485 emit_rcp_wc_write(c, rcp_w);
1486 if (c->vs_key->per_vertex_point_size)
1487 emit_point_size_write(c);
1488
1489 for (int i = 0; i < num_fs_inputs; i++) {
1490 struct vc4_varying_slot *input = &fs_inputs[i];
1491 int j;
1492
1493 for (j = 0; j < c->num_outputs; j++) {
1494 struct vc4_varying_slot *output =
1495 &c->output_slots[j];
1496
1497 if (input->slot == output->slot &&
1498 input->swizzle == output->swizzle) {
1499 qir_VPM_WRITE(c, c->outputs[j]);
1500 break;
1501 }
1502 }
1503 /* Emit padding if we didn't find a declared VS output for
1504 * this FS input.
1505 */
1506 if (j == c->num_outputs)
1507 qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
1508 }
1509 }
1510
1511 static void
1512 emit_coord_end(struct vc4_compile *c)
1513 {
1514 struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
1515
1516 emit_stub_vpm_read(c);
1517
1518 for (int i = 0; i < 4; i++)
1519 qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
1520
1521 emit_scaled_viewport_write(c, rcp_w);
1522 emit_zs_write(c, rcp_w);
1523 emit_rcp_wc_write(c, rcp_w);
1524 if (c->vs_key->per_vertex_point_size)
1525 emit_point_size_write(c);
1526 }
1527
1528 static void
1529 vc4_optimize_nir(struct nir_shader *s)
1530 {
1531 bool progress;
1532
1533 do {
1534 progress = false;
1535
1536 NIR_PASS_V(s, nir_lower_vars_to_ssa);
1537 NIR_PASS(progress, s, nir_lower_alu_to_scalar);
1538 NIR_PASS(progress, s, nir_lower_phis_to_scalar);
1539 NIR_PASS(progress, s, nir_copy_prop);
1540 NIR_PASS(progress, s, nir_opt_remove_phis);
1541 NIR_PASS(progress, s, nir_opt_dce);
1542 NIR_PASS(progress, s, nir_opt_dead_cf);
1543 NIR_PASS(progress, s, nir_opt_cse);
1544 NIR_PASS(progress, s, nir_opt_peephole_select, 8);
1545 NIR_PASS(progress, s, nir_opt_algebraic);
1546 NIR_PASS(progress, s, nir_opt_constant_folding);
1547 NIR_PASS(progress, s, nir_opt_undef);
1548 NIR_PASS(progress, s, nir_opt_loop_unroll,
1549 nir_var_shader_in |
1550 nir_var_shader_out |
1551 nir_var_local);
1552 } while (progress);
1553 }
1554
1555 static int
1556 driver_location_compare(const void *in_a, const void *in_b)
1557 {
1558 const nir_variable *const *a = in_a;
1559 const nir_variable *const *b = in_b;
1560
1561 return (*a)->data.driver_location - (*b)->data.driver_location;
1562 }
1563
1564 static void
1565 ntq_setup_inputs(struct vc4_compile *c)
1566 {
1567 unsigned num_entries = 0;
1568 nir_foreach_variable(var, &c->s->inputs)
1569 num_entries++;
1570
1571 nir_variable *vars[num_entries];
1572
1573 unsigned i = 0;
1574 nir_foreach_variable(var, &c->s->inputs)
1575 vars[i++] = var;
1576
1577 /* Sort the variables so that we emit the input setup in
1578 * driver_location order. This is required for VPM reads, whose data
1579 * is fetched into the VPM in driver_location (TGSI register index)
1580 * order.
1581 */
1582 qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1583
1584 for (unsigned i = 0; i < num_entries; i++) {
1585 nir_variable *var = vars[i];
1586 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1587 unsigned loc = var->data.driver_location;
1588
1589 assert(array_len == 1);
1590 (void)array_len;
1591 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1592 (loc + 1) * 4);
1593
1594 if (c->stage == QSTAGE_FRAG) {
1595 if (var->data.location == VARYING_SLOT_POS) {
1596 emit_fragcoord_input(c, loc);
1597 } else if (var->data.location == VARYING_SLOT_PNTC ||
1598 (var->data.location >= VARYING_SLOT_VAR0 &&
1599 (c->fs_key->point_sprite_mask &
1600 (1 << (var->data.location -
1601 VARYING_SLOT_VAR0))))) {
1602 c->inputs[loc * 4 + 0] = c->point_x;
1603 c->inputs[loc * 4 + 1] = c->point_y;
1604 } else {
1605 emit_fragment_input(c, loc, var->data.location);
1606 }
1607 } else {
1608 emit_vertex_input(c, loc);
1609 }
1610 }
1611 }
1612
1613 static void
1614 ntq_setup_outputs(struct vc4_compile *c)
1615 {
1616 nir_foreach_variable(var, &c->s->outputs) {
1617 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1618 unsigned loc = var->data.driver_location * 4;
1619
1620 assert(array_len == 1);
1621 (void)array_len;
1622
1623 for (int i = 0; i < 4; i++)
1624 add_output(c, loc + i, var->data.location, i);
1625
1626 if (c->stage == QSTAGE_FRAG) {
1627 switch (var->data.location) {
1628 case FRAG_RESULT_COLOR:
1629 case FRAG_RESULT_DATA0:
1630 c->output_color_index = loc;
1631 break;
1632 case FRAG_RESULT_DEPTH:
1633 c->output_position_index = loc;
1634 break;
1635 case FRAG_RESULT_SAMPLE_MASK:
1636 c->output_sample_mask_index = loc;
1637 break;
1638 }
1639 } else {
1640 switch (var->data.location) {
1641 case VARYING_SLOT_POS:
1642 c->output_position_index = loc;
1643 break;
1644 case VARYING_SLOT_PSIZ:
1645 c->output_point_size_index = loc;
1646 break;
1647 }
1648 }
1649 }
1650 }
1651
1652 static void
1653 ntq_setup_uniforms(struct vc4_compile *c)
1654 {
1655 nir_foreach_variable(var, &c->s->uniforms) {
1656 uint32_t vec4_count = st_glsl_type_size(var->type);
1657 unsigned vec4_size = 4 * sizeof(float);
1658
1659 declare_uniform_range(c, var->data.driver_location * vec4_size,
1660 vec4_count * vec4_size);
1661
1662 }
1663 }
1664
1665 /**
1666 * Sets up the mapping from nir_register to struct qreg *.
1667 *
1668 * Each nir_register gets a struct qreg per 32-bit component being stored.
1669 */
1670 static void
1671 ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
1672 {
1673 foreach_list_typed(nir_register, nir_reg, node, list) {
1674 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1675 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1676 array_len *
1677 nir_reg->num_components);
1678
1679 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1680
1681 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1682 qregs[i] = qir_get_temp(c);
1683 }
1684 }
1685
1686 static void
1687 ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
1688 {
1689 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1690 for (int i = 0; i < instr->def.num_components; i++)
1691 qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);
1692
1693 _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1694 }
1695
1696 static void
1697 ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
1698 {
1699 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1700
1701 /* QIR needs there to be *some* value, so pick 0 (same as for
1702 * ntq_setup_registers()).
1703 */
1704 for (int i = 0; i < instr->def.num_components; i++)
1705 qregs[i] = qir_uniform_ui(c, 0);
1706 }
1707
1708 static void
1709 ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
1710 {
1711 nir_const_value *const_offset;
1712 unsigned offset;
1713
1714 switch (instr->intrinsic) {
1715 case nir_intrinsic_load_uniform:
1716 assert(instr->num_components == 1);
1717 const_offset = nir_src_as_const_value(instr->src[0]);
1718 if (const_offset) {
1719 offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1720 assert(offset % 4 == 0);
1721 /* We need dwords */
1722 offset = offset / 4;
1723 ntq_store_dest(c, &instr->dest, 0,
1724 qir_uniform(c, QUNIFORM_UNIFORM,
1725 offset));
1726 } else {
1727 ntq_store_dest(c, &instr->dest, 0,
1728 indirect_uniform_load(c, instr));
1729 }
1730 break;
1731
1732 case nir_intrinsic_load_user_clip_plane:
1733 for (int i = 0; i < instr->num_components; i++) {
1734 ntq_store_dest(c, &instr->dest, i,
1735 qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1736 nir_intrinsic_ucp_id(instr) *
1737 4 + i));
1738 }
1739 break;
1740
1741 case nir_intrinsic_load_blend_const_color_r_float:
1742 case nir_intrinsic_load_blend_const_color_g_float:
1743 case nir_intrinsic_load_blend_const_color_b_float:
1744 case nir_intrinsic_load_blend_const_color_a_float:
1745 ntq_store_dest(c, &instr->dest, 0,
1746 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X +
1747 (instr->intrinsic -
1748 nir_intrinsic_load_blend_const_color_r_float),
1749 0));
1750 break;
1751
1752 case nir_intrinsic_load_blend_const_color_rgba8888_unorm:
1753 ntq_store_dest(c, &instr->dest, 0,
1754 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA,
1755 0));
1756 break;
1757
1758 case nir_intrinsic_load_blend_const_color_aaaa8888_unorm:
1759 ntq_store_dest(c, &instr->dest, 0,
1760 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA,
1761 0));
1762 break;
1763
1764 case nir_intrinsic_load_alpha_ref_float:
1765 ntq_store_dest(c, &instr->dest, 0,
1766 qir_uniform(c, QUNIFORM_ALPHA_REF, 0));
1767 break;
1768
1769 case nir_intrinsic_load_sample_mask_in:
1770 ntq_store_dest(c, &instr->dest, 0,
1771 qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
1772 break;
1773
1774 case nir_intrinsic_load_front_face:
1775 /* The register contains 0 (front) or 1 (back), and we need to
1776 * turn it into a NIR bool where true means front.
1777 */
1778 ntq_store_dest(c, &instr->dest, 0,
1779 qir_ADD(c,
1780 qir_uniform_ui(c, -1),
1781 qir_reg(QFILE_FRAG_REV_FLAG, 0)));
1782 break;
1783
1784 case nir_intrinsic_load_input:
1785 assert(instr->num_components == 1);
1786 const_offset = nir_src_as_const_value(instr->src[0]);
1787 assert(const_offset && "vc4 doesn't support indirect inputs");
1788 if (c->stage == QSTAGE_FRAG &&
1789 nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
1790 assert(const_offset->u32[0] == 0);
1791 /* Reads of the per-sample color need to be done in
1792 * order.
1793 */
1794 int sample_index = (nir_intrinsic_base(instr) -
1795 VC4_NIR_TLB_COLOR_READ_INPUT);
1796 for (int i = 0; i <= sample_index; i++) {
1797 if (c->color_reads[i].file == QFILE_NULL) {
1798 c->color_reads[i] =
1799 qir_TLB_COLOR_READ(c);
1800 }
1801 }
1802 ntq_store_dest(c, &instr->dest, 0,
1803 qir_MOV(c, c->color_reads[sample_index]));
1804 } else {
1805 offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1806 int comp = nir_intrinsic_component(instr);
1807 ntq_store_dest(c, &instr->dest, 0,
1808 qir_MOV(c, c->inputs[offset * 4 + comp]));
1809 }
1810 break;
1811
1812 case nir_intrinsic_store_output:
1813 const_offset = nir_src_as_const_value(instr->src[1]);
1814 assert(const_offset && "vc4 doesn't support indirect outputs");
1815 offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1816
1817 /* MSAA color outputs are the only case where we have an
1818 * output that's not lowered to being a store of a single 32
1819 * bit value.
1820 */
1821 if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
1822 assert(offset == c->output_color_index);
1823 for (int i = 0; i < 4; i++) {
1824 c->sample_colors[i] =
1825 qir_MOV(c, ntq_get_src(c, instr->src[0],
1826 i));
1827 }
1828 } else {
1829 offset = offset * 4 + nir_intrinsic_component(instr);
1830 assert(instr->num_components == 1);
1831 c->outputs[offset] =
1832 qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
1833 c->num_outputs = MAX2(c->num_outputs, offset + 1);
1834 }
1835 break;
1836
1837 case nir_intrinsic_discard:
1838 if (c->execute.file != QFILE_NULL) {
1839 qir_SF(c, c->execute);
1840 qir_MOV_cond(c, QPU_COND_ZS, c->discard,
1841 qir_uniform_ui(c, ~0));
1842 } else {
1843 qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0));
1844 }
1845 break;
1846
1847 case nir_intrinsic_discard_if: {
1848 /* true (~0) if we're discarding */
1849 struct qreg cond = ntq_get_src(c, instr->src[0], 0);
1850
1851 if (c->execute.file != QFILE_NULL) {
1852 /* execute == 0 means the channel is active. Invert
1853 * the condition so that we can use zero as "executing
1854 * and discarding."
1855 */
1856 qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond)));
1857 qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond);
1858 } else {
1859 qir_OR_dest(c, c->discard, c->discard,
1860 ntq_get_src(c, instr->src[0], 0));
1861 }
1862
1863 break;
1864 }
1865
1866 default:
1867 fprintf(stderr, "Unknown intrinsic: ");
1868 nir_print_instr(&instr->instr, stderr);
1869 fprintf(stderr, "\n");
1870 break;
1871 }
1872 }
1873
1874 /* Clears (activates) the execute flags for any channels whose jump target
1875 * matches this block.
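 * (c->execute holds 0 for channels that are currently running, and
 * otherwise the index of the block that channel is waiting to resume at, so
 * zeroing it re-activates the channel.)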
1876 */
1877 static void
1878 ntq_activate_execute_for_block(struct vc4_compile *c)
1879 {
1880 qir_SF(c, qir_SUB(c,
1881 c->execute,
1882 qir_uniform_ui(c, c->cur_block->index)));
1883 qir_MOV_cond(c, QPU_COND_ZS, c->execute, qir_uniform_ui(c, 0));
1884 }
1885
1886 static void
1887 ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
1888 {
1889 if (!c->vc4->screen->has_control_flow) {
1890 fprintf(stderr,
1891 "IF statement support requires updated kernel.\n");
1892 return;
1893 }
1894
1895 nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
1896 bool empty_else_block =
1897 (nir_else_block == nir_if_last_else_block(if_stmt) &&
1898 exec_list_is_empty(&nir_else_block->instr_list));
1899
1900 struct qblock *then_block = qir_new_block(c);
1901 struct qblock *after_block = qir_new_block(c);
1902 struct qblock *else_block;
1903 if (empty_else_block)
1904 else_block = after_block;
1905 else
1906 else_block = qir_new_block(c);
1907
1908 bool was_top_level = false;
1909 if (c->execute.file == QFILE_NULL) {
1910 c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
1911 was_top_level = true;
1912 }
1913
1914 /* Set ZS for executing (execute == 0) and jumping (if->condition ==
1915 * 0) channels, and then update execute flags for those to point to
1916 * the ELSE block.
1917 */
1918 qir_SF(c, qir_OR(c,
1919 c->execute,
1920 ntq_get_src(c, if_stmt->condition, 0)));
1921 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1922 qir_uniform_ui(c, else_block->index));
1923
1924 /* Jump to ELSE if nothing is active for THEN, otherwise fall
1925 * through.
1926 */
1927 qir_SF(c, c->execute);
1928 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZC);
1929 qir_link_blocks(c->cur_block, else_block);
1930 qir_link_blocks(c->cur_block, then_block);
1931
1932 /* Process the THEN block. */
1933 qir_set_emit_block(c, then_block);
1934 ntq_emit_cf_list(c, &if_stmt->then_list);
1935
1936 if (!empty_else_block) {
1937 /* Handle the end of the THEN block. First, all currently
1938 * active channels update their execute flags to point to
1939 * ENDIF
1940 */
1941 qir_SF(c, c->execute);
1942 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1943 qir_uniform_ui(c, after_block->index));
1944
1945 /* If everything points at ENDIF, then jump there immediately. */
1946 qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, after_block->index)));
1947 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
1948 qir_link_blocks(c->cur_block, after_block);
1949 qir_link_blocks(c->cur_block, else_block);
1950
1951 qir_set_emit_block(c, else_block);
1952 ntq_activate_execute_for_block(c);
1953 ntq_emit_cf_list(c, &if_stmt->else_list);
1954 }
1955
1956 qir_link_blocks(c->cur_block, after_block);
1957
1958 qir_set_emit_block(c, after_block);
1959 if (was_top_level)
1960 c->execute = c->undef;
1961 else
1962 ntq_activate_execute_for_block(c);
1963
1964 }
1965
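/* Emits a NIR break/continue by pointing the active channels' execute values
 * at the loop's break or continue block.  If every channel has now jumped,
 * branch straight to the target; otherwise fall through into a fresh block
 * for the channels that are still running.
 */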
1966 static void
1967 ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
1968 {
1969 struct qblock *jump_block;
1970 switch (jump->type) {
1971 case nir_jump_break:
1972 jump_block = c->loop_break_block;
1973 break;
1974 case nir_jump_continue:
1975 jump_block = c->loop_cont_block;
1976 break;
1977 default:
1978 unreachable("Unsupported jump type\n");
1979 }
1980
1981 qir_SF(c, c->execute);
1982 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1983 qir_uniform_ui(c, jump_block->index));
1984
1985 /* Jump to the destination block if everyone has taken the jump. */
1986 qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index)));
1987 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
1988 struct qblock *new_block = qir_new_block(c);
1989 qir_link_blocks(c->cur_block, jump_block);
1990 qir_link_blocks(c->cur_block, new_block);
1991 qir_set_emit_block(c, new_block);
1992 }
1993
1994 static void
1995 ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
1996 {
1997 switch (instr->type) {
1998 case nir_instr_type_alu:
1999 ntq_emit_alu(c, nir_instr_as_alu(instr));
2000 break;
2001
2002 case nir_instr_type_intrinsic:
2003 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
2004 break;
2005
2006 case nir_instr_type_load_const:
2007 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
2008 break;
2009
2010 case nir_instr_type_ssa_undef:
2011 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
2012 break;
2013
2014 case nir_instr_type_tex:
2015 ntq_emit_tex(c, nir_instr_as_tex(instr));
2016 break;
2017
2018 case nir_instr_type_jump:
2019 ntq_emit_jump(c, nir_instr_as_jump(instr));
2020 break;
2021
2022 default:
2023 fprintf(stderr, "Unknown NIR instr type: ");
2024 nir_print_instr(instr, stderr);
2025 fprintf(stderr, "\n");
2026 abort();
2027 }
2028 }
2029
2030 static void
2031 ntq_emit_block(struct vc4_compile *c, nir_block *block)
2032 {
2033 nir_foreach_instr(instr, block) {
2034 ntq_emit_instr(c, instr);
2035 }
2036 }
2037
2038 static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
2039
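/* Emits a NIR loop.  The body is emitted into the continue block, and
 * ntq_activate_execute_for_block() re-activates any channel whose execute
 * value points at it.  At the bottom of the body we branch back up if any
 * channel is still active or explicitly continued; channels that took a
 * break keep pointing at the break block until the loop is done.
 */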
2040 static void
2041 ntq_emit_loop(struct vc4_compile *c, nir_loop *loop)
2042 {
2043 if (!c->vc4->screen->has_control_flow) {
2044 fprintf(stderr,
2045 "loop support requires updated kernel.\n");
2046 ntq_emit_cf_list(c, &loop->body);
2047 return;
2048 }
2049
2050 bool was_top_level = false;
2051 if (c->execute.file == QFILE_NULL) {
2052 c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
2053 was_top_level = true;
2054 }
2055
2056 struct qblock *save_loop_cont_block = c->loop_cont_block;
2057 struct qblock *save_loop_break_block = c->loop_break_block;
2058
2059 c->loop_cont_block = qir_new_block(c);
2060 c->loop_break_block = qir_new_block(c);
2061
2062 qir_link_blocks(c->cur_block, c->loop_cont_block);
2063 qir_set_emit_block(c, c->loop_cont_block);
2064 ntq_activate_execute_for_block(c);
2065
2066 ntq_emit_cf_list(c, &loop->body);
2067
2068 /* If anything had explicitly continued, or is here at the end of the
2069 * loop, then we need to loop again. SF updates are masked by the
2070 * instruction's condition, so we can do the OR of the two conditions
2071 * within SF.
2072 */
2073 qir_SF(c, c->execute);
2074 struct qinst *cont_check =
2075 qir_SUB_dest(c,
2076 c->undef,
2077 c->execute,
2078 qir_uniform_ui(c, c->loop_cont_block->index));
2079 cont_check->cond = QPU_COND_ZC;
2080 cont_check->sf = true;
2081
2082 qir_BRANCH(c, QPU_COND_BRANCH_ANY_ZS);
2083 qir_link_blocks(c->cur_block, c->loop_cont_block);
2084 qir_link_blocks(c->cur_block, c->loop_break_block);
2085
2086 qir_set_emit_block(c, c->loop_break_block);
2087 if (was_top_level)
2088 c->execute = c->undef;
2089 else
2090 ntq_activate_execute_for_block(c);
2091
2092 c->loop_break_block = save_loop_break_block;
2093 c->loop_cont_block = save_loop_cont_block;
2094 }
2095
2096 static void
2097 ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
2098 {
2099 fprintf(stderr, "FUNCTIONS not handled.\n");
2100 abort();
2101 }
2102
2103 static void
2104 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
2105 {
2106 foreach_list_typed(nir_cf_node, node, node, list) {
2107 switch (node->type) {
2108 case nir_cf_node_block:
2109 ntq_emit_block(c, nir_cf_node_as_block(node));
2110 break;
2111
2112 case nir_cf_node_if:
2113 ntq_emit_if(c, nir_cf_node_as_if(node));
2114 break;
2115
2116 case nir_cf_node_loop:
2117 ntq_emit_loop(c, nir_cf_node_as_loop(node));
2118 break;
2119
2120 case nir_cf_node_function:
2121 ntq_emit_function(c, nir_cf_node_as_function(node));
2122 break;
2123
2124 default:
2125 fprintf(stderr, "Unknown NIR node type\n");
2126 abort();
2127 }
2128 }
2129 }
2130
2131 static void
2132 ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
2133 {
2134 ntq_setup_registers(c, &impl->registers);
2135 ntq_emit_cf_list(c, &impl->body);
2136 }
2137
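/* Top-level NIR-to-QIR translation: sets up the input, output, uniform, and
 * register maps, then emits the body of the shader's single "main" function.
 */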
2138 static void
2139 nir_to_qir(struct vc4_compile *c)
2140 {
2141 if (c->stage == QSTAGE_FRAG && c->s->info->fs.uses_discard)
2142 c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
2143
2144 ntq_setup_inputs(c);
2145 ntq_setup_outputs(c);
2146 ntq_setup_uniforms(c);
2147 ntq_setup_registers(c, &c->s->registers);
2148
2149 /* Find the main function and emit the body. */
2150 nir_foreach_function(function, c->s) {
2151 assert(strcmp(function->name, "main") == 0);
2152 assert(function->impl);
2153 ntq_emit_impl(c, function->impl);
2154 }
2155 }
2156
2157 static const nir_shader_compiler_options nir_options = {
2158 .lower_extract_byte = true,
2159 .lower_extract_word = true,
2160 .lower_ffma = true,
2161 .lower_flrp32 = true,
2162 .lower_fpow = true,
2163 .lower_fsat = true,
2164 .lower_fsqrt = true,
2165 .lower_negate = true,
2166 .native_integers = true,
2167 .max_unroll_iterations = 32,
2168 };
2169
2170 const void *
2171 vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
2172 enum pipe_shader_ir ir, unsigned shader)
2173 {
2174 return &nir_options;
2175 }
2176
2177 static int
2178 count_nir_instrs(nir_shader *nir)
2179 {
2180 int count = 0;
2181 nir_foreach_function(function, nir) {
2182 if (!function->impl)
2183 continue;
2184 nir_foreach_block(block, function->impl) {
2185 nir_foreach_instr(instr, block)
2186 count++;
2187 }
2188 }
2189 return count;
2190 }
2191
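/* Compiles a single shader variant: clones the bound NIR shader, applies the
 * key-dependent vc4 and generic NIR lowering passes, translates to QIR
 * (nir_to_qir), optimizes and schedules the QIR, and generates QPU code.
 * The returned vc4_compile holds the instructions and uniform layout until
 * the caller copies them into a vc4_compiled_shader.
 */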
2192 static struct vc4_compile *
2193 vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
2194 struct vc4_key *key, bool fs_threaded)
2195 {
2196 struct vc4_compile *c = qir_compile_init();
2197
2198 c->vc4 = vc4;
2199 c->stage = stage;
2200 c->shader_state = &key->shader_state->base;
2201 c->program_id = key->shader_state->program_id;
2202 c->variant_id =
2203 p_atomic_inc_return(&key->shader_state->compiled_variant_count);
2204 c->fs_threaded = fs_threaded;
2205
2206 c->key = key;
2207 switch (stage) {
2208 case QSTAGE_FRAG:
2209 c->fs_key = (struct vc4_fs_key *)key;
2210 if (c->fs_key->is_points) {
2211 c->point_x = emit_fragment_varying(c, ~0, 0);
2212 c->point_y = emit_fragment_varying(c, ~0, 0);
2213 } else if (c->fs_key->is_lines) {
2214 c->line_x = emit_fragment_varying(c, ~0, 0);
2215 }
2216 break;
2217 case QSTAGE_VERT:
2218 c->vs_key = (struct vc4_vs_key *)key;
2219 break;
2220 case QSTAGE_COORD:
2221 c->vs_key = (struct vc4_vs_key *)key;
2222 break;
2223 }
2224
2225 c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
2226
2227 if (stage == QSTAGE_FRAG)
2228 NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
2229
2230 struct nir_lower_tex_options tex_options = {
2231 /* Lowering RECT would require us to implement txs, and we don't
2232 * want the int/float conversions it involves.
2233 */
2234 .lower_rect = false,
2235
2236 .lower_txp = ~0,
2237
2238 /* Apply swizzles to all samplers. */
2239 .swizzle_result = ~0,
2240 };
2241
2242 /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
2243 * The format swizzling applies before sRGB decode, and
2244 * ARB_texture_swizzle is the last thing before returning the sample.
2245 */
2246 for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
2247 enum pipe_format format = c->key->tex[i].format;
2248
2249 if (!format)
2250 continue;
2251
2252 const uint8_t *format_swizzle = vc4_get_format_swizzle(format);
2253
2254 for (int j = 0; j < 4; j++) {
2255 uint8_t arb_swiz = c->key->tex[i].swizzle[j];
2256
2257 if (arb_swiz <= 3) {
2258 tex_options.swizzles[i][j] =
2259 format_swizzle[arb_swiz];
2260 } else {
2261 tex_options.swizzles[i][j] = arb_swiz;
2262 }
2263 }
2264
2265 if (util_format_is_srgb(format))
2266 tex_options.lower_srgb |= (1 << i);
2267 }
2268
2269 NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
2270
2271 if (c->fs_key && c->fs_key->light_twoside)
2272 NIR_PASS_V(c->s, nir_lower_two_sided_color);
2273
2274 if (c->vs_key && c->vs_key->clamp_color)
2275 NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
2276
2277 if (c->key->ucp_enables) {
2278 if (stage == QSTAGE_FRAG) {
2279 NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
2280 } else {
2281 NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);
2282 NIR_PASS_V(c->s, nir_lower_io_to_scalar,
2283 nir_var_shader_out);
2284 }
2285 }
2286
2287 /* FS input scalarizing must happen after nir_lower_two_sided_color,
2288 * which only handles a vec4 at a time. Similarly, VS output
2289 * scalarizing must happen after nir_lower_clip_vs.
2290 */
2291 if (c->stage == QSTAGE_FRAG)
2292 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
2293 else
2294 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
2295
2296 NIR_PASS_V(c->s, vc4_nir_lower_io, c);
2297 NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
2298 NIR_PASS_V(c->s, nir_lower_idiv);
2299
2300 vc4_optimize_nir(c->s);
2301
2302 NIR_PASS_V(c->s, nir_convert_from_ssa, true);
2303
2304 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2305 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
2306 qir_get_stage_name(c->stage),
2307 c->program_id, c->variant_id,
2308 count_nir_instrs(c->s));
2309 }
2310
2311 if (vc4_debug & VC4_DEBUG_NIR) {
2312 fprintf(stderr, "%s prog %d/%d NIR:\n",
2313 qir_get_stage_name(c->stage),
2314 c->program_id, c->variant_id);
2315 nir_print_shader(c->s, stderr);
2316 }
2317
2318 nir_to_qir(c);
2319
2320 switch (stage) {
2321 case QSTAGE_FRAG:
2322 /* FS threading requires that the thread execute
2323 * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating
2324 * (with no other THRSW afterwards, obviously). If we didn't
2325 * fetch a texture at a top level block, this wouldn't be
2326 * true.
2327 */
2328 if (c->fs_threaded && !c->last_thrsw_at_top_level) {
2329 c->failed = true;
2330 return c;
2331 }
2332
2333 emit_frag_end(c);
2334 break;
2335 case QSTAGE_VERT:
2336 emit_vert_end(c,
2337 c->vs_key->fs_inputs->input_slots,
2338 c->vs_key->fs_inputs->num_inputs);
2339 break;
2340 case QSTAGE_COORD:
2341 emit_coord_end(c);
2342 break;
2343 }
2344
2345 if (vc4_debug & VC4_DEBUG_QIR) {
2346 fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
2347 qir_get_stage_name(c->stage),
2348 c->program_id, c->variant_id);
2349 qir_dump(c);
2350 fprintf(stderr, "\n");
2351 }
2352
2353 qir_optimize(c);
2354 qir_lower_uniforms(c);
2355
2356 qir_schedule_instructions(c);
2357 qir_emit_uniform_stream_resets(c);
2358
2359 if (vc4_debug & VC4_DEBUG_QIR) {
2360 fprintf(stderr, "%s prog %d/%d QIR:\n",
2361 qir_get_stage_name(c->stage),
2362 c->program_id, c->variant_id);
2363 qir_dump(c);
2364 fprintf(stderr, "\n");
2365 }
2366
2367 qir_reorder_uniforms(c);
2368 vc4_generate_code(vc4, c);
2369
2370 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2371 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
2372 qir_get_stage_name(c->stage),
2373 c->program_id, c->variant_id,
2374 c->qpu_inst_count);
2375 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
2376 qir_get_stage_name(c->stage),
2377 c->program_id, c->variant_id,
2378 c->num_uniforms);
2379 }
2380
2381 ralloc_free(c->s);
2382
2383 return c;
2384 }
2385
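/* pipe_context create_fs_state/create_vs_state hook.  Takes ownership of the
 * NIR shader (or builds one from TGSI), then runs the key-independent
 * lowering and optimization passes once so per-variant compiles start from
 * cleaned-up NIR.
 */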
2386 static void *
2387 vc4_shader_state_create(struct pipe_context *pctx,
2388 const struct pipe_shader_state *cso)
2389 {
2390 struct vc4_context *vc4 = vc4_context(pctx);
2391 struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
2392 if (!so)
2393 return NULL;
2394
2395 so->program_id = vc4->next_uncompiled_program_id++;
2396
2397 nir_shader *s;
2398
2399 if (cso->type == PIPE_SHADER_IR_NIR) {
2400 /* The backend takes ownership of the NIR shader on state
2401 * creation.
2402 */
2403 s = cso->ir.nir;
2404 } else {
2405 assert(cso->type == PIPE_SHADER_IR_TGSI);
2406
2407 if (vc4_debug & VC4_DEBUG_TGSI) {
2408 fprintf(stderr, "prog %d TGSI:\n",
2409 so->program_id);
2410 tgsi_dump(cso->tokens, 0);
2411 fprintf(stderr, "\n");
2412 }
2413 s = tgsi_to_nir(cso->tokens, &nir_options);
2414 }
2415
2416 NIR_PASS_V(s, nir_opt_global_to_local);
2417 NIR_PASS_V(s, nir_lower_regs_to_ssa);
2418 NIR_PASS_V(s, nir_normalize_cubemap_coords);
2419
2420 NIR_PASS_V(s, nir_lower_load_const_to_scalar);
2421
2422 vc4_optimize_nir(s);
2423
2424 NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local);
2425
2426 /* Garbage collect dead instructions */
2427 nir_sweep(s);
2428
2429 so->base.type = PIPE_SHADER_IR_NIR;
2430 so->base.ir.nir = s;
2431
2432 if (vc4_debug & VC4_DEBUG_NIR) {
2433 fprintf(stderr, "%s prog %d NIR:\n",
2434 gl_shader_stage_name(s->stage),
2435 so->program_id);
2436 nir_print_shader(s, stderr);
2437 fprintf(stderr, "\n");
2438 }
2439
2440 return so;
2441 }
2442
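/* Copies the uniform stream layout (contents and data words) produced by the
 * compile into the long-lived compiled shader, so the per-draw uniform
 * stream can be built after the vc4_compile has been destroyed.
 */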
2443 static void
2444 copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
2445 struct vc4_compile *c)
2446 {
2447 int count = c->num_uniforms;
2448 struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
2449
2450 uinfo->count = count;
2451 uinfo->data = ralloc_array(shader, uint32_t, count);
2452 memcpy(uinfo->data, c->uniform_data,
2453 count * sizeof(*uinfo->data));
2454 uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
2455 memcpy(uinfo->contents, c->uniform_contents,
2456 count * sizeof(*uinfo->contents));
2457 uinfo->num_texture_samples = c->num_texture_samples;
2458
2459 vc4_set_shader_uniform_dirty_flags(shader);
2460 }
2461
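/* Records which VS-provided input slots the compiled FS actually reads and
 * deduplicates the resulting list through vc4->fs_inputs_set, so identical
 * input layouts share one vc4_fs_inputs pointer that the VS key can compare
 * cheaply.
 */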
2462 static void
2463 vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c,
2464 struct vc4_compiled_shader *shader)
2465 {
2466 struct vc4_fs_inputs inputs;
2467
2468 memset(&inputs, 0, sizeof(inputs));
2469 inputs.input_slots = ralloc_array(shader,
2470 struct vc4_varying_slot,
2471 c->num_input_slots);
2472
2473 bool input_live[c->num_input_slots];
2474
2475 memset(input_live, 0, sizeof(input_live));
2476 qir_for_each_inst_inorder(inst, c) {
2477 for (int i = 0; i < qir_get_nsrc(inst); i++) {
2478 if (inst->src[i].file == QFILE_VARY)
2479 input_live[inst->src[i].index] = true;
2480 }
2481 }
2482
2483 for (int i = 0; i < c->num_input_slots; i++) {
2484 struct vc4_varying_slot *slot = &c->input_slots[i];
2485
2486 if (!input_live[i])
2487 continue;
2488
2489 /* Skip non-VS-output inputs. */
2490 if (slot->slot == (uint8_t)~0)
2491 continue;
2492
2493 if (slot->slot == VARYING_SLOT_COL0 ||
2494 slot->slot == VARYING_SLOT_COL1 ||
2495 slot->slot == VARYING_SLOT_BFC0 ||
2496 slot->slot == VARYING_SLOT_BFC1) {
2497 shader->color_inputs |= (1 << inputs.num_inputs);
2498 }
2499
2500 inputs.input_slots[inputs.num_inputs] = *slot;
2501 inputs.num_inputs++;
2502 }
2503 shader->num_inputs = inputs.num_inputs;
2504
2505 /* Add our set of inputs to the set of all inputs seen. This way, we
2506 * can have a single pointer that identifies an FS inputs set,
2507 * allowing VS to avoid recompiling when the FS is recompiled (or a
2508 * new one is bound using separate shader objects) but the inputs
2509 * don't change.
2510 */
2511 struct set_entry *entry = _mesa_set_search(vc4->fs_inputs_set, &inputs);
2512 if (entry) {
2513 shader->fs_inputs = entry->key;
2514 ralloc_free(inputs.input_slots);
2515 } else {
2516 struct vc4_fs_inputs *alloc_inputs;
2517
2518 alloc_inputs = rzalloc(vc4->fs_inputs_set, struct vc4_fs_inputs);
2519 memcpy(alloc_inputs, &inputs, sizeof(inputs));
2520 ralloc_steal(alloc_inputs, inputs.input_slots);
2521 _mesa_set_add(vc4->fs_inputs_set, alloc_inputs);
2522
2523 shader->fs_inputs = alloc_inputs;
2524 }
2525 }
2526
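/* Returns the cached compiled variant for this key, compiling it on a cache
 * miss.  Fragment shaders are first compiled in two-thread mode when the
 * kernel supports it, with a single-threaded recompile as the fallback.
 */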
2527 static struct vc4_compiled_shader *
2528 vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
2529 struct vc4_key *key)
2530 {
2531 struct hash_table *ht;
2532 uint32_t key_size;
2533 bool try_threading;
2534
2535 if (stage == QSTAGE_FRAG) {
2536 ht = vc4->fs_cache;
2537 key_size = sizeof(struct vc4_fs_key);
2538 try_threading = vc4->screen->has_threaded_fs;
2539 } else {
2540 ht = vc4->vs_cache;
2541 key_size = sizeof(struct vc4_vs_key);
2542 try_threading = false;
2543 }
2544
2545 struct vc4_compiled_shader *shader;
2546 struct hash_entry *entry = _mesa_hash_table_search(ht, key);
2547 if (entry)
2548 return entry->data;
2549
2550 struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading);
2551 /* If the FS failed to compile threaded, fall back to single threaded. */
2552 if (try_threading && c->failed) {
2553 qir_compile_destroy(c);
2554 c = vc4_shader_ntq(vc4, stage, key, false);
2555 }
2556
2557 shader = rzalloc(NULL, struct vc4_compiled_shader);
2558
2559 shader->program_id = vc4->next_compiled_program_id++;
2560 if (stage == QSTAGE_FRAG) {
2561 vc4_setup_compiled_fs_inputs(vc4, c, shader);
2562
2563 /* Note: the temporary clone in c->s has been freed. */
2564 nir_shader *orig_shader = key->shader_state->base.ir.nir;
2565 if (orig_shader->info->outputs_written & (1 << FRAG_RESULT_DEPTH))
2566 shader->disable_early_z = true;
2567 } else {
2568 shader->num_inputs = c->num_inputs;
2569
2570 shader->vattr_offsets[0] = 0;
2571 for (int i = 0; i < 8; i++) {
2572 shader->vattr_offsets[i + 1] =
2573 shader->vattr_offsets[i] + c->vattr_sizes[i];
2574
2575 if (c->vattr_sizes[i])
2576 shader->vattrs_live |= (1 << i);
2577 }
2578 }
2579
2580 shader->failed = c->failed;
2581 if (c->failed) {
2582 shader->failed = true;
2583 } else {
2584 copy_uniform_state_to_shader(shader, c);
2585 shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
2586 c->qpu_inst_count *
2587 sizeof(uint64_t));
2588 }
2589
2590 shader->fs_threaded = c->fs_threaded;
2591
2592 /* Copy the compiler UBO range state to the compiled shader, dropping
2593 * out arrays that were never referenced by an indirect load.
2594 *
2595 * (Note that QIR dead code elimination of an array access still
2596 * leaves that array alive, though)
2597 */
2598 if (c->num_ubo_ranges) {
2599 shader->num_ubo_ranges = c->num_ubo_ranges;
2600 shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
2601 c->num_ubo_ranges);
2602 uint32_t j = 0;
2603 for (int i = 0; i < c->num_uniform_ranges; i++) {
2604 struct vc4_compiler_ubo_range *range =
2605 &c->ubo_ranges[i];
2606 if (!range->used)
2607 continue;
2608
2609 shader->ubo_ranges[j].dst_offset = range->dst_offset;
2610 shader->ubo_ranges[j].src_offset = range->src_offset;
2611 shader->ubo_ranges[j].size = range->size;
2612 shader->ubo_size += c->ubo_ranges[i].size;
2613 j++;
2614 }
2615 }
2616 if (shader->ubo_size) {
2617 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2618 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
2619 qir_get_stage_name(c->stage),
2620 c->program_id, c->variant_id,
2621 shader->ubo_size / 4);
2622 }
2623 }
2624
2625 qir_compile_destroy(c);
2626
2627 struct vc4_key *dup_key;
2628 dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */
2629 memcpy(dup_key, key, key_size);
2630 _mesa_hash_table_insert(ht, dup_key, shader);
2631
2632 return shader;
2633 }
2634
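/* Fills in the key state shared between FS and VS keys: per-sampler texture
 * format and swizzle, MSAA size or wrap/compare state, and the enabled user
 * clip planes.
 */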
2635 static void
2636 vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
2637 struct vc4_texture_stateobj *texstate)
2638 {
2639 for (int i = 0; i < texstate->num_textures; i++) {
2640 struct pipe_sampler_view *sampler = texstate->textures[i];
2641 struct vc4_sampler_view *vc4_sampler = vc4_sampler_view(sampler);
2642 struct pipe_sampler_state *sampler_state =
2643 texstate->samplers[i];
2644
2645 if (!sampler)
2646 continue;
2647
2648 key->tex[i].format = sampler->format;
2649 key->tex[i].swizzle[0] = sampler->swizzle_r;
2650 key->tex[i].swizzle[1] = sampler->swizzle_g;
2651 key->tex[i].swizzle[2] = sampler->swizzle_b;
2652 key->tex[i].swizzle[3] = sampler->swizzle_a;
2653
2654 if (sampler->texture->nr_samples > 1) {
2655 key->tex[i].msaa_width = sampler->texture->width0;
2656 key->tex[i].msaa_height = sampler->texture->height0;
2657 } else if (sampler) {
2658 key->tex[i].compare_mode = sampler_state->compare_mode;
2659 key->tex[i].compare_func = sampler_state->compare_func;
2660 key->tex[i].wrap_s = sampler_state->wrap_s;
2661 key->tex[i].wrap_t = sampler_state->wrap_t;
2662 key->tex[i].force_first_level =
2663 vc4_sampler->force_first_level;
2664 }
2665 }
2666
2667 key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
2668 }
2669
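/* Rebuilds the FS key from the current state when any relevant state is
 * dirty, fetches the matching compiled variant, and flags the derived state
 * (flat-shade flags, FS inputs) that must be re-emitted if the variant
 * changed.
 */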
2670 static void
2671 vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
2672 {
2673 struct vc4_job *job = vc4->job;
2674 struct vc4_fs_key local_key;
2675 struct vc4_fs_key *key = &local_key;
2676
2677 if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2678 VC4_DIRTY_BLEND |
2679 VC4_DIRTY_FRAMEBUFFER |
2680 VC4_DIRTY_ZSA |
2681 VC4_DIRTY_RASTERIZER |
2682 VC4_DIRTY_SAMPLE_MASK |
2683 VC4_DIRTY_FRAGTEX |
2684 VC4_DIRTY_UNCOMPILED_FS))) {
2685 return;
2686 }
2687
2688 memset(key, 0, sizeof(*key));
2689 vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
2690 key->base.shader_state = vc4->prog.bind_fs;
2691 key->is_points = (prim_mode == PIPE_PRIM_POINTS);
2692 key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
2693 prim_mode <= PIPE_PRIM_LINE_STRIP);
2694 key->blend = vc4->blend->rt[0];
2695 if (vc4->blend->logicop_enable) {
2696 key->logicop_func = vc4->blend->logicop_func;
2697 } else {
2698 key->logicop_func = PIPE_LOGICOP_COPY;
2699 }
2700 if (job->msaa) {
2701 key->msaa = vc4->rasterizer->base.multisample;
2702 key->sample_coverage = (vc4->rasterizer->base.multisample &&
2703 vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
2704 key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
2705 key->sample_alpha_to_one = vc4->blend->alpha_to_one;
2706 }
2707
2708 if (vc4->framebuffer.cbufs[0])
2709 key->color_format = vc4->framebuffer.cbufs[0]->format;
2710
2711 key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
2712 key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
2713 key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
2714 key->depth_enabled = (vc4->zsa->base.depth.enabled ||
2715 key->stencil_enabled);
2716 if (vc4->zsa->base.alpha.enabled) {
2717 key->alpha_test = true;
2718 key->alpha_test_func = vc4->zsa->base.alpha.func;
2719 }
2720
2721 if (key->is_points) {
2722 key->point_sprite_mask =
2723 vc4->rasterizer->base.sprite_coord_enable;
2724 key->point_coord_upper_left =
2725 (vc4->rasterizer->base.sprite_coord_mode ==
2726 PIPE_SPRITE_COORD_UPPER_LEFT);
2727 }
2728
2729 key->light_twoside = vc4->rasterizer->base.light_twoside;
2730
2731 struct vc4_compiled_shader *old_fs = vc4->prog.fs;
2732 vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
2733 if (vc4->prog.fs == old_fs)
2734 return;
2735
2736 vc4->dirty |= VC4_DIRTY_COMPILED_FS;
2737
2738 if (vc4->rasterizer->base.flatshade &&
2739 old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
2740 vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
2741 }
2742
2743 if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
2744 vc4->dirty |= VC4_DIRTY_FS_INPUTS;
2745 }
2746
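/* VS equivalent of vc4_update_compiled_fs(): compiles one variant of the
 * full vertex shader (keyed on the bound FS's inputs) and one coordinate
 * shader variant for the binner, which doesn't depend on the FS inputs.
 */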
2747 static void
2748 vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
2749 {
2750 struct vc4_vs_key local_key;
2751 struct vc4_vs_key *key = &local_key;
2752
2753 if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2754 VC4_DIRTY_RASTERIZER |
2755 VC4_DIRTY_VERTTEX |
2756 VC4_DIRTY_VTXSTATE |
2757 VC4_DIRTY_UNCOMPILED_VS |
2758 VC4_DIRTY_FS_INPUTS))) {
2759 return;
2760 }
2761
2762 memset(key, 0, sizeof(*key));
2763 vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
2764 key->base.shader_state = vc4->prog.bind_vs;
2765 key->fs_inputs = vc4->prog.fs->fs_inputs;
2766 key->clamp_color = vc4->rasterizer->base.clamp_vertex_color;
2767
2768 for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
2769 key->attr_formats[i] = vc4->vtx->pipe[i].src_format;
2770
2771 key->per_vertex_point_size =
2772 (prim_mode == PIPE_PRIM_POINTS &&
2773 vc4->rasterizer->base.point_size_per_vertex);
2774
2775 struct vc4_compiled_shader *vs =
2776 vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
2777 if (vs != vc4->prog.vs) {
2778 vc4->prog.vs = vs;
2779 vc4->dirty |= VC4_DIRTY_COMPILED_VS;
2780 }
2781
2782 key->is_coord = true;
2783 /* Coord shaders don't care what the FS inputs are. */
2784 key->fs_inputs = NULL;
2785 struct vc4_compiled_shader *cs =
2786 vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
2787 if (cs != vc4->prog.cs) {
2788 vc4->prog.cs = cs;
2789 vc4->dirty |= VC4_DIRTY_COMPILED_CS;
2790 }
2791 }
2792
2793 bool
2794 vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
2795 {
2796 vc4_update_compiled_fs(vc4, prim_mode);
2797 vc4_update_compiled_vs(vc4, prim_mode);
2798
2799 return !(vc4->prog.cs->failed ||
2800 vc4->prog.vs->failed ||
2801 vc4->prog.fs->failed);
2802 }
2803
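/* The variant caches hash and compare the raw key bytes, which is why the
 * keys above are memset to zero before being filled in: padding has to be
 * deterministic for the memcmp()-based lookups to work.
 */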
2804 static uint32_t
2805 fs_cache_hash(const void *key)
2806 {
2807 return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
2808 }
2809
2810 static uint32_t
2811 vs_cache_hash(const void *key)
2812 {
2813 return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
2814 }
2815
2816 static bool
2817 fs_cache_compare(const void *key1, const void *key2)
2818 {
2819 return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
2820 }
2821
2822 static bool
2823 vs_cache_compare(const void *key1, const void *key2)
2824 {
2825 return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
2826 }
2827
2828 static uint32_t
2829 fs_inputs_hash(const void *key)
2830 {
2831 const struct vc4_fs_inputs *inputs = key;
2832
2833 return _mesa_hash_data(inputs->input_slots,
2834 sizeof(*inputs->input_slots) *
2835 inputs->num_inputs);
2836 }
2837
2838 static bool
2839 fs_inputs_compare(const void *key1, const void *key2)
2840 {
2841 const struct vc4_fs_inputs *inputs1 = key1;
2842 const struct vc4_fs_inputs *inputs2 = key2;
2843
2844 return (inputs1->num_inputs == inputs2->num_inputs &&
2845 memcmp(inputs1->input_slots,
2846 inputs2->input_slots,
2847 sizeof(*inputs1->input_slots) *
2848 inputs1->num_inputs) == 0);
2849 }
2850
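/* When an uncompiled shader is deleted, evict every compiled variant whose
 * key still references it from the caches and free its BO and storage.
 */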
2851 static void
2852 delete_from_cache_if_matches(struct hash_table *ht,
2853 struct hash_entry *entry,
2854 struct vc4_uncompiled_shader *so)
2855 {
2856 const struct vc4_key *key = entry->key;
2857
2858 if (key->shader_state == so) {
2859 struct vc4_compiled_shader *shader = entry->data;
2860 _mesa_hash_table_remove(ht, entry);
2861 vc4_bo_unreference(&shader->bo);
2862 ralloc_free(shader);
2863 }
2864 }
2865
2866 static void
2867 vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
2868 {
2869 struct vc4_context *vc4 = vc4_context(pctx);
2870 struct vc4_uncompiled_shader *so = hwcso;
2871
2872 struct hash_entry *entry;
2873 hash_table_foreach(vc4->fs_cache, entry)
2874 delete_from_cache_if_matches(vc4->fs_cache, entry, so);
2875 hash_table_foreach(vc4->vs_cache, entry)
2876 delete_from_cache_if_matches(vc4->vs_cache, entry, so);
2877
2878 ralloc_free(so->base.ir.nir);
2879 free(so);
2880 }
2881
2882 static void
2883 vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
2884 {
2885 struct vc4_context *vc4 = vc4_context(pctx);
2886 vc4->prog.bind_fs = hwcso;
2887 vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
2888 }
2889
2890 static void
2891 vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
2892 {
2893 struct vc4_context *vc4 = vc4_context(pctx);
2894 vc4->prog.bind_vs = hwcso;
2895 vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
2896 }
2897
2898 void
2899 vc4_program_init(struct pipe_context *pctx)
2900 {
2901 struct vc4_context *vc4 = vc4_context(pctx);
2902
2903 pctx->create_vs_state = vc4_shader_state_create;
2904 pctx->delete_vs_state = vc4_shader_state_delete;
2905
2906 pctx->create_fs_state = vc4_shader_state_create;
2907 pctx->delete_fs_state = vc4_shader_state_delete;
2908
2909 pctx->bind_fs_state = vc4_fp_state_bind;
2910 pctx->bind_vs_state = vc4_vp_state_bind;
2911
2912 vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
2913 fs_cache_compare);
2914 vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
2915 vs_cache_compare);
2916 vc4->fs_inputs_set = _mesa_set_create(pctx, fs_inputs_hash,
2917 fs_inputs_compare);
2918 }
2919
2920 void
2921 vc4_program_fini(struct pipe_context *pctx)
2922 {
2923 struct vc4_context *vc4 = vc4_context(pctx);
2924
2925 struct hash_entry *entry;
2926 hash_table_foreach(vc4->fs_cache, entry) {
2927 struct vc4_compiled_shader *shader = entry->data;
2928 vc4_bo_unreference(&shader->bo);
2929 ralloc_free(shader);
2930 _mesa_hash_table_remove(vc4->fs_cache, entry);
2931 }
2932
2933 hash_table_foreach(vc4->vs_cache, entry) {
2934 struct vc4_compiled_shader *shader = entry->data;
2935 vc4_bo_unreference(&shader->bo);
2936 ralloc_free(shader);
2937 _mesa_hash_table_remove(vc4->vs_cache, entry);
2938 }
2939 }