1 /*
2 * Copyright (c) 2014 Scott Mansell
3 * Copyright © 2014 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <inttypes.h>
26 #include "util/u_format.h"
27 #include "util/u_hash.h"
28 #include "util/u_math.h"
29 #include "util/u_memory.h"
30 #include "util/ralloc.h"
31 #include "util/hash_table.h"
32 #include "tgsi/tgsi_dump.h"
33 #include "tgsi/tgsi_lowering.h"
34 #include "tgsi/tgsi_parse.h"
35 #include "compiler/nir/nir.h"
36 #include "compiler/nir/nir_builder.h"
37 #include "nir/tgsi_to_nir.h"
38 #include "vc4_context.h"
39 #include "vc4_qpu.h"
40 #include "vc4_qir.h"
41 #ifdef USE_VC4_SIMULATOR
42 #include "simpenrose/simpenrose.h"
43 #endif
44
45 static struct qreg
46 ntq_get_src(struct vc4_compile *c, nir_src src, int i);
47
48 static void
49 resize_qreg_array(struct vc4_compile *c,
50 struct qreg **regs,
51 uint32_t *size,
52 uint32_t decl_size)
53 {
54 if (*size >= decl_size)
55 return;
56
57 uint32_t old_size = *size;
58 *size = MAX2(*size * 2, decl_size);
59 *regs = reralloc(c, *regs, struct qreg, *size);
60 if (!*regs) {
61 fprintf(stderr, "Malloc failure\n");
62 abort();
63 }
64
65 for (uint32_t i = old_size; i < *size; i++)
66 (*regs)[i] = c->undef;
67 }
68
69 static struct qreg
70 indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
71 {
72 struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
73 uint32_t offset = intr->const_index[0];
74 struct vc4_compiler_ubo_range *range = NULL;
75 unsigned i;
76 for (i = 0; i < c->num_uniform_ranges; i++) {
77 range = &c->ubo_ranges[i];
78 if (offset >= range->src_offset &&
79 offset < range->src_offset + range->size) {
80 break;
81 }
82 }
83 /* The driver-location-based offset always has to be within a declared
84 * uniform range.
85 */
86 assert(range);
87 if (!range->used) {
88 range->used = true;
89 range->dst_offset = c->next_ubo_dst_offset;
90 c->next_ubo_dst_offset += range->size;
91 c->num_ubo_ranges++;
92 }
93
94 offset -= range->src_offset;
95
96 /* Adjust for where we stored the TGSI register base. */
97 indirect_offset = qir_ADD(c, indirect_offset,
98 qir_uniform_ui(c, (range->dst_offset +
99 offset)));
100
101 /* Clamp to [0, array size). Note that MIN/MAX are signed. */
102 indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
103 indirect_offset = qir_MIN(c, indirect_offset,
104 qir_uniform_ui(c, (range->dst_offset +
105 range->size - 4)));
106
107 qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
108 c->num_texture_samples++;
109 return qir_TEX_RESULT(c);
110 }
111
112 nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
113 enum quniform_contents contents)
114 {
115 nir_intrinsic_instr *intr =
116 nir_intrinsic_instr_create(b->shader,
117 nir_intrinsic_load_uniform);
118 intr->const_index[0] = (VC4_NIR_STATE_UNIFORM_OFFSET + contents) * 4;
119 intr->num_components = 1;
120 intr->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
121 nir_ssa_dest_init(&intr->instr, &intr->dest, 1, NULL);
122 nir_builder_instr_insert(b, &intr->instr);
123 return &intr->dest.ssa;
124 }
125
126 nir_ssa_def *
127 vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
128 {
129 switch (swiz) {
130 default:
131 case UTIL_FORMAT_SWIZZLE_NONE:
132 fprintf(stderr, "warning: unknown swizzle\n");
133 /* FALLTHROUGH */
134 case UTIL_FORMAT_SWIZZLE_0:
135 return nir_imm_float(b, 0.0);
136 case UTIL_FORMAT_SWIZZLE_1:
137 return nir_imm_float(b, 1.0);
138 case UTIL_FORMAT_SWIZZLE_X:
139 case UTIL_FORMAT_SWIZZLE_Y:
140 case UTIL_FORMAT_SWIZZLE_Z:
141 case UTIL_FORMAT_SWIZZLE_W:
142 return srcs[swiz];
143 }
144 }
145
146 static struct qreg *
147 ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
148 {
149 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
150 def->num_components);
151 _mesa_hash_table_insert(c->def_ht, def, qregs);
152 return qregs;
153 }
154
155 static struct qreg *
156 ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
157 {
158 if (dest->is_ssa) {
159 struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
160 for (int i = 0; i < dest->ssa.num_components; i++)
161 qregs[i] = c->undef;
162 return qregs;
163 } else {
164 nir_register *reg = dest->reg.reg;
165 assert(dest->reg.base_offset == 0);
166 assert(reg->num_array_elems == 0);
167 struct hash_entry *entry =
168 _mesa_hash_table_search(c->def_ht, reg);
169 return entry->data;
170 }
171 }
172
173 static struct qreg
174 ntq_get_src(struct vc4_compile *c, nir_src src, int i)
175 {
176 struct hash_entry *entry;
177 if (src.is_ssa) {
178 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
179 assert(i < src.ssa->num_components);
180 } else {
181 nir_register *reg = src.reg.reg;
182 entry = _mesa_hash_table_search(c->def_ht, reg);
183 assert(reg->num_array_elems == 0);
184 assert(src.reg.base_offset == 0);
185 assert(i < reg->num_components);
186 }
187
188 struct qreg *qregs = entry->data;
189 return qregs[i];
190 }
191
192 static struct qreg
193 ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
194 unsigned src)
195 {
196 assert(util_is_power_of_two(instr->dest.write_mask));
197 unsigned chan = ffs(instr->dest.write_mask) - 1;
198 struct qreg r = ntq_get_src(c, instr->src[src].src,
199 instr->src[src].swizzle[chan]);
200
201 assert(!instr->src[src].abs);
202 assert(!instr->src[src].negate);
203
204 return r;
205 }
206
207 static inline struct qreg
208 qir_SAT(struct vc4_compile *c, struct qreg val)
209 {
210 return qir_FMAX(c,
211 qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
212 qir_uniform_f(c, 0.0));
213 }
214
215 static struct qreg
216 ntq_rcp(struct vc4_compile *c, struct qreg x)
217 {
218 struct qreg r = qir_RCP(c, x);
219
220 /* Apply a Newton-Raphson step to improve the accuracy. */
221 r = qir_FMUL(c, r, qir_FSUB(c,
222 qir_uniform_f(c, 2.0),
223 qir_FMUL(c, x, r)));
224
225 return r;
226 }
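
/* A quick sanity check of the Newton-Raphson step above: if the hardware
 * estimate is r0 = (1/x) * (1 + e) for some small relative error e, then
 *
 *     r1 = r0 * (2 - x * r0)
 *        = (1/x) * (1 + e) * (2 - (1 + e))
 *        = (1/x) * (1 - e^2)
 *
 * so each step squares the relative error, roughly doubling the number of
 * correct mantissa bits.
 */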
227
228 static struct qreg
229 ntq_rsq(struct vc4_compile *c, struct qreg x)
230 {
231 struct qreg r = qir_RSQ(c, x);
232
233 /* Apply a Newton-Raphson step to improve the accuracy. */
234 r = qir_FMUL(c, r, qir_FSUB(c,
235 qir_uniform_f(c, 1.5),
236 qir_FMUL(c,
237 qir_uniform_f(c, 0.5),
238 qir_FMUL(c, x,
239 qir_FMUL(c, r, r)))));
240
241 return r;
242 }
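
/* Likewise for the reciprocal square root: with r0 = (1/sqrt(x)) * (1 + e),
 *
 *     r1 = r0 * (1.5 - 0.5 * x * r0 * r0)
 *
 * expands to (1/sqrt(x)) * (1 - 1.5 * e^2 + O(e^3)), so one iteration again
 * squares the relative error of the hardware estimate.
 */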
243
244 static struct qreg
245 qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
246 {
247 struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
248 struct qreg high = qir_POW(c,
249 qir_FMUL(c,
250 qir_FADD(c,
251 srgb,
252 qir_uniform_f(c, 0.055)),
253 qir_uniform_f(c, 1.0 / 1.055)),
254 qir_uniform_f(c, 2.4));
255
256 qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
257 return qir_SEL(c, QPU_COND_NS, low, high);
258 }
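
/* This implements the standard sRGB electro-optical transfer function:
 *
 *     linear = srgb / 12.92                    if srgb <= 0.04045
 *     linear = ((srgb + 0.055) / 1.055)^2.4    otherwise
 *
 * Both halves are computed unconditionally, and the SF/SEL pair picks the
 * right one from the sign of (srgb - 0.04045).
 */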
259
260 static struct qreg
261 ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
262 {
263 struct qreg src0_hi = qir_SHR(c, src0,
264 qir_uniform_ui(c, 24));
265 struct qreg src1_hi = qir_SHR(c, src1,
266 qir_uniform_ui(c, 24));
267
268 struct qreg hilo = qir_MUL24(c, src0_hi, src1);
269 struct qreg lohi = qir_MUL24(c, src0, src1_hi);
270 struct qreg lolo = qir_MUL24(c, src0, src1);
271
272 return qir_ADD(c, lolo, qir_SHL(c,
273 qir_ADD(c, hilo, lohi),
274 qir_uniform_ui(c, 24)));
275 }
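
/* For reference, the decomposition above works because the QPU multiplier
 * only produces 24x24-bit products.  Writing the operands as
 * a = a_lo + (a_hi << 24) and b = b_lo + (b_hi << 24):
 *
 *     a * b = a_lo * b_lo
 *           + ((a_hi * b_lo + a_lo * b_hi) << 24)
 *           + (a_hi * b_hi << 48)
 *
 * and the last term vanishes mod 2^32, so three MUL24s (which ignore the
 * high 8 bits of their arguments) plus a shift and adds recover the low 32
 * bits of the full product.
 */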
276
277 static struct qreg
278 ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
279 {
280 struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
281 qir_uniform_ui(c, 8)));
282 return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
283 }
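
/* The sampler returns 24-bit depth in the upper bits of the 32-bit result,
 * so the shift by 8 leaves an integer in [0, 0xffffff] and the multiply by
 * 1.0/0xffffff rescales it to a float in [0.0, 1.0].
 */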
284
285 /**
286 * Emits a lowered TXF_MS from an MSAA texture.
287 *
288 * The addressing math has been lowered in NIR, and now we just need to read
289 * it like a UBO.
290 */
291 static void
292 ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
293 {
294 uint32_t tile_width = 32;
295 uint32_t tile_height = 32;
296 uint32_t tile_size = (tile_height * tile_width *
297 VC4_MAX_SAMPLES * sizeof(uint32_t));
298
299 unsigned unit = instr->texture_index;
300 uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
301 uint32_t w_tiles = w / tile_width;
302 uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
303 uint32_t h_tiles = h / tile_height;
304 uint32_t size = w_tiles * h_tiles * tile_size;
305
306 struct qreg addr;
307 assert(instr->num_srcs == 1);
308 assert(instr->src[0].src_type == nir_tex_src_coord);
309 addr = ntq_get_src(c, instr->src[0].src, 0);
310
311 /* Perform the clamping required by kernel validation. */
312 addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
313 addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4));
314
315 qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
316
317 struct qreg tex = qir_TEX_RESULT(c);
318 c->num_texture_samples++;
319
320 struct qreg *dest = ntq_get_dest(c, &instr->dest);
321 enum pipe_format format = c->key->tex[unit].format;
322 if (util_format_is_depth_or_stencil(format)) {
323 struct qreg scaled = ntq_scale_depth_texture(c, tex);
324 for (int i = 0; i < 4; i++)
325 dest[i] = scaled;
326 } else {
327 for (int i = 0; i < 4; i++)
328 dest[i] = qir_UNPACK_8_F(c, tex, i);
329 }
330
331 for (int i = 0; i < 4; i++) {
332 if (c->tex_srgb_decode[unit] & (1 << i))
333 dest[i] = qir_srgb_decode(c, dest[i]);
334 }
335 }
336
337 static void
338 ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
339 {
340 struct qreg s, t, r, lod, proj, compare;
341 bool is_txb = false, is_txl = false, has_proj = false;
342 unsigned unit = instr->texture_index;
343
344 if (instr->op == nir_texop_txf) {
345 ntq_emit_txf(c, instr);
346 return;
347 }
348
349 for (unsigned i = 0; i < instr->num_srcs; i++) {
350 switch (instr->src[i].src_type) {
351 case nir_tex_src_coord:
352 s = ntq_get_src(c, instr->src[i].src, 0);
353 if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
354 t = qir_uniform_f(c, 0.5);
355 else
356 t = ntq_get_src(c, instr->src[i].src, 1);
357 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
358 r = ntq_get_src(c, instr->src[i].src, 2);
359 break;
360 case nir_tex_src_bias:
361 lod = ntq_get_src(c, instr->src[i].src, 0);
362 is_txb = true;
363 break;
364 case nir_tex_src_lod:
365 lod = ntq_get_src(c, instr->src[i].src, 0);
366 is_txl = true;
367 break;
368 case nir_tex_src_comparitor:
369 compare = ntq_get_src(c, instr->src[i].src, 0);
370 break;
371 case nir_tex_src_projector:
372 proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
373 s = qir_FMUL(c, s, proj);
374 t = qir_FMUL(c, t, proj);
375 has_proj = true;
376 break;
377 default:
378 unreachable("unknown texture source");
379 }
380 }
381
382 struct qreg texture_u[] = {
383 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
384 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
385 qir_uniform(c, QUNIFORM_CONSTANT, 0),
386 qir_uniform(c, QUNIFORM_CONSTANT, 0),
387 };
388 uint32_t next_texture_u = 0;
389
390 /* There is no native support for GL texture rectangle coordinates, so
391 * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
392 * 1]).
393 */
394 if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
395 s = qir_FMUL(c, s,
396 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
397 t = qir_FMUL(c, t,
398 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
399 }
400
401 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
402 texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
403 unit | (is_txl << 16));
404 }
405
406 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
407 struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
408 struct qreg rcp_ma = qir_RCP(c, ma);
409 s = qir_FMUL(c, s, rcp_ma);
410 t = qir_FMUL(c, t, rcp_ma);
411 r = qir_FMUL(c, r, rcp_ma);
412
413 qir_TEX_R(c, r, texture_u[next_texture_u++]);
414 } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
415 c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
416 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
417 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
418 qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
419 texture_u[next_texture_u++]);
420 }
421
422 if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
423 s = qir_SAT(c, s);
424 }
425
426 if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
427 t = qir_SAT(c, t);
428 }
429
430 qir_TEX_T(c, t, texture_u[next_texture_u++]);
431
432 if (is_txl || is_txb)
433 qir_TEX_B(c, lod, texture_u[next_texture_u++]);
434
435 qir_TEX_S(c, s, texture_u[next_texture_u++]);
436
437 c->num_texture_samples++;
438 struct qreg tex = qir_TEX_RESULT(c);
439
440 enum pipe_format format = c->key->tex[unit].format;
441
442 struct qreg *dest = ntq_get_dest(c, &instr->dest);
443 if (util_format_is_depth_or_stencil(format)) {
444 struct qreg normalized = ntq_scale_depth_texture(c, tex);
445 struct qreg depth_output;
446
447 struct qreg u0 = qir_uniform_f(c, 0.0f);
448 struct qreg u1 = qir_uniform_f(c, 1.0f);
449 if (c->key->tex[unit].compare_mode) {
450 if (has_proj)
451 compare = qir_FMUL(c, compare, proj);
452
453 switch (c->key->tex[unit].compare_func) {
454 case PIPE_FUNC_NEVER:
455 depth_output = qir_uniform_f(c, 0.0f);
456 break;
457 case PIPE_FUNC_ALWAYS:
458 depth_output = u1;
459 break;
460 case PIPE_FUNC_EQUAL:
461 qir_SF(c, qir_FSUB(c, compare, normalized));
462 depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
463 break;
464 case PIPE_FUNC_NOTEQUAL:
465 qir_SF(c, qir_FSUB(c, compare, normalized));
466 depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
467 break;
468 case PIPE_FUNC_GREATER:
469 qir_SF(c, qir_FSUB(c, compare, normalized));
470 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
471 break;
472 case PIPE_FUNC_GEQUAL:
473 qir_SF(c, qir_FSUB(c, normalized, compare));
474 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
475 break;
476 case PIPE_FUNC_LESS:
477 qir_SF(c, qir_FSUB(c, compare, normalized));
478 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
479 break;
480 case PIPE_FUNC_LEQUAL:
481 qir_SF(c, qir_FSUB(c, normalized, compare));
482 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
483 break;
484 }
485 } else {
486 depth_output = normalized;
487 }
488
489 for (int i = 0; i < 4; i++)
490 dest[i] = depth_output;
491 } else {
492 for (int i = 0; i < 4; i++)
493 dest[i] = qir_UNPACK_8_F(c, tex, i);
494 }
495
496 for (int i = 0; i < 4; i++) {
497 if (c->tex_srgb_decode[unit] & (1 << i))
498 dest[i] = qir_srgb_decode(c, dest[i]);
499 }
500 }
501
502 /**
503 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
504 * to zero).
505 */
506 static struct qreg
507 ntq_ffract(struct vc4_compile *c, struct qreg src)
508 {
509 struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
510 struct qreg diff = qir_FSUB(c, src, trunc);
511 qir_SF(c, diff);
512 return qir_SEL(c, QPU_COND_NS,
513 qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff);
514 }
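
/* Worked example: for src = -1.25, FTOI/ITOF gives trunc = -1.0 and
 * diff = -0.25.  The negative flag fires, so we return diff + 1.0 = 0.75,
 * which matches x - floor(x).  For src = 1.25 the diff is already the
 * correct 0.25 and is returned unchanged.
 */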
515
516 /**
517 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
518 * zero).
519 */
520 static struct qreg
521 ntq_ffloor(struct vc4_compile *c, struct qreg src)
522 {
523 struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
524
525 /* This will be < 0 if we truncated and the truncation was of a value
526 * that was < 0 in the first place.
527 */
528 qir_SF(c, qir_FSUB(c, src, trunc));
529
530 return qir_SEL(c, QPU_COND_NS,
531 qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc);
532 }
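
/* Worked example: for src = -1.25, trunc = -1.0 and src - trunc = -0.25,
 * so the negative flag fires and we return trunc - 1.0 = -2.0.  For
 * non-negative src (or exact integers) the truncated value is already the
 * floor.
 */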
533
534 /**
535 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
536 * zero).
537 */
538 static struct qreg
539 ntq_fceil(struct vc4_compile *c, struct qreg src)
540 {
541 struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
542
543 /* This will be < 0 if we truncated and the truncation was of a value
544 * that was > 0 in the first place.
545 */
546 qir_SF(c, qir_FSUB(c, trunc, src));
547
548 return qir_SEL(c, QPU_COND_NS,
549 qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc);
550 }
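
/* Worked example: for src = 1.25, trunc = 1.0 and trunc - src = -0.25, so
 * the negative flag fires and we return trunc + 1.0 = 2.0.  For negative
 * src (or exact integers) truncation already rounds up to the ceiling.
 */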
551
552 static struct qreg
553 ntq_fsin(struct vc4_compile *c, struct qreg src)
554 {
555 float coeff[] = {
556 -2.0 * M_PI,
557 pow(2.0 * M_PI, 3) / (3 * 2 * 1),
558 -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
559 pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
560 -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
561 };
562
563 struct qreg scaled_x =
564 qir_FMUL(c,
565 src,
566 qir_uniform_f(c, 1.0 / (M_PI * 2.0)));
567
568 struct qreg x = qir_FADD(c,
569 ntq_ffract(c, scaled_x),
570 qir_uniform_f(c, -0.5));
571 struct qreg x2 = qir_FMUL(c, x, x);
572 struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
573 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
574 x = qir_FMUL(c, x, x2);
575 sum = qir_FADD(c,
576 sum,
577 qir_FMUL(c,
578 x,
579 qir_uniform_f(c, coeff[i])));
580 }
581 return sum;
582 }
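
/* The range reduction above maps src to x = fract(src / 2pi) - 0.5 in
 * [-0.5, 0.5), so sin(src) = sin(2pi * (x + 0.5)) = -sin(2pi * x).  The
 * loop then evaluates the Taylor series of -sin(2pi * x):
 *
 *     -sin(2pi*x) = -(2pi)*x + (2pi)^3/3! * x^3 - (2pi)^5/5! * x^5 + ...
 *
 * which is exactly the coeff[] table, keeping odd powers up to x^9.
 */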
583
584 static struct qreg
585 ntq_fcos(struct vc4_compile *c, struct qreg src)
586 {
587 float coeff[] = {
588 -1.0f,
589 pow(2.0 * M_PI, 2) / (2 * 1),
590 -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
591 pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
592 -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
593 pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
594 };
595
596 struct qreg scaled_x =
597 qir_FMUL(c, src,
598 qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
599 struct qreg x_frac = qir_FADD(c,
600 ntq_ffract(c, scaled_x),
601 qir_uniform_f(c, -0.5));
602
603 struct qreg sum = qir_uniform_f(c, coeff[0]);
604 struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
605 struct qreg x = x2; /* Current even power of x: x^2, x^4, ..., x^10 */
606 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
607 if (i != 1)
608 x = qir_FMUL(c, x, x2);
609
610 struct qreg mul = qir_FMUL(c,
611 x,
612 qir_uniform_f(c, coeff[i]));
613 sum = qir_FADD(c, sum, mul);
617 }
618 return sum;
619 }
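
/* Same range reduction as ntq_fsin(): with x = fract(src / 2pi) - 0.5,
 * cos(src) = cos(2pi * (x + 0.5)) = -cos(2pi * x), and the loop sums the
 * Taylor series
 *
 *     -cos(2pi*x) = -1 + (2pi)^2/2! * x^2 - (2pi)^4/4! * x^4 - ...
 *
 * matching the coeff[] table, which keeps even powers up to x^10.
 */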
620
621 static struct qreg
622 ntq_fsign(struct vc4_compile *c, struct qreg src)
623 {
624 struct qreg t = qir_get_temp(c);
625
626 qir_SF(c, src);
627 qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
628 qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
629 qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
630 return t;
631 }
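
/* The conditional MOVs above depend on write order: the result starts at
 * 0.0, any nonzero source (ZC) overwrites it with 1.0, and a negative
 * source (NS) overwrites it again with -1.0, yielding the usual sign()
 * values.
 */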
632
633 static void
634 emit_vertex_input(struct vc4_compile *c, int attr)
635 {
636 enum pipe_format format = c->vs_key->attr_formats[attr];
637 uint32_t attr_size = util_format_get_blocksize(format);
638
639 c->vattr_sizes[attr] = align(attr_size, 4);
640 for (int i = 0; i < align(attr_size, 4) / 4; i++) {
641 struct qreg vpm = { QFILE_VPM, attr * 4 + i };
642 c->inputs[attr * 4 + i] = qir_MOV(c, vpm);
643 c->num_inputs++;
644 }
645 }
646
647 static void
648 emit_fragcoord_input(struct vc4_compile *c, int attr)
649 {
650 c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
651 c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
652 c->inputs[attr * 4 + 2] =
653 qir_FMUL(c,
654 qir_ITOF(c, qir_FRAG_Z(c)),
655 qir_uniform_f(c, 1.0 / 0xffffff));
656 c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
657 }
658
659 static struct qreg
660 emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
661 uint8_t swizzle)
662 {
663 uint32_t i = c->num_input_slots++;
664 struct qreg vary = {
665 QFILE_VARY,
666 i
667 };
668
669 if (c->num_input_slots >= c->input_slots_array_size) {
670 c->input_slots_array_size =
671 MAX2(4, c->input_slots_array_size * 2);
672
673 c->input_slots = reralloc(c, c->input_slots,
674 struct vc4_varying_slot,
675 c->input_slots_array_size);
676 }
677
678 c->input_slots[i].slot = slot;
679 c->input_slots[i].swizzle = swizzle;
680
681 return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
682 }
683
684 static void
685 emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
686 {
687 for (int i = 0; i < 4; i++) {
688 c->inputs[attr * 4 + i] =
689 emit_fragment_varying(c, slot, i);
690 c->num_inputs++;
691 }
692 }
693
694 static void
695 add_output(struct vc4_compile *c,
696 uint32_t decl_offset,
697 uint8_t slot,
698 uint8_t swizzle)
699 {
700 uint32_t old_array_size = c->outputs_array_size;
701 resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
702 decl_offset + 1);
703
704 if (old_array_size != c->outputs_array_size) {
705 c->output_slots = reralloc(c,
706 c->output_slots,
707 struct vc4_varying_slot,
708 c->outputs_array_size);
709 }
710
711 c->output_slots[decl_offset].slot = slot;
712 c->output_slots[decl_offset].swizzle = swizzle;
713 }
714
715 static void
716 declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
717 {
718 unsigned array_id = c->num_uniform_ranges++;
719 if (array_id >= c->ubo_ranges_array_size) {
720 c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
721 array_id + 1);
722 c->ubo_ranges = reralloc(c, c->ubo_ranges,
723 struct vc4_compiler_ubo_range,
724 c->ubo_ranges_array_size);
725 }
726
727 c->ubo_ranges[array_id].dst_offset = 0;
728 c->ubo_ranges[array_id].src_offset = start;
729 c->ubo_ranges[array_id].size = size;
730 c->ubo_ranges[array_id].used = false;
731 }
732
733 static bool
734 ntq_src_is_only_ssa_def_user(nir_src *src)
735 {
736 if (!src->is_ssa)
737 return false;
738
739 if (!list_empty(&src->ssa->if_uses))
740 return false;
741
742 return (src->ssa->uses.next == &src->use_link &&
743 src->ssa->uses.next->next == &src->ssa->uses);
744 }
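
/* The list test above reads as: the def's use list starts with this src's
 * use link, and that first entry's next pointer is back at the list head.
 * In other words, the def has exactly one use and it is this one (and no
 * if-condition uses, per the earlier check).
 */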
745
746 /**
747 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
748 * bit set.
749 *
750 * However, as an optimization, it tries to find the instructions generating
751 * the sources to be packed and just emit the pack flag there, if possible.
752 */
753 static void
754 ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
755 {
756 struct qreg result = qir_get_temp(c);
757 struct nir_alu_instr *vec4 = NULL;
758
759 /* If packing from a vec4 op (as expected), identify it so that we can
760 * peek back at what generated its sources.
761 */
762 if (instr->src[0].src.is_ssa &&
763 instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
764 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
765 nir_op_vec4) {
766 vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
767 }
768
769 /* If the pack is replicating the same channel 4 times, use the 8888
770 * pack flag. This is common for blending using the alpha
771 * channel.
772 */
773 if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
774 instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
775 instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
776 struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
777 *dest = qir_PACK_8888_F(c,
778 ntq_get_src(c, instr->src[0].src,
779 instr->src[0].swizzle[0]));
780 return;
781 }
782
783 for (int i = 0; i < 4; i++) {
784 int swiz = instr->src[0].swizzle[i];
785 struct qreg src;
786 if (vec4) {
787 src = ntq_get_src(c, vec4->src[swiz].src,
788 vec4->src[swiz].swizzle[0]);
789 } else {
790 src = ntq_get_src(c, instr->src[0].src, swiz);
791 }
792
793 if (vec4 &&
794 ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
795 src.file == QFILE_TEMP &&
796 c->defs[src.index] &&
797 qir_is_mul(c->defs[src.index]) &&
798 !c->defs[src.index]->dst.pack) {
799 struct qinst *rewrite = c->defs[src.index];
800 c->defs[src.index] = NULL;
801 rewrite->dst = result;
802 rewrite->dst.pack = QPU_PACK_MUL_8A + i;
803 continue;
804 }
805
806 qir_PACK_8_F(c, result, src, i);
807 }
808
809 struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
810 *dest = result;
811 }
812
813 /** Handles sign-extended bitfield extracts for 16 bits. */
814 static struct qreg
815 ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
816 struct qreg bits)
817 {
818 assert(bits.file == QFILE_UNIF &&
819 c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
820 c->uniform_data[bits.index] == 16);
821
822 assert(offset.file == QFILE_UNIF &&
823 c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
824 int offset_bit = c->uniform_data[offset.index];
825 assert(offset_bit % 16 == 0);
826
827 return qir_UNPACK_16_I(c, base, offset_bit / 16);
828 }
829
830 /** Handles unsigned bitfield extracts for 8 bits. */
831 static struct qreg
832 ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
833 struct qreg bits)
834 {
835 assert(bits.file == QFILE_UNIF &&
836 c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
837 c->uniform_data[bits.index] == 8);
838
839 assert(offset.file == QFILE_UNIF &&
840 c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
841 int offset_bit = c->uniform_data[offset.index];
842 assert(offset_bit % 8 == 0);
843
844 return qir_UNPACK_8_I(c, base, offset_bit / 8);
845 }
846
847 /**
848 * If compare_instr is a valid comparison instruction, emits the
849 * compare_instr's comparison and returns the sel_instr's return value based
850 * on the compare_instr's result.
851 */
852 static bool
853 ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
854 nir_alu_instr *compare_instr,
855 nir_alu_instr *sel_instr)
856 {
857 enum qpu_cond cond;
858
859 switch (compare_instr->op) {
860 case nir_op_feq:
861 case nir_op_ieq:
862 case nir_op_seq:
863 cond = QPU_COND_ZS;
864 break;
865 case nir_op_fne:
866 case nir_op_ine:
867 case nir_op_sne:
868 cond = QPU_COND_ZC;
869 break;
870 case nir_op_fge:
871 case nir_op_ige:
872 case nir_op_uge:
873 case nir_op_sge:
874 cond = QPU_COND_NC;
875 break;
876 case nir_op_flt:
877 case nir_op_ilt:
878 case nir_op_slt:
879 cond = QPU_COND_NS;
880 break;
881 default:
882 return false;
883 }
884
885 struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
886 struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);
887
888 if (nir_op_infos[compare_instr->op].input_types[0] == nir_type_float)
889 qir_SF(c, qir_FSUB(c, src0, src1));
890 else
891 qir_SF(c, qir_SUB(c, src0, src1));
892
893 switch (sel_instr->op) {
894 case nir_op_seq:
895 case nir_op_sne:
896 case nir_op_sge:
897 case nir_op_slt:
898 *dest = qir_SEL(c, cond,
899 qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
900 break;
901
902 case nir_op_bcsel:
903 *dest = qir_SEL(c, cond,
904 ntq_get_alu_src(c, sel_instr, 1),
905 ntq_get_alu_src(c, sel_instr, 2));
906 break;
907
908 default:
909 *dest = qir_SEL(c, cond,
910 qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
911 break;
912 }
913
914 return true;
915 }
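
/* For reference, the condition codes chosen above read the flags of
 * (src0 - src1): ZS fires when the difference is zero (src0 == src1), ZC
 * when it is nonzero, NS when it went negative (src0 < src1), and NC when
 * it did not (src0 >= src1).
 */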
916
917 /**
918 * Attempts to fold a comparison generating a boolean result into the
919 * condition code for selecting between two values, instead of comparing the
920 * boolean result against 0 to generate the condition code.
921 */
922 static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
923 struct qreg *src)
924 {
925 if (!instr->src[0].src.is_ssa)
926 goto out;
927 if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
928 goto out;
929 nir_alu_instr *compare =
930 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
931
932 struct qreg dest;
933 if (ntq_emit_comparison(c, &dest, compare, instr))
934 return dest;
935
936 out:
937 qir_SF(c, src[0]);
938 return qir_SEL(c, QPU_COND_NS, src[1], src[2]);
939 }
940
941 static void
942 ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
943 {
944 /* Vectors are special in that they have non-scalarized writemasks,
945 * and just take the first swizzle channel for each argument in order
946 * into each writemask channel.
947 */
948 if (instr->op == nir_op_vec2 ||
949 instr->op == nir_op_vec3 ||
950 instr->op == nir_op_vec4) {
951 struct qreg srcs[4];
952 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
953 srcs[i] = ntq_get_src(c, instr->src[i].src,
954 instr->src[i].swizzle[0]);
955 struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
956 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
957 dest[i] = srcs[i];
958 return;
959 }
960
961 if (instr->op == nir_op_pack_unorm_4x8) {
962 ntq_emit_pack_unorm_4x8(c, instr);
963 return;
964 }
965
966 if (instr->op == nir_op_unpack_unorm_4x8) {
967 struct qreg src = ntq_get_src(c, instr->src[0].src,
968 instr->src[0].swizzle[0]);
969 struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
970 for (int i = 0; i < 4; i++) {
971 if (instr->dest.write_mask & (1 << i))
972 dest[i] = qir_UNPACK_8_F(c, src, i);
973 }
974 return;
975 }
976
977 /* General case: We can just grab the one used channel per src. */
978 struct qreg src[nir_op_infos[instr->op].num_inputs];
979 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
980 src[i] = ntq_get_alu_src(c, instr, i);
981 }
982
983 /* Pick the channel to store the output in. */
984 assert(!instr->dest.saturate);
985 struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
986 assert(util_is_power_of_two(instr->dest.write_mask));
987 dest += ffs(instr->dest.write_mask) - 1;
988
989 switch (instr->op) {
990 case nir_op_fmov:
991 case nir_op_imov:
992 *dest = qir_MOV(c, src[0]);
993 break;
994 case nir_op_fmul:
995 *dest = qir_FMUL(c, src[0], src[1]);
996 break;
997 case nir_op_fadd:
998 *dest = qir_FADD(c, src[0], src[1]);
999 break;
1000 case nir_op_fsub:
1001 *dest = qir_FSUB(c, src[0], src[1]);
1002 break;
1003 case nir_op_fmin:
1004 *dest = qir_FMIN(c, src[0], src[1]);
1005 break;
1006 case nir_op_fmax:
1007 *dest = qir_FMAX(c, src[0], src[1]);
1008 break;
1009
1010 case nir_op_f2i:
1011 case nir_op_f2u:
1012 *dest = qir_FTOI(c, src[0]);
1013 break;
1014 case nir_op_i2f:
1015 case nir_op_u2f:
1016 *dest = qir_ITOF(c, src[0]);
1017 break;
1018 case nir_op_b2f:
1019 *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
1020 break;
1021 case nir_op_b2i:
1022 *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
1023 break;
1024 case nir_op_i2b:
1025 case nir_op_f2b:
1026 qir_SF(c, src[0]);
1027 *dest = qir_SEL(c, QPU_COND_ZC,
1028 qir_uniform_ui(c, ~0),
1029 qir_uniform_ui(c, 0));
1030 break;
1031
1032 case nir_op_iadd:
1033 *dest = qir_ADD(c, src[0], src[1]);
1034 break;
1035 case nir_op_ushr:
1036 *dest = qir_SHR(c, src[0], src[1]);
1037 break;
1038 case nir_op_isub:
1039 *dest = qir_SUB(c, src[0], src[1]);
1040 break;
1041 case nir_op_ishr:
1042 *dest = qir_ASR(c, src[0], src[1]);
1043 break;
1044 case nir_op_ishl:
1045 *dest = qir_SHL(c, src[0], src[1]);
1046 break;
1047 case nir_op_imin:
1048 *dest = qir_MIN(c, src[0], src[1]);
1049 break;
1050 case nir_op_imax:
1051 *dest = qir_MAX(c, src[0], src[1]);
1052 break;
1053 case nir_op_iand:
1054 *dest = qir_AND(c, src[0], src[1]);
1055 break;
1056 case nir_op_ior:
1057 *dest = qir_OR(c, src[0], src[1]);
1058 break;
1059 case nir_op_ixor:
1060 *dest = qir_XOR(c, src[0], src[1]);
1061 break;
1062 case nir_op_inot:
1063 *dest = qir_NOT(c, src[0]);
1064 break;
1065
1066 case nir_op_imul:
1067 *dest = ntq_umul(c, src[0], src[1]);
1068 break;
1069
1070 case nir_op_seq:
1071 case nir_op_sne:
1072 case nir_op_sge:
1073 case nir_op_slt:
1074 case nir_op_feq:
1075 case nir_op_fne:
1076 case nir_op_fge:
1077 case nir_op_flt:
1078 case nir_op_ieq:
1079 case nir_op_ine:
1080 case nir_op_ige:
1081 case nir_op_uge:
1082 case nir_op_ilt:
1083 if (!ntq_emit_comparison(c, dest, instr, instr)) {
1084 fprintf(stderr, "Bad comparison instruction\n");
1085 }
1086 break;
1087
1088 case nir_op_bcsel:
1089 *dest = ntq_emit_bcsel(c, instr, src);
1090 break;
1091 case nir_op_fcsel:
1092 qir_SF(c, src[0]);
1093 *dest = qir_SEL(c, QPU_COND_ZC, src[1], src[2]);
1094 break;
1095
1096 case nir_op_frcp:
1097 *dest = ntq_rcp(c, src[0]);
1098 break;
1099 case nir_op_frsq:
1100 *dest = ntq_rsq(c, src[0]);
1101 break;
1102 case nir_op_fexp2:
1103 *dest = qir_EXP2(c, src[0]);
1104 break;
1105 case nir_op_flog2:
1106 *dest = qir_LOG2(c, src[0]);
1107 break;
1108
1109 case nir_op_ftrunc:
1110 *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
1111 break;
1112 case nir_op_fceil:
1113 *dest = ntq_fceil(c, src[0]);
1114 break;
1115 case nir_op_ffract:
1116 *dest = ntq_ffract(c, src[0]);
1117 break;
1118 case nir_op_ffloor:
1119 *dest = ntq_ffloor(c, src[0]);
1120 break;
1121
1122 case nir_op_fsin:
1123 *dest = ntq_fsin(c, src[0]);
1124 break;
1125 case nir_op_fcos:
1126 *dest = ntq_fcos(c, src[0]);
1127 break;
1128
1129 case nir_op_fsign:
1130 *dest = ntq_fsign(c, src[0]);
1131 break;
1132
1133 case nir_op_fabs:
1134 *dest = qir_FMAXABS(c, src[0], src[0]);
1135 break;
1136 case nir_op_iabs:
1137 *dest = qir_MAX(c, src[0],
1138 qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
1139 break;
1140
1141 case nir_op_ibitfield_extract:
1142 *dest = ntq_emit_ibfe(c, src[0], src[1], src[2]);
1143 break;
1144
1145 case nir_op_ubitfield_extract:
1146 *dest = ntq_emit_ubfe(c, src[0], src[1], src[2]);
1147 break;
1148
1149 case nir_op_usadd_4x8:
1150 *dest = qir_V8ADDS(c, src[0], src[1]);
1151 break;
1152
1153 case nir_op_ussub_4x8:
1154 *dest = qir_V8SUBS(c, src[0], src[1]);
1155 break;
1156
1157 case nir_op_umin_4x8:
1158 *dest = qir_V8MIN(c, src[0], src[1]);
1159 break;
1160
1161 case nir_op_umax_4x8:
1162 *dest = qir_V8MAX(c, src[0], src[1]);
1163 break;
1164
1165 case nir_op_umul_unorm_4x8:
1166 *dest = qir_V8MULD(c, src[0], src[1]);
1167 break;
1168
1169 default:
1170 fprintf(stderr, "unknown NIR ALU inst: ");
1171 nir_print_instr(&instr->instr, stderr);
1172 fprintf(stderr, "\n");
1173 abort();
1174 }
1175 }
1176
1177 static void
1178 emit_frag_end(struct vc4_compile *c)
1179 {
1180 struct qreg color;
1181 if (c->output_color_index != -1) {
1182 color = c->outputs[c->output_color_index];
1183 } else {
1184 color = qir_uniform_ui(c, 0);
1185 }
1186
1187 uint32_t discard_cond = QPU_COND_ALWAYS;
1188 if (c->discard.file != QFILE_NULL) {
1189 qir_SF(c, c->discard);
1190 discard_cond = QPU_COND_ZS;
1191 }
1192
1193 if (c->fs_key->stencil_enabled) {
1194 qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 0));
1195 if (c->fs_key->stencil_twoside) {
1196 qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 1));
1197 }
1198 if (c->fs_key->stencil_full_writemasks) {
1199 qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 2));
1200 }
1201 }
1202
1203 if (c->output_sample_mask_index != -1) {
1204 qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1205 }
1206
1207 if (c->fs_key->depth_enabled) {
1208 struct qreg z;
1209 if (c->output_position_index != -1) {
1210 z = qir_FTOI(c, qir_FMUL(c, c->outputs[c->output_position_index + 2],
1211 qir_uniform_f(c, 0xffffff)));
1212 } else {
1213 z = qir_FRAG_Z(c);
1214 }
1215 struct qinst *inst = qir_TLB_Z_WRITE(c, z);
1216 inst->cond = discard_cond;
1217 }
1218
1219 if (!c->msaa_per_sample_output) {
1220 struct qinst *inst = qir_TLB_COLOR_WRITE(c, color);
1221 inst->cond = discard_cond;
1222 } else {
1223 for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
1224 struct qinst *inst = qir_TLB_COLOR_WRITE_MS(c, c->sample_colors[i]);
1225 inst->cond = discard_cond;
1226 }
1227 }
1228 }
1229
1230 static void
1231 emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
1232 {
1233 struct qreg packed = qir_get_temp(c);
1234
1235 for (int i = 0; i < 2; i++) {
1236 struct qreg scale =
1237 qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
1238
1239 struct qreg packed_chan = packed;
1240 packed_chan.pack = QPU_PACK_A_16A + i;
1241
1242 qir_FTOI_dest(c, packed_chan,
1243 qir_FMUL(c,
1244 qir_FMUL(c,
1245 c->outputs[c->output_position_index + i],
1246 scale),
1247 rcp_w));
1248 }
1249
1250 qir_VPM_WRITE(c, packed);
1251 }
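
/* The two FTOIs above land in the A and B 16-bit halves of a single 32-bit
 * VPM word via QPU_PACK_A_16A/16B, producing the packed X/Y screen
 * coordinates the hardware consumes (the viewport scale uniforms are
 * assumed here to already fold in the hardware's fixed-point subpixel
 * scaling).
 */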
1252
1253 static void
1254 emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
1255 {
1256 struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
1257 struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
1258
1259 qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
1260 c->outputs[c->output_position_index + 2],
1261 zscale),
1262 rcp_w),
1263 zoffset));
1264 }
1265
1266 static void
1267 emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
1268 {
1269 qir_VPM_WRITE(c, rcp_w);
1270 }
1271
1272 static void
1273 emit_point_size_write(struct vc4_compile *c)
1274 {
1275 struct qreg point_size;
1276
1277 if (c->output_point_size_index != -1)
1278 point_size = c->outputs[c->output_point_size_index];
1279 else
1280 point_size = qir_uniform_f(c, 1.0);
1281
1282 /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
1283 * BCM21553).
1284 */
1285 point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
1286
1287 qir_VPM_WRITE(c, point_size);
1288 }
1289
1290 /**
1291 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1292 *
1293 * The simulator insists that there be at least one vertex attribute, so
1294 * vc4_draw.c will emit one if it wouldn't have otherwise. The simulator also
1295 * insists that all vertex attributes loaded get read by the VS/CS, so we have
1296 * to consume it here.
1297 */
1298 static void
1299 emit_stub_vpm_read(struct vc4_compile *c)
1300 {
1301 if (c->num_inputs)
1302 return;
1303
1304 c->vattr_sizes[0] = 4;
1305 struct qreg vpm = { QFILE_VPM, 0 };
1306 (void)qir_MOV(c, vpm);
1307 c->num_inputs++;
1308 }
1309
1310 static void
1311 emit_vert_end(struct vc4_compile *c,
1312 struct vc4_varying_slot *fs_inputs,
1313 uint32_t num_fs_inputs)
1314 {
1315 struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
1316
1317 emit_stub_vpm_read(c);
1318
1319 emit_scaled_viewport_write(c, rcp_w);
1320 emit_zs_write(c, rcp_w);
1321 emit_rcp_wc_write(c, rcp_w);
1322 if (c->vs_key->per_vertex_point_size)
1323 emit_point_size_write(c);
1324
1325 for (int i = 0; i < num_fs_inputs; i++) {
1326 struct vc4_varying_slot *input = &fs_inputs[i];
1327 int j;
1328
1329 for (j = 0; j < c->num_outputs; j++) {
1330 struct vc4_varying_slot *output =
1331 &c->output_slots[j];
1332
1333 if (input->slot == output->slot &&
1334 input->swizzle == output->swizzle) {
1335 qir_VPM_WRITE(c, c->outputs[j]);
1336 break;
1337 }
1338 }
1339 /* Emit padding if we didn't find a declared VS output for
1340 * this FS input.
1341 */
1342 if (j == c->num_outputs)
1343 qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
1344 }
1345 }
1346
1347 static void
1348 emit_coord_end(struct vc4_compile *c)
1349 {
1350 struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
1351
1352 emit_stub_vpm_read(c);
1353
1354 for (int i = 0; i < 4; i++)
1355 qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
1356
1357 emit_scaled_viewport_write(c, rcp_w);
1358 emit_zs_write(c, rcp_w);
1359 emit_rcp_wc_write(c, rcp_w);
1360 if (c->vs_key->per_vertex_point_size)
1361 emit_point_size_write(c);
1362 }
1363
1364 static void
1365 vc4_optimize_nir(struct nir_shader *s)
1366 {
1367 bool progress;
1368
1369 do {
1370 progress = false;
1371
1372 nir_lower_vars_to_ssa(s);
1373 nir_lower_alu_to_scalar(s);
1374
1375 progress = nir_copy_prop(s) || progress;
1376 progress = nir_opt_dce(s) || progress;
1377 progress = nir_opt_cse(s) || progress;
1378 progress = nir_opt_peephole_select(s) || progress;
1379 progress = nir_opt_algebraic(s) || progress;
1380 progress = nir_opt_constant_folding(s) || progress;
1381 progress = nir_opt_undef(s) || progress;
1382 } while (progress);
1383 }
1384
1385 static int
1386 driver_location_compare(const void *in_a, const void *in_b)
1387 {
1388 const nir_variable *const *a = in_a;
1389 const nir_variable *const *b = in_b;
1390
1391 return (*a)->data.driver_location - (*b)->data.driver_location;
1392 }
1393
1394 static void
1395 ntq_setup_inputs(struct vc4_compile *c)
1396 {
1397 unsigned num_entries = 0;
1398 nir_foreach_variable(var, &c->s->inputs)
1399 num_entries++;
1400
1401 nir_variable *vars[num_entries];
1402
1403 unsigned i = 0;
1404 nir_foreach_variable(var, &c->s->inputs)
1405 vars[i++] = var;
1406
1407 /* Sort the variables so that we emit the input setup in
1408 * driver_location order. This is required for VPM reads, whose data
1409 * is fetched into the VPM in driver_location (TGSI register index)
1410 * order.
1411 */
1412 qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1413
1414 for (unsigned i = 0; i < num_entries; i++) {
1415 nir_variable *var = vars[i];
1416 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1417 unsigned loc = var->data.driver_location;
1418
1419 assert(array_len == 1);
1420 (void)array_len;
1421 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1422 (loc + 1) * 4);
1423
1424 if (c->stage == QSTAGE_FRAG) {
1425 if (var->data.location == VARYING_SLOT_POS) {
1426 emit_fragcoord_input(c, loc);
1427 } else if (var->data.location == VARYING_SLOT_FACE) {
1428 c->inputs[loc * 4 + 0] = qir_FRAG_REV_FLAG(c);
1429 } else if (var->data.location >= VARYING_SLOT_VAR0 &&
1430 (c->fs_key->point_sprite_mask &
1431 (1 << (var->data.location -
1432 VARYING_SLOT_VAR0)))) {
1433 c->inputs[loc * 4 + 0] = c->point_x;
1434 c->inputs[loc * 4 + 1] = c->point_y;
1435 } else {
1436 emit_fragment_input(c, loc, var->data.location);
1437 }
1438 } else {
1439 emit_vertex_input(c, loc);
1440 }
1441 }
1442 }
1443
1444 static void
1445 ntq_setup_outputs(struct vc4_compile *c)
1446 {
1447 nir_foreach_variable(var, &c->s->outputs) {
1448 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1449 unsigned loc = var->data.driver_location * 4;
1450
1451 assert(array_len == 1);
1452 (void)array_len;
1453
1454 for (int i = 0; i < 4; i++)
1455 add_output(c, loc + i, var->data.location, i);
1456
1457 if (c->stage == QSTAGE_FRAG) {
1458 switch (var->data.location) {
1459 case FRAG_RESULT_COLOR:
1460 case FRAG_RESULT_DATA0:
1461 c->output_color_index = loc;
1462 break;
1463 case FRAG_RESULT_DEPTH:
1464 c->output_position_index = loc;
1465 break;
1466 case FRAG_RESULT_SAMPLE_MASK:
1467 c->output_sample_mask_index = loc;
1468 break;
1469 }
1470 } else {
1471 switch (var->data.location) {
1472 case VARYING_SLOT_POS:
1473 c->output_position_index = loc;
1474 break;
1475 case VARYING_SLOT_PSIZ:
1476 c->output_point_size_index = loc;
1477 break;
1478 }
1479 }
1480 }
1481 }
1482
1483 static void
1484 ntq_setup_uniforms(struct vc4_compile *c)
1485 {
1486 nir_foreach_variable(var, &c->s->uniforms) {
1487 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1488 unsigned array_elem_size = 4 * sizeof(float);
1489
1490 declare_uniform_range(c, var->data.driver_location * array_elem_size,
1491 array_len * array_elem_size);
1492
1493 }
1494 }
1495
1496 /**
1497 * Sets up the mapping from nir_register to struct qreg *.
1498 *
1499 * Each nir_register gets a struct qreg per 32-bit component being stored.
1500 */
1501 static void
1502 ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
1503 {
1504 foreach_list_typed(nir_register, nir_reg, node, list) {
1505 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1506 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1507 array_len *
1508 nir_reg->num_components);
1509
1510 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1511
1512 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1513 qregs[i] = qir_uniform_ui(c, 0);
1514 }
1515 }
1516
1517 static void
1518 ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
1519 {
1520 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1521 for (int i = 0; i < instr->def.num_components; i++)
1522 qregs[i] = qir_uniform_ui(c, instr->value.u[i]);
1523
1524 _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1525 }
1526
1527 static void
1528 ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
1529 {
1530 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1531
1532 /* QIR needs there to be *some* value, so pick 0 (same as for
1533 * ntq_setup_registers()).
1534 */
1535 for (int i = 0; i < instr->def.num_components; i++)
1536 qregs[i] = qir_uniform_ui(c, 0);
1537 }
1538
1539 static void
1540 ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
1541 {
1542 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
1543 nir_const_value *const_offset;
1544 unsigned offset;
1545 struct qreg *dest = NULL;
1546
1547 if (info->has_dest) {
1548 dest = ntq_get_dest(c, &instr->dest);
1549 }
1550
1551 switch (instr->intrinsic) {
1552 case nir_intrinsic_load_uniform:
1553 assert(instr->num_components == 1);
1554 const_offset = nir_src_as_const_value(instr->src[0]);
1555 if (const_offset) {
1556 offset = instr->const_index[0] + const_offset->u[0];
1557 assert(offset % 4 == 0);
1558 /* We need dwords */
1559 offset = offset / 4;
1560 if (offset < VC4_NIR_STATE_UNIFORM_OFFSET) {
1561 *dest = qir_uniform(c, QUNIFORM_UNIFORM,
1562 offset);
1563 } else {
1564 *dest = qir_uniform(c, offset -
1565 VC4_NIR_STATE_UNIFORM_OFFSET,
1566 0);
1567 }
1568 } else {
1569 *dest = indirect_uniform_load(c, instr);
1570 }
1571 break;
1572
1573 case nir_intrinsic_load_user_clip_plane:
1574 *dest = qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1575 instr->const_index[0]);
1576 break;
1577
1578 case nir_intrinsic_load_sample_mask_in:
1579 *dest = qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0);
1580 break;
1581
1582 case nir_intrinsic_load_input:
1583 assert(instr->num_components == 1);
1584 const_offset = nir_src_as_const_value(instr->src[0]);
1585 assert(const_offset && "vc4 doesn't support indirect inputs");
1586 if (instr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT) {
1587 assert(const_offset->u[0] == 0);
1588 /* Reads of the per-sample color need to be done in
1589 * order.
1590 */
1591 int sample_index = (instr->const_index[0] -
1592 VC4_NIR_TLB_COLOR_READ_INPUT);
1593 for (int i = 0; i <= sample_index; i++) {
1594 if (c->color_reads[i].file == QFILE_NULL) {
1595 c->color_reads[i] =
1596 qir_TLB_COLOR_READ(c);
1597 }
1598 }
1599 *dest = c->color_reads[sample_index];
1600 } else {
1601 offset = instr->const_index[0] + const_offset->u[0];
1602 *dest = c->inputs[offset];
1603 }
1604 break;
1605
1606 case nir_intrinsic_store_output:
1607 const_offset = nir_src_as_const_value(instr->src[1]);
1608 assert(const_offset && "vc4 doesn't support indirect outputs");
1609 offset = instr->const_index[0] + const_offset->u[0];
1610
1611 /* MSAA color outputs are the only case where we have an
1612 * output that's not lowered to being a store of a single 32
1613 * bit value.
1614 */
1615 if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
1616 assert(offset == c->output_color_index);
1617 for (int i = 0; i < 4; i++) {
1618 c->sample_colors[i] =
1619 qir_MOV(c, ntq_get_src(c, instr->src[0],
1620 i));
1621 }
1622 } else {
1623 assert(instr->num_components == 1);
1624 c->outputs[offset] =
1625 qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
1626 c->num_outputs = MAX2(c->num_outputs, offset + 1);
1627 }
1628 break;
1629
1630 case nir_intrinsic_discard:
1631 c->discard = qir_uniform_ui(c, ~0);
1632 break;
1633
1634 case nir_intrinsic_discard_if:
1635 if (c->discard.file == QFILE_NULL)
1636 c->discard = qir_uniform_ui(c, 0);
1637 c->discard = qir_OR(c, c->discard,
1638 ntq_get_src(c, instr->src[0], 0));
1639 break;
1640
1641 default:
1642 fprintf(stderr, "Unknown intrinsic: ");
1643 nir_print_instr(&instr->instr, stderr);
1644 fprintf(stderr, "\n");
1645 break;
1646 }
1647 }
1648
1649 static void
1650 ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
1651 {
1652 fprintf(stderr, "general IF statements not handled.\n");
1653 }
1654
1655 static void
1656 ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
1657 {
1658 switch (instr->type) {
1659 case nir_instr_type_alu:
1660 ntq_emit_alu(c, nir_instr_as_alu(instr));
1661 break;
1662
1663 case nir_instr_type_intrinsic:
1664 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
1665 break;
1666
1667 case nir_instr_type_load_const:
1668 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
1669 break;
1670
1671 case nir_instr_type_ssa_undef:
1672 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
1673 break;
1674
1675 case nir_instr_type_tex:
1676 ntq_emit_tex(c, nir_instr_as_tex(instr));
1677 break;
1678
1679 default:
1680 fprintf(stderr, "Unknown NIR instr type: ");
1681 nir_print_instr(instr, stderr);
1682 fprintf(stderr, "\n");
1683 abort();
1684 }
1685 }
1686
1687 static void
1688 ntq_emit_block(struct vc4_compile *c, nir_block *block)
1689 {
1690 nir_foreach_instr(block, instr) {
1691 ntq_emit_instr(c, instr);
1692 }
1693 }
1694
1695 static void
1696 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
1697 {
1698 foreach_list_typed(nir_cf_node, node, node, list) {
1699 switch (node->type) {
1700 /* case nir_cf_node_loop: */
1701 case nir_cf_node_block:
1702 ntq_emit_block(c, nir_cf_node_as_block(node));
1703 break;
1704
1705 case nir_cf_node_if:
1706 ntq_emit_if(c, nir_cf_node_as_if(node));
1707 break;
1708
1709 default:
1710 assert(0);
1711 }
1712 }
1713 }
1714
1715 static void
1716 ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
1717 {
1718 ntq_setup_registers(c, &impl->registers);
1719 ntq_emit_cf_list(c, &impl->body);
1720 }
1721
1722 static void
1723 nir_to_qir(struct vc4_compile *c)
1724 {
1725 ntq_setup_inputs(c);
1726 ntq_setup_outputs(c);
1727 ntq_setup_uniforms(c);
1728 ntq_setup_registers(c, &c->s->registers);
1729
1730 /* Find the main function and emit the body. */
1731 nir_foreach_function(c->s, function) {
1732 assert(strcmp(function->name, "main") == 0);
1733 assert(function->impl);
1734 ntq_emit_impl(c, function->impl);
1735 }
1736 }
1737
1738 static const nir_shader_compiler_options nir_options = {
1739 .lower_extract_byte = true,
1740 .lower_extract_word = true,
1741 .lower_ffma = true,
1742 .lower_flrp = true,
1743 .lower_fpow = true,
1744 .lower_fsat = true,
1745 .lower_fsqrt = true,
1746 .lower_negate = true,
1747 };
1748
1749 static bool
1750 count_nir_instrs_in_block(nir_block *block, void *state)
1751 {
1752 int *count = (int *) state;
1753 nir_foreach_instr(block, instr) {
1754 *count = *count + 1;
1755 }
1756 return true;
1757 }
1758
1759 static int
1760 count_nir_instrs(nir_shader *nir)
1761 {
1762 int count = 0;
1763 nir_foreach_function(nir, function) {
1764 if (!function->impl)
1765 continue;
1766 nir_foreach_block(function->impl, count_nir_instrs_in_block, &count);
1767 }
1768 return count;
1769 }
1770
1771 static struct vc4_compile *
1772 vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
1773 struct vc4_key *key)
1774 {
1775 struct vc4_compile *c = qir_compile_init();
1776
1777 c->stage = stage;
1778 c->shader_state = &key->shader_state->base;
1779 c->program_id = key->shader_state->program_id;
1780 c->variant_id = key->shader_state->compiled_variant_count++;
1781
1782 c->key = key;
1783 switch (stage) {
1784 case QSTAGE_FRAG:
1785 c->fs_key = (struct vc4_fs_key *)key;
1786 if (c->fs_key->is_points) {
1787 c->point_x = emit_fragment_varying(c, ~0, 0);
1788 c->point_y = emit_fragment_varying(c, ~0, 0);
1789 } else if (c->fs_key->is_lines) {
1790 c->line_x = emit_fragment_varying(c, ~0, 0);
1791 }
1792 break;
1793 case QSTAGE_VERT:
1794 c->vs_key = (struct vc4_vs_key *)key;
1795 break;
1796 case QSTAGE_COORD:
1797 c->vs_key = (struct vc4_vs_key *)key;
1798 break;
1799 }
1800
1801 const struct tgsi_token *tokens = key->shader_state->base.tokens;
1802
1803 if (vc4_debug & VC4_DEBUG_TGSI) {
1804 fprintf(stderr, "%s prog %d/%d TGSI:\n",
1805 qir_get_stage_name(c->stage),
1806 c->program_id, c->variant_id);
1807 tgsi_dump(tokens, 0);
1808 }
1809
1810 c->s = tgsi_to_nir(tokens, &nir_options);
1811 nir_opt_global_to_local(c->s);
1812 nir_convert_to_ssa(c->s);
1813
1814 if (stage == QSTAGE_FRAG)
1815 vc4_nir_lower_blend(c);
1816
1817 struct nir_lower_tex_options tex_options = {
1818 /* We would need to implement txs, but we don't want the
1819 * int/float conversions
1820 */
1821 .lower_rect = false,
1822
1823 /* We want to use this, but we don't want to newton-raphson
1824 * its rcp.
1825 */
1826 .lower_txp = false,
1827
1828 /* Apply swizzles to all samplers. */
1829 .swizzle_result = ~0,
1830 };
1831
1832 /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
1833 * The format swizzling applies before sRGB decode, and
1834 * ARB_texture_swizzle is the last thing before returning the sample.
1835 */
1836 for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
1837 enum pipe_format format = c->key->tex[i].format;
1838
1839 if (!format)
1840 continue;
1841
1842 const uint8_t *format_swizzle = vc4_get_format_swizzle(format);
1843
1844 for (int j = 0; j < 4; j++) {
1845 uint8_t arb_swiz = c->key->tex[i].swizzle[j];
1846
1847 if (arb_swiz <= 3) {
1848 tex_options.swizzles[i][j] =
1849 format_swizzle[arb_swiz];
1850 } else {
1851 tex_options.swizzles[i][j] = arb_swiz;
1852 }
1853
1854 /* If ARB_texture_swizzle is reading from the R, G, or
1855 * B channels of an sRGB texture, then we need to
1856 * apply sRGB decode to this channel at sample time.
1857 */
1858 if (arb_swiz < 3 && util_format_is_srgb(format)) {
1859 c->tex_srgb_decode[i] |= (1 << j);
1860 }
1861
1862 }
1863 }
1864
1865 nir_lower_tex(c->s, &tex_options);
1866
1867 if (c->fs_key && c->fs_key->light_twoside)
1868 nir_lower_two_sided_color(c->s);
1869
1870 if (stage == QSTAGE_FRAG)
1871 nir_lower_clip_fs(c->s, c->key->ucp_enables);
1872 else
1873 nir_lower_clip_vs(c->s, c->key->ucp_enables);
1874
1875 vc4_nir_lower_io(c);
1876 vc4_nir_lower_txf_ms(c);
1877 nir_lower_idiv(c->s);
1878 nir_lower_load_const_to_scalar(c->s);
1879
1880 vc4_optimize_nir(c->s);
1881
1882 nir_remove_dead_variables(c->s);
1883
1884 nir_convert_from_ssa(c->s, true);
1885
1886 if (vc4_debug & VC4_DEBUG_SHADERDB) {
1887 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
1888 qir_get_stage_name(c->stage),
1889 c->program_id, c->variant_id,
1890 count_nir_instrs(c->s));
1891 }
1892
1893 if (vc4_debug & VC4_DEBUG_NIR) {
1894 fprintf(stderr, "%s prog %d/%d NIR:\n",
1895 qir_get_stage_name(c->stage),
1896 c->program_id, c->variant_id);
1897 nir_print_shader(c->s, stderr);
1898 }
1899
1900 nir_to_qir(c);
1901
1902 switch (stage) {
1903 case QSTAGE_FRAG:
1904 emit_frag_end(c);
1905 break;
1906 case QSTAGE_VERT:
1907 emit_vert_end(c,
1908 vc4->prog.fs->input_slots,
1909 vc4->prog.fs->num_inputs);
1910 break;
1911 case QSTAGE_COORD:
1912 emit_coord_end(c);
1913 break;
1914 }
1915
1916 if (vc4_debug & VC4_DEBUG_QIR) {
1917 fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
1918 qir_get_stage_name(c->stage),
1919 c->program_id, c->variant_id);
1920 qir_dump(c);
1921 }
1922
1923 qir_optimize(c);
1924 qir_lower_uniforms(c);
1925
1926 qir_schedule_instructions(c);
1927
1928 if (vc4_debug & VC4_DEBUG_QIR) {
1929 fprintf(stderr, "%s prog %d/%d QIR:\n",
1930 qir_get_stage_name(c->stage),
1931 c->program_id, c->variant_id);
1932 qir_dump(c);
1933 }
1934
1935 qir_reorder_uniforms(c);
1936 vc4_generate_code(vc4, c);
1937
1938 if (vc4_debug & VC4_DEBUG_SHADERDB) {
1939 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
1940 qir_get_stage_name(c->stage),
1941 c->program_id, c->variant_id,
1942 c->qpu_inst_count);
1943 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
1944 qir_get_stage_name(c->stage),
1945 c->program_id, c->variant_id,
1946 c->num_uniforms);
1947 }
1948
1949 ralloc_free(c->s);
1950
1951 return c;
1952 }
1953
1954 static void *
1955 vc4_shader_state_create(struct pipe_context *pctx,
1956 const struct pipe_shader_state *cso)
1957 {
1958 struct vc4_context *vc4 = vc4_context(pctx);
1959 struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
1960 if (!so)
1961 return NULL;
1962
1963 so->base.tokens = tgsi_dup_tokens(cso->tokens);
1964 so->program_id = vc4->next_uncompiled_program_id++;
1965
1966 return so;
1967 }
1968
1969 static void
1970 copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
1971 struct vc4_compile *c)
1972 {
1973 int count = c->num_uniforms;
1974 struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
1975
1976 uinfo->count = count;
1977 uinfo->data = ralloc_array(shader, uint32_t, count);
1978 memcpy(uinfo->data, c->uniform_data,
1979 count * sizeof(*uinfo->data));
1980 uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
1981 memcpy(uinfo->contents, c->uniform_contents,
1982 count * sizeof(*uinfo->contents));
1983 uinfo->num_texture_samples = c->num_texture_samples;
1984
1985 vc4_set_shader_uniform_dirty_flags(shader);
1986 }
1987
static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                /* Scan the QIR for varyings that are actually read, so dead
                 * inputs can be dropped from the FS input setup.
                 */
                bool input_live[c->num_input_slots];

                memset(input_live, 0, sizeof(input_live));
                list_for_each_entry(struct qinst, inst, &c->instructions, link) {
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_slots = ralloc_array(shader,
                                                   struct vc4_varying_slot,
                                                   c->num_input_slots);

                for (int i = 0; i < c->num_input_slots; i++) {
                        struct vc4_varying_slot *slot = &c->input_slots[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (slot->slot == (uint8_t)~0)
                                continue;

                        if (slot->slot == VARYING_SLOT_COL0 ||
                            slot->slot == VARYING_SLOT_COL1 ||
                            slot->slot == VARYING_SLOT_BFC0 ||
                            slot->slot == VARYING_SLOT_BFC1) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_slots[shader->num_inputs] = *slot;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;

                /* Build a prefix sum of attribute sizes so each attribute's
                 * offset into the vertex is known, and record which
                 * attributes are live.
                 */
                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                         c->qpu_inst_count * sizeof(uint64_t));

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though.)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += range->size;
                        j++;
                }
        }
        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        /* The key is duplicated out of the caller's stack, since cache
         * entries have to outlive this call.
         */
        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}

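/* Fills in the key fields shared between the FS and VS keys: per-sampler
 * format, swizzle, and either MSAA dimensions or wrap/compare state, plus
 * the enabled user clip planes.
 */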
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;

                if (sampler->texture->nr_samples > 1) {
                        key->tex[i].msaa_width = sampler->texture->width0;
                        key->tex[i].msaa_height = sampler->texture->height0;
                } else {
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}

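/* Rebuilds the FS key from current state and swaps in the matching compiled
 * fragment shader, skipping the work entirely when none of the FS-relevant
 * dirty bits are set.
 */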
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        key->msaa = vc4->rasterizer->base.multisample;
        key->sample_coverage = (vc4->rasterizer->base.multisample &&
                                vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
        key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
        key->sample_alpha_to_one = vc4->blend->alpha_to_one;
        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}

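/* Rebuilds the VS key and updates both the vertex-shader and
 * coordinate-shader variants; the same key is reused with is_coord set for
 * the coordinate shader that runs during binning.
 */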
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        /* Compile the coordinate-shader variant of the same program by
         * flipping is_coord in the otherwise identical key.
         */
        key->is_coord = true;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}

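/* Per-draw entry point: the FS is refreshed first, since the VS key depends
 * on the compiled FS (compiled_fs_id and its input slots).
 */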
void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}

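/* The shader caches hash and compare keys as raw bytes, which is why the
 * keys above are memset to zero before being filled in: any padding in the
 * key structs has to be zeroed for memcmp()-based equality to work.
 */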
static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

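/* Frees a cached compiled variant if it was built from the given uncompiled
 * shader CSO.
 */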
static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}

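/* CSO deletion: evicts every compiled variant built from this shader state
 * from both caches before freeing the stored tokens.
 */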
static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        free((void *)so->base.tokens);
        free(so);
}

static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}

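/* Hooks the shader CSO entry points into the pipe_context and creates the
 * per-stage compiled-shader caches.
 */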
void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}

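/* Tears down both shader caches, dropping each variant's BO reference and
 * freeing its ralloc allocation.
 */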
void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}