gallium/ttn: mark location specially in nir for color0-writes-all
mesa.git: src/gallium/drivers/vc4/vc4_program.c
1 /*
2 * Copyright (c) 2014 Scott Mansell
3 * Copyright © 2014 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <inttypes.h>
26 #include "pipe/p_state.h"
27 #include "util/u_format.h"
28 #include "util/u_hash.h"
29 #include "util/u_math.h"
30 #include "util/u_memory.h"
31 #include "util/u_pack_color.h"
32 #include "util/format_srgb.h"
33 #include "util/ralloc.h"
34 #include "util/hash_table.h"
35 #include "tgsi/tgsi_dump.h"
36 #include "tgsi/tgsi_info.h"
37 #include "tgsi/tgsi_lowering.h"
38 #include "tgsi/tgsi_parse.h"
39 #include "nir/tgsi_to_nir.h"
40
41 #include "vc4_context.h"
42 #include "vc4_qpu.h"
43 #include "vc4_qir.h"
44 #ifdef USE_VC4_SIMULATOR
45 #include "simpenrose/simpenrose.h"
46 #endif
47
48 struct vc4_key {
49 struct vc4_uncompiled_shader *shader_state;
50 struct {
51 enum pipe_format format;
52 unsigned compare_mode:1;
53 unsigned compare_func:3;
54 unsigned wrap_s:3;
55 unsigned wrap_t:3;
56 uint8_t swizzle[4];
57 } tex[VC4_MAX_TEXTURE_SAMPLERS];
58 uint8_t ucp_enables;
59 };
60
61 struct vc4_fs_key {
62 struct vc4_key base;
63 enum pipe_format color_format;
64 bool depth_enabled;
65 bool stencil_enabled;
66 bool stencil_twoside;
67 bool stencil_full_writemasks;
68 bool is_points;
69 bool is_lines;
70 bool alpha_test;
71 bool point_coord_upper_left;
72 bool light_twoside;
73 uint8_t alpha_test_func;
74 uint8_t logicop_func;
75 uint32_t point_sprite_mask;
76
77 struct pipe_rt_blend_state blend;
78 };
79
80 struct vc4_vs_key {
81 struct vc4_key base;
82
83 /**
84 * This is a proxy for the array of FS input semantics, which is
85 * larger than we would want to put in the key.
86 */
87 uint64_t compiled_fs_id;
88
89 enum pipe_format attr_formats[8];
90 bool is_coord;
91 bool per_vertex_point_size;
92 };
93
94 static void
95 resize_qreg_array(struct vc4_compile *c,
96 struct qreg **regs,
97 uint32_t *size,
98 uint32_t decl_size)
99 {
100 if (*size >= decl_size)
101 return;
102
103 uint32_t old_size = *size;
104 *size = MAX2(*size * 2, decl_size);
105 *regs = reralloc(c, *regs, struct qreg, *size);
106 if (!*regs) {
107 fprintf(stderr, "Malloc failure\n");
108 abort();
109 }
110
111 for (uint32_t i = old_size; i < *size; i++)
112 (*regs)[i] = c->undef;
113 }
114
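/**
 * Loads a uniform value addressed by a non-constant offset.
 *
 * The QPU reads its uniforms from a sequential FIFO, so indirectly
 * addressed uniform ranges are also uploaded as a UBO and fetched here
 * with a clamped TEX_DIRECT lookup against QUNIFORM_UBO_ADDR.
 */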
115 static struct qreg
116 indirect_uniform_load(struct vc4_compile *c,
117 struct qreg indirect_offset,
118 unsigned offset)
119 {
120 struct vc4_compiler_ubo_range *range = NULL;
121 unsigned i;
122 for (i = 0; i < c->num_uniform_ranges; i++) {
123 range = &c->ubo_ranges[i];
124 if (offset >= range->src_offset &&
125 offset < range->src_offset + range->size) {
126 break;
127 }
128 }
129 /* The driver-location-based offset always has to be within a declared
130 * uniform range.
131 */
132 assert(range);
133 if (!range->used) {
134 range->used = true;
135 range->dst_offset = c->next_ubo_dst_offset;
136 c->next_ubo_dst_offset += range->size;
137 c->num_ubo_ranges++;
138 }
139
140 offset -= range->src_offset;
141 /* Translate the user's TGSI register index from the TGSI register
142 * base to a byte offset.
143 */
144 indirect_offset = qir_SHL(c, indirect_offset, qir_uniform_ui(c, 4));
145
146 /* Adjust for where we stored the TGSI register base. */
147 indirect_offset = qir_ADD(c, indirect_offset,
148 qir_uniform_ui(c, (range->dst_offset +
149 offset)));
150
151 /* Clamp to [0, array size). Note that MIN/MAX are signed. */
152 indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
153 indirect_offset = qir_MIN(c, indirect_offset,
154 qir_uniform_ui(c, (range->dst_offset +
155 range->size - 4)));
156
157 qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
158 struct qreg r4 = qir_TEX_RESULT(c);
159 c->num_texture_samples++;
160 return qir_MOV(c, r4);
161 }
162
163 static struct qreg *
164 ntq_get_dest(struct vc4_compile *c, nir_dest dest)
165 {
166 assert(!dest.is_ssa);
167 nir_register *reg = dest.reg.reg;
168 struct hash_entry *entry = _mesa_hash_table_search(c->def_ht, reg);
169 assert(reg->num_array_elems == 0);
170 assert(dest.reg.base_offset == 0);
171
172 struct qreg *qregs = entry->data;
173 return qregs;
174 }
175
176 static struct qreg
177 ntq_get_src(struct vc4_compile *c, nir_src src, int i)
178 {
179 struct hash_entry *entry;
180 if (src.is_ssa) {
181 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
182 assert(i < src.ssa->num_components);
183 } else {
184 nir_register *reg = src.reg.reg;
185 entry = _mesa_hash_table_search(c->def_ht, reg);
186 assert(reg->num_array_elems == 0);
187 assert(src.reg.base_offset == 0);
188 assert(i < reg->num_components);
189 }
190
191 struct qreg *qregs = entry->data;
192 return qregs[i];
193 }
194
195 static struct qreg
196 ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
197 unsigned src)
198 {
199 assert(util_is_power_of_two(instr->dest.write_mask));
200 unsigned chan = ffs(instr->dest.write_mask) - 1;
201 struct qreg r = ntq_get_src(c, instr->src[src].src,
202 instr->src[src].swizzle[chan]);
203
204 assert(!instr->src[src].abs);
205 assert(!instr->src[src].negate);
206
207 return r;
208 }
209
210 static struct qreg
211 get_swizzled_channel(struct vc4_compile *c,
212 struct qreg *srcs, int swiz)
213 {
214 switch (swiz) {
215 default:
216 case UTIL_FORMAT_SWIZZLE_NONE:
217 fprintf(stderr, "warning: unknown swizzle\n");
218 /* FALLTHROUGH */
219 case UTIL_FORMAT_SWIZZLE_0:
220 return qir_uniform_f(c, 0.0);
221 case UTIL_FORMAT_SWIZZLE_1:
222 return qir_uniform_f(c, 1.0);
223 case UTIL_FORMAT_SWIZZLE_X:
224 case UTIL_FORMAT_SWIZZLE_Y:
225 case UTIL_FORMAT_SWIZZLE_Z:
226 case UTIL_FORMAT_SWIZZLE_W:
227 return srcs[swiz];
228 }
229 }
230
231 static inline struct qreg
232 qir_SAT(struct vc4_compile *c, struct qreg val)
233 {
234 return qir_FMAX(c,
235 qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
236 qir_uniform_f(c, 0.0));
237 }
238
239 static struct qreg
240 ntq_rcp(struct vc4_compile *c, struct qreg x)
241 {
242 struct qreg r = qir_RCP(c, x);
243
244 /* Apply a Newton-Raphson step to improve the accuracy. */
245 r = qir_FMUL(c, r, qir_FSUB(c,
246 qir_uniform_f(c, 2.0),
247 qir_FMUL(c, x, r)));
248
249 return r;
250 }
251
252 static struct qreg
253 ntq_rsq(struct vc4_compile *c, struct qreg x)
254 {
255 struct qreg r = qir_RSQ(c, x);
256
257 /* Apply a Newton-Raphson step to improve the accuracy. */
258 r = qir_FMUL(c, r, qir_FSUB(c,
259 qir_uniform_f(c, 1.5),
260 qir_FMUL(c,
261 qir_uniform_f(c, 0.5),
262 qir_FMUL(c, x,
263 qir_FMUL(c, r, r)))));
264
265 return r;
266 }
267
268 static struct qreg
269 qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
270 {
271 struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
272 struct qreg high = qir_POW(c,
273 qir_FMUL(c,
274 qir_FADD(c,
275 srgb,
276 qir_uniform_f(c, 0.055)),
277 qir_uniform_f(c, 1.0 / 1.055)),
278 qir_uniform_f(c, 2.4));
279
280 qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
281 return qir_SEL_X_Y_NS(c, low, high);
282 }
283
284 static struct qreg
285 qir_srgb_encode(struct vc4_compile *c, struct qreg linear)
286 {
287 struct qreg low = qir_FMUL(c, linear, qir_uniform_f(c, 12.92));
288 struct qreg high = qir_FSUB(c,
289 qir_FMUL(c,
290 qir_uniform_f(c, 1.055),
291 qir_POW(c,
292 linear,
293 qir_uniform_f(c, 0.41666))),
294 qir_uniform_f(c, 0.055));
295
296 qir_SF(c, qir_FSUB(c, linear, qir_uniform_f(c, 0.0031308)));
297 return qir_SEL_X_Y_NS(c, low, high);
298 }
299
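/**
 * Implements a full 32-bit multiply out of the 24-bit multiplier: the low
 * 24x24 partial product plus the two cross terms shifted up by 24 bits.
 */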
300 static struct qreg
301 ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
302 {
303 struct qreg src0_hi = qir_SHR(c, src0,
304 qir_uniform_ui(c, 24));
305 struct qreg src1_hi = qir_SHR(c, src1,
306 qir_uniform_ui(c, 24));
307
308 struct qreg hilo = qir_MUL24(c, src0_hi, src1);
309 struct qreg lohi = qir_MUL24(c, src0, src1_hi);
310 struct qreg lolo = qir_MUL24(c, src0, src1);
311
312 return qir_ADD(c, lolo, qir_SHL(c,
313 qir_ADD(c, hilo, lohi),
314 qir_uniform_ui(c, 24)));
315 }
316
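/**
 * Emits a NIR texture instruction as a sequence of texture-unit register
 * writes (TEX_R/T/B/S) followed by unpacking of the 32-bit result, with
 * swizzling and sRGB decode applied according to the sampler state baked
 * into the shader key.
 */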
317 static void
318 ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
319 {
320 struct qreg s, t, r, lod, proj, compare;
321 bool is_txb = false, is_txl = false, has_proj = false;
322 unsigned unit = instr->sampler_index;
323
324 for (unsigned i = 0; i < instr->num_srcs; i++) {
325 switch (instr->src[i].src_type) {
326 case nir_tex_src_coord:
327 s = ntq_get_src(c, instr->src[i].src, 0);
328 if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
329 t = qir_uniform_f(c, 0.5);
330 else
331 t = ntq_get_src(c, instr->src[i].src, 1);
332 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
333 r = ntq_get_src(c, instr->src[i].src, 2);
334 break;
335 case nir_tex_src_bias:
336 lod = ntq_get_src(c, instr->src[i].src, 0);
337 is_txb = true;
338 break;
339 case nir_tex_src_lod:
340 lod = ntq_get_src(c, instr->src[i].src, 0);
341 is_txl = true;
342 break;
343 case nir_tex_src_comparitor:
344 compare = ntq_get_src(c, instr->src[i].src, 0);
345 break;
346 case nir_tex_src_projector:
347 proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
348 s = qir_FMUL(c, s, proj);
349 t = qir_FMUL(c, t, proj);
350 has_proj = true;
351 break;
352 default:
353 unreachable("unknown texture source");
354 }
355 }
356
357 struct qreg texture_u[] = {
358 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
359 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
360 qir_uniform(c, QUNIFORM_CONSTANT, 0),
361 qir_uniform(c, QUNIFORM_CONSTANT, 0),
362 };
363 uint32_t next_texture_u = 0;
364
365 /* There is no native support for GL texture rectangle coordinates, so
366 * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
367 * 1]).
368 */
369 if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
370 s = qir_FMUL(c, s,
371 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
372 t = qir_FMUL(c, t,
373 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
374 }
375
376 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
377 texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
378 unit | (is_txl << 16));
379 }
380
381 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
382 struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
383 struct qreg rcp_ma = qir_RCP(c, ma);
384 s = qir_FMUL(c, s, rcp_ma);
385 t = qir_FMUL(c, t, rcp_ma);
386 r = qir_FMUL(c, r, rcp_ma);
387
388 qir_TEX_R(c, r, texture_u[next_texture_u++]);
389 } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
390 c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
391 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
392 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
393 qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
394 texture_u[next_texture_u++]);
395 }
396
397 if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
398 s = qir_SAT(c, s);
399 }
400
401 if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
402 t = qir_SAT(c, t);
403 }
404
405 qir_TEX_T(c, t, texture_u[next_texture_u++]);
406
407 if (is_txl || is_txb)
408 qir_TEX_B(c, lod, texture_u[next_texture_u++]);
409
410 qir_TEX_S(c, s, texture_u[next_texture_u++]);
411
412 c->num_texture_samples++;
413 struct qreg r4 = qir_TEX_RESULT(c);
414
415 enum pipe_format format = c->key->tex[unit].format;
416
417 struct qreg unpacked[4];
418 if (util_format_is_depth_or_stencil(format)) {
419 struct qreg depthf = qir_ITOF(c, qir_SHR(c, r4,
420 qir_uniform_ui(c, 8)));
421 struct qreg normalized = qir_FMUL(c, depthf,
422 qir_uniform_f(c, 1.0f/0xffffff));
423
424 struct qreg depth_output;
425
426 struct qreg one = qir_uniform_f(c, 1.0f);
427 if (c->key->tex[unit].compare_mode) {
428 if (has_proj)
429 compare = qir_FMUL(c, compare, proj);
430
431 switch (c->key->tex[unit].compare_func) {
432 case PIPE_FUNC_NEVER:
433 depth_output = qir_uniform_f(c, 0.0f);
434 break;
435 case PIPE_FUNC_ALWAYS:
436 depth_output = one;
437 break;
438 case PIPE_FUNC_EQUAL:
439 qir_SF(c, qir_FSUB(c, compare, normalized));
440 depth_output = qir_SEL_X_0_ZS(c, one);
441 break;
442 case PIPE_FUNC_NOTEQUAL:
443 qir_SF(c, qir_FSUB(c, compare, normalized));
444 depth_output = qir_SEL_X_0_ZC(c, one);
445 break;
446 case PIPE_FUNC_GREATER:
447 qir_SF(c, qir_FSUB(c, compare, normalized));
448 depth_output = qir_SEL_X_0_NC(c, one);
449 break;
450 case PIPE_FUNC_GEQUAL:
451 qir_SF(c, qir_FSUB(c, normalized, compare));
452 depth_output = qir_SEL_X_0_NS(c, one);
453 break;
454 case PIPE_FUNC_LESS:
455 qir_SF(c, qir_FSUB(c, compare, normalized));
456 depth_output = qir_SEL_X_0_NS(c, one);
457 break;
458 case PIPE_FUNC_LEQUAL:
459 qir_SF(c, qir_FSUB(c, normalized, compare));
460 depth_output = qir_SEL_X_0_NC(c, one);
461 break;
462 }
463 } else {
464 depth_output = normalized;
465 }
466
467 for (int i = 0; i < 4; i++)
468 unpacked[i] = depth_output;
469 } else {
470 for (int i = 0; i < 4; i++)
471 unpacked[i] = qir_R4_UNPACK(c, r4, i);
472 }
473
474 const uint8_t *format_swiz = vc4_get_format_swizzle(format);
475 struct qreg texture_output[4];
476 for (int i = 0; i < 4; i++) {
477 texture_output[i] = get_swizzled_channel(c, unpacked,
478 format_swiz[i]);
479 }
480
481 if (util_format_is_srgb(format)) {
482 for (int i = 0; i < 3; i++)
483 texture_output[i] = qir_srgb_decode(c,
484 texture_output[i]);
485 }
486
487 struct qreg *dest = ntq_get_dest(c, instr->dest);
488 for (int i = 0; i < 4; i++) {
489 dest[i] = get_swizzled_channel(c, texture_output,
490 c->key->tex[unit].swizzle[i]);
491 }
492 }
493
494 /**
495 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
496 * to zero).
497 */
498 static struct qreg
499 ntq_ffract(struct vc4_compile *c, struct qreg src)
500 {
501 struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
502 struct qreg diff = qir_FSUB(c, src, trunc);
503 qir_SF(c, diff);
504 return qir_SEL_X_Y_NS(c,
505 qir_FADD(c, diff, qir_uniform_f(c, 1.0)),
506 diff);
507 }
508
509 /**
510 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
511 * zero).
512 */
513 static struct qreg
514 ntq_ffloor(struct vc4_compile *c, struct qreg src)
515 {
516 struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
517
518 /* This will be < 0 if we truncated and the truncation was of a value
519 * that was < 0 in the first place.
520 */
521 qir_SF(c, qir_FSUB(c, src, trunc));
522
523 return qir_SEL_X_Y_NS(c,
524 qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)),
525 trunc);
526 }
527
528 /**
529 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
530 * zero).
531 */
532 static struct qreg
533 ntq_fceil(struct vc4_compile *c, struct qreg src)
534 {
535 struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
536
537 /* This will be < 0 if we truncated and the truncation was of a value
538 * that was > 0 in the first place.
539 */
540 qir_SF(c, qir_FSUB(c, trunc, src));
541
542 return qir_SEL_X_Y_NS(c,
543 qir_FADD(c, trunc, qir_uniform_f(c, 1.0)),
544 trunc);
545 }
546
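/**
 * Computes sin(x) with a polynomial (Taylor-series) approximation, after
 * range-reducing the argument to a single period centered on zero.
 */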
547 static struct qreg
548 ntq_fsin(struct vc4_compile *c, struct qreg src)
549 {
550 float coeff[] = {
551 -2.0 * M_PI,
552 pow(2.0 * M_PI, 3) / (3 * 2 * 1),
553 -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
554 pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
555 -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
556 };
557
558 struct qreg scaled_x =
559 qir_FMUL(c,
560 src,
561 qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
562
563 struct qreg x = qir_FADD(c,
564 ntq_ffract(c, scaled_x),
565 qir_uniform_f(c, -0.5));
566 struct qreg x2 = qir_FMUL(c, x, x);
567 struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
568 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
569 x = qir_FMUL(c, x, x2);
570 sum = qir_FADD(c,
571 sum,
572 qir_FMUL(c,
573 x,
574 qir_uniform_f(c, coeff[i])));
575 }
576 return sum;
577 }
578
579 static struct qreg
580 ntq_fcos(struct vc4_compile *c, struct qreg src)
581 {
582 float coeff[] = {
583 -1.0f,
584 pow(2.0 * M_PI, 2) / (2 * 1),
585 -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
586 pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
587 -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
588 pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
589 };
590
591 struct qreg scaled_x =
592 qir_FMUL(c, src,
593 qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
594 struct qreg x_frac = qir_FADD(c,
595 ntq_ffract(c, scaled_x),
596 qir_uniform_f(c, -0.5));
597
598 struct qreg sum = qir_uniform_f(c, coeff[0]);
599 struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
600 struct qreg x = x2; /* Current power of x2: x^2, x^4, ..., x^10 */
601 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
602 if (i != 1)
603 x = qir_FMUL(c, x, x2);
604
605 struct qreg mul = qir_FMUL(c,
606 x,
607 qir_uniform_f(c, coeff[i]));
608 /* The loop starts at i == 1, so sum is always initialized
609  * above; just accumulate.
610  */
611 sum = qir_FADD(c, sum, mul);
612 }
613 return sum;
614 }
615
616 static struct qreg
617 ntq_fsign(struct vc4_compile *c, struct qreg src)
618 {
619 qir_SF(c, src);
620 return qir_SEL_X_Y_NC(c,
621 qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0)),
622 qir_uniform_f(c, -1.0));
623 }
624
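/**
 * Converts one channel of a vertex attribute, as read raw from the VPM,
 * into a 32-bit float according to the vertex format's channel
 * description (float, signed/unsigned 8/16/32-bit, normalized or not).
 */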
625 static struct qreg
626 get_channel_from_vpm(struct vc4_compile *c,
627 struct qreg *vpm_reads,
628 uint8_t swiz,
629 const struct util_format_description *desc)
630 {
631 const struct util_format_channel_description *chan =
632 &desc->channel[swiz];
633 struct qreg temp;
634
635 if (swiz > UTIL_FORMAT_SWIZZLE_W)
636 return get_swizzled_channel(c, vpm_reads, swiz);
637 else if (chan->size == 32 &&
638 chan->type == UTIL_FORMAT_TYPE_FLOAT) {
639 return get_swizzled_channel(c, vpm_reads, swiz);
640 } else if (chan->size == 32 &&
641 chan->type == UTIL_FORMAT_TYPE_SIGNED) {
642 if (chan->normalized) {
643 return qir_FMUL(c,
644 qir_ITOF(c, vpm_reads[swiz]),
645 qir_uniform_f(c,
646 1.0 / 0x7fffffff));
647 } else {
648 return qir_ITOF(c, vpm_reads[swiz]);
649 }
650 } else if (chan->size == 8 &&
651 (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
652 chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
653 struct qreg vpm = vpm_reads[0];
654 if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
655 temp = qir_XOR(c, vpm, qir_uniform_ui(c, 0x80808080));
656 if (chan->normalized) {
657 return qir_FSUB(c, qir_FMUL(c,
658 qir_UNPACK_8_F(c, temp, swiz),
659 qir_uniform_f(c, 2.0)),
660 qir_uniform_f(c, 1.0));
661 } else {
662 return qir_FADD(c,
663 qir_ITOF(c,
664 qir_UNPACK_8_I(c, temp,
665 swiz)),
666 qir_uniform_f(c, -128.0));
667 }
668 } else {
669 if (chan->normalized) {
670 return qir_UNPACK_8_F(c, vpm, swiz);
671 } else {
672 return qir_ITOF(c, qir_UNPACK_8_I(c, vpm, swiz));
673 }
674 }
675 } else if (chan->size == 16 &&
676 (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
677 chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
678 struct qreg vpm = vpm_reads[swiz / 2];
679
680 /* Note that UNPACK_16F eats a half float, not ints, so we use
681 * UNPACK_16_I for all of these.
682 */
683 if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
684 temp = qir_ITOF(c, qir_UNPACK_16_I(c, vpm, swiz % 2));
685 if (chan->normalized) {
686 return qir_FMUL(c, temp,
687 qir_uniform_f(c, 1/32768.0f));
688 } else {
689 return temp;
690 }
691 } else {
692 /* UNPACK_16I sign-extends, so we have to emit ANDs. */
693 temp = vpm;
694 if (swiz == 1 || swiz == 3)
695 temp = qir_UNPACK_16_I(c, temp, 1);
696 temp = qir_AND(c, temp, qir_uniform_ui(c, 0xffff));
697 temp = qir_ITOF(c, temp);
698
699 if (chan->normalized) {
700 return qir_FMUL(c, temp,
701 qir_uniform_f(c, 1 / 65535.0));
702 } else {
703 return temp;
704 }
705 }
706 } else {
707 return c->undef;
708 }
709 }
710
711 static void
712 emit_vertex_input(struct vc4_compile *c, int attr)
713 {
714 enum pipe_format format = c->vs_key->attr_formats[attr];
715 uint32_t attr_size = util_format_get_blocksize(format);
716 struct qreg vpm_reads[4];
717
718 c->vattr_sizes[attr] = align(attr_size, 4);
719 for (int i = 0; i < align(attr_size, 4) / 4; i++) {
720 struct qreg vpm = { QFILE_VPM, attr * 4 + i };
721 vpm_reads[i] = qir_MOV(c, vpm);
722 c->num_inputs++;
723 }
724
725 bool format_warned = false;
726 const struct util_format_description *desc =
727 util_format_description(format);
728
729 for (int i = 0; i < 4; i++) {
730 uint8_t swiz = desc->swizzle[i];
731 struct qreg result = get_channel_from_vpm(c, vpm_reads,
732 swiz, desc);
733
734 if (result.file == QFILE_NULL) {
735 if (!format_warned) {
736 fprintf(stderr,
737 "vtx element %d unsupported type: %s\n",
738 attr, util_format_name(format));
739 format_warned = true;
740 }
741 result = qir_uniform_f(c, 0.0);
742 }
743 c->inputs[attr * 4 + i] = result;
744 }
745 }
746
747 static void
748 emit_fragcoord_input(struct vc4_compile *c, int attr)
749 {
750 c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
751 c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
752 c->inputs[attr * 4 + 2] =
753 qir_FMUL(c,
754 qir_ITOF(c, qir_FRAG_Z(c)),
755 qir_uniform_f(c, 1.0 / 0xffffff));
756 c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
757 }
758
759 static void
760 emit_point_coord_input(struct vc4_compile *c, int attr)
761 {
762 if (c->point_x.file == QFILE_NULL) {
763 c->point_x = qir_uniform_f(c, 0.0);
764 c->point_y = qir_uniform_f(c, 0.0);
765 }
766
767 c->inputs[attr * 4 + 0] = c->point_x;
768 if (c->fs_key->point_coord_upper_left) {
769 c->inputs[attr * 4 + 1] = qir_FSUB(c,
770 qir_uniform_f(c, 1.0),
771 c->point_y);
772 } else {
773 c->inputs[attr * 4 + 1] = c->point_y;
774 }
775 c->inputs[attr * 4 + 2] = qir_uniform_f(c, 0.0);
776 c->inputs[attr * 4 + 3] = qir_uniform_f(c, 1.0);
777 }
778
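/**
 * Allocates the next varying slot and emits its perspective-correct
 * interpolation: the raw varying is multiplied by the fragment's W and
 * the C coefficient is added with VARY_ADD_C.
 */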
779 static struct qreg
780 emit_fragment_varying(struct vc4_compile *c, uint8_t semantic,
781 uint8_t index, uint8_t swizzle)
782 {
783 uint32_t i = c->num_input_semantics++;
784 struct qreg vary = {
785 QFILE_VARY,
786 i
787 };
788
789 if (c->num_input_semantics >= c->input_semantics_array_size) {
790 c->input_semantics_array_size =
791 MAX2(4, c->input_semantics_array_size * 2);
792
793 c->input_semantics = reralloc(c, c->input_semantics,
794 struct vc4_varying_semantic,
795 c->input_semantics_array_size);
796 }
797
798 c->input_semantics[i].semantic = semantic;
799 c->input_semantics[i].index = index;
800 c->input_semantics[i].swizzle = swizzle;
801
802 return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
803 }
804
805 static void
806 emit_fragment_input(struct vc4_compile *c, int attr,
807 unsigned semantic_name, unsigned semantic_index)
808 {
809 for (int i = 0; i < 4; i++) {
810 c->inputs[attr * 4 + i] =
811 emit_fragment_varying(c,
812 semantic_name,
813 semantic_index,
814 i);
815 c->num_inputs++;
816 }
817 }
818
819 static void
820 emit_face_input(struct vc4_compile *c, int attr)
821 {
822 c->inputs[attr * 4 + 0] = qir_FSUB(c,
823 qir_uniform_f(c, 1.0),
824 qir_FMUL(c,
825 qir_ITOF(c, qir_FRAG_REV_FLAG(c)),
826 qir_uniform_f(c, 2.0)));
827 c->inputs[attr * 4 + 1] = qir_uniform_f(c, 0.0);
828 c->inputs[attr * 4 + 2] = qir_uniform_f(c, 0.0);
829 c->inputs[attr * 4 + 3] = qir_uniform_f(c, 1.0);
830 }
831
832 static void
833 add_output(struct vc4_compile *c,
834 uint32_t decl_offset,
835 uint8_t semantic_name,
836 uint8_t semantic_index,
837 uint8_t semantic_swizzle)
838 {
839 uint32_t old_array_size = c->outputs_array_size;
840 resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
841 decl_offset + 1);
842
843 if (old_array_size != c->outputs_array_size) {
844 c->output_semantics = reralloc(c,
845 c->output_semantics,
846 struct vc4_varying_semantic,
847 c->outputs_array_size);
848 }
849
850 c->output_semantics[decl_offset].semantic = semantic_name;
851 c->output_semantics[decl_offset].index = semantic_index;
852 c->output_semantics[decl_offset].swizzle = semantic_swizzle;
853 }
854
855 static void
856 declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
857 {
858 unsigned array_id = c->num_uniform_ranges++;
859 if (array_id >= c->ubo_ranges_array_size) {
860 c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
861 array_id + 1);
862 c->ubo_ranges = reralloc(c, c->ubo_ranges,
863 struct vc4_compiler_ubo_range,
864 c->ubo_ranges_array_size);
865 }
866
867 c->ubo_ranges[array_id].dst_offset = 0;
868 c->ubo_ranges[array_id].src_offset = start;
869 c->ubo_ranges[array_id].size = size;
870 c->ubo_ranges[array_id].used = false;
871 }
872
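/**
 * Translates one NIR ALU instruction into QIR.  Most opcodes map to a
 * single QIR instruction; comparisons and selects go through the
 * qir_SF()/qir_SEL_* condition-flag pattern.
 */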
873 static void
874 ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
875 {
876 /* Vectors are special in that they have non-scalarized writemasks:
877  * the first swizzled channel of each argument is moved, in order,
878  * into the corresponding writemask channel.
879  */
880 if (instr->op == nir_op_vec2 ||
881 instr->op == nir_op_vec3 ||
882 instr->op == nir_op_vec4) {
883 struct qreg srcs[4];
884 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
885 srcs[i] = ntq_get_src(c, instr->src[i].src,
886 instr->src[i].swizzle[0]);
887 struct qreg *dest = ntq_get_dest(c, instr->dest.dest);
888 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
889 dest[i] = srcs[i];
890 return;
891 }
892
893 /* General case: We can just grab the one used channel per src. */
894 struct qreg src[nir_op_infos[instr->op].num_inputs];
895 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
896 src[i] = ntq_get_alu_src(c, instr, i);
897 }
898
899 /* Pick the channel to store the output in. */
900 assert(!instr->dest.saturate);
901 struct qreg *dest = ntq_get_dest(c, instr->dest.dest);
902 assert(util_is_power_of_two(instr->dest.write_mask));
903 dest += ffs(instr->dest.write_mask) - 1;
904
905 switch (instr->op) {
906 case nir_op_fmov:
907 case nir_op_imov:
908 *dest = qir_MOV(c, src[0]);
909 break;
910 case nir_op_fmul:
911 *dest = qir_FMUL(c, src[0], src[1]);
912 break;
913 case nir_op_fadd:
914 *dest = qir_FADD(c, src[0], src[1]);
915 break;
916 case nir_op_fsub:
917 *dest = qir_FSUB(c, src[0], src[1]);
918 break;
919 case nir_op_fmin:
920 *dest = qir_FMIN(c, src[0], src[1]);
921 break;
922 case nir_op_fmax:
923 *dest = qir_FMAX(c, src[0], src[1]);
924 break;
925
926 case nir_op_f2i:
927 case nir_op_f2u:
928 *dest = qir_FTOI(c, src[0]);
929 break;
930 case nir_op_i2f:
931 case nir_op_u2f:
932 *dest = qir_ITOF(c, src[0]);
933 break;
934 case nir_op_b2f:
935 *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
936 break;
937 case nir_op_b2i:
938 *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
939 break;
940 case nir_op_i2b:
941 case nir_op_f2b:
942 qir_SF(c, src[0]);
943 *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
944 break;
945
946 case nir_op_iadd:
947 *dest = qir_ADD(c, src[0], src[1]);
948 break;
949 case nir_op_ushr:
950 *dest = qir_SHR(c, src[0], src[1]);
951 break;
952 case nir_op_isub:
953 *dest = qir_SUB(c, src[0], src[1]);
954 break;
955 case nir_op_ishr:
956 *dest = qir_ASR(c, src[0], src[1]);
957 break;
958 case nir_op_ishl:
959 *dest = qir_SHL(c, src[0], src[1]);
960 break;
961 case nir_op_imin:
962 *dest = qir_MIN(c, src[0], src[1]);
963 break;
964 case nir_op_imax:
965 *dest = qir_MAX(c, src[0], src[1]);
966 break;
967 case nir_op_iand:
968 *dest = qir_AND(c, src[0], src[1]);
969 break;
970 case nir_op_ior:
971 *dest = qir_OR(c, src[0], src[1]);
972 break;
973 case nir_op_ixor:
974 *dest = qir_XOR(c, src[0], src[1]);
975 break;
976 case nir_op_inot:
977 *dest = qir_NOT(c, src[0]);
978 break;
979
980 case nir_op_imul:
981 *dest = ntq_umul(c, src[0], src[1]);
982 break;
983
984 case nir_op_seq:
985 qir_SF(c, qir_FSUB(c, src[0], src[1]));
986 *dest = qir_SEL_X_0_ZS(c, qir_uniform_f(c, 1.0));
987 break;
988 case nir_op_sne:
989 qir_SF(c, qir_FSUB(c, src[0], src[1]));
990 *dest = qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0));
991 break;
992 case nir_op_sge:
993 qir_SF(c, qir_FSUB(c, src[0], src[1]));
994 *dest = qir_SEL_X_0_NC(c, qir_uniform_f(c, 1.0));
995 break;
996 case nir_op_slt:
997 qir_SF(c, qir_FSUB(c, src[0], src[1]));
998 *dest = qir_SEL_X_0_NS(c, qir_uniform_f(c, 1.0));
999 break;
1000 case nir_op_feq:
1001 qir_SF(c, qir_FSUB(c, src[0], src[1]));
1002 *dest = qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));
1003 break;
1004 case nir_op_fne:
1005 qir_SF(c, qir_FSUB(c, src[0], src[1]));
1006 *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
1007 break;
1008 case nir_op_fge:
1009 qir_SF(c, qir_FSUB(c, src[0], src[1]));
1010 *dest = qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
1011 break;
1012 case nir_op_flt:
1013 qir_SF(c, qir_FSUB(c, src[0], src[1]));
1014 *dest = qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));
1015 break;
1016 case nir_op_ieq:
1017 qir_SF(c, qir_SUB(c, src[0], src[1]));
1018 *dest = qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));
1019 break;
1020 case nir_op_ine:
1021 qir_SF(c, qir_SUB(c, src[0], src[1]));
1022 *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
1023 break;
1024 case nir_op_ige:
1025 qir_SF(c, qir_SUB(c, src[0], src[1]));
1026 *dest = qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
1027 break;
1028 case nir_op_ilt:
1029 qir_SF(c, qir_SUB(c, src[0], src[1]));
1030 *dest = qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));
1031 break;
1032
1033 case nir_op_bcsel:
1034 qir_SF(c, src[0]);
1035 *dest = qir_SEL_X_Y_NS(c, src[1], src[2]);
1036 break;
1037 case nir_op_fcsel:
1038 qir_SF(c, src[0]);
1039 *dest = qir_SEL_X_Y_ZC(c, src[1], src[2]);
1040 break;
1041
1042 case nir_op_frcp:
1043 *dest = ntq_rcp(c, src[0]);
1044 break;
1045 case nir_op_frsq:
1046 *dest = ntq_rsq(c, src[0]);
1047 break;
1048 case nir_op_fexp2:
1049 *dest = qir_EXP2(c, src[0]);
1050 break;
1051 case nir_op_flog2:
1052 *dest = qir_LOG2(c, src[0]);
1053 break;
1054
1055 case nir_op_ftrunc:
1056 *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
1057 break;
1058 case nir_op_fceil:
1059 *dest = ntq_fceil(c, src[0]);
1060 break;
1061 case nir_op_ffract:
1062 *dest = ntq_ffract(c, src[0]);
1063 break;
1064 case nir_op_ffloor:
1065 *dest = ntq_ffloor(c, src[0]);
1066 break;
1067
1068 case nir_op_fsin:
1069 *dest = ntq_fsin(c, src[0]);
1070 break;
1071 case nir_op_fcos:
1072 *dest = ntq_fcos(c, src[0]);
1073 break;
1074
1075 case nir_op_fsign:
1076 *dest = ntq_fsign(c, src[0]);
1077 break;
1078
1079 case nir_op_fabs:
1080 *dest = qir_FMAXABS(c, src[0], src[0]);
1081 break;
1082 case nir_op_iabs:
1083 *dest = qir_MAX(c, src[0],
1084 qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
1085 break;
1086
1087 default:
1088 fprintf(stderr, "unknown NIR ALU inst: ");
1089 nir_print_instr(&instr->instr, stderr);
1090 fprintf(stderr, "\n");
1091 abort();
1092 }
1093 }
1094
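/**
 * Returns val scaled by the given blend factor for one color channel,
 * used by vc4_blend() to build up the full blend equation.
 */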
1095 static struct qreg
1096 vc4_blend_channel(struct vc4_compile *c,
1097 struct qreg *dst,
1098 struct qreg *src,
1099 struct qreg val,
1100 unsigned factor,
1101 int channel)
1102 {
1103 switch(factor) {
1104 case PIPE_BLENDFACTOR_ONE:
1105 return val;
1106 case PIPE_BLENDFACTOR_SRC_COLOR:
1107 return qir_FMUL(c, val, src[channel]);
1108 case PIPE_BLENDFACTOR_SRC_ALPHA:
1109 return qir_FMUL(c, val, src[3]);
1110 case PIPE_BLENDFACTOR_DST_ALPHA:
1111 return qir_FMUL(c, val, dst[3]);
1112 case PIPE_BLENDFACTOR_DST_COLOR:
1113 return qir_FMUL(c, val, dst[channel]);
1114 case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
1115 if (channel != 3) {
1116 return qir_FMUL(c,
1117 val,
1118 qir_FMIN(c,
1119 src[3],
1120 qir_FSUB(c,
1121 qir_uniform_f(c, 1.0),
1122 dst[3])));
1123 } else {
1124 return val;
1125 }
1126 case PIPE_BLENDFACTOR_CONST_COLOR:
1127 return qir_FMUL(c, val,
1128 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR,
1129 channel));
1130 case PIPE_BLENDFACTOR_CONST_ALPHA:
1131 return qir_FMUL(c, val,
1132 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR, 3));
1133 case PIPE_BLENDFACTOR_ZERO:
1134 return qir_uniform_f(c, 0.0);
1135 case PIPE_BLENDFACTOR_INV_SRC_COLOR:
1136 return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
1137 src[channel]));
1138 case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
1139 return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
1140 src[3]));
1141 case PIPE_BLENDFACTOR_INV_DST_ALPHA:
1142 return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
1143 dst[3]));
1144 case PIPE_BLENDFACTOR_INV_DST_COLOR:
1145 return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
1146 dst[channel]));
1147 case PIPE_BLENDFACTOR_INV_CONST_COLOR:
1148 return qir_FMUL(c, val,
1149 qir_FSUB(c, qir_uniform_f(c, 1.0),
1150 qir_uniform(c,
1151 QUNIFORM_BLEND_CONST_COLOR,
1152 channel)));
1153 case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
1154 return qir_FMUL(c, val,
1155 qir_FSUB(c, qir_uniform_f(c, 1.0),
1156 qir_uniform(c,
1157 QUNIFORM_BLEND_CONST_COLOR,
1158 3)));
1159
1160 default:
1161 case PIPE_BLENDFACTOR_SRC1_COLOR:
1162 case PIPE_BLENDFACTOR_SRC1_ALPHA:
1163 case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
1164 case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
1165 /* Unsupported. */
1166 fprintf(stderr, "Unknown blend factor %d\n", factor);
1167 return val;
1168 }
1169 }
1170
1171 static struct qreg
1172 vc4_blend_func(struct vc4_compile *c,
1173 struct qreg src, struct qreg dst,
1174 unsigned func)
1175 {
1176 switch (func) {
1177 case PIPE_BLEND_ADD:
1178 return qir_FADD(c, src, dst);
1179 case PIPE_BLEND_SUBTRACT:
1180 return qir_FSUB(c, src, dst);
1181 case PIPE_BLEND_REVERSE_SUBTRACT:
1182 return qir_FSUB(c, dst, src);
1183 case PIPE_BLEND_MIN:
1184 return qir_FMIN(c, src, dst);
1185 case PIPE_BLEND_MAX:
1186 return qir_FMAX(c, src, dst);
1187
1188 default:
1189 /* Unsupported. */
1190 fprintf(stderr, "Unknown blend func %d\n", func);
1191 return src;
1192
1193 }
1194 }
1195
1196 /**
1197 * Implements fixed function blending in shader code.
1198 *
1199 * VC4 doesn't have any hardware support for blending. Instead, you read the
1200 * current contents of the destination from the tile buffer after having
1201 * waited for the scoreboard (which is handled by vc4_qpu_emit.c), then do
1202 * math using your output color and that destination value, and update the
1203 * output color appropriately.
1204 */
1205 static void
1206 vc4_blend(struct vc4_compile *c, struct qreg *result,
1207 struct qreg *dst_color, struct qreg *src_color)
1208 {
1209 struct pipe_rt_blend_state *blend = &c->fs_key->blend;
1210
1211 if (!blend->blend_enable) {
1212 for (int i = 0; i < 4; i++)
1213 result[i] = src_color[i];
1214 return;
1215 }
1216
1217 struct qreg clamped_src[4];
1218 struct qreg clamped_dst[4];
1219 for (int i = 0; i < 4; i++) {
1220 clamped_src[i] = qir_SAT(c, src_color[i]);
1221 clamped_dst[i] = qir_SAT(c, dst_color[i]);
1222 }
1223 src_color = clamped_src;
1224 dst_color = clamped_dst;
1225
1226 struct qreg src_blend[4], dst_blend[4];
1227 for (int i = 0; i < 3; i++) {
1228 src_blend[i] = vc4_blend_channel(c,
1229 dst_color, src_color,
1230 src_color[i],
1231 blend->rgb_src_factor, i);
1232 dst_blend[i] = vc4_blend_channel(c,
1233 dst_color, src_color,
1234 dst_color[i],
1235 blend->rgb_dst_factor, i);
1236 }
1237 src_blend[3] = vc4_blend_channel(c,
1238 dst_color, src_color,
1239 src_color[3],
1240 blend->alpha_src_factor, 3);
1241 dst_blend[3] = vc4_blend_channel(c,
1242 dst_color, src_color,
1243 dst_color[3],
1244 blend->alpha_dst_factor, 3);
1245
1246 for (int i = 0; i < 3; i++) {
1247 result[i] = vc4_blend_func(c,
1248 src_blend[i], dst_blend[i],
1249 blend->rgb_func);
1250 }
1251 result[3] = vc4_blend_func(c,
1252 src_blend[3], dst_blend[3],
1253 blend->alpha_func);
1254 }
1255
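/**
 * Emulates user clip planes in the fragment shader: each enabled plane's
 * clip distance arrives as a hidden varying, and fragments with a
 * negative distance are folded into the discard mask.
 */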
1256 static void
1257 clip_distance_discard(struct vc4_compile *c)
1258 {
1259 for (int i = 0; i < PIPE_MAX_CLIP_PLANES; i++) {
1260 if (!(c->key->ucp_enables & (1 << i)))
1261 continue;
1262
1263 struct qreg dist = emit_fragment_varying(c,
1264 TGSI_SEMANTIC_CLIPDIST,
1265 i,
1266 TGSI_SWIZZLE_X);
1267
1268 qir_SF(c, dist);
1269
1270 if (c->discard.file == QFILE_NULL)
1271 c->discard = qir_uniform_ui(c, 0);
1272
1273 c->discard = qir_SEL_X_Y_NS(c, qir_uniform_ui(c, ~0),
1274 c->discard);
1275 }
1276 }
1277
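/**
 * Implements the GL alpha test by folding fragments that fail the
 * comparison against the alpha reference value into the discard mask.
 */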
1278 static void
1279 alpha_test_discard(struct vc4_compile *c)
1280 {
1281 struct qreg src_alpha;
1282 struct qreg alpha_ref = qir_uniform(c, QUNIFORM_ALPHA_REF, 0);
1283
1284 if (!c->fs_key->alpha_test)
1285 return;
1286
1287 if (c->output_color_index != -1)
1288 src_alpha = c->outputs[c->output_color_index + 3];
1289 else
1290 src_alpha = qir_uniform_f(c, 1.0);
1291
1292 if (c->discard.file == QFILE_NULL)
1293 c->discard = qir_uniform_ui(c, 0);
1294
1295 switch (c->fs_key->alpha_test_func) {
1296 case PIPE_FUNC_NEVER:
1297 c->discard = qir_uniform_ui(c, ~0);
1298 break;
1299 case PIPE_FUNC_ALWAYS:
1300 break;
1301 case PIPE_FUNC_EQUAL:
1302 qir_SF(c, qir_FSUB(c, src_alpha, alpha_ref));
1303 c->discard = qir_SEL_X_Y_ZS(c, c->discard,
1304 qir_uniform_ui(c, ~0));
1305 break;
1306 case PIPE_FUNC_NOTEQUAL:
1307 qir_SF(c, qir_FSUB(c, src_alpha, alpha_ref));
1308 c->discard = qir_SEL_X_Y_ZC(c, c->discard,
1309 qir_uniform_ui(c, ~0));
1310 break;
1311 case PIPE_FUNC_GREATER:
1312 qir_SF(c, qir_FSUB(c, src_alpha, alpha_ref));
1313 c->discard = qir_SEL_X_Y_NC(c, c->discard,
1314 qir_uniform_ui(c, ~0));
1315 break;
1316 case PIPE_FUNC_GEQUAL:
1317 qir_SF(c, qir_FSUB(c, alpha_ref, src_alpha));
1318 c->discard = qir_SEL_X_Y_NS(c, c->discard,
1319 qir_uniform_ui(c, ~0));
1320 break;
1321 case PIPE_FUNC_LESS:
1322 qir_SF(c, qir_FSUB(c, src_alpha, alpha_ref));
1323 c->discard = qir_SEL_X_Y_NS(c, c->discard,
1324 qir_uniform_ui(c, ~0));
1325 break;
1326 case PIPE_FUNC_LEQUAL:
1327 qir_SF(c, qir_FSUB(c, alpha_ref, src_alpha));
1328 c->discard = qir_SEL_X_Y_NC(c, c->discard,
1329 qir_uniform_ui(c, ~0));
1330 break;
1331 }
1332 }
1333
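/**
 * Applies the GL logic op to the packed 8888 source and destination
 * colors, for logicop_func values other than PIPE_LOGICOP_COPY.
 */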
1334 static struct qreg
1335 vc4_logicop(struct vc4_compile *c, struct qreg src, struct qreg dst)
1336 {
1337 switch (c->fs_key->logicop_func) {
1338 case PIPE_LOGICOP_CLEAR:
1339 return qir_uniform_f(c, 0.0);
1340 case PIPE_LOGICOP_NOR:
1341 return qir_NOT(c, qir_OR(c, src, dst));
1342 case PIPE_LOGICOP_AND_INVERTED:
1343 return qir_AND(c, qir_NOT(c, src), dst);
1344 case PIPE_LOGICOP_COPY_INVERTED:
1345 return qir_NOT(c, src);
1346 case PIPE_LOGICOP_AND_REVERSE:
1347 return qir_AND(c, src, qir_NOT(c, dst));
1348 case PIPE_LOGICOP_INVERT:
1349 return qir_NOT(c, dst);
1350 case PIPE_LOGICOP_XOR:
1351 return qir_XOR(c, src, dst);
1352 case PIPE_LOGICOP_NAND:
1353 return qir_NOT(c, qir_AND(c, src, dst));
1354 case PIPE_LOGICOP_AND:
1355 return qir_AND(c, src, dst);
1356 case PIPE_LOGICOP_EQUIV:
1357 return qir_NOT(c, qir_XOR(c, src, dst));
1358 case PIPE_LOGICOP_NOOP:
1359 return dst;
1360 case PIPE_LOGICOP_OR_INVERTED:
1361 return qir_OR(c, qir_NOT(c, src), dst);
1362 case PIPE_LOGICOP_OR_REVERSE:
1363 return qir_OR(c, src, qir_NOT(c, dst));
1364 case PIPE_LOGICOP_OR:
1365 return qir_OR(c, src, dst);
1366 case PIPE_LOGICOP_SET:
1367 return qir_uniform_ui(c, ~0);
1368 case PIPE_LOGICOP_COPY:
1369 default:
1370 return src;
1371 }
1372 }
1373
1374 /**
1375 * Applies the GL blending pipeline and returns the packed (8888) output
1376 * color.
1377 */
1378 static struct qreg
1379 blend_pipeline(struct vc4_compile *c)
1380 {
1381 enum pipe_format color_format = c->fs_key->color_format;
1382 const uint8_t *format_swiz = vc4_get_format_swizzle(color_format);
1383 struct qreg tlb_read_color[4] = { c->undef, c->undef, c->undef, c->undef };
1384 struct qreg dst_color[4] = { c->undef, c->undef, c->undef, c->undef };
1385 struct qreg linear_dst_color[4] = { c->undef, c->undef, c->undef, c->undef };
1386 struct qreg packed_dst_color = c->undef;
1387
1388 if (c->fs_key->blend.blend_enable ||
1389 c->fs_key->blend.colormask != 0xf ||
1390 c->fs_key->logicop_func != PIPE_LOGICOP_COPY) {
1391 struct qreg r4 = qir_TLB_COLOR_READ(c);
1392 for (int i = 0; i < 4; i++)
1393 tlb_read_color[i] = qir_R4_UNPACK(c, r4, i);
1394 for (int i = 0; i < 4; i++) {
1395 dst_color[i] = get_swizzled_channel(c,
1396 tlb_read_color,
1397 format_swiz[i]);
1398 if (util_format_is_srgb(color_format) && i != 3) {
1399 linear_dst_color[i] =
1400 qir_srgb_decode(c, dst_color[i]);
1401 } else {
1402 linear_dst_color[i] = dst_color[i];
1403 }
1404 }
1405
1406 /* Save the packed value for logic ops.  Can't reuse r4
1407  * because other things (like the sRGB decode) might smash it.
1408  */
1409 packed_dst_color = qir_MOV(c, r4);
1410 }
1411
1412 struct qreg undef_array[4] = { c->undef, c->undef, c->undef, c->undef };
1413 const struct qreg *output_colors = (c->output_color_index != -1 ?
1414 c->outputs + c->output_color_index :
1415 undef_array);
1416 struct qreg blend_src_color[4];
1417 for (int i = 0; i < 4; i++)
1418 blend_src_color[i] = output_colors[i];
1419
1420 struct qreg blend_color[4];
1421 vc4_blend(c, blend_color, linear_dst_color, blend_src_color);
1422
1423 if (util_format_is_srgb(color_format)) {
1424 for (int i = 0; i < 3; i++)
1425 blend_color[i] = qir_srgb_encode(c, blend_color[i]);
1426 }
1427
1428 /* Debug: Sometimes you're getting a black output and just want to see
1429 * if the FS is getting executed at all. Spam magenta into the color
1430 * output.
1431 */
1432 if (0) {
1433 blend_color[0] = qir_uniform_f(c, 1.0);
1434 blend_color[1] = qir_uniform_f(c, 0.0);
1435 blend_color[2] = qir_uniform_f(c, 1.0);
1436 blend_color[3] = qir_uniform_f(c, 0.5);
1437 }
1438
1439 struct qreg swizzled_outputs[4];
1440 for (int i = 0; i < 4; i++) {
1441 swizzled_outputs[i] = get_swizzled_channel(c, blend_color,
1442 format_swiz[i]);
1443 }
1444
1445 struct qreg packed_color = c->undef;
1446 for (int i = 0; i < 4; i++) {
1447 if (swizzled_outputs[i].file == QFILE_NULL)
1448 continue;
1449 if (packed_color.file == QFILE_NULL) {
1450 packed_color = qir_PACK_8888_F(c, swizzled_outputs[i]);
1451 } else {
1452 packed_color = qir_PACK_8_F(c,
1453 packed_color,
1454 swizzled_outputs[i],
1455 i);
1456 }
1457 }
1458
1459 if (packed_color.file == QFILE_NULL)
1460 packed_color = qir_uniform_ui(c, 0);
1461
1462 if (c->fs_key->logicop_func != PIPE_LOGICOP_COPY) {
1463 packed_color = vc4_logicop(c, packed_color, packed_dst_color);
1464 }
1465
1466 /* If the bit isn't set in the color mask, then just return the
1467 * original dst color, instead.
1468 */
1469 uint32_t colormask = 0xffffffff;
1470 for (int i = 0; i < 4; i++) {
1471 if (format_swiz[i] < 4 &&
1472 !(c->fs_key->blend.colormask & (1 << format_swiz[i]))) {
1473 colormask &= ~(0xff << (i * 8));
1474 }
1475 }
1476 if (colormask != 0xffffffff) {
1477 packed_color = qir_OR(c,
1478 qir_AND(c, packed_color,
1479 qir_uniform_ui(c, colormask)),
1480 qir_AND(c, packed_dst_color,
1481 qir_uniform_ui(c, ~colormask)));
1482 }
1483
1484 return packed_color;
1485 }
1486
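/**
 * Emits the fragment shader epilogue: discard setup, stencil
 * configuration, the Z write if depth is enabled, and the final TLB
 * color write of the blended result.
 */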
1487 static void
1488 emit_frag_end(struct vc4_compile *c)
1489 {
1490 clip_distance_discard(c);
1491 alpha_test_discard(c);
1492 struct qreg color = blend_pipeline(c);
1493
1494 if (c->discard.file != QFILE_NULL)
1495 qir_TLB_DISCARD_SETUP(c, c->discard);
1496
1497 if (c->fs_key->stencil_enabled) {
1498 qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 0));
1499 if (c->fs_key->stencil_twoside) {
1500 qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 1));
1501 }
1502 if (c->fs_key->stencil_full_writemasks) {
1503 qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 2));
1504 }
1505 }
1506
1507 if (c->fs_key->depth_enabled) {
1508 struct qreg z;
1509 if (c->output_position_index != -1) {
1510 z = qir_FTOI(c, qir_FMUL(c, c->outputs[c->output_position_index + 2],
1511 qir_uniform_f(c, 0xffffff)));
1512 } else {
1513 z = qir_FRAG_Z(c);
1514 }
1515 qir_TLB_Z_WRITE(c, z);
1516 }
1517
1518 qir_TLB_COLOR_WRITE(c, color);
1519 }
1520
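/**
 * Writes the viewport-scaled, perspective-divided X/Y position to the
 * VPM as the packed integer screen coordinates the hardware expects.
 */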
1521 static void
1522 emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
1523 {
1524 struct qreg xyi[2];
1525
1526 for (int i = 0; i < 2; i++) {
1527 struct qreg scale =
1528 qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
1529
1530 xyi[i] = qir_FTOI(c, qir_FMUL(c,
1531 qir_FMUL(c,
1532 c->outputs[c->output_position_index + i],
1533 scale),
1534 rcp_w));
1535 }
1536
1537 qir_VPM_WRITE(c, qir_PACK_SCALED(c, xyi[0], xyi[1]));
1538 }
1539
1540 static void
1541 emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
1542 {
1543 struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
1544 struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
1545
1546 qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
1547 c->outputs[c->output_position_index + 2],
1548 zscale),
1549 rcp_w),
1550 zoffset));
1551 }
1552
1553 static void
1554 emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
1555 {
1556 qir_VPM_WRITE(c, rcp_w);
1557 }
1558
1559 static void
1560 emit_point_size_write(struct vc4_compile *c)
1561 {
1562 struct qreg point_size;
1563
1564 if (c->output_point_size_index != -1)
1565 point_size = c->outputs[c->output_point_size_index + 3];
1566 else
1567 point_size = qir_uniform_f(c, 1.0);
1568
1569 /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
1570 * BCM21553).
1571 */
1572 point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
1573
1574 qir_VPM_WRITE(c, point_size);
1575 }
1576
1577 /**
1578 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1579 *
1580 * The simulator insists that there be at least one vertex attribute, so
1581 * vc4_draw.c will emit one if it wouldn't have otherwise. The simulator also
1582 * insists that all vertex attributes loaded get read by the VS/CS, so we have
1583 * to consume it here.
1584 */
1585 static void
1586 emit_stub_vpm_read(struct vc4_compile *c)
1587 {
1588 if (c->num_inputs)
1589 return;
1590
1591 c->vattr_sizes[0] = 4;
1592 struct qreg vpm = { QFILE_VPM, 0 };
1593 (void)qir_MOV(c, vpm);
1594 c->num_inputs++;
1595 }
1596
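/**
 * Computes a clip distance (dot product of the position, or
 * gl_ClipVertex if written, with the user clip plane) for each enabled
 * plane and appends it as an extra CLIPDIST output, to be tested by
 * clip_distance_discard() in the fragment shader.
 */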
1597 static void
1598 emit_ucp_clipdistance(struct vc4_compile *c)
1599 {
1600 unsigned cv;
1601 if (c->output_clipvertex_index != -1)
1602 cv = c->output_clipvertex_index;
1603 else if (c->output_position_index != -1)
1604 cv = c->output_position_index;
1605 else
1606 return;
1607
1608 for (int plane = 0; plane < PIPE_MAX_CLIP_PLANES; plane++) {
1609 if (!(c->key->ucp_enables & (1 << plane)))
1610 continue;
1611
1612 /* Pick the next outputs[] that hasn't been written to, since
1613 * there are no other program writes left to be processed at
1614 * this point. If something had been declared but not written
1615 * (like a w component), we'll just smash over the top of it.
1616 */
1617 uint32_t output_index = c->num_outputs++;
1618 add_output(c, output_index,
1619 TGSI_SEMANTIC_CLIPDIST,
1620 plane,
1621 TGSI_SWIZZLE_X);
1622
1623
1624 struct qreg dist = qir_uniform_f(c, 0.0);
1625 for (int i = 0; i < 4; i++) {
1626 struct qreg pos_chan = c->outputs[cv + i];
1627 struct qreg ucp =
1628 qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1629 plane * 4 + i);
1630 dist = qir_FADD(c, dist, qir_FMUL(c, pos_chan, ucp));
1631 }
1632
1633 c->outputs[output_index] = dist;
1634 }
1635 }
1636
1637 static void
1638 emit_vert_end(struct vc4_compile *c,
1639 struct vc4_varying_semantic *fs_inputs,
1640 uint32_t num_fs_inputs)
1641 {
1642 struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
1643
1644 emit_stub_vpm_read(c);
1645 emit_ucp_clipdistance(c);
1646
1647 emit_scaled_viewport_write(c, rcp_w);
1648 emit_zs_write(c, rcp_w);
1649 emit_rcp_wc_write(c, rcp_w);
1650 if (c->vs_key->per_vertex_point_size)
1651 emit_point_size_write(c);
1652
1653 for (int i = 0; i < num_fs_inputs; i++) {
1654 struct vc4_varying_semantic *input = &fs_inputs[i];
1655 int j;
1656
1657 for (j = 0; j < c->num_outputs; j++) {
1658 struct vc4_varying_semantic *output =
1659 &c->output_semantics[j];
1660
1661 if (input->semantic == output->semantic &&
1662 input->index == output->index &&
1663 input->swizzle == output->swizzle) {
1664 qir_VPM_WRITE(c, c->outputs[j]);
1665 break;
1666 }
1667 }
1668 /* Emit padding if we didn't find a declared VS output for
1669 * this FS input.
1670 */
1671 if (j == c->num_outputs)
1672 qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
1673 }
1674 }
1675
1676 static void
1677 emit_coord_end(struct vc4_compile *c)
1678 {
1679 struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
1680
1681 emit_stub_vpm_read(c);
1682
1683 for (int i = 0; i < 4; i++)
1684 qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
1685
1686 emit_scaled_viewport_write(c, rcp_w);
1687 emit_zs_write(c, rcp_w);
1688 emit_rcp_wc_write(c, rcp_w);
1689 if (c->vs_key->per_vertex_point_size)
1690 emit_point_size_write(c);
1691 }
1692
1693 static void
1694 vc4_optimize_nir(struct nir_shader *s)
1695 {
1696 bool progress;
1697
1698 do {
1699 progress = false;
1700
1701 nir_lower_vars_to_ssa(s);
1702 nir_lower_alu_to_scalar(s);
1703
1704 progress = nir_copy_prop(s) || progress;
1705 progress = nir_opt_dce(s) || progress;
1706 progress = nir_opt_cse(s) || progress;
1707 progress = nir_opt_peephole_select(s) || progress;
1708 progress = nir_opt_algebraic(s) || progress;
1709 progress = nir_opt_constant_folding(s) || progress;
1710 } while (progress);
1711 }
1712
1713 static int
1714 driver_location_compare(const void *in_a, const void *in_b)
1715 {
1716 const nir_variable *const *a = in_a;
1717 const nir_variable *const *b = in_b;
1718
1719 return (*a)->data.driver_location - (*b)->data.driver_location;
1720 }
1721
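/**
 * Walks the shader's input variables in driver_location order and emits
 * the stage-specific input setup into c->inputs: VPM reads for the
 * vertex stages, varyings and special inputs (position, face, point
 * coord) for the fragment stage.
 */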
1722 static void
1723 ntq_setup_inputs(struct vc4_compile *c)
1724 {
1725 unsigned num_entries = 0;
1726 foreach_list_typed(nir_variable, var, node, &c->s->inputs)
1727 num_entries++;
1728
1729 nir_variable *vars[num_entries];
1730
1731 unsigned i = 0;
1732 foreach_list_typed(nir_variable, var, node, &c->s->inputs)
1733 vars[i++] = var;
1734
1735 /* Sort the variables so that we emit the input setup in
1736 * driver_location order. This is required for VPM reads, whose data
1737 * is fetched into the VPM in driver_location (TGSI register index)
1738 * order.
1739 */
1740 qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1741
1742 for (unsigned i = 0; i < num_entries; i++) {
1743 nir_variable *var = vars[i];
1744 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1745 /* XXX: map loc slots to semantics */
1746 unsigned semantic_name = var->data.location;
1747 unsigned semantic_index = var->data.index;
1748 unsigned loc = var->data.driver_location;
1749
1750 assert(array_len == 1);
1751 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1752 (loc + 1) * 4);
1753
1754 if (c->stage == QSTAGE_FRAG) {
1755 if (semantic_name == TGSI_SEMANTIC_POSITION) {
1756 emit_fragcoord_input(c, loc);
1757 } else if (semantic_name == TGSI_SEMANTIC_FACE) {
1758 emit_face_input(c, loc);
1759 } else if (semantic_name == TGSI_SEMANTIC_GENERIC &&
1760 (c->fs_key->point_sprite_mask &
1761 (1 << semantic_index))) {
1762 emit_point_coord_input(c, loc);
1763 } else {
1764 emit_fragment_input(c, loc,
1765 semantic_name,
1766 semantic_index);
1767 }
1768 } else {
1769 emit_vertex_input(c, loc);
1770 }
1771 }
1772 }
1773
1774 static void
1775 ntq_setup_outputs(struct vc4_compile *c)
1776 {
1777 foreach_list_typed(nir_variable, var, node, &c->s->outputs) {
1778 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1779 /* XXX: map loc slots to semantics */
1780 unsigned semantic_name = var->data.location;
1781 unsigned semantic_index = var->data.index;
1782 unsigned loc = var->data.driver_location * 4;
1783
1784 assert(array_len == 1);
1785
1786 /* NIR hack to pass through TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
1787  * such a color output arrives with semantic index -1; map it to 0. */
1788 if (semantic_name == TGSI_SEMANTIC_COLOR &&
1789 semantic_index == -1)
1790 semantic_index = 0;
1791
1792 for (int i = 0; i < 4; i++) {
1793 add_output(c,
1794 loc + i,
1795 semantic_name,
1796 semantic_index,
1797 i);
1798 }
1799
1800 switch (semantic_name) {
1801 case TGSI_SEMANTIC_POSITION:
1802 c->output_position_index = loc;
1803 break;
1804 case TGSI_SEMANTIC_CLIPVERTEX:
1805 c->output_clipvertex_index = loc;
1806 break;
1807 case TGSI_SEMANTIC_COLOR:
1808 c->output_color_index = loc;
1809 break;
1810 case TGSI_SEMANTIC_PSIZE:
1811 c->output_point_size_index = loc;
1812 break;
1813 }
1814
1815 }
1816 }
1817
1818 static void
1819 ntq_setup_uniforms(struct vc4_compile *c)
1820 {
1821 foreach_list_typed(nir_variable, var, node, &c->s->uniforms) {
1822 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1823 unsigned array_elem_size = 4 * sizeof(float);
1824
1825 declare_uniform_range(c, var->data.driver_location * array_elem_size,
1826 array_len * array_elem_size);
1827
1828 }
1829 }
1830
1831 /**
1832 * Sets up the mapping from nir_register to struct qreg *.
1833 *
1834 * Each nir_register gets a struct qreg per 32-bit component being stored.
1835 */
1836 static void
1837 ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
1838 {
1839 foreach_list_typed(nir_register, nir_reg, node, list) {
1840 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1841 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1842 array_len *
1843 nir_reg->num_components);
1844
1845 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1846
1847 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1848 qregs[i] = qir_uniform_ui(c, 0);
1849 }
1850 }
1851
1852 static void
1853 ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
1854 {
1855 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1856 instr->def.num_components);
1857 for (int i = 0; i < instr->def.num_components; i++)
1858 qregs[i] = qir_uniform_ui(c, instr->value.u[i]);
1859
1860 _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1861 }
1862
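/**
 * Translates NIR intrinsics: uniform and input loads read from the
 * uniform slots or the precomputed c->inputs, output stores land in
 * c->outputs, and the discard intrinsics update the discard mask.
 */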
1863 static void
1864 ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
1865 {
1866 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
1867 struct qreg *dest = NULL;
1868
1869 if (info->has_dest) {
1870 dest = ntq_get_dest(c, instr->dest);
1871 }
1872
1873 switch (instr->intrinsic) {
1874 case nir_intrinsic_load_uniform:
1875 for (int i = 0; i < instr->num_components; i++) {
1876 dest[i] = qir_uniform(c, QUNIFORM_UNIFORM,
1877 instr->const_index[0] * 4 + i);
1878 }
1879 break;
1880
1881 case nir_intrinsic_load_uniform_indirect:
1882 for (int i = 0; i < instr->num_components; i++) {
1883 dest[i] = indirect_uniform_load(c,
1884 ntq_get_src(c, instr->src[0], 0),
1885 (instr->const_index[0] *
1886 4 + i) * sizeof(float));
1887 }
1888
1889 break;
1890
1891 case nir_intrinsic_load_input:
1892 for (int i = 0; i < instr->num_components; i++)
1893 dest[i] = c->inputs[instr->const_index[0] * 4 + i];
1894
1895 break;
1896
1897 case nir_intrinsic_store_output:
1898 for (int i = 0; i < instr->num_components; i++) {
1899 c->outputs[instr->const_index[0] * 4 + i] =
1900 qir_MOV(c, ntq_get_src(c, instr->src[0], i));
1901 }
1902 c->num_outputs = MAX2(c->num_outputs,
1903 instr->const_index[0] * 4 +
1904 instr->num_components + 1);
1905 break;
1906
1907 case nir_intrinsic_discard:
1908 c->discard = qir_uniform_ui(c, ~0);
1909 break;
1910
1911 case nir_intrinsic_discard_if:
1912 if (c->discard.file == QFILE_NULL)
1913 c->discard = qir_uniform_ui(c, 0);
1914 c->discard = qir_OR(c, c->discard,
1915 ntq_get_src(c, instr->src[0], 0));
1916 break;
1917
1918 default:
1919 fprintf(stderr, "Unknown intrinsic: ");
1920 nir_print_instr(&instr->instr, stderr);
1921 fprintf(stderr, "\n");
1922 break;
1923 }
1924 }
1925
1926 static void
1927 ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
1928 {
1929 fprintf(stderr, "general IF statements not handled.\n");
1930 }
1931
1932 static void
1933 ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
1934 {
1935 switch (instr->type) {
1936 case nir_instr_type_alu:
1937 ntq_emit_alu(c, nir_instr_as_alu(instr));
1938 break;
1939
1940 case nir_instr_type_intrinsic:
1941 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
1942 break;
1943
1944 case nir_instr_type_load_const:
1945 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
1946 break;
1947
1948 case nir_instr_type_tex:
1949 ntq_emit_tex(c, nir_instr_as_tex(instr));
1950 break;
1951
1952 default:
1953 fprintf(stderr, "Unknown NIR instr type: ");
1954 nir_print_instr(instr, stderr);
1955 fprintf(stderr, "\n");
1956 abort();
1957 }
1958 }
1959
1960 static void
1961 ntq_emit_block(struct vc4_compile *c, nir_block *block)
1962 {
1963 nir_foreach_instr(block, instr) {
1964 ntq_emit_instr(c, instr);
1965 }
1966 }
1967
1968 static void
1969 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
1970 {
1971 foreach_list_typed(nir_cf_node, node, node, list) {
1972 switch (node->type) {
1973 /* case nir_cf_node_loop: */
1974 case nir_cf_node_block:
1975 ntq_emit_block(c, nir_cf_node_as_block(node));
1976 break;
1977
1978 case nir_cf_node_if:
1979 ntq_emit_if(c, nir_cf_node_as_if(node));
1980 break;
1981
1982 default:
1983 assert(0);
1984 }
1985 }
1986 }
1987
1988 static void
1989 ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
1990 {
1991 ntq_setup_registers(c, &impl->registers);
1992 ntq_emit_cf_list(c, &impl->body);
1993 }
1994
1995 static void
1996 nir_to_qir(struct vc4_compile *c)
1997 {
1998 ntq_setup_inputs(c);
1999 ntq_setup_outputs(c);
2000 ntq_setup_uniforms(c);
2001 ntq_setup_registers(c, &c->s->registers);
2002
2003 /* Find the main function and emit the body. */
2004 nir_foreach_overload(c->s, overload) {
2005 assert(strcmp(overload->function->name, "main") == 0);
2006 assert(overload->impl);
2007 ntq_emit_impl(c, overload->impl);
2008 }
2009 }
2010
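/* Compiler options handed to tgsi_to_nir: operations the QPU can't emit
 * directly (ffma, flrp, fpow, fsqrt, fsat, negation) are requested to be
 * lowered into simpler ALU ops by the shared NIR passes.
 */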
2011 static const nir_shader_compiler_options nir_options = {
2012 .lower_ffma = true,
2013 .lower_flrp = true,
2014 .lower_fpow = true,
2015 .lower_fsat = true,
2016 .lower_fsqrt = true,
2017 .lower_negate = true,
2018 };
2019
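/* Helpers to count the NIR instructions in a shader, for the
 * VC4_DEBUG_SHADERDB statistics printed below.
 */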
2020 static bool
2021 count_nir_instrs_in_block(nir_block *block, void *state)
2022 {
2023 int *count = (int *) state;
2024 nir_foreach_instr(block, instr) {
2025 *count = *count + 1;
2026 }
2027 return true;
2028 }
2029
2030 static int
2031 count_nir_instrs(nir_shader *nir)
2032 {
2033 int count = 0;
2034 nir_foreach_overload(nir, overload) {
2035 if (!overload->impl)
2036 continue;
2037 nir_foreach_block(overload->impl, count_nir_instrs_in_block, &count);
2038 }
2039 return count;
2040 }
2041
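/* Compiles a single variant of a shader for the given key: lowers
 * two-sided color in TGSI if needed, converts to NIR and optimizes it,
 * translates the NIR to QIR along with the stage-specific epilogue, then
 * optimizes the QIR and generates QPU code.
 */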
2042 static struct vc4_compile *
2043 vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
2044 struct vc4_key *key)
2045 {
2046 struct vc4_compile *c = qir_compile_init();
2047
2048 c->stage = stage;
2049 c->shader_state = &key->shader_state->base;
2050 c->program_id = key->shader_state->program_id;
2051 c->variant_id = key->shader_state->compiled_variant_count++;
2052
2053 c->key = key;
2054 switch (stage) {
2055 case QSTAGE_FRAG:
2056 c->fs_key = (struct vc4_fs_key *)key;
2057 if (c->fs_key->is_points) {
2058 c->point_x = emit_fragment_varying(c, ~0, ~0, 0);
2059 c->point_y = emit_fragment_varying(c, ~0, ~0, 0);
2060 } else if (c->fs_key->is_lines) {
2061 c->line_x = emit_fragment_varying(c, ~0, ~0, 0);
2062 }
2063 break;
2064 case QSTAGE_VERT:
2065 c->vs_key = (struct vc4_vs_key *)key;
2066 break;
2067 case QSTAGE_COORD:
2068 c->vs_key = (struct vc4_vs_key *)key;
2069 break;
2070 }
2071
2072 const struct tgsi_token *tokens = key->shader_state->base.tokens;
2073 if (c->fs_key && c->fs_key->light_twoside) {
2074 if (!key->shader_state->twoside_tokens) {
2075 const struct tgsi_lowering_config lowering_config = {
2076 .color_two_side = true,
2077 };
2078 struct tgsi_shader_info info;
2079 key->shader_state->twoside_tokens =
2080 tgsi_transform_lowering(&lowering_config,
2081 key->shader_state->base.tokens,
2082 &info);
2083
2084 /* If no transformation occurred, then NULL is
2085 * returned and we just use our original tokens.
2086 */
2087 if (!key->shader_state->twoside_tokens) {
2088 key->shader_state->twoside_tokens =
2089 key->shader_state->base.tokens;
2090 }
2091 }
2092 tokens = key->shader_state->twoside_tokens;
2093 }
2094
2095 if (vc4_debug & VC4_DEBUG_TGSI) {
2096 fprintf(stderr, "%s prog %d/%d TGSI:\n",
2097 qir_get_stage_name(c->stage),
2098 c->program_id, c->variant_id);
2099 tgsi_dump(tokens, 0);
2100 }
2101
2102 c->s = tgsi_to_nir(tokens, &nir_options);
2103 nir_opt_global_to_local(c->s);
2104 nir_convert_to_ssa(c->s);
2105 nir_lower_idiv(c->s);
2106
2107 vc4_optimize_nir(c->s);
2108
2109 nir_remove_dead_variables(c->s);
2110
2111 nir_convert_from_ssa(c->s, false);
2112
2113 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2114 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
2115 qir_get_stage_name(c->stage),
2116 c->program_id, c->variant_id,
2117 count_nir_instrs(c->s));
2118 }
2119
2120 if (vc4_debug & VC4_DEBUG_NIR) {
2121 fprintf(stderr, "%s prog %d/%d NIR:\n",
2122 qir_get_stage_name(c->stage),
2123 c->program_id, c->variant_id);
2124 nir_print_shader(c->s, stderr);
2125 }
2126
2127 nir_to_qir(c);
2128
2129 switch (stage) {
2130 case QSTAGE_FRAG:
2131 emit_frag_end(c);
2132 break;
2133 case QSTAGE_VERT:
2134 emit_vert_end(c,
2135 vc4->prog.fs->input_semantics,
2136 vc4->prog.fs->num_inputs);
2137 break;
2138 case QSTAGE_COORD:
2139 emit_coord_end(c);
2140 break;
2141 }
2142
2143 if (vc4_debug & VC4_DEBUG_QIR) {
2144 fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
2145 qir_get_stage_name(c->stage),
2146 c->program_id, c->variant_id);
2147 qir_dump(c);
2148 }
2149
2150 qir_optimize(c);
2151 qir_lower_uniforms(c);
2152
2153 if (vc4_debug & VC4_DEBUG_QIR) {
2154 fprintf(stderr, "%s prog %d/%d QIR:\n",
2155 qir_get_stage_name(c->stage),
2156 c->program_id, c->variant_id);
2157 qir_dump(c);
2158 }
2159 qir_reorder_uniforms(c);
2160 vc4_generate_code(vc4, c);
2161
2162 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2163 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
2164 qir_get_stage_name(c->stage),
2165 c->program_id, c->variant_id,
2166 c->qpu_inst_count);
2167 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
2168 qir_get_stage_name(c->stage),
2169 c->program_id, c->variant_id,
2170 c->num_uniforms);
2171 }
2172
2173 ralloc_free(c->s);
2174
2175 return c;
2176 }
2177
2178 static void *
2179 vc4_shader_state_create(struct pipe_context *pctx,
2180 const struct pipe_shader_state *cso)
2181 {
2182 struct vc4_context *vc4 = vc4_context(pctx);
2183 struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
2184 if (!so)
2185 return NULL;
2186
2187 so->base.tokens = tgsi_dup_tokens(cso->tokens);
2188 so->program_id = vc4->next_uncompiled_program_id++;
2189
2190 return so;
2191 }
2192
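/* Snapshots the uniform stream built at compile time (contents enums plus
 * data words) into the compiled shader, so vc4_write_uniforms() can emit
 * the per-draw values later.
 */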
2193 static void
2194 copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
2195 struct vc4_compile *c)
2196 {
2197 int count = c->num_uniforms;
2198 struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
2199
2200 uinfo->count = count;
2201 uinfo->data = ralloc_array(shader, uint32_t, count);
2202 memcpy(uinfo->data, c->uniform_data,
2203 count * sizeof(*uinfo->data));
2204 uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
2205 memcpy(uinfo->contents, c->uniform_contents,
2206 count * sizeof(*uinfo->contents));
2207 uinfo->num_texture_samples = c->num_texture_samples;
2208 }
2209
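/* Returns the compiled variant for this key, compiling and caching it on a
 * miss.  For fragment shaders the input semantics are also compacted down
 * to the varyings that are actually read, which the VS/CS compile then
 * uses to lay out its outputs.
 */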
2210 static struct vc4_compiled_shader *
2211 vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
2212 struct vc4_key *key)
2213 {
2214 struct hash_table *ht;
2215 uint32_t key_size;
2216 if (stage == QSTAGE_FRAG) {
2217 ht = vc4->fs_cache;
2218 key_size = sizeof(struct vc4_fs_key);
2219 } else {
2220 ht = vc4->vs_cache;
2221 key_size = sizeof(struct vc4_vs_key);
2222 }
2223
2224 struct vc4_compiled_shader *shader;
2225 struct hash_entry *entry = _mesa_hash_table_search(ht, key);
2226 if (entry)
2227 return entry->data;
2228
2229 struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
2230 shader = rzalloc(NULL, struct vc4_compiled_shader);
2231
2232 shader->program_id = vc4->next_compiled_program_id++;
2233 if (stage == QSTAGE_FRAG) {
2234 bool input_live[c->num_input_semantics];
2235
2236 memset(input_live, 0, sizeof(input_live));
2237 list_for_each_entry(struct qinst, inst, &c->instructions, link) {
2238 for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
2239 if (inst->src[i].file == QFILE_VARY)
2240 input_live[inst->src[i].index] = true;
2241 }
2242 }
2243
2244 shader->input_semantics = ralloc_array(shader,
2245 struct vc4_varying_semantic,
2246 c->num_input_semantics);
2247
2248 for (int i = 0; i < c->num_input_semantics; i++) {
2249 struct vc4_varying_semantic *sem = &c->input_semantics[i];
2250
2251 if (!input_live[i])
2252 continue;
2253
2254 /* Skip non-VS-output inputs. */
2255 if (sem->semantic == (uint8_t)~0)
2256 continue;
2257
2258 if (sem->semantic == TGSI_SEMANTIC_COLOR ||
2259 sem->semantic == TGSI_SEMANTIC_BCOLOR) {
2260 shader->color_inputs |= (1 << shader->num_inputs);
2261 }
2262
2263 shader->input_semantics[shader->num_inputs] = *sem;
2264 shader->num_inputs++;
2265 }
2266 } else {
2267 shader->num_inputs = c->num_inputs;
2268
2269 shader->vattr_offsets[0] = 0;
2270 for (int i = 0; i < 8; i++) {
2271 shader->vattr_offsets[i + 1] =
2272 shader->vattr_offsets[i] + c->vattr_sizes[i];
2273
2274 if (c->vattr_sizes[i])
2275 shader->vattrs_live |= (1 << i);
2276 }
2277 }
2278
2279 copy_uniform_state_to_shader(shader, c);
2280 shader->bo = vc4_bo_alloc_mem(vc4->screen, c->qpu_insts,
2281 c->qpu_inst_count * sizeof(uint64_t),
2282 "code");
2283
2284 /* Copy the compiler UBO range state to the compiled shader, dropping
2285 * out arrays that were never referenced by an indirect load.
2286 *
2287          * (Note that an array whose indirect accesses were all removed
2288          * by QIR dead code elimination still counts as used, though.)
2289 */
2290 if (c->num_ubo_ranges) {
2291 shader->num_ubo_ranges = c->num_ubo_ranges;
2292 shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
2293 c->num_ubo_ranges);
2294 uint32_t j = 0;
2295 for (int i = 0; i < c->num_uniform_ranges; i++) {
2296 struct vc4_compiler_ubo_range *range =
2297 &c->ubo_ranges[i];
2298 if (!range->used)
2299 continue;
2300
2301 shader->ubo_ranges[j].dst_offset = range->dst_offset;
2302 shader->ubo_ranges[j].src_offset = range->src_offset;
2303 shader->ubo_ranges[j].size = range->size;
2304 shader->ubo_size += c->ubo_ranges[i].size;
2305 j++;
2306 }
2307 }
2308 if (shader->ubo_size) {
2309 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
2310 qir_get_stage_name(c->stage),
2311 c->program_id, c->variant_id,
2312 shader->ubo_size / 4);
2313 }
2314
2315 qir_compile_destroy(c);
2316
2317 struct vc4_key *dup_key;
2318 dup_key = ralloc_size(shader, key_size);
2319 memcpy(dup_key, key, key_size);
2320 _mesa_hash_table_insert(ht, dup_key, shader);
2321
2322 return shader;
2323 }
2324
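/* Fills in the key state shared between FS and VS variants: per-sampler
 * format, swizzle, compare and wrap modes, plus the enabled user clip
 * planes.
 */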
2325 static void
2326 vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
2327 struct vc4_texture_stateobj *texstate)
2328 {
2329 for (int i = 0; i < texstate->num_textures; i++) {
2330 struct pipe_sampler_view *sampler = texstate->textures[i];
2331 struct pipe_sampler_state *sampler_state =
2332 texstate->samplers[i];
2333
2334 if (sampler) {
2335 key->tex[i].format = sampler->format;
2336 key->tex[i].swizzle[0] = sampler->swizzle_r;
2337 key->tex[i].swizzle[1] = sampler->swizzle_g;
2338 key->tex[i].swizzle[2] = sampler->swizzle_b;
2339 key->tex[i].swizzle[3] = sampler->swizzle_a;
2340 key->tex[i].compare_mode = sampler_state->compare_mode;
2341 key->tex[i].compare_func = sampler_state->compare_func;
2342 key->tex[i].wrap_s = sampler_state->wrap_s;
2343 key->tex[i].wrap_t = sampler_state->wrap_t;
2344 }
2345 }
2346
2347 key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
2348 }
2349
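/* Builds the FS key from the current pipe state (primitive type, blend,
 * color buffer format, depth/stencil/alpha, point sprite and two-sided
 * lighting state) and looks up or compiles the matching variant.
 */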
2350 static void
2351 vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
2352 {
2353 struct vc4_fs_key local_key;
2354 struct vc4_fs_key *key = &local_key;
2355
2356 if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2357 VC4_DIRTY_BLEND |
2358 VC4_DIRTY_FRAMEBUFFER |
2359 VC4_DIRTY_ZSA |
2360 VC4_DIRTY_RASTERIZER |
2361 VC4_DIRTY_FRAGTEX |
2362 VC4_DIRTY_TEXSTATE |
2363 VC4_DIRTY_UNCOMPILED_FS))) {
2364 return;
2365 }
2366
2367 memset(key, 0, sizeof(*key));
2368 vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
2369 key->base.shader_state = vc4->prog.bind_fs;
2370 key->is_points = (prim_mode == PIPE_PRIM_POINTS);
2371 key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
2372 prim_mode <= PIPE_PRIM_LINE_STRIP);
2373 key->blend = vc4->blend->rt[0];
2374 if (vc4->blend->logicop_enable) {
2375 key->logicop_func = vc4->blend->logicop_func;
2376 } else {
2377 key->logicop_func = PIPE_LOGICOP_COPY;
2378 }
2379 if (vc4->framebuffer.cbufs[0])
2380 key->color_format = vc4->framebuffer.cbufs[0]->format;
2381
2382 key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
2383 key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
2384 key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
2385 key->depth_enabled = (vc4->zsa->base.depth.enabled ||
2386 key->stencil_enabled);
2387 if (vc4->zsa->base.alpha.enabled) {
2388 key->alpha_test = true;
2389 key->alpha_test_func = vc4->zsa->base.alpha.func;
2390 }
2391
2392 if (key->is_points) {
2393 key->point_sprite_mask =
2394 vc4->rasterizer->base.sprite_coord_enable;
2395 key->point_coord_upper_left =
2396 (vc4->rasterizer->base.sprite_coord_mode ==
2397 PIPE_SPRITE_COORD_UPPER_LEFT);
2398 }
2399
2400 key->light_twoside = vc4->rasterizer->base.light_twoside;
2401
2402 struct vc4_compiled_shader *old_fs = vc4->prog.fs;
2403 vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
2404 if (vc4->prog.fs == old_fs)
2405 return;
2406
2407 vc4->dirty |= VC4_DIRTY_COMPILED_FS;
2408 if (vc4->rasterizer->base.flatshade &&
2409 old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
2410 vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
2411 }
2412 }
2413
2414 static void
2415 vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
2416 {
2417 struct vc4_vs_key local_key;
2418 struct vc4_vs_key *key = &local_key;
2419
2420 if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2421 VC4_DIRTY_RASTERIZER |
2422 VC4_DIRTY_VERTTEX |
2423 VC4_DIRTY_TEXSTATE |
2424 VC4_DIRTY_VTXSTATE |
2425 VC4_DIRTY_UNCOMPILED_VS |
2426 VC4_DIRTY_COMPILED_FS))) {
2427 return;
2428 }
2429
2430 memset(key, 0, sizeof(*key));
2431 vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
2432 key->base.shader_state = vc4->prog.bind_vs;
2433 key->compiled_fs_id = vc4->prog.fs->program_id;
2434
2435 for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
2436 key->attr_formats[i] = vc4->vtx->pipe[i].src_format;
2437
2438 key->per_vertex_point_size =
2439 (prim_mode == PIPE_PRIM_POINTS &&
2440 vc4->rasterizer->base.point_size_per_vertex);
2441
2442 vc4->prog.vs = vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
2443 key->is_coord = true;
2444 vc4->prog.cs = vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
2445 }
2446
2447 void
2448 vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
2449 {
2450 vc4_update_compiled_fs(vc4, prim_mode);
2451 vc4_update_compiled_vs(vc4, prim_mode);
2452 }
2453
2454 static uint32_t
2455 fs_cache_hash(const void *key)
2456 {
2457 return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
2458 }
2459
2460 static uint32_t
2461 vs_cache_hash(const void *key)
2462 {
2463 return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
2464 }
2465
2466 static bool
2467 fs_cache_compare(const void *key1, const void *key2)
2468 {
2469 return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
2470 }
2471
2472 static bool
2473 vs_cache_compare(const void *key1, const void *key2)
2474 {
2475 return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
2476 }
2477
2478 static void
2479 delete_from_cache_if_matches(struct hash_table *ht,
2480 struct hash_entry *entry,
2481 struct vc4_uncompiled_shader *so)
2482 {
2483 const struct vc4_key *key = entry->key;
2484
2485 if (key->shader_state == so) {
2486 struct vc4_compiled_shader *shader = entry->data;
2487 _mesa_hash_table_remove(ht, entry);
2488 vc4_bo_unreference(&shader->bo);
2489 ralloc_free(shader);
2490 }
2491 }
2492
2493 static void
2494 vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
2495 {
2496 struct vc4_context *vc4 = vc4_context(pctx);
2497 struct vc4_uncompiled_shader *so = hwcso;
2498
2499 struct hash_entry *entry;
2500 hash_table_foreach(vc4->fs_cache, entry)
2501 delete_from_cache_if_matches(vc4->fs_cache, entry, so);
2502 hash_table_foreach(vc4->vs_cache, entry)
2503 delete_from_cache_if_matches(vc4->vs_cache, entry, so);
2504
2505 if (so->twoside_tokens != so->base.tokens)
2506 free((void *)so->twoside_tokens);
2507 free((void *)so->base.tokens);
2508 free(so);
2509 }
2510
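/* Maps gallium wrap modes onto the hardware wrap field.  There appears to
 * be no native CLAMP mode, so it is approximated with CLAMP_TO_EDGE for
 * nearest filtering and CLAMP_TO_BORDER otherwise.
 */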
2511 static uint32_t translate_wrap(uint32_t p_wrap, bool using_nearest)
2512 {
2513 switch (p_wrap) {
2514 case PIPE_TEX_WRAP_REPEAT:
2515 return 0;
2516 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
2517 return 1;
2518 case PIPE_TEX_WRAP_MIRROR_REPEAT:
2519 return 2;
2520 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
2521 return 3;
2522 case PIPE_TEX_WRAP_CLAMP:
2523 return (using_nearest ? 1 : 3);
2524 default:
2525 fprintf(stderr, "Unknown wrap mode %d\n", p_wrap);
2526 assert(!"not reached");
2527 return 0;
2528 }
2529 }
2530
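/* Emits texture config parameter 0 as a relocation against the texture's
 * BO: level-0 offset, number of mip levels, cube-map mode, and the low
 * bits of the hardware texture type.
 */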
2531 static void
2532 write_texture_p0(struct vc4_context *vc4,
2533 struct vc4_texture_stateobj *texstate,
2534 uint32_t unit)
2535 {
2536 struct pipe_sampler_view *texture = texstate->textures[unit];
2537 struct vc4_resource *rsc = vc4_resource(texture->texture);
2538
2539 cl_reloc(vc4, &vc4->uniforms, rsc->bo,
2540 VC4_SET_FIELD(rsc->slices[0].offset >> 12, VC4_TEX_P0_OFFSET) |
2541 VC4_SET_FIELD(texture->u.tex.last_level -
2542 texture->u.tex.first_level, VC4_TEX_P0_MIPLVLS) |
2543 VC4_SET_FIELD(texture->target == PIPE_TEXTURE_CUBE,
2544 VC4_TEX_P0_CMMODE) |
2545 VC4_SET_FIELD(rsc->vc4_format & 15, VC4_TEX_P0_TYPE));
2546 }
2547
2548 static void
2549 write_texture_p1(struct vc4_context *vc4,
2550 struct vc4_texture_stateobj *texstate,
2551 uint32_t unit)
2552 {
2553 struct pipe_sampler_view *texture = texstate->textures[unit];
2554 struct vc4_resource *rsc = vc4_resource(texture->texture);
2555 struct pipe_sampler_state *sampler = texstate->samplers[unit];
2556 static const uint8_t minfilter_map[6] = {
2557 VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR,
2558 VC4_TEX_P1_MINFILT_LIN_MIP_NEAR,
2559 VC4_TEX_P1_MINFILT_NEAR_MIP_LIN,
2560 VC4_TEX_P1_MINFILT_LIN_MIP_LIN,
2561 VC4_TEX_P1_MINFILT_NEAREST,
2562 VC4_TEX_P1_MINFILT_LINEAR,
2563 };
2564 static const uint32_t magfilter_map[] = {
2565 [PIPE_TEX_FILTER_NEAREST] = VC4_TEX_P1_MAGFILT_NEAREST,
2566 [PIPE_TEX_FILTER_LINEAR] = VC4_TEX_P1_MAGFILT_LINEAR,
2567 };
2568
2569         bool either_nearest =
2570                 (sampler->mag_img_filter == PIPE_TEX_FILTER_NEAREST ||
2571                  sampler->min_img_filter == PIPE_TEX_FILTER_NEAREST);
2572
2573 cl_aligned_u32(&vc4->uniforms,
2574 VC4_SET_FIELD(rsc->vc4_format >> 4, VC4_TEX_P1_TYPE4) |
2575 VC4_SET_FIELD(texture->texture->height0 & 2047,
2576 VC4_TEX_P1_HEIGHT) |
2577 VC4_SET_FIELD(texture->texture->width0 & 2047,
2578 VC4_TEX_P1_WIDTH) |
2579 VC4_SET_FIELD(magfilter_map[sampler->mag_img_filter],
2580 VC4_TEX_P1_MAGFILT) |
2581 VC4_SET_FIELD(minfilter_map[sampler->min_mip_filter * 2 +
2582 sampler->min_img_filter],
2583 VC4_TEX_P1_MINFILT) |
2584 VC4_SET_FIELD(translate_wrap(sampler->wrap_s, either_nearest),
2585 VC4_TEX_P1_WRAP_S) |
2586 VC4_SET_FIELD(translate_wrap(sampler->wrap_t, either_nearest),
2587 VC4_TEX_P1_WRAP_T));
2588 }
2589
2590 static void
2591 write_texture_p2(struct vc4_context *vc4,
2592 struct vc4_texture_stateobj *texstate,
2593 uint32_t data)
2594 {
2595 uint32_t unit = data & 0xffff;
2596 struct pipe_sampler_view *texture = texstate->textures[unit];
2597 struct vc4_resource *rsc = vc4_resource(texture->texture);
2598
2599 cl_aligned_u32(&vc4->uniforms,
2600 VC4_SET_FIELD(VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE,
2601 VC4_TEX_P2_PTYPE) |
2602 VC4_SET_FIELD(rsc->cube_map_stride >> 12, VC4_TEX_P2_CMST) |
2603 VC4_SET_FIELD((data >> 16) & 1, VC4_TEX_P2_BSLOD));
2604 }
2605
2606
2607 #define SWIZ(x,y,z,w) { \
2608 UTIL_FORMAT_SWIZZLE_##x, \
2609 UTIL_FORMAT_SWIZZLE_##y, \
2610 UTIL_FORMAT_SWIZZLE_##z, \
2611 UTIL_FORMAT_SWIZZLE_##w \
2612 }
2613
2614 static void
2615 write_texture_border_color(struct vc4_context *vc4,
2616 struct vc4_texture_stateobj *texstate,
2617 uint32_t unit)
2618 {
2619 struct pipe_sampler_state *sampler = texstate->samplers[unit];
2620 struct pipe_sampler_view *texture = texstate->textures[unit];
2621 struct vc4_resource *rsc = vc4_resource(texture->texture);
2622 union util_color uc;
2623
2624 const struct util_format_description *tex_format_desc =
2625 util_format_description(texture->format);
2626
2627 float border_color[4];
2628 for (int i = 0; i < 4; i++)
2629 border_color[i] = sampler->border_color.f[i];
2630 if (util_format_is_srgb(texture->format)) {
2631 for (int i = 0; i < 3; i++)
2632 border_color[i] =
2633 util_format_linear_to_srgb_float(border_color[i]);
2634 }
2635
2636 /* Turn the border color into the layout of channels that it would
2637 * have when stored as texture contents.
2638 */
2639 float storage_color[4];
2640 util_format_unswizzle_4f(storage_color,
2641 border_color,
2642 tex_format_desc->swizzle);
2643
2644 /* Now, pack so that when the vc4_format-sampled texture contents are
2645 * replaced with our border color, the vc4_get_format_swizzle()
2646 * swizzling will get the right channels.
2647 */
2648 if (util_format_is_depth_or_stencil(texture->format)) {
2649 uc.ui[0] = util_pack_z(PIPE_FORMAT_Z24X8_UNORM,
2650 sampler->border_color.f[0]) << 8;
2651 } else {
2652 switch (rsc->vc4_format) {
2653 default:
2654 case VC4_TEXTURE_TYPE_RGBA8888:
2655 util_pack_color(storage_color,
2656 PIPE_FORMAT_R8G8B8A8_UNORM, &uc);
2657 break;
2658 case VC4_TEXTURE_TYPE_RGBA4444:
2659 util_pack_color(storage_color,
2660 PIPE_FORMAT_A8B8G8R8_UNORM, &uc);
2661 break;
2662 case VC4_TEXTURE_TYPE_RGB565:
2663 util_pack_color(storage_color,
2664 PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
2665 break;
2666 case VC4_TEXTURE_TYPE_ALPHA:
2667 uc.ui[0] = float_to_ubyte(storage_color[0]) << 24;
2668 break;
2669 case VC4_TEXTURE_TYPE_LUMALPHA:
2670 uc.ui[0] = ((float_to_ubyte(storage_color[1]) << 24) |
2671 (float_to_ubyte(storage_color[0]) << 0));
2672 break;
2673 }
2674 }
2675
2676 cl_aligned_u32(&vc4->uniforms, uc.ui[0]);
2677 }
2678
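/* Returns 1/width or 1/height of the texture as an f32 bit pattern, used
 * to scale rectangle-texture coordinates down to the normalized range the
 * hardware samples with.
 */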
2679 static uint32_t
2680 get_texrect_scale(struct vc4_texture_stateobj *texstate,
2681 enum quniform_contents contents,
2682 uint32_t data)
2683 {
2684 struct pipe_sampler_view *texture = texstate->textures[data];
2685 uint32_t dim;
2686
2687 if (contents == QUNIFORM_TEXRECT_SCALE_X)
2688 dim = texture->texture->width0;
2689 else
2690 dim = texture->texture->height0;
2691
2692 return fui(1.0f / dim);
2693 }
2694
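/* Packs the indirectly-addressed uniform ranges into a BO, so that the
 * shader's QUNIFORM_UBO_ADDR uniform can point at it for indirect loads.
 */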
2695 static struct vc4_bo *
2696 vc4_upload_ubo(struct vc4_context *vc4, struct vc4_compiled_shader *shader,
2697 const uint32_t *gallium_uniforms)
2698 {
2699 if (!shader->ubo_size)
2700 return NULL;
2701
2702 struct vc4_bo *ubo = vc4_bo_alloc(vc4->screen, shader->ubo_size, "ubo");
2703 uint32_t *data = vc4_bo_map(ubo);
2704 for (uint32_t i = 0; i < shader->num_ubo_ranges; i++) {
2705 memcpy(data + shader->ubo_ranges[i].dst_offset,
2706 gallium_uniforms + shader->ubo_ranges[i].src_offset,
2707 shader->ubo_ranges[i].size);
2708 }
2709
2710 return ubo;
2711 }
2712
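/* Walks the compiled shader's uniform stream and appends the per-draw
 * values (constants, gallium uniforms, viewport transforms, texture
 * config words, etc.) to the uniforms command list.
 */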
2713 void
2714 vc4_write_uniforms(struct vc4_context *vc4, struct vc4_compiled_shader *shader,
2715 struct vc4_constbuf_stateobj *cb,
2716 struct vc4_texture_stateobj *texstate)
2717 {
2718 struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
2719 const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;
2720 struct vc4_bo *ubo = vc4_upload_ubo(vc4, shader, gallium_uniforms);
2721
2722 cl_ensure_space(&vc4->uniforms, (uinfo->count +
2723 uinfo->num_texture_samples) * 4);
2724
2725 cl_start_shader_reloc(&vc4->uniforms, uinfo->num_texture_samples);
2726
2727 for (int i = 0; i < uinfo->count; i++) {
2728
2729 switch (uinfo->contents[i]) {
2730 case QUNIFORM_CONSTANT:
2731 cl_aligned_u32(&vc4->uniforms, uinfo->data[i]);
2732 break;
2733 case QUNIFORM_UNIFORM:
2734 cl_aligned_u32(&vc4->uniforms,
2735 gallium_uniforms[uinfo->data[i]]);
2736 break;
2737 case QUNIFORM_VIEWPORT_X_SCALE:
2738 cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[0] * 16.0f);
2739 break;
2740 case QUNIFORM_VIEWPORT_Y_SCALE:
2741 cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[1] * 16.0f);
2742 break;
2743
2744 case QUNIFORM_VIEWPORT_Z_OFFSET:
2745 cl_aligned_f(&vc4->uniforms, vc4->viewport.translate[2]);
2746 break;
2747 case QUNIFORM_VIEWPORT_Z_SCALE:
2748 cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[2]);
2749 break;
2750
2751 case QUNIFORM_USER_CLIP_PLANE:
2752 cl_aligned_f(&vc4->uniforms,
2753 vc4->clip.ucp[uinfo->data[i] / 4][uinfo->data[i] % 4]);
2754 break;
2755
2756 case QUNIFORM_TEXTURE_CONFIG_P0:
2757 write_texture_p0(vc4, texstate, uinfo->data[i]);
2758 break;
2759
2760 case QUNIFORM_TEXTURE_CONFIG_P1:
2761 write_texture_p1(vc4, texstate, uinfo->data[i]);
2762 break;
2763
2764 case QUNIFORM_TEXTURE_CONFIG_P2:
2765 write_texture_p2(vc4, texstate, uinfo->data[i]);
2766 break;
2767
2768 case QUNIFORM_UBO_ADDR:
2769 cl_aligned_reloc(vc4, &vc4->uniforms, ubo, 0);
2770 break;
2771
2772 case QUNIFORM_TEXTURE_BORDER_COLOR:
2773 write_texture_border_color(vc4, texstate, uinfo->data[i]);
2774 break;
2775
2776 case QUNIFORM_TEXRECT_SCALE_X:
2777 case QUNIFORM_TEXRECT_SCALE_Y:
2778 cl_aligned_u32(&vc4->uniforms,
2779 get_texrect_scale(texstate,
2780 uinfo->contents[i],
2781 uinfo->data[i]));
2782 break;
2783
2784 case QUNIFORM_BLEND_CONST_COLOR:
2785 cl_aligned_f(&vc4->uniforms,
2786 CLAMP(vc4->blend_color.color[uinfo->data[i]], 0, 1));
2787 break;
2788
2789 case QUNIFORM_STENCIL:
2790 cl_aligned_u32(&vc4->uniforms,
2791 vc4->zsa->stencil_uniforms[uinfo->data[i]] |
2792 (uinfo->data[i] <= 1 ?
2793 (vc4->stencil_ref.ref_value[uinfo->data[i]] << 8) :
2794 0));
2795 break;
2796
2797 case QUNIFORM_ALPHA_REF:
2798 cl_aligned_f(&vc4->uniforms,
2799 vc4->zsa->base.alpha.ref_value);
2800 break;
2801 }
2802 #if 0
2803 uint32_t written_val = *(uint32_t *)(vc4->uniforms.next - 4);
2804 fprintf(stderr, "%p: %d / 0x%08x (%f)\n",
2805 shader, i, written_val, uif(written_val));
2806 #endif
2807 }
2808 }
2809
2810 static void
2811 vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
2812 {
2813 struct vc4_context *vc4 = vc4_context(pctx);
2814 vc4->prog.bind_fs = hwcso;
2815 vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
2816 }
2817
2818 static void
2819 vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
2820 {
2821 struct vc4_context *vc4 = vc4_context(pctx);
2822 vc4->prog.bind_vs = hwcso;
2823 vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
2824 }
2825
2826 void
2827 vc4_program_init(struct pipe_context *pctx)
2828 {
2829 struct vc4_context *vc4 = vc4_context(pctx);
2830
2831 pctx->create_vs_state = vc4_shader_state_create;
2832 pctx->delete_vs_state = vc4_shader_state_delete;
2833
2834 pctx->create_fs_state = vc4_shader_state_create;
2835 pctx->delete_fs_state = vc4_shader_state_delete;
2836
2837 pctx->bind_fs_state = vc4_fp_state_bind;
2838 pctx->bind_vs_state = vc4_vp_state_bind;
2839
2840 vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
2841 fs_cache_compare);
2842 vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
2843 vs_cache_compare);
2844 }
2845
2846 void
2847 vc4_program_fini(struct pipe_context *pctx)
2848 {
2849 struct vc4_context *vc4 = vc4_context(pctx);
2850
2851 struct hash_entry *entry;
2852 hash_table_foreach(vc4->fs_cache, entry) {
2853 struct vc4_compiled_shader *shader = entry->data;
2854 vc4_bo_unreference(&shader->bo);
2855 ralloc_free(shader);
2856 _mesa_hash_table_remove(vc4->fs_cache, entry);
2857 }
2858
2859 hash_table_foreach(vc4->vs_cache, entry) {
2860 struct vc4_compiled_shader *shader = entry->data;
2861 vc4_bo_unreference(&shader->bo);
2862 ralloc_free(shader);
2863 _mesa_hash_table_remove(vc4->vs_cache, entry);
2864 }
2865 }