/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
#include "vc4_context.h"

#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);
static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = intr->const_index[0];
        struct vc4_compiler_ubo_range *range = NULL;
        unsigned i;

        for (i = 0; i < c->num_uniform_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }

        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(range);
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        offset -= range->src_offset;

        /* Adjust for where we stored the TGSI register base. */
        indirect_offset = qir_ADD(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     offset)));

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
        indirect_offset = qir_MIN(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
        c->num_texture_samples++;
        return qir_TEX_RESULT(c);
}
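
/* Added commentary (not from the original source): indirect UBO loads are
 * serviced through the texture unit's direct-fetch path (qir_TEX_DIRECT on
 * QUNIFORM_UBO_ADDR), so the result comes back like any other texture
 * sample via qir_TEX_RESULT().  The MIN/MAX clamp above keeps the computed
 * address inside the declared range, which kernel-side shader validation
 * requires before it will accept the fetch.
 */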
nir_ssa_def *
vc4_nir_get_state_uniform(struct nir_builder *b,
                          enum quniform_contents contents)
{
        nir_intrinsic_instr *intr =
                nir_intrinsic_instr_create(b->shader,
                                           nir_intrinsic_load_uniform);
        intr->const_index[0] = (VC4_NIR_STATE_UNIFORM_OFFSET + contents) * 4;
        intr->num_components = 1;
        intr->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
        nir_ssa_dest_init(&intr->instr, &intr->dest, 1, 32, NULL);
        nir_builder_instr_insert(b, &intr->instr);
        return &intr->dest.ssa;
}
nir_ssa_def *
vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
{
        switch (swiz) {
        default:
        case PIPE_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case PIPE_SWIZZLE_0:
                return nir_imm_float(b, 0.0);
        case PIPE_SWIZZLE_1:
                return nir_imm_float(b, 1.0);
        case PIPE_SWIZZLE_X:
        case PIPE_SWIZZLE_Y:
        case PIPE_SWIZZLE_Z:
        case PIPE_SWIZZLE_W:
                return srcs[swiz];
        }
}
static struct qreg *
ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}
static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
{
        if (dest->is_ssa) {
                struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
                for (int i = 0; i < dest->ssa.num_components; i++)
                        qregs[i] = c->undef;
                return qregs;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                return entry->data;
        }
}
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}
static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}
static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}
static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}
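
/* Added commentary: the refinement above is the standard Newton-Raphson
 * step for f(r) = 1/r - x, i.e. r' = r * (2 - x * r), applied once to the
 * hardware's low-precision RCP estimate.  Each step roughly doubles the
 * number of correct mantissa bits.
 */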
static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}
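
/* Added commentary: likewise for reciprocal square root, the Newton-Raphson
 * step for f(r) = 1/r^2 - x is r' = r * (1.5 - 0.5 * x * r * r), which is
 * exactly the FMUL/FSUB tree built above around the hardware RSQ estimate.
 */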
static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c, srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL(c, QPU_COND_NS, low, high);
}
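
/* Added reference: this is the sRGB decode (EOTF),
 *
 *     linear = srgb / 12.92                    if srgb <= 0.04045
 *     linear = ((srgb + 0.055) / 1.055)^2.4    otherwise
 *
 * Both branches are computed unconditionally and qir_SEL() picks one based
 * on the sign flag of (srgb - 0.04045), trading a little ALU work for not
 * having any control flow.
 */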
static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}
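
/* A CPU-side sketch of the split above (an illustration added here, not
 * driver code): writing a = a_hi * 2^24 + a_lo, the product mod 2^32 is
 * a_lo*b_lo + ((a_hi*b_lo + a_lo*b_hi) << 24); the a_hi*b_hi term falls
 * entirely above bit 31.  This assumes MUL24 semantics of multiplying the
 * low 24 bits of each operand and keeping the low 32 bits of the result.
 */
static inline uint32_t
umul32_via_mul24_sketch(uint32_t a, uint32_t b)
{
        uint32_t lolo = (a & 0xffffff) * (b & 0xffffff); /* MUL24(a, b) */
        uint32_t hilo = (a >> 24) * (b & 0xffffff);      /* MUL24(a >> 24, b) */
        uint32_t lohi = (a & 0xffffff) * (b >> 24);      /* MUL24(a, b >> 24) */

        return lolo + ((hilo + lohi) << 24);             /* == a * b mod 2^32 */
}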
static struct qreg
ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
{
        struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
                                                 qir_uniform_ui(c, 8)));
        return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
}
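
/* Added commentary: depth/stencil texels arrive with the 24-bit depth value
 * in the upper 24 bits of the word, so the SHR by 8 isolates it and the
 * multiply by 1.0/0xffffff maps the integer range onto [0.0, 1.0].
 */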
/**
 * Emits a lowered TXF_MS from an MSAA texture.
 *
 * The addressing math has been lowered in NIR, and now we just need to read
 * it like a UBO.
 */
static void
ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
{
        uint32_t tile_width = 32;
        uint32_t tile_height = 32;
        uint32_t tile_size = (tile_height * tile_width *
                              VC4_MAX_SAMPLES * sizeof(uint32_t));

        unsigned unit = instr->texture_index;
        uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
        uint32_t w_tiles = w / tile_width;
        uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
        uint32_t h_tiles = h / tile_height;
        uint32_t size = w_tiles * h_tiles * tile_size;

        struct qreg addr;
        assert(instr->num_srcs == 1);
        assert(instr->src[0].src_type == nir_tex_src_coord);
        addr = ntq_get_src(c, instr->src[0].src, 0);

        /* Perform the clamping required by kernel validation. */
        addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
        addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4));

        qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));

        struct qreg tex = qir_TEX_RESULT(c);
        c->num_texture_samples++;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        enum pipe_format format = c->key->tex[unit].format;
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg scaled = ntq_scale_depth_texture(c, tex);
                for (int i = 0; i < 4; i++)
                        dest[i] = scaled;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }

        for (int i = 0; i < 4; i++) {
                if (c->tex_srgb_decode[unit] & (1 << i))
                        dest[i] = qir_srgb_decode(c, dest[i]);
        }
}
static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        struct qreg s, t, r, lod, proj, compare;
        bool is_txb = false, is_txl = false, has_proj = false;
        unsigned unit = instr->texture_index;

        if (instr->op == nir_texop_txf) {
                ntq_emit_txf(c, instr);
                return;
        }

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparitor:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                case nir_tex_src_projector:
                        proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
                        s = qir_FMUL(c, s, proj);
                        t = qir_FMUL(c, t, proj);
                        has_proj = true;
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
                struct qreg rcp_ma = qir_RCP(c, ma);
                s = qir_FMUL(c, s, rcp_ma);
                t = qir_FMUL(c, t, rcp_ma);
                r = qir_FMUL(c, r, rcp_ma);

                qir_TEX_R(c, r, texture_u[next_texture_u++]);
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
                          texture_u[next_texture_u++]);
        }

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        qir_TEX_T(c, t, texture_u[next_texture_u++]);

        if (is_txl || is_txb)
                qir_TEX_B(c, lod, texture_u[next_texture_u++]);

        qir_TEX_S(c, s, texture_u[next_texture_u++]);

        c->num_texture_samples++;
        struct qreg tex = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg normalized = ntq_scale_depth_texture(c, tex);
                struct qreg depth_output;

                struct qreg u0 = qir_uniform_f(c, 0.0f);
                struct qreg u1 = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        if (has_proj)
                                compare = qir_FMUL(c, compare, proj);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = u1;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                for (int i = 0; i < 4; i++)
                        dest[i] = depth_output;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }

        for (int i = 0; i < 4; i++) {
                if (c->tex_srgb_decode[unit] & (1 << i))
                        dest[i] = qir_srgb_decode(c, dest[i]);
        }
}
/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);
        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff);
}
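
/* Worked example (added): for src = -1.25 the FTOI/ITOF round trip
 * truncates toward zero, giving trunc = -1.0 and diff = -0.25; the sign
 * flag is set, so the SEL returns diff + 1.0 = 0.75 = fract(-1.25).  For
 * non-negative diff the uncorrected value is already correct.
 */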
/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, trunc));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}
/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -2.0 * M_PI,
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c,
                         src,
                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));

        struct qreg x = qir_FADD(c,
                                 ntq_ffract(c, scaled_x),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}
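
/* Added commentary: ntq_fsin above and ntq_fcos below evaluate a Taylor
 * series in a shifted argument.  The input is reduced to
 * x = fract(src / 2pi) - 0.5 in [-0.5, 0.5), and since
 * sin(2pi * (x + 0.5)) = -sin(2pi * x), the coefficients are the usual
 * (2pi)^n / n! terms with their signs flipped.  The handful of terms kept
 * trades accuracy for instruction count.
 */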
static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -1.0f,
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c, src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      ntq_ffract(c, scaled_x),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i]));
                sum = qir_FADD(c, sum, mul);
        }
        return sum;
}
static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        struct qreg t = qir_get_temp(c);

        qir_SF(c, src);
        qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
        qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
        qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
        return t;
}
static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        c->vattr_sizes[attr] = align(attr_size, 4);
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                c->inputs[attr * 4 + i] =
                        qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
                c->num_inputs++;
        }
}
static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
        c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}
static struct qreg
emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
                      uint8_t swizzle)
{
        uint32_t i = c->num_input_slots++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        if (c->num_input_slots >= c->input_slots_array_size) {
                c->input_slots_array_size =
                        MAX2(4, c->input_slots_array_size * 2);

                c->input_slots = reralloc(c, c->input_slots,
                                          struct vc4_varying_slot,
                                          c->input_slots_array_size);
        }

        c->input_slots[i].slot = slot;
        c->input_slots[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}
static void
emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
{
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c, slot, i);
                c->num_inputs++;
        }
}
static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct vc4_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset].slot = slot;
        c->output_slots[decl_offset].swizzle = swizzle;
}
static void
declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_uniform_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct vc4_compiler_ubo_range,
                                         c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_ranges[array_id].used = false;
}
static bool
ntq_src_is_only_ssa_def_user(nir_src *src)
{
        if (!src->is_ssa)
                return false;

        if (!list_empty(&src->ssa->if_uses))
                return false;

        return (src->ssa->uses.next == &src->use_link &&
                src->ssa->uses.next->next == &src->ssa->uses);
}
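
/* Added commentary: NIR tracks a def's uses in an intrusive circular list
 * rooted at ssa->uses.  The return expression checks that the first link
 * is this very src and that the next link wraps back to the head, i.e.
 * the list contains exactly one use.
 */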
/**
 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
 * flag set.
 *
 * However, as an optimization, it tries to find the instructions generating
 * the sources to be packed and just emit the pack flag there, if possible.
 */
static void
ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
{
        struct qreg result = qir_get_temp(c);
        struct nir_alu_instr *vec4 = NULL;

        /* If packing from a vec4 op (as expected), identify it so that we can
         * peek back at what generated its sources.
         */
        if (instr->src[0].src.is_ssa &&
            instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
            nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
            nir_op_vec4) {
                vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        }

        /* If the pack is replicating the same channel 4 times, use the 8888
         * pack flag.  This is common for blending using the alpha
         * channel.
         */
        if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                *dest = qir_PACK_8888_F(c,
                                        ntq_get_src(c, instr->src[0].src,
                                                    instr->src[0].swizzle[0]));
                return;
        }

        for (int i = 0; i < 4; i++) {
                int swiz = instr->src[0].swizzle[i];
                struct qreg src;
                if (vec4) {
                        src = ntq_get_src(c, vec4->src[swiz].src,
                                          vec4->src[swiz].swizzle[0]);
                } else {
                        src = ntq_get_src(c, instr->src[0].src, swiz);
                }

                if (vec4 &&
                    ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
                    src.file == QFILE_TEMP &&
                    c->defs[src.index] &&
                    qir_is_mul(c->defs[src.index]) &&
                    !c->defs[src.index]->dst.pack) {
                        struct qinst *rewrite = c->defs[src.index];
                        c->defs[src.index] = NULL;
                        rewrite->dst = result;
                        rewrite->dst.pack = QPU_PACK_MUL_8A + i;
                } else {
                        qir_PACK_8_F(c, result, src, i);
                }
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        *dest = result;
}
/** Handles sign-extended bitfield extracts for 16 bits. */
static struct qreg
ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 16);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 16 == 0);

        return qir_UNPACK_16_I(c, base, offset_bit / 16);
}
/** Handles unsigned bitfield extracts for 8 bits. */
static struct qreg
ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 8);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 8 == 0);

        return qir_UNPACK_8_I(c, base, offset_bit / 8);
}
/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        enum qpu_cond cond;

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_ieq:
        case nir_op_seq:
                cond = QPU_COND_ZS;
                break;
        case nir_op_fne:
        case nir_op_ine:
        case nir_op_sne:
                cond = QPU_COND_ZC;
                break;
        case nir_op_fge:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_sge:
                cond = QPU_COND_NC;
                break;
        case nir_op_flt:
        case nir_op_ilt:
        case nir_op_slt:
                cond = QPU_COND_NS;
                break;
        default:
                return false;
        }

        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);

        unsigned unsized_type =
                nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
        if (unsized_type == nir_type_float)
                qir_SF(c, qir_FSUB(c, src0, src1));
        else
                qir_SF(c, qir_SUB(c, src0, src1));

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = qir_SEL(c, cond,
                                qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
                break;

        case nir_op_bcsel:
                *dest = qir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = qir_SEL(c, cond,
                                qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
                break;
        }

        return true;
}
/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
                                  struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        if (!compare)
                goto out;

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        qir_SF(c, src[0]);
        return qir_SEL(c, QPU_COND_NS, src[1], src[2]);
}
static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        dest[i] = srcs[i];
                return;
        }

        if (instr->op == nir_op_pack_unorm_4x8) {
                ntq_emit_pack_unorm_4x8(c, instr);
                return;
        }

        if (instr->op == nir_op_unpack_unorm_4x8) {
                struct qreg src = ntq_get_src(c, instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < 4; i++) {
                        if (instr->dest.write_mask & (1 << i))
                                dest[i] = qir_UNPACK_8_F(c, src, i);
                }
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        /* Pick the channel to store the output in. */
        assert(!instr->dest.saturate);
        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        assert(util_is_power_of_two(instr->dest.write_mask));
        dest += ffs(instr->dest.write_mask) - 1;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                *dest = qir_MOV(c, src[0]);
                break;
        case nir_op_fmul:
                *dest = qir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                *dest = qir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                *dest = qir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                *dest = qir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                *dest = qir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i:
        case nir_op_f2u:
                *dest = qir_FTOI(c, src[0]);
                break;
        case nir_op_i2f:
        case nir_op_u2f:
                *dest = qir_ITOF(c, src[0]);
                break;
        case nir_op_b2f:
                *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC,
                                qir_uniform_ui(c, ~0),
                                qir_uniform_ui(c, 0));
                break;

        case nir_op_iadd:
                *dest = qir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                *dest = qir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                *dest = qir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                *dest = qir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                *dest = qir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                *dest = qir_MIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                *dest = qir_MAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                *dest = qir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                *dest = qir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                *dest = qir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                *dest = qir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                *dest = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
                if (!ntq_emit_comparison(c, dest, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;

        case nir_op_bcsel:
                *dest = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC, src[1], src[2]);
                break;

        case nir_op_frcp:
                *dest = ntq_rcp(c, src[0]);
                break;
        case nir_op_frsq:
                *dest = ntq_rsq(c, src[0]);
                break;
        case nir_op_fexp2:
                *dest = qir_EXP2(c, src[0]);
                break;
        case nir_op_flog2:
                *dest = qir_LOG2(c, src[0]);
                break;

        case nir_op_ftrunc:
                *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
                break;
        case nir_op_fceil:
                *dest = ntq_fceil(c, src[0]);
                break;
        case nir_op_ffract:
                *dest = ntq_ffract(c, src[0]);
                break;
        case nir_op_ffloor:
                *dest = ntq_ffloor(c, src[0]);
                break;

        case nir_op_fsin:
                *dest = ntq_fsin(c, src[0]);
                break;
        case nir_op_fcos:
                *dest = ntq_fcos(c, src[0]);
                break;

        case nir_op_fsign:
                *dest = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                *dest = qir_FMAXABS(c, src[0], src[0]);
                break;
        case nir_op_iabs:
                *dest = qir_MAX(c, src[0],
                                qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_ibitfield_extract:
                *dest = ntq_emit_ibfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_ubitfield_extract:
                *dest = ntq_emit_ubfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_usadd_4x8:
                *dest = qir_V8ADDS(c, src[0], src[1]);
                break;

        case nir_op_ussub_4x8:
                *dest = qir_V8SUBS(c, src[0], src[1]);
                break;

        case nir_op_umin_4x8:
                *dest = qir_V8MIN(c, src[0], src[1]);
                break;

        case nir_op_umax_4x8:
                *dest = qir_V8MAX(c, src[0], src[1]);
                break;

        case nir_op_umul_unorm_4x8:
                *dest = qir_V8MULD(c, src[0], src[1]);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
emit_frag_end(struct vc4_compile *c)
{
        struct qreg color;
        if (c->output_color_index != -1) {
                color = c->outputs[c->output_color_index];
        } else {
                color = qir_uniform_ui(c, 0);
        }

        uint32_t discard_cond = QPU_COND_ALWAYS;
        if (c->discard.file != QFILE_NULL) {
                qir_SF(c, c->discard);
                discard_cond = QPU_COND_ZS;
        }

        if (c->fs_key->stencil_enabled) {
                qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                             qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                                     qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                                     qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        if (c->output_sample_mask_index != -1) {
                qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }

        if (c->fs_key->depth_enabled) {
                if (c->output_position_index != -1) {
                        qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
                                      qir_FMUL(c,
                                               c->outputs[c->output_position_index + 2],
                                               qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
                } else {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
                                     qir_FRAG_Z(c))->cond = discard_cond;
                }
        }

        if (!c->msaa_per_sample_output) {
                qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
                             color)->cond = discard_cond;
        } else {
                for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
                                     c->sample_colors[i])->cond = discard_cond;
                }
        }
}
static void
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg packed = qir_get_temp(c);

        for (int i = 0; i < 2; i++) {
                struct qreg scale =
                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);

                struct qreg packed_chan = packed;
                packed_chan.pack = QPU_PACK_A_16A + i;

                qir_FTOI_dest(c, packed_chan,
                              qir_FMUL(c,
                                       qir_FMUL(c,
                                                c->outputs[c->output_position_index + i],
                                                scale),
                                       rcp_w));
        }

        qir_VPM_WRITE(c, packed);
}
static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],
                                                          zscale),
                                              rcp_w),
                                  zoffset));
}
static void
emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
{
        qir_VPM_WRITE(c, rcp_w);
}
static void
emit_point_size_write(struct vc4_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = qir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));

        qir_VPM_WRITE(c, point_size);
}
/**
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator
 * also insists that all vertex attributes loaded get read by the VS/CS, so
 * we have to consume it here.
 */
static void
emit_stub_vpm_read(struct vc4_compile *c)
{
        if (c->num_inputs)
                return;

        c->vattr_sizes[0] = 4;
        (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
        c->num_inputs++;
}
static void
emit_vert_end(struct vc4_compile *c,
              struct vc4_varying_slot *fs_inputs,
              uint32_t num_fs_inputs)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);

        for (int i = 0; i < num_fs_inputs; i++) {
                struct vc4_varying_slot *input = &fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct vc4_varying_slot *output =
                                &c->output_slots[j];

                        if (input->slot == output->slot &&
                            input->swizzle == output->swizzle) {
                                qir_VPM_WRITE(c, c->outputs[j]);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
        }
}
static void
emit_coord_end(struct vc4_compile *c)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
}
static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS_V(s, nir_lower_alu_to_scalar);

                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);
}
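
/* Added commentary: the passes above run inside a do/while loop until no
 * NIR_PASS invocation reports progress, since each optimization can expose
 * new opportunities for the others (copy propagation feeding CSE, constant
 * folding feeding algebraic simplification, and so on).
 */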
static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}
static void
ntq_setup_inputs(struct vc4_compile *c)
{
        unsigned num_entries = 0;
        nir_foreach_variable(var, &c->s->inputs)
                num_entries++;

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->stage == QSTAGE_FRAG) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_FACE) {
                                c->inputs[loc * 4 + 0] =
                                        qir_ITOF(c, qir_reg(QFILE_FRAG_REV_FLAG,
                                                            0));
                        } else if (var->data.location >= VARYING_SLOT_VAR0 &&
                                   (c->fs_key->point_sprite_mask &
                                    (1 << (var->data.location -
                                           VARYING_SLOT_VAR0)))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var->data.location);
                        }
                } else {
                        emit_vertex_input(c, loc);
                }
        }
}
static void
ntq_setup_outputs(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);

                for (int i = 0; i < 4; i++)
                        add_output(c, loc + i, var->data.location, i);

                if (c->stage == QSTAGE_FRAG) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                        case FRAG_RESULT_DATA0:
                                c->output_color_index = loc;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}
static void
ntq_setup_uniforms(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned array_elem_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * array_elem_size,
                                      array_len * array_elem_size);
        }
}
/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = qir_uniform_ui(c, 0);
        }
}
static void
ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
static void
ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* QIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers().
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, 0);
}
static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
        nir_const_value *const_offset;
        unsigned offset;
        struct qreg *dest = NULL;

        if (info->has_dest) {
                dest = ntq_get_dest(c, &instr->dest);
        }

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                if (const_offset) {
                        offset = instr->const_index[0] + const_offset->u32[0];
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        if (offset < VC4_NIR_STATE_UNIFORM_OFFSET) {
                                *dest = qir_uniform(c, QUNIFORM_UNIFORM,
                                                    offset);
                        } else {
                                *dest = qir_uniform(c, offset -
                                                    VC4_NIR_STATE_UNIFORM_OFFSET,
                                                    0);
                        }
                } else {
                        *dest = indirect_uniform_load(c, instr);
                }
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        dest[i] = qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                              instr->const_index[0] * 4 + i);
                }
                break;

        case nir_intrinsic_load_sample_mask_in:
                *dest = qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0);
                break;

        case nir_intrinsic_load_input:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                assert(const_offset && "vc4 doesn't support indirect inputs");
                if (instr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT) {
                        assert(const_offset->u32[0] == 0);
                        /* Reads of the per-sample color need to be done in
                         * order.
                         */
                        int sample_index = (instr->const_index[0] -
                                            VC4_NIR_TLB_COLOR_READ_INPUT);
                        for (int i = 0; i <= sample_index; i++) {
                                if (c->color_reads[i].file == QFILE_NULL) {
                                        c->color_reads[i] =
                                                qir_TLB_COLOR_READ(c);
                                }
                        }
                        *dest = c->color_reads[sample_index];
                } else {
                        offset = instr->const_index[0] + const_offset->u32[0];
                        *dest = c->inputs[offset];
                }
                break;

        case nir_intrinsic_store_output:
                const_offset = nir_src_as_const_value(instr->src[1]);
                assert(const_offset && "vc4 doesn't support indirect outputs");
                offset = instr->const_index[0] + const_offset->u32[0];

                /* MSAA color outputs are the only case where we have an
                 * output that's not lowered to being a store of a single 32
                 * bit value.
                 */
                if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
                        assert(offset == c->output_color_index);
                        for (int i = 0; i < 4; i++) {
                                c->sample_colors[i] =
                                        qir_MOV(c, ntq_get_src(c, instr->src[0],
                                                               i));
                        }
                        c->msaa_per_sample_output = true;
                } else {
                        assert(instr->num_components == 1);
                        c->outputs[offset] =
                                qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
                        c->num_outputs = MAX2(c->num_outputs, offset + 1);
                }
                break;

        case nir_intrinsic_discard:
                c->discard = qir_uniform_ui(c, ~0);
                break;

        case nir_intrinsic_discard_if:
                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_ui(c, 0);
                c->discard = qir_OR(c, c->discard,
                                    ntq_get_src(c, instr->src[0], 0));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}
static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        fprintf(stderr, "general IF statements not handled.\n");
}
static void
ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
        nir_foreach_instr(block, instr) {
                ntq_emit_instr(c, instr);
        }
}
static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
static void
ntq_emit_loop(struct vc4_compile *c, nir_loop *nloop)
{
        fprintf(stderr, "LOOPS not fully handled. Rendering errors likely.\n");
        ntq_emit_cf_list(c, &nloop->body);
}
static void
ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}
static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}
static void
ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}
static void
nir_to_qir(struct vc4_compile *c)
{
        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(c->s, function) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}
static const nir_shader_compiler_options nir_options = {
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_fsqrt = true,
        .lower_negate = true,
};
static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
        int *count = (int *) state;
        nir_foreach_instr(block, instr) {
                *count = *count + 1;
        }
        return true;
}
static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_function(nir, function) {
                if (!function->impl)
                        continue;
                nir_foreach_block_call(function->impl, count_nir_instrs_in_block, &count);
        }
        return count;
}
static struct vc4_compile *
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
               struct vc4_key *key)
{
        struct vc4_compile *c = qir_compile_init();

        c->stage = stage;
        c->shader_state = &key->shader_state->base;
        c->program_id = key->shader_state->program_id;
        c->variant_id = key->shader_state->compiled_variant_count++;

        c->key = key;
        switch (stage) {
        case QSTAGE_FRAG:
                c->fs_key = (struct vc4_fs_key *)key;
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, ~0, 0);
                        c->point_y = emit_fragment_varying(c, ~0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, ~0, 0);
                }
                break;
        case QSTAGE_VERT:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        case QSTAGE_COORD:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        }

        const struct tgsi_token *tokens = key->shader_state->base.tokens;

        if (vc4_debug & VC4_DEBUG_TGSI) {
                fprintf(stderr, "%s prog %d/%d TGSI:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                tgsi_dump(tokens, 0);
        }

        c->s = tgsi_to_nir(tokens, &nir_options);
        NIR_PASS_V(c->s, nir_opt_global_to_local);
        NIR_PASS_V(c->s, nir_convert_to_ssa);

        if (stage == QSTAGE_FRAG)
                NIR_PASS_V(c->s, vc4_nir_lower_blend, c);

        struct nir_lower_tex_options tex_options = {
                /* We would need to implement txs, but we don't want the
                 * int/float conversions
                 */
                .lower_rect = false,

                /* We want to use this, but we don't want to newton-raphson
                 * its rcp
                 */
                .lower_txp = ~0,

                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
         * The format swizzling applies before sRGB decode, and
         * ARB_texture_swizzle is the last thing before returning the sample.
         */
        for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
                enum pipe_format format = c->key->tex[i].format;

                if (!format)
                        continue;

                const uint8_t *format_swizzle = vc4_get_format_swizzle(format);

                for (int j = 0; j < 4; j++) {
                        uint8_t arb_swiz = c->key->tex[i].swizzle[j];

                        if (arb_swiz <= 3) {
                                tex_options.swizzles[i][j] =
                                        format_swizzle[arb_swiz];
                        } else {
                                tex_options.swizzles[i][j] = arb_swiz;
                        }

                        /* If ARB_texture_swizzle is reading from the R, G, or
                         * B channels of an sRGB texture, then we need to
                         * apply sRGB decode to this channel at sample time.
                         */
                        if (arb_swiz < 3 && util_format_is_srgb(format)) {
                                c->tex_srgb_decode[i] |= (1 << j);
                        }
                }
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);

        if (c->fs_key && c->fs_key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (stage == QSTAGE_FRAG)
                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
        else
                NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);

        NIR_PASS_V(c->s, vc4_nir_lower_io, c);
        NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, nir_lower_idiv);
        NIR_PASS_V(c->s, nir_lower_load_const_to_scalar);

        vc4_optimize_nir(c->s);

        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_local);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        count_nir_instrs(c->s));
        }

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_qir(c);

        switch (stage) {
        case QSTAGE_FRAG:
                emit_frag_end(c);
                break;
        case QSTAGE_VERT:
                emit_vert_end(c,
                              vc4->prog.fs->input_slots,
                              vc4->prog.fs->num_inputs);
                break;
        case QSTAGE_COORD:
                emit_coord_end(c);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_optimize(c);
        qir_lower_uniforms(c);

        qir_schedule_instructions(c);

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_reorder_uniforms(c);
        vc4_generate_code(vc4, c);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->qpu_inst_count);
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->num_uniforms);
        }

        return c;
}
static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->base.tokens = tgsi_dup_tokens(cso->tokens);
        so->program_id = vc4->next_uncompiled_program_id++;

        return so;
}
static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;

        vc4_set_shader_uniform_dirty_flags(shader);
}
static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                bool input_live[c->num_input_slots];

                memset(input_live, 0, sizeof(input_live));
                list_for_each_entry(struct qinst, inst, &c->instructions, link) {
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_slots = ralloc_array(shader,
                                                   struct vc4_varying_slot,
                                                   c->num_input_slots);

                for (int i = 0; i < c->num_input_slots; i++) {
                        struct vc4_varying_slot *slot = &c->input_slots[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (slot->slot == (uint8_t)~0)
                                continue;

                        if (slot->slot == VARYING_SLOT_COL0 ||
                            slot->slot == VARYING_SLOT_COL1 ||
                            slot->slot == VARYING_SLOT_BFC0 ||
                            slot->slot == VARYING_SLOT_BFC1) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_slots[shader->num_inputs] = *slot;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                         c->qpu_inst_count * sizeof(uint64_t));

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }

        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}
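
/* Added commentary: compiled variants are cached per stage, keyed on the
 * entire vc4_fs_key/vc4_vs_key contents.  The key is duplicated into the
 * shader's ralloc context before insertion so the hash table entry does
 * not point at the caller's stack-allocated key.
 */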
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;

                if (sampler->texture->nr_samples > 1) {
                        key->tex[i].msaa_width = sampler->texture->width0;
                        key->tex[i].msaa_height = sampler->texture->height0;
                } else if (sampler_state) {
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_SAMPLE_MASK |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }

        key->msaa = vc4->rasterizer->base.multisample;
        key->sample_coverage = (vc4->rasterizer->base.multisample &&
                                vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
        key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
        key->sample_alpha_to_one = vc4->blend->alpha_to_one;

        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}
void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}
static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}
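
/* Added commentary: hashing and comparing keys with raw _mesa_hash_data()
 * and memcmp() over the full struct size is only sound because callers
 * memset() the whole key to zero before filling it in, which makes any
 * padding bytes deterministic.
 */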
static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}
static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        free((void *)so->base.tokens);
        free(so);
}
static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}
void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}
void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}