/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_lowering.h"
#include "tgsi/tgsi_parse.h"
#include "glsl/nir/nir.h"
#include "glsl/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
#include "vc4_context.h"

#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);
static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = intr->const_index[0];
        struct vc4_compiler_ubo_range *range = NULL;
        unsigned i;
        for (i = 0; i < c->num_uniform_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }
        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(range);
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        offset -= range->src_offset;

        /* Adjust for where we stored the TGSI register base. */
        indirect_offset = qir_ADD(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     offset)));

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
        indirect_offset = qir_MIN(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
        c->num_texture_samples++;
        return qir_TEX_RESULT(c);
}
nir_ssa_def *
vc4_nir_get_state_uniform(struct nir_builder *b,
                          enum quniform_contents contents)
{
        nir_intrinsic_instr *intr =
                nir_intrinsic_instr_create(b->shader,
                                           nir_intrinsic_load_uniform);
        intr->const_index[0] = (VC4_NIR_STATE_UNIFORM_OFFSET + contents) * 4;
        intr->num_components = 1;
        intr->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
        nir_ssa_dest_init(&intr->instr, &intr->dest, 1, NULL);
        nir_builder_instr_insert(b, &intr->instr);
        return &intr->dest.ssa;
}
nir_ssa_def *
vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case UTIL_FORMAT_SWIZZLE_0:
                return nir_imm_float(b, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return nir_imm_float(b, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}
static struct qreg *
ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}
static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
{
        if (dest->is_ssa) {
                struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
                for (int i = 0; i < dest->ssa.num_components; i++)
                        qregs[i] = c->undef;
                return qregs;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                return entry->data;
        }
}
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}
static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}
static struct qreg
get_swizzled_channel(struct vc4_compile *c,
                     struct qreg *srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case UTIL_FORMAT_SWIZZLE_0:
                return qir_uniform_f(c, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return qir_uniform_f(c, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}
static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}
static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}
static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}
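/* A worked form of the step above: given an estimate r0 of 1/sqrt(x),
 * one Newton-Raphson iteration computes
 * r1 = r0 * (1.5 - 0.5 * x * r0 * r0).  With x = 4.0 and r0 = 0.51,
 * r1 = 0.51 * (1.5 - 0.5 * 4.0 * 0.2601) = 0.51 * 0.9798 = 0.4997.
 */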
static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c,
                                                     srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL(c, QPU_COND_NS, low, high);
}
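/* This is the standard sRGB-to-linear transfer function:
 *
 *   linear = srgb / 12.92                  if srgb <= 0.04045
 *   linear = ((srgb + 0.055) / 1.055)^2.4  otherwise
 *
 * qir_SF() sets flags on (srgb - 0.04045), and the SEL picks the "low"
 * branch when that difference is negative.
 */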
static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}
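/* The algebra behind ntq_umul(): the QPU multiplier (MUL24) only
 * multiplies the low 24 bits of each operand.  Writing each source as
 * src = (src >> 24) * 2^24 + (src & 0xffffff), the low 32 bits of the
 * full product reduce to
 *
 *   src0 * src1 = lolo + ((hilo + lohi) << 24)  (mod 2^32)
 *
 * since every cross term dropped by the 24-bit truncation is shifted
 * left by at least 32 bits and cannot affect the result.
 */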
static struct qreg
ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
{
        struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
                                                 qir_uniform_ui(c, 8)));
        return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f / 0xffffff));
}
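/* The shift above discards the low byte of the raw sample, leaving the
 * 24-bit depth value, and the multiply by 1.0/0xffffff rescales the
 * integer range [0, 2^24 - 1] to a float in [0, 1].
 */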
/**
 * Emits a lowered TXF_MS from an MSAA texture.
 *
 * The addressing math has been lowered in NIR, and now we just need to read
 * it like a UBO.
 */
static void
ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
{
        uint32_t tile_width = 32;
        uint32_t tile_height = 32;
        uint32_t tile_size = (tile_height * tile_width *
                              VC4_MAX_SAMPLES * sizeof(uint32_t));

        unsigned unit = instr->sampler_index;
        uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
        uint32_t w_tiles = w / tile_width;
        uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
        uint32_t h_tiles = h / tile_height;
        uint32_t size = w_tiles * h_tiles * tile_size;

        struct qreg addr;
        assert(instr->num_srcs == 1);
        assert(instr->src[0].src_type == nir_tex_src_coord);
        addr = ntq_get_src(c, instr->src[0].src, 0);

        /* Perform the clamping required by kernel validation. */
        addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
        addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4));

        qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));

        struct qreg tex = qir_TEX_RESULT(c);
        c->num_texture_samples++;

        struct qreg texture_output[4];
        enum pipe_format format = c->key->tex[unit].format;
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg scaled = ntq_scale_depth_texture(c, tex);
                for (int i = 0; i < 4; i++)
                        texture_output[i] = scaled;
        } else {
                struct qreg tex_result_unpacked[4];
                for (int i = 0; i < 4; i++)
                        tex_result_unpacked[i] = qir_UNPACK_8_F(c, tex, i);

                const uint8_t *format_swiz =
                        vc4_get_format_swizzle(c->key->tex[unit].format);
                for (int i = 0; i < 4; i++) {
                        texture_output[i] =
                                get_swizzled_channel(c, tex_result_unpacked,
                                                     format_swiz[i]);
                }
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        for (int i = 0; i < 4; i++) {
                dest[i] = get_swizzled_channel(c, texture_output,
                                               c->key->tex[unit].swizzle[i]);
        }
}
static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        struct qreg s, t, r, lod, proj, compare;
        bool is_txb = false, is_txl = false, has_proj = false;
        unsigned unit = instr->sampler_index;

        if (instr->op == nir_texop_txf) {
                ntq_emit_txf(c, instr);
                return;
        }

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparitor:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                case nir_tex_src_projector:
                        proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
                        s = qir_FMUL(c, s, proj);
                        t = qir_FMUL(c, t, proj);
                        has_proj = true;
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
                struct qreg rcp_ma = qir_RCP(c, ma);
                s = qir_FMUL(c, s, rcp_ma);
                t = qir_FMUL(c, t, rcp_ma);
                r = qir_FMUL(c, r, rcp_ma);

                qir_TEX_R(c, r, texture_u[next_texture_u++]);
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
                          texture_u[next_texture_u++]);
        }

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        qir_TEX_T(c, t, texture_u[next_texture_u++]);

        if (is_txl || is_txb)
                qir_TEX_B(c, lod, texture_u[next_texture_u++]);

        qir_TEX_S(c, s, texture_u[next_texture_u++]);

        c->num_texture_samples++;
        struct qreg tex = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg unpacked[4];
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg normalized = ntq_scale_depth_texture(c, tex);
                struct qreg depth_output;

                struct qreg u0 = qir_uniform_f(c, 0.0f);
                struct qreg u1 = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        if (has_proj)
                                compare = qir_FMUL(c, compare, proj);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = u1;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                for (int i = 0; i < 4; i++)
                        unpacked[i] = depth_output;
        } else {
                for (int i = 0; i < 4; i++)
                        unpacked[i] = qir_UNPACK_8_F(c, tex, i);
        }

        const uint8_t *format_swiz = vc4_get_format_swizzle(format);
        struct qreg texture_output[4];
        for (int i = 0; i < 4; i++) {
                texture_output[i] = get_swizzled_channel(c, unpacked,
                                                         format_swiz[i]);
        }

        if (util_format_is_srgb(format)) {
                for (int i = 0; i < 3; i++)
                        texture_output[i] = qir_srgb_decode(c,
                                                            texture_output[i]);
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        for (int i = 0; i < 4; i++) {
                dest[i] = get_swizzled_channel(c, texture_output,
                                               c->key->tex[unit].swizzle[i]);
        }
}
/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);

        /* Add 1 to the difference if it was negative. */
        qir_SF(c, diff);
        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff);
}
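/* Worked example: for src = -1.25, FTOI/ITOF rounds toward zero, so
 * trunc = -1.0 and diff = -0.25.  The flags on diff are negative, so the
 * SEL returns diff + 1.0 = 0.75, which is -1.25 - floor(-1.25).
 */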
/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, trunc));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}
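/* Worked example: for src = -1.25, trunc = -1.0 and src - trunc = -0.25
 * is negative, so the SEL returns trunc - 1.0 = -2.0 = floor(-1.25).
 * For src = 1.25, src - trunc = 0.25 is non-negative and trunc = 1.0 is
 * already the floor.
 */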
/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}
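/* Worked example: for src = 1.25, trunc = 1.0 and trunc - src = -0.25 is
 * negative, so the SEL returns trunc + 1.0 = 2.0 = ceil(1.25).  For
 * src = -1.25, trunc - src = 0.25 is non-negative and trunc = -1.0 is
 * already the ceiling.
 */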
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -2.0 * M_PI,
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c,
                         src,
                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));

        struct qreg x = qir_FADD(c,
                                 ntq_ffract(c, scaled_x),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}
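/* The loop above evaluates the odd Taylor series
 *
 *   sin(2*pi*x) = 2*pi*x - (2*pi*x)^3/3! + (2*pi*x)^5/5! - ...
 *
 * on x = fract(src / (2*pi)) - 0.5, i.e. the input reduced to one period
 * and recentered on [-0.5, 0.5], where the truncated series is most
 * accurate.  The sign flip from the half-period shift is folded into the
 * coefficients.
 */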
static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -1.0f,
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c, src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      ntq_ffract(c, scaled_x),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i]));
                sum = qir_FADD(c, sum, mul);
        }
        return sum;
}
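/* Same idea as ntq_fsin() above, but with the even Taylor series
 *
 *   cos(2*pi*x) = 1 - (2*pi*x)^2/2! + (2*pi*x)^4/4! - ...
 *
 * evaluated on x = fract(src / (2*pi)) - 0.5.  Since
 * cos(2*pi*(x - 0.5)) = -cos(2*pi*x), the coefficient signs are inverted
 * relative to the plain series.
 */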
static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        struct qreg t = qir_get_temp(c);

        qir_SF(c, src);
        qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
        qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
        qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
        return t;
}
static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        c->vattr_sizes[attr] = align(attr_size, 4);
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                struct qreg vpm = { QFILE_VPM, attr * 4 + i };
                c->inputs[attr * 4 + i] = qir_MOV(c, vpm);
                c->num_inputs++;
        }
}
static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
        c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}
static struct qreg
emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
                      uint8_t swizzle)
{
        uint32_t i = c->num_input_slots++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        if (c->num_input_slots >= c->input_slots_array_size) {
                c->input_slots_array_size =
                        MAX2(4, c->input_slots_array_size * 2);

                c->input_slots = reralloc(c, c->input_slots,
                                          struct vc4_varying_slot,
                                          c->input_slots_array_size);
        }

        c->input_slots[i].slot = slot;
        c->input_slots[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}
static void
emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
{
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c, slot, i);
                c->num_inputs++;
        }
}
static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct vc4_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset].slot = slot;
        c->output_slots[decl_offset].swizzle = swizzle;
}
static void
declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_uniform_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct vc4_compiler_ubo_range,
                                         c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_ranges[array_id].used = false;
}
static bool
ntq_src_is_only_ssa_def_user(nir_src *src)
{
        if (!src->is_ssa)
                return false;

        if (!list_empty(&src->ssa->if_uses))
                return false;

        return (src->ssa->uses.next == &src->use_link &&
                src->ssa->uses.next->next == &src->ssa->uses);
}
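/* The return expression above walks the SSA def's use list directly: the
 * def is only used by src if src's use_link is the first entry in the
 * list and its successor is the list head again, i.e. the list has
 * exactly one element.
 */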
/**
 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
 * flag set.
 *
 * However, as an optimization, it tries to find the instructions generating
 * the sources to be packed and just emit the pack flag there, if possible.
 */
static void
ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
{
        struct qreg result = qir_get_temp(c);
        struct nir_alu_instr *vec4 = NULL;

        /* If packing from a vec4 op (as expected), identify it so that we can
         * peek back at what generated its sources.
         */
        if (instr->src[0].src.is_ssa &&
            instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
            nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
            nir_op_vec4) {
                vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        }

        /* If the pack is replicating the same channel 4 times, use the 8888
         * pack flag.  This is common for blending using the alpha
         * channel.
         */
        if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                *dest = qir_PACK_8888_F(c,
                                        ntq_get_src(c, instr->src[0].src,
                                                    instr->src[0].swizzle[0]));
                return;
        }

        for (int i = 0; i < 4; i++) {
                int swiz = instr->src[0].swizzle[i];
                struct qreg src;
                if (vec4) {
                        src = ntq_get_src(c, vec4->src[swiz].src,
                                          vec4->src[swiz].swizzle[0]);
                } else {
                        src = ntq_get_src(c, instr->src[0].src, swiz);
                }

                if (vec4 &&
                    ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
                    src.file == QFILE_TEMP &&
                    c->defs[src.index] &&
                    qir_is_mul(c->defs[src.index]) &&
                    !c->defs[src.index]->dst.pack) {
                        struct qinst *rewrite = c->defs[src.index];
                        c->defs[src.index] = NULL;
                        rewrite->dst = result;
                        rewrite->dst.pack = QPU_PACK_MUL_8A + i;
                } else {
                        qir_PACK_8_F(c, result, src, i);
                }
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        *dest = result;
}
/** Handles sign-extended bitfield extracts for 16 bits. */
static struct qreg
ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 16);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 16 == 0);

        return qir_UNPACK_16_I(c, base, offset_bit / 16);
}
/** Handles unsigned bitfield extracts for 8 bits. */
static struct qreg
ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 8);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 8 == 0);

        return qir_UNPACK_8_I(c, base, offset_bit / 8);
}
static struct qreg
ntq_emit_comparison(struct vc4_compile *c, nir_alu_instr *instr,
                    struct qreg src0, struct qreg src1)
{
        enum qpu_cond cond;

        switch (instr->op) {
        case nir_op_feq:
        case nir_op_ieq:
        case nir_op_seq:
                cond = QPU_COND_ZS;
                break;
        case nir_op_fne:
        case nir_op_ine:
        case nir_op_sne:
                cond = QPU_COND_ZC;
                break;
        case nir_op_fge:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_sge:
                cond = QPU_COND_NC;
                break;
        case nir_op_flt:
        case nir_op_ilt:
        case nir_op_slt:
                cond = QPU_COND_NS;
                break;
        default:
                unreachable("bad ALU op for comparison");
        }

        if (nir_op_infos[instr->op].input_types[0] == nir_type_float)
                qir_SF(c, qir_FSUB(c, src0, src1));
        else
                qir_SF(c, qir_SUB(c, src0, src1));

        switch (instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                return qir_SEL(c, cond,
                               qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
        default:
                return qir_SEL(c, cond,
                               qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
        }
}
static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        dest[i] = srcs[i];
                return;
        }

        if (instr->op == nir_op_pack_unorm_4x8) {
                ntq_emit_pack_unorm_4x8(c, instr);
                return;
        }

        if (instr->op == nir_op_unpack_unorm_4x8) {
                struct qreg src = ntq_get_src(c, instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < 4; i++) {
                        if (instr->dest.write_mask & (1 << i))
                                dest[i] = qir_UNPACK_8_F(c, src, i);
                }
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        /* Pick the channel to store the output in. */
        assert(!instr->dest.saturate);
        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        assert(util_is_power_of_two(instr->dest.write_mask));
        dest += ffs(instr->dest.write_mask) - 1;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                *dest = qir_MOV(c, src[0]);
                break;
        case nir_op_fmul:
                *dest = qir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                *dest = qir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                *dest = qir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                *dest = qir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                *dest = qir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i:
        case nir_op_f2u:
                *dest = qir_FTOI(c, src[0]);
                break;
        case nir_op_i2f:
        case nir_op_u2f:
                *dest = qir_ITOF(c, src[0]);
                break;
        case nir_op_b2f:
                *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC,
                                qir_uniform_ui(c, ~0),
                                qir_uniform_ui(c, 0));
                break;

        case nir_op_iadd:
                *dest = qir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                *dest = qir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                *dest = qir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                *dest = qir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                *dest = qir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                *dest = qir_MIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                *dest = qir_MAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                *dest = qir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                *dest = qir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                *dest = qir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                *dest = qir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                *dest = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
                *dest = ntq_emit_comparison(c, instr, src[0], src[1]);
                break;

        case nir_op_bcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_NS, src[1], src[2]);
                break;
        case nir_op_fcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC, src[1], src[2]);
                break;

        case nir_op_frcp:
                *dest = ntq_rcp(c, src[0]);
                break;
        case nir_op_frsq:
                *dest = ntq_rsq(c, src[0]);
                break;
        case nir_op_fexp2:
                *dest = qir_EXP2(c, src[0]);
                break;
        case nir_op_flog2:
                *dest = qir_LOG2(c, src[0]);
                break;

        case nir_op_ftrunc:
                *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
                break;
        case nir_op_fceil:
                *dest = ntq_fceil(c, src[0]);
                break;
        case nir_op_ffract:
                *dest = ntq_ffract(c, src[0]);
                break;
        case nir_op_ffloor:
                *dest = ntq_ffloor(c, src[0]);
                break;

        case nir_op_fsin:
                *dest = ntq_fsin(c, src[0]);
                break;
        case nir_op_fcos:
                *dest = ntq_fcos(c, src[0]);
                break;

        case nir_op_fsign:
                *dest = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                *dest = qir_FMAXABS(c, src[0], src[0]);
                break;
        case nir_op_iabs:
                *dest = qir_MAX(c, src[0],
                                qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_ibitfield_extract:
                *dest = ntq_emit_ibfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_ubitfield_extract:
                *dest = ntq_emit_ubfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_usadd_4x8:
                *dest = qir_V8ADDS(c, src[0], src[1]);
                break;

        case nir_op_ussub_4x8:
                *dest = qir_V8SUBS(c, src[0], src[1]);
                break;

        case nir_op_umin_4x8:
                *dest = qir_V8MIN(c, src[0], src[1]);
                break;

        case nir_op_umax_4x8:
                *dest = qir_V8MAX(c, src[0], src[1]);
                break;

        case nir_op_umul_unorm_4x8:
                *dest = qir_V8MULD(c, src[0], src[1]);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
emit_frag_end(struct vc4_compile *c)
{
        struct qreg color;
        if (c->output_color_index != -1) {
                color = c->outputs[c->output_color_index];
        } else {
                color = qir_uniform_ui(c, 0);
        }

        if (c->discard.file != QFILE_NULL)
                qir_TLB_DISCARD_SETUP(c, c->discard);

        if (c->fs_key->stencil_enabled) {
                qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        if (c->output_sample_mask_index != -1) {
                qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }

        if (c->fs_key->depth_enabled) {
                struct qreg z;
                if (c->output_position_index != -1) {
                        z = qir_FTOI(c, qir_FMUL(c, c->outputs[c->output_position_index + 2],
                                                 qir_uniform_f(c, 0xffffff)));
                } else {
                        z = qir_FRAG_Z(c);
                }
                qir_TLB_Z_WRITE(c, z);
        }

        if (!c->msaa_per_sample_output) {
                qir_TLB_COLOR_WRITE(c, color);
        } else {
                for (int i = 0; i < VC4_MAX_SAMPLES; i++)
                        qir_TLB_COLOR_WRITE_MS(c, c->sample_colors[i]);
        }
}
static void
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg packed = qir_get_temp(c);

        for (int i = 0; i < 2; i++) {
                struct qreg scale =
                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);

                struct qreg packed_chan = packed;
                packed_chan.pack = QPU_PACK_A_16A + i;

                qir_FTOI_dest(c, packed_chan,
                              qir_FMUL(c,
                                       qir_FMUL(c,
                                                c->outputs[c->output_position_index + i],
                                                scale),
                                       rcp_w));
        }

        qir_VPM_WRITE(c, packed);
}
static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],
                                                          zscale),
                                              rcp_w),
                                  zoffset));
}
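/* This is the viewport transform of clip-space Z:
 *
 *   Zw = Zc * zscale * (1 / Wc) + zoffset
 *
 * matching the perspective divide and viewport scale that the X/Y
 * channels get in emit_scaled_viewport_write().
 */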
static void
emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
{
        qir_VPM_WRITE(c, rcp_w);
}
static void
emit_point_size_write(struct vc4_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = qir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));

        qir_VPM_WRITE(c, point_size);
}
/**
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator also
 * insists that all vertex attributes loaded get read by the VS/CS, so we
 * have to consume it here.
 */
static void
emit_stub_vpm_read(struct vc4_compile *c)
{
        if (c->num_inputs)
                return;

        c->vattr_sizes[0] = 4;
        struct qreg vpm = { QFILE_VPM, 0 };
        (void)qir_MOV(c, vpm);
}
static void
emit_vert_end(struct vc4_compile *c,
              struct vc4_varying_slot *fs_inputs,
              uint32_t num_fs_inputs)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);

        for (int i = 0; i < num_fs_inputs; i++) {
                struct vc4_varying_slot *input = &fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct vc4_varying_slot *output =
                                &c->output_slots[j];

                        if (input->slot == output->slot &&
                            input->swizzle == output->swizzle) {
                                qir_VPM_WRITE(c, c->outputs[j]);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
        }
}
static void
emit_coord_end(struct vc4_compile *c)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
}
static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                nir_lower_vars_to_ssa(s);
                nir_lower_alu_to_scalar(s);

                progress = nir_copy_prop(s) || progress;
                progress = nir_opt_dce(s) || progress;
                progress = nir_opt_cse(s) || progress;
                progress = nir_opt_peephole_select(s) || progress;
                progress = nir_opt_algebraic(s) || progress;
                progress = nir_opt_constant_folding(s) || progress;
                progress = nir_opt_undef(s) || progress;
        } while (progress);
}
static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}
static void
ntq_setup_inputs(struct vc4_compile *c)
{
        unsigned num_entries = 0;
        nir_foreach_variable(var, &c->s->inputs)
                num_entries++;

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->stage == QSTAGE_FRAG) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_FACE) {
                                c->inputs[loc * 4 + 0] = qir_FRAG_REV_FLAG(c);
                        } else if (var->data.location >= VARYING_SLOT_VAR0 &&
                                   (c->fs_key->point_sprite_mask &
                                    (1 << (var->data.location -
                                           VARYING_SLOT_VAR0)))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var->data.location);
                        }
                } else {
                        emit_vertex_input(c, loc);
                }
        }
}
static void
ntq_setup_outputs(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);

                for (int i = 0; i < 4; i++)
                        add_output(c, loc + i, var->data.location, i);

                if (c->stage == QSTAGE_FRAG) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                        case FRAG_RESULT_DATA0:
                                c->output_color_index = loc;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}
static void
ntq_setup_uniforms(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned array_elem_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * array_elem_size,
                                      array_len * array_elem_size);
        }
}
/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = qir_uniform_ui(c, 0);
        }
}
static void
ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, instr->value.u[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
static void
ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* QIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers()).
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, 0);
}
static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
        nir_const_value *const_offset;
        unsigned offset;
        struct qreg *dest = NULL;

        if (info->has_dest) {
                dest = ntq_get_dest(c, &instr->dest);
        }

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                if (const_offset) {
                        offset = instr->const_index[0] + const_offset->u[0];
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        if (offset < VC4_NIR_STATE_UNIFORM_OFFSET) {
                                *dest = qir_uniform(c, QUNIFORM_UNIFORM,
                                                    offset);
                        } else {
                                *dest = qir_uniform(c, offset -
                                                    VC4_NIR_STATE_UNIFORM_OFFSET,
                                                    0);
                        }
                } else {
                        *dest = indirect_uniform_load(c, instr);
                }
                break;

        case nir_intrinsic_load_user_clip_plane:
                *dest = qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                    instr->const_index[0]);
                break;

        case nir_intrinsic_load_sample_mask_in:
                *dest = qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0);
                break;

        case nir_intrinsic_load_input:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                assert(const_offset && "vc4 doesn't support indirect inputs");
                if (instr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT) {
                        assert(const_offset->u[0] == 0);
                        /* Reads of the per-sample color need to be done in
                         * order.
                         */
                        int sample_index = (instr->const_index[0] -
                                            VC4_NIR_TLB_COLOR_READ_INPUT);
                        for (int i = 0; i <= sample_index; i++) {
                                if (c->color_reads[i].file == QFILE_NULL) {
                                        c->color_reads[i] =
                                                qir_TLB_COLOR_READ(c);
                                }
                        }
                        *dest = c->color_reads[sample_index];
                } else {
                        offset = instr->const_index[0] + const_offset->u[0];
                        *dest = c->inputs[offset];
                }
                break;

        case nir_intrinsic_store_output:
                const_offset = nir_src_as_const_value(instr->src[1]);
                assert(const_offset && "vc4 doesn't support indirect outputs");
                offset = instr->const_index[0] + const_offset->u[0];

                /* MSAA color outputs are the only case where we have an
                 * output that's not lowered to being a store of a single 32
                 * bit value.
                 */
                if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
                        assert(offset == c->output_color_index);
                        for (int i = 0; i < 4; i++) {
                                c->sample_colors[i] =
                                        qir_MOV(c, ntq_get_src(c, instr->src[0],
                                                               i));
                        }
                        c->msaa_per_sample_output = true;
                } else {
                        assert(instr->num_components == 1);
                        c->outputs[offset] =
                                qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
                        c->num_outputs = MAX2(c->num_outputs, offset + 1);
                }
                break;

        case nir_intrinsic_discard:
                c->discard = qir_uniform_ui(c, ~0);
                break;

        case nir_intrinsic_discard_if:
                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_ui(c, 0);
                c->discard = qir_OR(c, c->discard,
                                    ntq_get_src(c, instr->src[0], 0));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}
static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        fprintf(stderr, "general IF statements not handled.\n");
}
static void
ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
        nir_foreach_instr(block, instr) {
                ntq_emit_instr(c, instr);
        }
}
static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                /* case nir_cf_node_loop: */
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                default:
                        assert(0);
                }
        }
}
static void
ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}
static void
nir_to_qir(struct vc4_compile *c)
{
        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(c->s, function) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}
static const nir_shader_compiler_options nir_options = {
        .lower_fsqrt = true,
        .lower_negate = true,
};
static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
        int *count = (int *) state;
        nir_foreach_instr(block, instr) {
                *count = *count + 1;
        }
        return true;
}
*nir
)
1754 nir_foreach_function(nir
, function
) {
1755 if (!function
->impl
)
1757 nir_foreach_block(function
->impl
, count_nir_instrs_in_block
, &count
);
static struct vc4_compile *
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
               struct vc4_key *key)
{
        struct vc4_compile *c = qir_compile_init();

        c->stage = stage;
        c->shader_state = &key->shader_state->base;
        c->program_id = key->shader_state->program_id;
        c->variant_id = key->shader_state->compiled_variant_count++;

        c->key = key;
        switch (stage) {
        case QSTAGE_FRAG:
                c->fs_key = (struct vc4_fs_key *)key;
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, ~0, 0);
                        c->point_y = emit_fragment_varying(c, ~0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, ~0, 0);
                }
                break;
        case QSTAGE_VERT:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        case QSTAGE_COORD:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        }

        const struct tgsi_token *tokens = key->shader_state->base.tokens;

        if (vc4_debug & VC4_DEBUG_TGSI) {
                fprintf(stderr, "%s prog %d/%d TGSI:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                tgsi_dump(tokens, 0);
        }

        c->s = tgsi_to_nir(tokens, &nir_options);
        nir_opt_global_to_local(c->s);
        nir_convert_to_ssa(c->s);

        if (stage == QSTAGE_FRAG)
                vc4_nir_lower_blend(c);

        if (c->fs_key && c->fs_key->light_twoside)
                nir_lower_two_sided_color(c->s);

        if (stage == QSTAGE_FRAG)
                nir_lower_clip_fs(c->s, c->key->ucp_enables);
        else
                nir_lower_clip_vs(c->s, c->key->ucp_enables);

        vc4_nir_lower_io(c);
        vc4_nir_lower_txf_ms(c);
        nir_lower_idiv(c->s);
        nir_lower_load_const_to_scalar(c->s);

        vc4_optimize_nir(c->s);

        nir_remove_dead_variables(c->s);

        nir_convert_from_ssa(c->s, true);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        count_nir_instrs(c->s));
        }

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_qir(c);

        switch (stage) {
        case QSTAGE_FRAG:
                emit_frag_end(c);
                break;
        case QSTAGE_VERT:
                emit_vert_end(c,
                              vc4->prog.fs->input_slots,
                              vc4->prog.fs->num_inputs);
                break;
        case QSTAGE_COORD:
                emit_coord_end(c);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_optimize(c);
        qir_lower_uniforms(c);

        qir_schedule_instructions(c);

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_reorder_uniforms(c);
        vc4_generate_code(vc4, c);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->qpu_inst_count);
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->num_uniforms);
        }

        ralloc_free(c->s);

        return c;
}
static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->base.tokens = tgsi_dup_tokens(cso->tokens);
        so->program_id = vc4->next_uncompiled_program_id++;

        return so;
}
static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;

        vc4_set_shader_uniform_dirty_flags(shader);
}
static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                bool input_live[c->num_input_slots];

                memset(input_live, 0, sizeof(input_live));
                list_for_each_entry(struct qinst, inst, &c->instructions, link) {
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_slots = ralloc_array(shader,
                                                   struct vc4_varying_slot,
                                                   c->num_input_slots);

                for (int i = 0; i < c->num_input_slots; i++) {
                        struct vc4_varying_slot *slot = &c->input_slots[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (slot->slot == (uint8_t)~0)
                                continue;

                        if (slot->slot == VARYING_SLOT_COL0 ||
                            slot->slot == VARYING_SLOT_COL1 ||
                            slot->slot == VARYING_SLOT_BFC0 ||
                            slot->slot == VARYING_SLOT_BFC1) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_slots[shader->num_inputs] = *slot;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                         c->qpu_inst_count * sizeof(uint64_t));

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }
        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;

                if (sampler->texture->nr_samples > 1) {
                        key->tex[i].msaa_width = sampler->texture->width0;
                        key->tex[i].msaa_height = sampler->texture->height0;
                } else if (sampler_state) {
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_SAMPLE_MASK |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        key->msaa = vc4->rasterizer->base.multisample;
        key->sample_coverage = (vc4->rasterizer->base.multisample &&
                                vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
        key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
        key->sample_alpha_to_one = vc4->blend->alpha_to_one;
        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}
void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}
static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}
static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}
static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        free((void *)so->base.tokens);
        free(so);
}
static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}
void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}
void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}