/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>

#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_lowering.h"
#include "tgsi/tgsi_parse.h"
#include "glsl/nir/nir.h"
#include "glsl/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
#include "vc4_context.h"
#include "vc4_qpu_defines.h"

#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);

static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}

static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = intr->const_index[0];
        struct vc4_compiler_ubo_range *range = NULL;
        unsigned i;
        for (i = 0; i < c->num_uniform_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }

        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(range);
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        offset -= range->src_offset;

        /* Adjust for where we stored the TGSI register base. */
        indirect_offset = qir_ADD(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     offset)));

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
        indirect_offset = qir_MIN(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
        c->num_texture_samples++;
        return qir_TEX_RESULT(c);
}
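
/* The TEX_DIRECT path above issues a general (non-filtered) TMU memory
 * load: the byte offset is added to the UBO base address uniform.
 * Clamping it to [0, dst_offset + size - 4] first means that a wild
 * indirect index can at worst re-read an in-range word of the copied
 * uniform storage rather than stray memory.
 */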

nir_ssa_def *
vc4_nir_get_state_uniform(struct nir_builder *b,
                          enum quniform_contents contents)
{
        nir_intrinsic_instr *intr =
                nir_intrinsic_instr_create(b->shader,
                                           nir_intrinsic_load_uniform);
        intr->const_index[0] = VC4_NIR_STATE_UNIFORM_OFFSET + contents;
        intr->num_components = 1;
        nir_ssa_dest_init(&intr->instr, &intr->dest, 1, NULL);
        nir_builder_instr_insert(b, &intr->instr);
        return &intr->dest.ssa;
}

nir_ssa_def *
vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case UTIL_FORMAT_SWIZZLE_0:
                return nir_imm_float(b, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return nir_imm_float(b, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}

static struct qreg *
ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}

static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
{
        if (dest->is_ssa) {
                struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
                for (int i = 0; i < dest->ssa.num_components; i++)
                        qregs[i] = c->undef;
                return qregs;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                return entry->data;
        }
}

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static struct qreg
get_swizzled_channel(struct vc4_compile *c,
                     struct qreg *srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case UTIL_FORMAT_SWIZZLE_0:
                return qir_uniform_f(c, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return qir_uniform_f(c, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}

static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}

static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}
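
/* The update above is one Newton-Raphson iteration for f(r) = 1/r - x,
 * i.e. r' = r * (2 - x * r), which roughly doubles the number of correct
 * mantissa bits in the SFU's initial estimate.
 */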

static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}
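
/* Same idea for 1/sqrt(x): the Newton-Raphson step for f(r) = 1/r^2 - x
 * is r' = r * (1.5 - 0.5 * x * r * r), which is exactly the expression
 * built above.
 */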

static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c,
                                                     srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL_X_Y_NS(c, low, high);
}
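
/* This is the standard sRGB decode (EOTF):
 *
 *   linear = srgb / 12.92                      if srgb <= 0.04045
 *   linear = ((srgb + 0.055) / 1.055)^2.4      otherwise
 *
 * Both branches are computed unconditionally and the SEL picks one based
 * on the sign flags of (srgb - 0.04045), since there is no control flow
 * at this level.
 */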

static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}
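
/* The QPU multiplier only handles 24x24-bit products, so the full 32-bit
 * multiply is assembled from partial products.  Writing each operand as
 * (hi << 24) + lo with lo < 2^24, then modulo 2^32:
 *
 *   a * b = lo_a * lo_b + ((hi_a * lo_b + lo_a * hi_b) << 24)
 *
 * The hi_a * hi_b term is shifted left by 48 bits and vanishes mod 2^32,
 * and MUL24 only sees the low 24 bits of each operand, which is exactly
 * what the identity requires.
 */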

static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        struct qreg s, t, r, lod, proj, compare;
        bool is_txb = false, is_txl = false, has_proj = false;
        unsigned unit = instr->sampler_index;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparitor:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                case nir_tex_src_projector:
                        proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
                        s = qir_FMUL(c, s, proj);
                        t = qir_FMUL(c, t, proj);
                        has_proj = true;
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
                struct qreg rcp_ma = qir_RCP(c, ma);
                s = qir_FMUL(c, s, rcp_ma);
                t = qir_FMUL(c, t, rcp_ma);
                r = qir_FMUL(c, r, rcp_ma);

                qir_TEX_R(c, r, texture_u[next_texture_u++]);
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
                          texture_u[next_texture_u++]);
        }

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        qir_TEX_T(c, t, texture_u[next_texture_u++]);

        if (is_txl || is_txb)
                qir_TEX_B(c, lod, texture_u[next_texture_u++]);

        qir_TEX_S(c, s, texture_u[next_texture_u++]);

        c->num_texture_samples++;
        struct qreg tex = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg unpacked[4];
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg depthf = qir_ITOF(c, qir_SHR(c, tex,
                                                         qir_uniform_ui(c, 8)));
                struct qreg normalized = qir_FMUL(c, depthf,
                                                  qir_uniform_f(c, 1.0f/0xffffff));

                struct qreg depth_output;

                struct qreg one = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        if (has_proj)
                                compare = qir_FMUL(c, compare, proj);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = one;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_ZS(c, one);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_ZC(c, one);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_NC(c, one);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL_X_0_NS(c, one);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_NS(c, one);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL_X_0_NC(c, one);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                for (int i = 0; i < 4; i++)
                        unpacked[i] = depth_output;
        } else {
                for (int i = 0; i < 4; i++)
                        unpacked[i] = qir_UNPACK_8_F(c, tex, i);
        }

        const uint8_t *format_swiz = vc4_get_format_swizzle(format);
        struct qreg texture_output[4];
        for (int i = 0; i < 4; i++) {
                texture_output[i] = get_swizzled_channel(c, unpacked,
                                                         format_swiz[i]);
        }

        if (util_format_is_srgb(format)) {
                for (int i = 0; i < 3; i++)
                        texture_output[i] = qir_srgb_decode(c,
                                                            texture_output[i]);
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        for (int i = 0; i < 4; i++) {
                dest[i] = get_swizzled_channel(c, texture_output,
                                               c->key->tex[unit].swizzle[i]);
        }
}

/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);
        return qir_SEL_X_Y_NS(c,
                              qir_FADD(c, diff, qir_uniform_f(c, 1.0)),
                              diff);
}
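
/* For negative non-integer inputs, truncation lands one above floor(x),
 * so diff = src - trunc comes out negative and the SEL on the negative
 * flag adds 1.0 to bring the result back into [0, 1).  E.g. for
 * src = -1.25: trunc = -1.0, diff = -0.25, result = 0.75.
 */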

/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, trunc));

        return qir_SEL_X_Y_NS(c,
                              qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)),
                              trunc);
}

/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src));

        return qir_SEL_X_Y_NS(c,
                              qir_FADD(c, trunc, qir_uniform_f(c, 1.0)),
                              trunc);
}
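
/* ffloor and fceil share one pattern: compute trunc(x), use the sign
 * flags to test which side of x it landed on, and select either trunc
 * itself or trunc -/+ 1.0.  Only the FSUB operand order and the final
 * adjustment direction differ between the two.
 */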

static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -2.0 * M_PI,
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c,
                         src,
                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));

        struct qreg x = qir_FADD(c,
                                 ntq_ffract(c, scaled_x),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}
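
/* sin() is evaluated as an odd-power Taylor polynomial around the middle
 * of the period: the argument is scaled to units of periods, reduced to
 * [-0.5, 0.5) via ffract, and the negated leading coefficient folds the
 * resulting half-period phase shift back in (sin(2*pi*x + pi) =
 * -sin(2*pi*x)).  Centering keeps the worst-case polynomial argument at
 * half a period.
 */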

static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -1.0f,
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c, src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      ntq_ffract(c, scaled_x),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i]));
                sum = qir_FADD(c, sum, mul);
        }
        return sum;
}
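
/* cos() uses the same period reduction but only even powers, with
 * coeff[0] = -1 absorbing the half-period phase shift
 * (cos(2*pi*x + pi) = -cos(2*pi*x)).  The running "x" register is
 * stepped through x^2, x^4, ... at one multiply per term.
 */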

static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        qir_SF(c, src);
        return qir_SEL_X_Y_NC(c,
                              qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0)),
                              qir_uniform_f(c, -1.0));
}

static struct qreg
get_channel_from_vpm(struct vc4_compile *c,
                     struct qreg *vpm_reads,
                     uint8_t swiz,
                     const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        struct qreg temp;

        if (swiz > UTIL_FORMAT_SWIZZLE_W)
                return get_swizzled_channel(c, vpm_reads, swiz);
        else if (chan->size == 32 &&
                 chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return get_swizzled_channel(c, vpm_reads, swiz);
        } else if (chan->size == 32 &&
                   chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return qir_FMUL(c,
                                        qir_ITOF(c, vpm_reads[swiz]),
                                        qir_uniform_f(c,
                                                      1.0 / 0x7fffffff));
                } else {
                        return qir_ITOF(c, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                struct qreg vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = qir_XOR(c, vpm, qir_uniform_ui(c, 0x80808080));
                        if (chan->normalized) {
                                return qir_FSUB(c, qir_FMUL(c,
                                                            qir_UNPACK_8_F(c, temp, swiz),
                                                            qir_uniform_f(c, 2.0)),
                                                qir_uniform_f(c, 1.0));
                        } else {
                                return qir_FADD(c,
                                                qir_ITOF(c,
                                                         qir_UNPACK_8_I(c, temp,
                                                                        swiz)),
                                                qir_uniform_f(c, -128.0));
                        }
                } else {
                        if (chan->normalized) {
                                return qir_UNPACK_8_F(c, vpm, swiz);
                        } else {
                                return qir_ITOF(c, qir_UNPACK_8_I(c, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                struct qreg vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = qir_ITOF(c, qir_UNPACK_16_I(c, vpm, swiz % 2));
                        if (chan->normalized) {
                                return qir_FMUL(c, temp,
                                                qir_uniform_f(c, 1/32768.0f));
                        } else {
                                return temp;
                        }
                } else {
                        /* UNPACK_16I sign-extends, so we have to emit ANDs. */
                        temp = vpm;
                        if (swiz == 1 || swiz == 3)
                                temp = qir_UNPACK_16_I(c, temp, 1);
                        temp = qir_AND(c, temp, qir_uniform_ui(c, 0xffff));
                        temp = qir_ITOF(c, temp);

                        if (chan->normalized) {
                                return qir_FMUL(c, temp,
                                                qir_uniform_f(c, 1 / 65535.0));
                        } else {
                                return temp;
                        }
                }
        } else {
                return c->undef;
        }
}

static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);
        struct qreg vpm_reads[4];

        c->vattr_sizes[attr] = align(attr_size, 4);
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                struct qreg vpm = { QFILE_VPM, attr * 4 + i };
                vpm_reads[i] = qir_MOV(c, vpm);
                c->num_inputs++;
        }

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        for (int i = 0; i < 4; i++) {
                uint8_t swiz = desc->swizzle[i];
                struct qreg result = get_channel_from_vpm(c, vpm_reads,
                                                          swiz, desc);

                if (result.file == QFILE_NULL) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        result = qir_uniform_f(c, 0.0);
                }
                c->inputs[attr * 4 + i] = result;
        }
}

static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
        c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}

static struct qreg
emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
                      uint8_t swizzle)
{
        uint32_t i = c->num_input_slots++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        if (c->num_input_slots >= c->input_slots_array_size) {
                c->input_slots_array_size =
                        MAX2(4, c->input_slots_array_size * 2);

                c->input_slots = reralloc(c, c->input_slots,
                                          struct vc4_varying_slot,
                                          c->input_slots_array_size);
        }

        c->input_slots[i].slot = slot;
        c->input_slots[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}
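
/* Varyings are delivered by the hardware as the per-fragment A
 * coefficient of the varying's plane equation; multiplying by W and
 * using the "add C" form of the ALU op produces the perspective-correct
 * interpolated value (A * W + C) in a single instruction.
 */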

static void
emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
{
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c, slot, i);
                c->num_inputs++;
        }
}

static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct vc4_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset].slot = slot;
        c->output_slots[decl_offset].swizzle = swizzle;
}

static void
declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_uniform_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct vc4_compiler_ubo_range,
                                         c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_ranges[array_id].used = false;
}

static bool
ntq_src_is_only_ssa_def_user(nir_src *src)
{
        if (!src->is_ssa)
                return false;

        if (!list_empty(&src->ssa->if_uses))
                return false;

        return (src->ssa->uses.next == &src->use_link &&
                src->ssa->uses.next->next == &src->ssa->uses);
}
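
/* The use list is a circular doubly-linked list headed at ssa->uses, so
 * "this src is the only user" means the head links to our use_link and
 * our link points straight back at the head.
 */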

/**
 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
 * flag set.
 *
 * However, as an optimization, it tries to find the instructions generating
 * the sources to be packed and just emit the pack flag there, if possible.
 */
static void
ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
{
        struct qreg result = qir_get_temp(c);
        struct nir_alu_instr *vec4 = NULL;

        /* If packing from a vec4 op (as expected), identify it so that we can
         * peek back at what generated its sources.
         */
        if (instr->src[0].src.is_ssa &&
            instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
            nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
            nir_op_vec4) {
                vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        }

        for (int i = 0; i < 4; i++) {
                int swiz = instr->src[0].swizzle[i];
                struct qreg src;
                if (vec4) {
                        src = ntq_get_src(c, vec4->src[swiz].src,
                                          vec4->src[swiz].swizzle[0]);
                } else {
                        src = ntq_get_src(c, instr->src[0].src, swiz);
                }

                if (vec4 &&
                    ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
                    src.file == QFILE_TEMP &&
                    c->defs[src.index] &&
                    qir_is_mul(c->defs[src.index]) &&
                    !c->defs[src.index]->dst.pack) {
                        struct qinst *rewrite = c->defs[src.index];
                        c->defs[src.index] = NULL;
                        rewrite->dst = result;
                        rewrite->dst.pack = QPU_PACK_MUL_8A + i;
                } else {
                        qir_PACK_8_F(c, result, src, i);
                }
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        *dest = result;
}
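
/* The rewrite path leans on the QPU MUL unit's byte pack modes
 * (QPU_PACK_MUL_8A..8D each write one saturated unorm8 byte), so a
 * multiply feeding the pack can deposit its result directly into the
 * right byte of "result" instead of costing a separate PACK_8_F MOV.
 */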

static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        dest[i] = srcs[i];
                return;
        }

        if (instr->op == nir_op_pack_unorm_4x8) {
                ntq_emit_pack_unorm_4x8(c, instr);
                return;
        }

        if (instr->op == nir_op_unpack_unorm_4x8) {
                struct qreg src = ntq_get_src(c, instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < 4; i++) {
                        if (instr->dest.write_mask & (1 << i))
                                dest[i] = qir_UNPACK_8_F(c, src, i);
                }
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        /* Pick the channel to store the output in. */
        assert(!instr->dest.saturate);
        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        assert(util_is_power_of_two(instr->dest.write_mask));
        dest += ffs(instr->dest.write_mask) - 1;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                *dest = qir_MOV(c, src[0]);
                break;
        case nir_op_fmul:
                *dest = qir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                *dest = qir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                *dest = qir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                *dest = qir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                *dest = qir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i:
        case nir_op_f2u:
                *dest = qir_FTOI(c, src[0]);
                break;
        case nir_op_i2f:
        case nir_op_u2f:
                *dest = qir_ITOF(c, src[0]);
                break;
        case nir_op_b2f:
                *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                qir_SF(c, src[0]);
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
                break;

        case nir_op_iadd:
                *dest = qir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                *dest = qir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                *dest = qir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                *dest = qir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                *dest = qir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                *dest = qir_MIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                *dest = qir_MAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                *dest = qir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                *dest = qir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                *dest = qir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                *dest = qir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                *dest = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZS(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_sne:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_sge:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NC(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_slt:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NS(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_feq:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_fne:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_fge:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_flt:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ieq:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ine:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ige:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ilt:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));
                break;

        case nir_op_bcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL_X_Y_NS(c, src[1], src[2]);
                break;
        case nir_op_fcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL_X_Y_ZC(c, src[1], src[2]);
                break;

        case nir_op_frcp:
                *dest = ntq_rcp(c, src[0]);
                break;
        case nir_op_frsq:
                *dest = ntq_rsq(c, src[0]);
                break;
        case nir_op_fexp2:
                *dest = qir_EXP2(c, src[0]);
                break;
        case nir_op_flog2:
                *dest = qir_LOG2(c, src[0]);
                break;

        case nir_op_ftrunc:
                *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
                break;
        case nir_op_fceil:
                *dest = ntq_fceil(c, src[0]);
                break;
        case nir_op_ffract:
                *dest = ntq_ffract(c, src[0]);
                break;
        case nir_op_ffloor:
                *dest = ntq_ffloor(c, src[0]);
                break;

        case nir_op_fsin:
                *dest = ntq_fsin(c, src[0]);
                break;
        case nir_op_fcos:
                *dest = ntq_fcos(c, src[0]);
                break;

        case nir_op_fsign:
                *dest = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                *dest = qir_FMAXABS(c, src[0], src[0]);
                break;
        case nir_op_iabs:
                *dest = qir_MAX(c, src[0],
                                qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
clip_distance_discard(struct vc4_compile *c)
{
        for (int i = 0; i < PIPE_MAX_CLIP_PLANES; i++) {
                if (!(c->key->ucp_enables & (1 << i)))
                        continue;

                struct qreg dist =
                        emit_fragment_varying(c,
                                              VARYING_SLOT_CLIP_DIST0 + (i / 4),
                                              i % 4);

                qir_SF(c, dist);

                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_ui(c, 0);

                c->discard = qir_SEL_X_Y_NS(c, qir_uniform_ui(c, ~0),
                                            c->discard);
        }
}

static void
emit_frag_end(struct vc4_compile *c)
{
        clip_distance_discard(c);

        struct qreg color;
        if (c->output_color_index != -1) {
                color = c->outputs[c->output_color_index];
        } else {
                color = qir_uniform_ui(c, 0);
        }

        if (c->discard.file != QFILE_NULL)
                qir_TLB_DISCARD_SETUP(c, c->discard);

        if (c->fs_key->stencil_enabled) {
                qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        if (c->fs_key->depth_enabled) {
                struct qreg z;
                if (c->output_position_index != -1) {
                        z = qir_FTOI(c, qir_FMUL(c, c->outputs[c->output_position_index + 2],
                                                 qir_uniform_f(c, 0xffffff)));
                } else {
                        z = qir_FRAG_Z(c);
                }
                qir_TLB_Z_WRITE(c, z);
        }

        qir_TLB_COLOR_WRITE(c, color);
}

static void
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg packed = qir_get_temp(c);

        for (int i = 0; i < 2; i++) {
                struct qreg scale =
                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);

                struct qreg packed_chan = packed;
                packed_chan.pack = QPU_PACK_A_16A + i;

                qir_FTOI_dest(c, packed_chan,
                              qir_FMUL(c,
                                       qir_FMUL(c,
                                                c->outputs[c->output_position_index + i],
                                                scale),
                                       rcp_w));
        }

        qir_VPM_WRITE(c, packed);
}
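
/* The two FTOI results are packed into the low and high halves of a
 * single register via the A-regfile 16a/16b pack modes: the hardware's
 * shaded vertex format wants screen X and Y as a pair of 16-bit
 * fixed-point values sharing one 32-bit VPM word.
 */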

static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],
                                                          zscale),
                                              rcp_w),
                                  zoffset));
}

static void
emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
{
        qir_VPM_WRITE(c, rcp_w);
}

static void
emit_point_size_write(struct vc4_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index + 3];
        else
                point_size = qir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));

        qir_VPM_WRITE(c, point_size);
}

/**
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator also
 * insists that all vertex attributes loaded get read by the VS/CS, so we have
 * to consume it here.
 */
static void
emit_stub_vpm_read(struct vc4_compile *c)
{
        if (c->num_inputs)
                return;

        c->vattr_sizes[0] = 4;
        struct qreg vpm = { QFILE_VPM, 0 };
        (void)qir_MOV(c, vpm);
        c->num_inputs++;
}

static void
emit_ucp_clipdistance(struct vc4_compile *c)
{
        unsigned cv;
        if (c->output_clipvertex_index != -1)
                cv = c->output_clipvertex_index;
        else if (c->output_position_index != -1)
                cv = c->output_position_index;
        else
                return;

        for (int plane = 0; plane < PIPE_MAX_CLIP_PLANES; plane++) {
                if (!(c->key->ucp_enables & (1 << plane)))
                        continue;

                /* Pick the next outputs[] that hasn't been written to, since
                 * there are no other program writes left to be processed at
                 * this point.  If something had been declared but not written
                 * (like a w component), we'll just smash over the top of it.
                 */
                uint32_t output_index = c->num_outputs++;
                add_output(c, output_index,
                           VARYING_SLOT_CLIP_DIST0 + plane / 4,
                           plane % 4);

                struct qreg dist = qir_uniform_f(c, 0.0);
                for (int i = 0; i < 4; i++) {
                        struct qreg pos_chan = c->outputs[cv + i];
                        struct qreg ucp =
                                qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                            plane * 4 + i);
                        dist = qir_FADD(c, dist, qir_FMUL(c, pos_chan, ucp));
                }

                c->outputs[output_index] = dist;
        }
}

static void
emit_vert_end(struct vc4_compile *c,
              struct vc4_varying_slot *fs_inputs,
              uint32_t num_fs_inputs)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);
        emit_ucp_clipdistance(c);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);

        for (int i = 0; i < num_fs_inputs; i++) {
                struct vc4_varying_slot *input = &fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct vc4_varying_slot *output =
                                &c->output_slots[j];

                        if (input->slot == output->slot &&
                            input->swizzle == output->swizzle) {
                                qir_VPM_WRITE(c, c->outputs[j]);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
        }
}

static void
emit_coord_end(struct vc4_compile *c)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
}

static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                nir_lower_vars_to_ssa(s);
                nir_lower_alu_to_scalar(s);

                progress = nir_copy_prop(s) || progress;
                progress = nir_opt_dce(s) || progress;
                progress = nir_opt_cse(s) || progress;
                progress = nir_opt_peephole_select(s) || progress;
                progress = nir_opt_algebraic(s) || progress;
                progress = nir_opt_constant_folding(s) || progress;
                progress = nir_opt_undef(s) || progress;
        } while (progress);
}
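
/* The pass loop runs to a fixed point: each pass reports whether it made
 * progress, and the whole pipeline is re-run whenever any pass changed
 * the shader, since e.g. constant folding can expose new copy
 * propagation and DCE opportunities.
 */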

static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}

static void
ntq_setup_inputs(struct vc4_compile *c)
{
        unsigned num_entries = 0;
        foreach_list_typed(nir_variable, var, node, &c->s->inputs)
                num_entries++;

        nir_variable *vars[num_entries];

        unsigned i = 0;
        foreach_list_typed(nir_variable, var, node, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->stage == QSTAGE_FRAG) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_FACE) {
                                c->inputs[loc * 4 + 0] = qir_FRAG_REV_FLAG(c);
                        } else if (var->data.location >= VARYING_SLOT_VAR0 &&
                                   (c->fs_key->point_sprite_mask &
                                    (1 << (var->data.location -
                                           VARYING_SLOT_VAR0)))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var->data.location);
                        }
                } else {
                        emit_vertex_input(c, loc);
                }
        }
}

static void
ntq_setup_outputs(struct vc4_compile *c)
{
        foreach_list_typed(nir_variable, var, node, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);

                for (int i = 0; i < 4; i++)
                        add_output(c, loc + i, var->data.location, i);

                if (c->stage == QSTAGE_FRAG) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                        case FRAG_RESULT_DATA0:
                                c->output_color_index = loc;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_CLIP_VERTEX:
                                c->output_clipvertex_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}

static void
ntq_setup_uniforms(struct vc4_compile *c)
{
        foreach_list_typed(nir_variable, var, node, &c->s->uniforms) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned array_elem_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * array_elem_size,
                                      array_len * array_elem_size);
        }
}

/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = qir_uniform_ui(c, 0);
        }
}

static void
ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, instr->value.u[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}

static void
ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* QIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers().
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, 0);
}

static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
        struct qreg *dest = NULL;

        if (info->has_dest) {
                dest = ntq_get_dest(c, &instr->dest);
        }

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                if (instr->const_index[0] < VC4_NIR_STATE_UNIFORM_OFFSET) {
                        *dest = qir_uniform(c, QUNIFORM_UNIFORM,
                                            instr->const_index[0]);
                } else {
                        *dest = qir_uniform(c, instr->const_index[0] -
                                            VC4_NIR_STATE_UNIFORM_OFFSET,
                                            0);
                }
                break;

        case nir_intrinsic_load_uniform_indirect:
                *dest = indirect_uniform_load(c, instr);
                break;

        case nir_intrinsic_load_input:
                assert(instr->num_components == 1);
                if (instr->const_index[0] == VC4_NIR_TLB_COLOR_READ_INPUT) {
                        *dest = qir_TLB_COLOR_READ(c);
                } else {
                        *dest = c->inputs[instr->const_index[0]];
                }
                break;

        case nir_intrinsic_store_output:
                assert(instr->num_components == 1);
                c->outputs[instr->const_index[0]] =
                        qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
                c->num_outputs = MAX2(c->num_outputs, instr->const_index[0] + 1);
                break;

        case nir_intrinsic_discard:
                c->discard = qir_uniform_ui(c, ~0);
                break;

        case nir_intrinsic_discard_if:
                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_ui(c, 0);
                c->discard = qir_OR(c, c->discard,
                                    ntq_get_src(c, instr->src[0], 0));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        fprintf(stderr, "general IF statements not handled.\n");
}

static void
ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
        nir_foreach_instr(block, instr) {
                ntq_emit_instr(c, instr);
        }
}

static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                /* case nir_cf_node_loop: */
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                default:
                        assert(0);
                }
        }
}

static void
ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}

static void
nir_to_qir(struct vc4_compile *c)
{
        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_overload(c->s, overload) {
                assert(strcmp(overload->function->name, "main") == 0);
                assert(overload->impl);
                ntq_emit_impl(c, overload->impl);
        }
}

static const nir_shader_compiler_options nir_options = {
        .lower_ffma = true,
        .lower_flrp = true,
        .lower_fpow = true,
        .lower_fsat = true,
        .lower_fsqrt = true,
        .lower_negate = true,
};

static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
        int *count = (int *) state;
        nir_foreach_instr(block, instr) {
                *count = *count + 1;
        }
        return true;
}

static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_overload(nir, overload) {
                if (!overload->impl)
                        continue;
                nir_foreach_block(overload->impl, count_nir_instrs_in_block, &count);
        }
        return count;
}

static struct vc4_compile *
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
               struct vc4_key *key)
{
        struct vc4_compile *c = qir_compile_init();

        c->stage = stage;
        c->shader_state = &key->shader_state->base;
        c->program_id = key->shader_state->program_id;
        c->variant_id = key->shader_state->compiled_variant_count++;

        c->key = key;
        switch (stage) {
        case QSTAGE_FRAG:
                c->fs_key = (struct vc4_fs_key *)key;
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, ~0, 0);
                        c->point_y = emit_fragment_varying(c, ~0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, ~0, 0);
                }
                break;
        case QSTAGE_VERT:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        case QSTAGE_COORD:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        }

        const struct tgsi_token *tokens = key->shader_state->base.tokens;
        if (c->fs_key && c->fs_key->light_twoside) {
                if (!key->shader_state->twoside_tokens) {
                        const struct tgsi_lowering_config lowering_config = {
                                .color_two_side = true,
                        };
                        struct tgsi_shader_info info;
                        key->shader_state->twoside_tokens =
                                tgsi_transform_lowering(&lowering_config,
                                                        key->shader_state->base.tokens,
                                                        &info);

                        /* If no transformation occurred, then NULL is
                         * returned and we just use our original tokens.
                         */
                        if (!key->shader_state->twoside_tokens) {
                                key->shader_state->twoside_tokens =
                                        key->shader_state->base.tokens;
                        }
                }

                tokens = key->shader_state->twoside_tokens;
        }

        if (vc4_debug & VC4_DEBUG_TGSI) {
                fprintf(stderr, "%s prog %d/%d TGSI:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                tgsi_dump(tokens, 0);
        }

        c->s = tgsi_to_nir(tokens, &nir_options);
        nir_opt_global_to_local(c->s);
        nir_convert_to_ssa(c->s);
        if (stage == QSTAGE_FRAG)
                vc4_nir_lower_blend(c);
        vc4_nir_lower_io(c);
        nir_lower_idiv(c->s);
        nir_lower_load_const_to_scalar(c->s);

        vc4_optimize_nir(c->s);

        nir_remove_dead_variables(c->s);

        nir_convert_from_ssa(c->s, true);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        count_nir_instrs(c->s));
        }

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_qir(c);

        switch (stage) {
        case QSTAGE_FRAG:
                emit_frag_end(c);
                break;
        case QSTAGE_VERT:
                emit_vert_end(c,
                              vc4->prog.fs->input_slots,
                              vc4->prog.fs->num_inputs);
                break;
        case QSTAGE_COORD:
                emit_coord_end(c);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_optimize(c);
        qir_lower_uniforms(c);

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_reorder_uniforms(c);
        vc4_generate_code(vc4, c);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->qpu_inst_count);
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->num_uniforms);
        }

        return c;
}

static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->base.tokens = tgsi_dup_tokens(cso->tokens);
        so->program_id = vc4->next_uncompiled_program_id++;

        return so;
}

static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;

        vc4_set_shader_uniform_dirty_flags(shader);
}

static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                bool input_live[c->num_input_slots];

                memset(input_live, 0, sizeof(input_live));
                list_for_each_entry(struct qinst, inst, &c->instructions, link) {
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_slots = ralloc_array(shader,
                                                   struct vc4_varying_slot,
                                                   c->num_input_slots);

                for (int i = 0; i < c->num_input_slots; i++) {
                        struct vc4_varying_slot *slot = &c->input_slots[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (slot->slot == (uint8_t)~0)
                                continue;

                        if (slot->slot == VARYING_SLOT_COL0 ||
                            slot->slot == VARYING_SLOT_COL1 ||
                            slot->slot == VARYING_SLOT_BFC0 ||
                            slot->slot == VARYING_SLOT_BFC1) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_slots[shader->num_inputs] = *slot;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                         c->qpu_inst_count * sizeof(uint64_t));

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }
        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}

static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;
                key->tex[i].compare_mode = sampler_state->compare_mode;
                key->tex[i].compare_func = sampler_state->compare_func;
                key->tex[i].wrap_s = sampler_state->wrap_s;
                key->tex[i].wrap_t = sampler_state->wrap_t;
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}

static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}

static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}

void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}

static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}

static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        if (so->twoside_tokens != so->base.tokens)
                free((void *)so->twoside_tokens);
        free((void *)so->base.tokens);
        free(so);
}

static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}

void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}

void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}