/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_lowering.h"
#include "tgsi/tgsi_parse.h"
#include "nir/tgsi_to_nir.h"

#include "vc4_context.h"

#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif

static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}

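/*
 * Loads a uniform value at a dynamically computed offset.
 *
 * The QPU uniform stream only supports a fixed, in-order sequence of reads,
 * so indirectly addressed uniform data can't come from it.  Instead, the
 * declared uniform ranges get copied out to a buffer, and the load is done
 * here through the TMU as a direct memory lookup (the same path texture
 * fetches take, which is why this counts as a texture sample).
 */
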
static struct qreg
indirect_uniform_load(struct vc4_compile *c,
                      struct qreg indirect_offset,
                      unsigned offset)
{
        struct vc4_compiler_ubo_range *range = NULL;
        unsigned i;
        for (i = 0; i < c->num_uniform_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }
        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(range);
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        offset -= range->src_offset;
        /* Translate the user's TGSI register index from the TGSI register
         * base to a byte offset.
         */
        indirect_offset = qir_SHL(c, indirect_offset, qir_uniform_ui(c, 4));

        /* Adjust for where we stored the TGSI register base. */
        indirect_offset = qir_ADD(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     offset)));

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
        indirect_offset = qir_MIN(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
        struct qreg r4 = qir_TEX_RESULT(c);
        c->num_texture_samples++;
        return qir_MOV(c, r4);
}

static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest dest)
{
        assert(!dest.is_ssa);
        nir_register *reg = dest.reg.reg;
        struct hash_entry *entry = _mesa_hash_table_search(c->def_ht, reg);
        assert(reg->num_array_elems == 0);
        assert(dest.reg.base_offset == 0);

        struct qreg *qregs = entry->data;
        return qregs;
}

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static struct qreg
get_swizzled_channel(struct vc4_compile *c,
                     struct qreg *srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case UTIL_FORMAT_SWIZZLE_0:
                return qir_uniform_f(c, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return qir_uniform_f(c, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}

static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}

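/*
 * The hardware's reciprocal instruction only produces a rough estimate, so
 * ntq_rcp() refines it with one Newton-Raphson step: for f(r) = 1/r - x the
 * update is r' = r * (2 - x * r), which roughly doubles the number of
 * correct bits per iteration.
 */
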
static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}

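/*
 * Same approach as ntq_rcp() for the reciprocal square root: the
 * Newton-Raphson update for f(r) = 1/r^2 - x is
 * r' = r * (1.5 - 0.5 * x * r * r), which is where the 1.5 and 0.5
 * constants below come from.
 */
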
static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}

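/*
 * The two helpers below implement the standard sRGB transfer functions:
 *
 *   decode: linear = srgb <= 0.04045 ? srgb / 12.92
 *                                    : ((srgb + 0.055) / 1.055)^2.4
 *   encode: srgb = linear <= 0.0031308 ? linear * 12.92
 *                                      : 1.055 * linear^(1/2.4) - 0.055
 *
 * QIR has no branching, so both sides are computed and SEL_X_Y_NS picks one
 * based on the sign flag from the threshold subtraction.
 */
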
static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c,
                                                     srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL_X_Y_NS(c, low, high);
}

static struct qreg
qir_srgb_encode(struct vc4_compile *c, struct qreg linear)
{
        struct qreg low = qir_FMUL(c, linear, qir_uniform_f(c, 12.92));
        struct qreg high = qir_FSUB(c,
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 1.055),
                                             qir_POW(c,
                                                     linear,
                                                     qir_uniform_f(c, 0.41666))),
                                    qir_uniform_f(c, 0.055));

        qir_SF(c, qir_FSUB(c, linear, qir_uniform_f(c, 0.0031308)));
        return qir_SEL_X_Y_NS(c, low, high);
}

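/*
 * 32-bit integer multiply built from the QPU's 24x24-bit multiplier.
 * Splitting each operand as x = x_lo + (x_hi << 24):
 *
 *   a * b = a_lo * b_lo + ((a_hi * b_lo + a_lo * b_hi) << 24)   (mod 2^32)
 *
 * The (a_hi * b_hi) << 48 term vanishes mod 2^32, and MUL24 only looks at
 * the low 24 bits of its arguments anyway, so the unsplit sources can be
 * passed wherever a _lo term is needed.
 */
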
static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}

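/*
 * Emits a texture sample.  The TMU is programmed by writing the coordinate
 * registers, each write paired with a uniform carrying the sampler config;
 * the write of the S coordinate is what actually kicks off the lookup, so it
 * has to come last below, with the result showing up in r4.
 */
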
static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        struct qreg s, t, r, lod, proj, compare;
        bool is_txb = false, is_txl = false, has_proj = false;
        unsigned unit = instr->sampler_index;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparitor:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                case nir_tex_src_projector:
                        proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
                        s = qir_FMUL(c, s, proj);
                        t = qir_FMUL(c, t, proj);
                        has_proj = true;
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
                struct qreg rcp_ma = qir_RCP(c, ma);
                s = qir_FMUL(c, s, rcp_ma);
                t = qir_FMUL(c, t, rcp_ma);
                r = qir_FMUL(c, r, rcp_ma);

                qir_TEX_R(c, r, texture_u[next_texture_u++]);
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
                          texture_u[next_texture_u++]);
        }

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        qir_TEX_T(c, t, texture_u[next_texture_u++]);

        if (is_txl || is_txb)
                qir_TEX_B(c, lod, texture_u[next_texture_u++]);

        qir_TEX_S(c, s, texture_u[next_texture_u++]);

        c->num_texture_samples++;
        struct qreg r4 = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg unpacked[4];
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg depthf = qir_ITOF(c, qir_SHR(c, r4,
                                                         qir_uniform_ui(c, 8)));
                struct qreg normalized = qir_FMUL(c, depthf,
                                                  qir_uniform_f(c, 1.0f/0xffffff));

                struct qreg depth_output;

                struct qreg one = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        if (has_proj)
                                compare = qir_FMUL(c, compare, proj);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = one;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_ZS(c, one);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_ZC(c, one);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_NC(c, one);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL_X_0_NS(c, one);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_NS(c, one);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL_X_0_NC(c, one);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                for (int i = 0; i < 4; i++)
                        unpacked[i] = depth_output;
        } else {
                for (int i = 0; i < 4; i++)
                        unpacked[i] = qir_R4_UNPACK(c, r4, i);
        }

        const uint8_t *format_swiz = vc4_get_format_swizzle(format);
        struct qreg texture_output[4];
        for (int i = 0; i < 4; i++) {
                texture_output[i] = get_swizzled_channel(c, unpacked,
                                                         format_swiz[i]);
        }

        if (util_format_is_srgb(format)) {
                for (int i = 0; i < 3; i++)
                        texture_output[i] = qir_srgb_decode(c,
                                                            texture_output[i]);
        }

        struct qreg *dest = ntq_get_dest(c, instr->dest);
        for (int i = 0; i < 4; i++) {
                dest[i] = get_swizzled_channel(c, texture_output,
                                               c->key->tex[unit].swizzle[i]);
        }
}

/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);
        return qir_SEL_X_Y_NS(c,
                              qir_FADD(c, diff, qir_uniform_f(c, 1.0)),
                              diff);
}

/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, trunc));

        return qir_SEL_X_Y_NS(c,
                              qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)),
                              trunc);
}

/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src));

        return qir_SEL_X_Y_NS(c,
                              qir_FADD(c, trunc, qir_uniform_f(c, 1.0)),
                              trunc);
}

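/*
 * sin(x) via a Taylor-style polynomial.  The input is scaled by 1/(2*pi)
 * and range-reduced with ffract to [-0.5, 0.5], so the polynomial with
 * coefficients (2*pi)^n / n! is only ever evaluated near the origin, where
 * it converges quickly; the half-period shift from the -0.5 flips the sign,
 * which the negated coefficients fold back in.  ntq_fcos() below is the
 * same construction using the even-power terms.
 */
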
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -2.0 * M_PI,
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c,
                         src,
                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));

        struct qreg x = qir_FADD(c,
                                 ntq_ffract(c, scaled_x),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}

static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -1.0f,
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c, src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      ntq_ffract(c, scaled_x),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i]));
                if (i == 0)
                        sum = mul;
                else
                        sum = qir_FADD(c, sum, mul);
        }
        return sum;
}

static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        qir_SF(c, src);
        return qir_SEL_X_Y_NC(c,
                              qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0)),
                              qir_uniform_f(c, -1.0));
}

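/*
 * Converts one channel of a vertex attribute to a float, given the raw
 * 32-bit words read from the VPM and the format's channel description
 * (float, signed/unsigned 8/16/32-bit, normalized or not).  Unsupported
 * formats return c->undef, which emit_vertex_input() below reports and
 * replaces with 0.0.
 */
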
static struct qreg
get_channel_from_vpm(struct vc4_compile *c,
                     struct qreg *vpm_reads,
                     uint8_t swiz,
                     const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        struct qreg temp;

        if (swiz > UTIL_FORMAT_SWIZZLE_W)
                return get_swizzled_channel(c, vpm_reads, swiz);
        else if (chan->size == 32 &&
                 chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return get_swizzled_channel(c, vpm_reads, swiz);
        } else if (chan->size == 32 &&
                   chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return qir_FMUL(c,
                                        qir_ITOF(c, vpm_reads[swiz]),
                                        qir_uniform_f(c,
                                                      1.0 / 0x7fffffff));
                } else {
                        return qir_ITOF(c, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                struct qreg vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = qir_XOR(c, vpm, qir_uniform_ui(c, 0x80808080));
                        if (chan->normalized) {
                                return qir_FSUB(c, qir_FMUL(c,
                                                            qir_UNPACK_8_F(c, temp, swiz),
                                                            qir_uniform_f(c, 2.0)),
                                                qir_uniform_f(c, 1.0));
                        } else {
                                return qir_FADD(c,
                                                qir_ITOF(c,
                                                         qir_UNPACK_8_I(c, temp,
                                                                        swiz)),
                                                qir_uniform_f(c, -128.0));
                        }
                } else {
                        if (chan->normalized) {
                                return qir_UNPACK_8_F(c, vpm, swiz);
                        } else {
                                return qir_ITOF(c, qir_UNPACK_8_I(c, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                struct qreg vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = qir_ITOF(c, qir_UNPACK_16_I(c, vpm, swiz % 2));
                        if (chan->normalized) {
                                return qir_FMUL(c, temp,
                                                qir_uniform_f(c, 1/32768.0f));
                        } else {
                                return temp;
                        }
                } else {
                        /* UNPACK_16I sign-extends, so we have to emit ANDs. */
                        temp = vpm;
                        if (swiz == 1 || swiz == 3)
                                temp = qir_UNPACK_16_I(c, temp, 1);
                        temp = qir_AND(c, temp, qir_uniform_ui(c, 0xffff));
                        temp = qir_ITOF(c, temp);

                        if (chan->normalized) {
                                return qir_FMUL(c, temp,
                                                qir_uniform_f(c, 1 / 65535.0));
                        } else {
                                return temp;
                        }
                }
        } else {
                return c->undef;
        }
}

static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);
        struct qreg vpm_reads[4];

        c->vattr_sizes[attr] = align(attr_size, 4);
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                struct qreg vpm = { QFILE_VPM, attr * 4 + i };
                vpm_reads[i] = qir_MOV(c, vpm);
                c->num_inputs++;
        }

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        for (int i = 0; i < 4; i++) {
                uint8_t swiz = desc->swizzle[i];
                struct qreg result = get_channel_from_vpm(c, vpm_reads,
                                                          swiz, desc);

                if (result.file == QFILE_NULL) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        result = qir_uniform_f(c, 0.0);
                }
                c->inputs[attr * 4 + i] = result;
        }
}

static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
        c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}

static void
emit_point_coord_input(struct vc4_compile *c, int attr)
{
        if (c->point_x.file == QFILE_NULL) {
                c->point_x = qir_uniform_f(c, 0.0);
                c->point_y = qir_uniform_f(c, 0.0);
        }

        c->inputs[attr * 4 + 0] = c->point_x;
        if (c->fs_key->point_coord_upper_left) {
                c->inputs[attr * 4 + 1] = qir_FSUB(c,
                                                   qir_uniform_f(c, 1.0),
                                                   c->point_y);
        } else {
                c->inputs[attr * 4 + 1] = c->point_y;
        }
        c->inputs[attr * 4 + 2] = qir_uniform_f(c, 0.0);
        c->inputs[attr * 4 + 3] = qir_uniform_f(c, 1.0);
}

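/*
 * Emits an interpolated fragment shader input (varying).  The rasterizer
 * hands the shader one coefficient per varying per pixel in the QFILE_VARY
 * file; multiplying by the incoming W and accumulating the C coefficient
 * (VARY_ADD_C) is what completes the perspective-correct interpolation.
 */
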
static struct qreg
emit_fragment_varying(struct vc4_compile *c, uint8_t semantic,
                      uint8_t index, uint8_t swizzle)
{
        uint32_t i = c->num_input_semantics++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        if (c->num_input_semantics >= c->input_semantics_array_size) {
                c->input_semantics_array_size =
                        MAX2(4, c->input_semantics_array_size * 2);

                c->input_semantics = reralloc(c, c->input_semantics,
                                              struct vc4_varying_semantic,
                                              c->input_semantics_array_size);
        }

        c->input_semantics[i].semantic = semantic;
        c->input_semantics[i].index = index;
        c->input_semantics[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}

static void
emit_fragment_input(struct vc4_compile *c, int attr,
                    unsigned semantic_name, unsigned semantic_index)
{
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c,
                                              semantic_name,
                                              semantic_index,
                                              i);
                c->num_inputs++;
        }
}

static void
emit_face_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_FSUB(c,
                                           qir_uniform_f(c, 1.0),
                                           qir_FMUL(c,
                                                    qir_ITOF(c, qir_FRAG_REV_FLAG(c)),
                                                    qir_uniform_f(c, 2.0)));
        c->inputs[attr * 4 + 1] = qir_uniform_f(c, 0.0);
        c->inputs[attr * 4 + 2] = qir_uniform_f(c, 0.0);
        c->inputs[attr * 4 + 3] = qir_uniform_f(c, 1.0);
}

static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t semantic_name,
           uint8_t semantic_index,
           uint8_t semantic_swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_semantics = reralloc(c,
                                               c->output_semantics,
                                               struct vc4_varying_semantic,
                                               c->outputs_array_size);
        }

        c->output_semantics[decl_offset].semantic = semantic_name;
        c->output_semantics[decl_offset].index = semantic_index;
        c->output_semantics[decl_offset].swizzle = semantic_swizzle;
}

static void
declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_uniform_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct vc4_compiler_ubo_range,
                                         c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_ranges[array_id].used = false;
}

static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, instr->dest.dest);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        dest[i] = srcs[i];
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        /* Pick the channel to store the output in. */
        assert(!instr->dest.saturate);
        struct qreg *dest = ntq_get_dest(c, instr->dest.dest);
        assert(util_is_power_of_two(instr->dest.write_mask));
        dest += ffs(instr->dest.write_mask) - 1;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                *dest = qir_MOV(c, src[0]);
                break;
        case nir_op_fmul:
                *dest = qir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                *dest = qir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                *dest = qir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                *dest = qir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                *dest = qir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i:
        case nir_op_f2u:
                *dest = qir_FTOI(c, src[0]);
                break;
        case nir_op_i2f:
        case nir_op_u2f:
                *dest = qir_ITOF(c, src[0]);
                break;
        case nir_op_b2f:
                *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                qir_SF(c, src[0]);
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
                break;

        case nir_op_iadd:
                *dest = qir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                *dest = qir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                *dest = qir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                *dest = qir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                *dest = qir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                *dest = qir_MIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                *dest = qir_MAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                *dest = qir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                *dest = qir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                *dest = qir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                *dest = qir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                *dest = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZS(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_sne:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_sge:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NC(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_slt:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NS(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_feq:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_fne:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_fge:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_flt:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ieq:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ine:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ige:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ilt:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));
                break;

        case nir_op_bcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL_X_Y_NS(c, src[1], src[2]);
                break;
        case nir_op_fcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL_X_Y_ZC(c, src[1], src[2]);
                break;

        case nir_op_frcp:
                *dest = ntq_rcp(c, src[0]);
                break;
        case nir_op_frsq:
                *dest = ntq_rsq(c, src[0]);
                break;
        case nir_op_fexp2:
                *dest = qir_EXP2(c, src[0]);
                break;
        case nir_op_flog2:
                *dest = qir_LOG2(c, src[0]);
                break;

        case nir_op_ftrunc:
                *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
                break;
        case nir_op_fceil:
                *dest = ntq_fceil(c, src[0]);
                break;
        case nir_op_ffract:
                *dest = ntq_ffract(c, src[0]);
                break;
        case nir_op_ffloor:
                *dest = ntq_ffloor(c, src[0]);
                break;

        case nir_op_fsin:
                *dest = ntq_fsin(c, src[0]);
                break;
        case nir_op_fcos:
                *dest = ntq_fcos(c, src[0]);
                break;

        case nir_op_fsign:
                *dest = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                *dest = qir_FMAXABS(c, src[0], src[0]);
                break;
        case nir_op_iabs:
                *dest = qir_MAX(c, src[0],
                                qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static struct qreg
vc4_blend_channel(struct vc4_compile *c,
                  struct qreg *dst,
                  struct qreg *src,
                  struct qreg val,
                  unsigned factor,
                  int channel)
{
        switch (factor) {
        case PIPE_BLENDFACTOR_ONE:
                return val;
        case PIPE_BLENDFACTOR_SRC_COLOR:
                return qir_FMUL(c, val, src[channel]);
        case PIPE_BLENDFACTOR_SRC_ALPHA:
                return qir_FMUL(c, val, src[3]);
        case PIPE_BLENDFACTOR_DST_ALPHA:
                return qir_FMUL(c, val, dst[3]);
        case PIPE_BLENDFACTOR_DST_COLOR:
                return qir_FMUL(c, val, dst[channel]);
        case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
                if (channel != 3) {
                        return qir_FMUL(c,
                                        val,
                                        qir_FMIN(c,
                                                 src[3],
                                                 qir_FSUB(c,
                                                          qir_uniform_f(c, 1.0),
                                                          dst[3])));
                } else {
                        return val;
                }
        case PIPE_BLENDFACTOR_CONST_COLOR:
                return qir_FMUL(c, val,
                                qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR,
                                            channel));
        case PIPE_BLENDFACTOR_CONST_ALPHA:
                return qir_FMUL(c, val,
                                qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR, 3));
        case PIPE_BLENDFACTOR_ZERO:
                return qir_uniform_f(c, 0.0);
        case PIPE_BLENDFACTOR_INV_SRC_COLOR:
                return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
                                                 src[channel]));
        case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
                return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
                                                 src[3]));
        case PIPE_BLENDFACTOR_INV_DST_ALPHA:
                return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
                                                 dst[3]));
        case PIPE_BLENDFACTOR_INV_DST_COLOR:
                return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
                                                 dst[channel]));
        case PIPE_BLENDFACTOR_INV_CONST_COLOR:
                return qir_FMUL(c, val,
                                qir_FSUB(c, qir_uniform_f(c, 1.0),
                                         qir_uniform(c,
                                                     QUNIFORM_BLEND_CONST_COLOR,
                                                     channel)));
        case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
                return qir_FMUL(c, val,
                                qir_FSUB(c, qir_uniform_f(c, 1.0),
                                         qir_uniform(c,
                                                     QUNIFORM_BLEND_CONST_COLOR,
                                                     3)));

        default:
        case PIPE_BLENDFACTOR_SRC1_COLOR:
        case PIPE_BLENDFACTOR_SRC1_ALPHA:
        case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
        case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
                /* Unsupported. */
                fprintf(stderr, "Unknown blend factor %d\n", factor);
                return val;
        }
}

static struct qreg
vc4_blend_func(struct vc4_compile *c,
               struct qreg src, struct qreg dst,
               unsigned func)
{
        switch (func) {
        case PIPE_BLEND_ADD:
                return qir_FADD(c, src, dst);
        case PIPE_BLEND_SUBTRACT:
                return qir_FSUB(c, src, dst);
        case PIPE_BLEND_REVERSE_SUBTRACT:
                return qir_FSUB(c, dst, src);
        case PIPE_BLEND_MIN:
                return qir_FMIN(c, src, dst);
        case PIPE_BLEND_MAX:
                return qir_FMAX(c, src, dst);

        default:
                /* Unsupported. */
                fprintf(stderr, "Unknown blend func %d\n", func);
                return src;
        }
}

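/*
 * As an example of how the two helpers above compose: classic alpha
 * blending (factors SRC_ALPHA / INV_SRC_ALPHA with PIPE_BLEND_ADD) becomes
 * result[i] = src[i] * src[3] + dst[i] * (1 - src[3]), evaluated entirely
 * as QIR float math on the unpacked channels.
 */
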
/**
 * Implements fixed function blending in shader code.
 *
 * VC4 doesn't have any hardware support for blending.  Instead, you read the
 * current contents of the destination from the tile buffer after having
 * waited for the scoreboard (which is handled by vc4_qpu_emit.c), then do
 * math using your output color and that destination value, and update the
 * output color appropriately.
 */
static void
vc4_blend(struct vc4_compile *c, struct qreg *result,
          struct qreg *dst_color, struct qreg *src_color)
{
        struct pipe_rt_blend_state *blend = &c->fs_key->blend;

        if (!blend->blend_enable) {
                for (int i = 0; i < 4; i++)
                        result[i] = src_color[i];
                return;
        }

        struct qreg clamped_src[4];
        struct qreg clamped_dst[4];
        for (int i = 0; i < 4; i++) {
                clamped_src[i] = qir_SAT(c, src_color[i]);
                clamped_dst[i] = qir_SAT(c, dst_color[i]);
        }
        src_color = clamped_src;
        dst_color = clamped_dst;

        struct qreg src_blend[4], dst_blend[4];
        for (int i = 0; i < 3; i++) {
                src_blend[i] = vc4_blend_channel(c,
                                                 dst_color, src_color,
                                                 src_color[i],
                                                 blend->rgb_src_factor, i);
                dst_blend[i] = vc4_blend_channel(c,
                                                 dst_color, src_color,
                                                 dst_color[i],
                                                 blend->rgb_dst_factor, i);
        }
        src_blend[3] = vc4_blend_channel(c,
                                         dst_color, src_color,
                                         src_color[3],
                                         blend->alpha_src_factor, 3);
        dst_blend[3] = vc4_blend_channel(c,
                                         dst_color, src_color,
                                         dst_color[3],
                                         blend->alpha_dst_factor, 3);

        for (int i = 0; i < 3; i++) {
                result[i] = vc4_blend_func(c,
                                           src_blend[i], dst_blend[i],
                                           blend->rgb_func);
        }
        result[3] = vc4_blend_func(c,
                                   src_blend[3], dst_blend[3],
                                   blend->alpha_func);
}

*c
)
1210 for (int i
= 0; i
< PIPE_MAX_CLIP_PLANES
; i
++) {
1211 if (!(c
->key
->ucp_enables
& (1 << i
)))
1214 struct qreg dist
= emit_fragment_varying(c
,
1215 TGSI_SEMANTIC_CLIPDIST
,
1221 if (c
->discard
.file
== QFILE_NULL
)
1222 c
->discard
= qir_uniform_ui(c
, 0);
1224 c
->discard
= qir_SEL_X_Y_NS(c
, qir_uniform_ui(c
, ~0),
1230 alpha_test_discard(struct vc4_compile
*c
)
1232 struct qreg src_alpha
;
1233 struct qreg alpha_ref
= qir_uniform(c
, QUNIFORM_ALPHA_REF
, 0);
1235 if (!c
->fs_key
->alpha_test
)
1238 if (c
->output_color_index
!= -1)
1239 src_alpha
= c
->outputs
[c
->output_color_index
+ 3];
1241 src_alpha
= qir_uniform_f(c
, 1.0);
1243 if (c
->discard
.file
== QFILE_NULL
)
1244 c
->discard
= qir_uniform_ui(c
, 0);
1246 switch (c
->fs_key
->alpha_test_func
) {
1247 case PIPE_FUNC_NEVER
:
1248 c
->discard
= qir_uniform_ui(c
, ~0);
1250 case PIPE_FUNC_ALWAYS
:
1252 case PIPE_FUNC_EQUAL
:
1253 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1254 c
->discard
= qir_SEL_X_Y_ZS(c
, c
->discard
,
1255 qir_uniform_ui(c
, ~0));
1257 case PIPE_FUNC_NOTEQUAL
:
1258 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1259 c
->discard
= qir_SEL_X_Y_ZC(c
, c
->discard
,
1260 qir_uniform_ui(c
, ~0));
1262 case PIPE_FUNC_GREATER
:
1263 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1264 c
->discard
= qir_SEL_X_Y_NC(c
, c
->discard
,
1265 qir_uniform_ui(c
, ~0));
1267 case PIPE_FUNC_GEQUAL
:
1268 qir_SF(c
, qir_FSUB(c
, alpha_ref
, src_alpha
));
1269 c
->discard
= qir_SEL_X_Y_NS(c
, c
->discard
,
1270 qir_uniform_ui(c
, ~0));
1272 case PIPE_FUNC_LESS
:
1273 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1274 c
->discard
= qir_SEL_X_Y_NS(c
, c
->discard
,
1275 qir_uniform_ui(c
, ~0));
1277 case PIPE_FUNC_LEQUAL
:
1278 qir_SF(c
, qir_FSUB(c
, alpha_ref
, src_alpha
));
1279 c
->discard
= qir_SEL_X_Y_NC(c
, c
->discard
,
1280 qir_uniform_ui(c
, ~0));
static struct qreg
vc4_logicop(struct vc4_compile *c, struct qreg src, struct qreg dst)
{
        switch (c->fs_key->logicop_func) {
        case PIPE_LOGICOP_CLEAR:
                return qir_uniform_f(c, 0.0);
        case PIPE_LOGICOP_NOR:
                return qir_NOT(c, qir_OR(c, src, dst));
        case PIPE_LOGICOP_AND_INVERTED:
                return qir_AND(c, qir_NOT(c, src), dst);
        case PIPE_LOGICOP_COPY_INVERTED:
                return qir_NOT(c, src);
        case PIPE_LOGICOP_AND_REVERSE:
                return qir_AND(c, src, qir_NOT(c, dst));
        case PIPE_LOGICOP_INVERT:
                return qir_NOT(c, dst);
        case PIPE_LOGICOP_XOR:
                return qir_XOR(c, src, dst);
        case PIPE_LOGICOP_NAND:
                return qir_NOT(c, qir_AND(c, src, dst));
        case PIPE_LOGICOP_AND:
                return qir_AND(c, src, dst);
        case PIPE_LOGICOP_EQUIV:
                return qir_NOT(c, qir_XOR(c, src, dst));
        case PIPE_LOGICOP_NOOP:
                return dst;
        case PIPE_LOGICOP_OR_INVERTED:
                return qir_OR(c, qir_NOT(c, src), dst);
        case PIPE_LOGICOP_OR_REVERSE:
                return qir_OR(c, src, qir_NOT(c, dst));
        case PIPE_LOGICOP_OR:
                return qir_OR(c, src, dst);
        case PIPE_LOGICOP_SET:
                return qir_uniform_ui(c, ~0);
        case PIPE_LOGICOP_COPY:
        default:
                return src;
        }
}

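/*
 * Unlike the blend factors above, which work on unpacked floats, GL logic
 * ops are defined on the raw framebuffer bits.  That's why blend_pipeline()
 * below applies vc4_logicop() only after packing to 8888, and keeps
 * packed_dst_color around untouched for it.
 */
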
/**
 * Applies the GL blending pipeline and returns the packed (8888) output
 * color value.
 */
static struct qreg
blend_pipeline(struct vc4_compile *c)
{
        enum pipe_format color_format = c->fs_key->color_format;
        const uint8_t *format_swiz = vc4_get_format_swizzle(color_format);
        struct qreg tlb_read_color[4] = { c->undef, c->undef, c->undef, c->undef };
        struct qreg dst_color[4] = { c->undef, c->undef, c->undef, c->undef };
        struct qreg linear_dst_color[4] = { c->undef, c->undef, c->undef, c->undef };
        struct qreg packed_dst_color = c->undef;

        if (c->fs_key->blend.blend_enable ||
            c->fs_key->blend.colormask != 0xf ||
            c->fs_key->logicop_func != PIPE_LOGICOP_COPY) {
                struct qreg r4 = qir_TLB_COLOR_READ(c);
                for (int i = 0; i < 4; i++)
                        tlb_read_color[i] = qir_R4_UNPACK(c, r4, i);
                for (int i = 0; i < 4; i++) {
                        dst_color[i] = get_swizzled_channel(c,
                                                            tlb_read_color,
                                                            format_swiz[i]);
                        if (util_format_is_srgb(color_format) && i != 3) {
                                linear_dst_color[i] =
                                        qir_srgb_decode(c, dst_color[i]);
                        } else {
                                linear_dst_color[i] = dst_color[i];
                        }
                }

                /* Save the packed value for logic ops.  Can't reuse r4
                 * because other things might smash it (like sRGB)
                 */
                packed_dst_color = qir_MOV(c, r4);
        }

        struct qreg undef_array[4] = { c->undef, c->undef, c->undef, c->undef };
        const struct qreg *output_colors = (c->output_color_index != -1 ?
                                            c->outputs + c->output_color_index :
                                            undef_array);
        struct qreg blend_src_color[4];
        for (int i = 0; i < 4; i++)
                blend_src_color[i] = output_colors[i];

        struct qreg blend_color[4];
        vc4_blend(c, blend_color, linear_dst_color, blend_src_color);

        if (util_format_is_srgb(color_format)) {
                for (int i = 0; i < 3; i++)
                        blend_color[i] = qir_srgb_encode(c, blend_color[i]);
        }

        /* Debug: Sometimes you're getting a black output and just want to see
         * if the FS is getting executed at all.  Spam magenta into the color
         * output.
         */
        if (0) {
                blend_color[0] = qir_uniform_f(c, 1.0);
                blend_color[1] = qir_uniform_f(c, 0.0);
                blend_color[2] = qir_uniform_f(c, 1.0);
                blend_color[3] = qir_uniform_f(c, 0.5);
        }

        struct qreg swizzled_outputs[4];
        for (int i = 0; i < 4; i++) {
                swizzled_outputs[i] = get_swizzled_channel(c, blend_color,
                                                           format_swiz[i]);
        }

        struct qreg packed_color = c->undef;
        for (int i = 0; i < 4; i++) {
                if (swizzled_outputs[i].file == QFILE_NULL)
                        continue;
                if (packed_color.file == QFILE_NULL) {
                        packed_color = qir_PACK_8888_F(c, swizzled_outputs[i]);
                } else {
                        packed_color = qir_PACK_8_F(c,
                                                    packed_color,
                                                    swizzled_outputs[i],
                                                    i);
                }
        }

        if (packed_color.file == QFILE_NULL)
                packed_color = qir_uniform_ui(c, 0);

        if (c->fs_key->logicop_func != PIPE_LOGICOP_COPY) {
                packed_color = vc4_logicop(c, packed_color, packed_dst_color);
        }

        /* If the bit isn't set in the color mask, then just return the
         * original dst color, instead.
         */
        uint32_t colormask = 0xffffffff;
        for (int i = 0; i < 4; i++) {
                if (format_swiz[i] < 4 &&
                    !(c->fs_key->blend.colormask & (1 << format_swiz[i]))) {
                        colormask &= ~(0xff << (i * 8));
                }
        }
        if (colormask != 0xffffffff) {
                packed_color = qir_OR(c,
                                      qir_AND(c, packed_color,
                                              qir_uniform_ui(c, colormask)),
                                      qir_AND(c, packed_dst_color,
                                              qir_uniform_ui(c, ~colormask)));
        }

        return packed_color;
}

static void
emit_frag_end(struct vc4_compile *c)
{
        clip_distance_discard(c);
        alpha_test_discard(c);
        struct qreg color = blend_pipeline(c);

        if (c->discard.file != QFILE_NULL)
                qir_TLB_DISCARD_SETUP(c, c->discard);

        if (c->fs_key->stencil_enabled) {
                qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        if (c->fs_key->depth_enabled) {
                struct qreg z;
                if (c->output_position_index != -1) {
                        z = qir_FTOI(c, qir_FMUL(c, c->outputs[c->output_position_index + 2],
                                                 qir_uniform_f(c, 0xffffff)));
                } else {
                        z = qir_FRAG_Z(c);
                }
                qir_TLB_Z_WRITE(c, z);
        }

        qir_TLB_COLOR_WRITE(c, color);
}

static void
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg xyi[2];

        for (int i = 0; i < 2; i++) {
                struct qreg scale =
                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);

                xyi[i] = qir_FTOI(c, qir_FMUL(c,
                                              qir_FMUL(c,
                                                       c->outputs[c->output_position_index + i],
                                                       scale),
                                              rcp_w));
        }

        qir_VPM_WRITE(c, qir_PACK_SCALED(c, xyi[0], xyi[1]));
}

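/*
 * The packed X/Y written above are the signed fixed-point (12.4 subpixel)
 * screen coordinates the binner consumes; the QUNIFORM_VIEWPORT_*_SCALE
 * uniforms are expected to bake the fixed-point factor into the viewport
 * scale.
 */
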
static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],
                                                          zscale),
                                              rcp_w),
                                  zoffset));
}

static void
emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
{
        qir_VPM_WRITE(c, rcp_w);
}

static void
emit_point_size_write(struct vc4_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index + 3];
        else
                point_size = qir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));

        qir_VPM_WRITE(c, point_size);
}

/**
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator also
 * insists that all vertex attributes loaded get read by the VS/CS, so we have
 * to consume it here.
 */
static void
emit_stub_vpm_read(struct vc4_compile *c)
{
        if (c->num_inputs)
                return;

        c->vattr_sizes[0] = 4;
        struct qreg vpm = { QFILE_VPM, 0 };
        (void)qir_MOV(c, vpm);
        c->num_inputs++;
}

static void
emit_ucp_clipdistance(struct vc4_compile *c)
{
        unsigned cv;
        if (c->output_clipvertex_index != -1)
                cv = c->output_clipvertex_index;
        else if (c->output_position_index != -1)
                cv = c->output_position_index;
        else
                return;

        for (int plane = 0; plane < PIPE_MAX_CLIP_PLANES; plane++) {
                if (!(c->key->ucp_enables & (1 << plane)))
                        continue;

                /* Pick the next outputs[] that hasn't been written to, since
                 * there are no other program writes left to be processed at
                 * this point.  If something had been declared but not written
                 * (like a w component), we'll just smash over the top of it.
                 */
                uint32_t output_index = c->num_outputs++;
                add_output(c, output_index,
                           TGSI_SEMANTIC_CLIPDIST,
                           plane,
                           TGSI_SWIZZLE_X);

                struct qreg dist = qir_uniform_f(c, 0.0);
                for (int i = 0; i < 4; i++) {
                        struct qreg pos_chan = c->outputs[cv + i];
                        struct qreg ucp =
                                qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                            plane * 4 + i);
                        dist = qir_FADD(c, dist, qir_FMUL(c, pos_chan, ucp));
                }

                c->outputs[output_index] = dist;
        }
}

static void
emit_vert_end(struct vc4_compile *c,
              struct vc4_varying_semantic *fs_inputs,
              uint32_t num_fs_inputs)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);
        emit_ucp_clipdistance(c);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);

        for (int i = 0; i < num_fs_inputs; i++) {
                struct vc4_varying_semantic *input = &fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct vc4_varying_semantic *output =
                                &c->output_semantics[j];

                        if (input->semantic == output->semantic &&
                            input->index == output->index &&
                            input->swizzle == output->swizzle) {
                                qir_VPM_WRITE(c, c->outputs[j]);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
        }
}

static void
emit_coord_end(struct vc4_compile *c)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
}

static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                nir_lower_vars_to_ssa(s);
                nir_lower_alu_to_scalar(s);

                progress = nir_copy_prop(s) || progress;
                progress = nir_opt_dce(s) || progress;
                progress = nir_opt_cse(s) || progress;
                progress = nir_opt_peephole_select(s) || progress;
                progress = nir_opt_algebraic(s) || progress;
                progress = nir_opt_constant_folding(s) || progress;
        } while (progress);
}

static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}

static void
ntq_setup_inputs(struct vc4_compile *c)
{
        unsigned num_entries = 0;
        foreach_list_typed(nir_variable, var, node, &c->s->inputs)
                num_entries++;

        nir_variable *vars[num_entries];

        unsigned i = 0;
        foreach_list_typed(nir_variable, var, node, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                /* XXX: map loc slots to semantics */
                unsigned semantic_name = var->data.location;
                unsigned semantic_index = var->data.index;
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->stage == QSTAGE_FRAG) {
                        if (semantic_name == TGSI_SEMANTIC_POSITION) {
                                emit_fragcoord_input(c, loc);
                        } else if (semantic_name == TGSI_SEMANTIC_FACE) {
                                emit_face_input(c, loc);
                        } else if (semantic_name == TGSI_SEMANTIC_GENERIC &&
                                   (c->fs_key->point_sprite_mask &
                                    (1 << semantic_index))) {
                                emit_point_coord_input(c, loc);
                        } else {
                                emit_fragment_input(c, loc,
                                                    semantic_name,
                                                    semantic_index);
                        }
                } else {
                        emit_vertex_input(c, loc);
                }
        }
}

static void
ntq_setup_outputs(struct vc4_compile *c)
{
        foreach_list_typed(nir_variable, var, node, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                /* XXX: map loc slots to semantics */
                unsigned semantic_name = var->data.location;
                unsigned semantic_index = var->data.index;
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                /* NIR hack to pass through
                 * TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS */
                if (semantic_name == TGSI_SEMANTIC_COLOR &&
                    semantic_index == -1)
                        semantic_index = 0;

                for (int i = 0; i < 4; i++) {
                        add_output(c,
                                   loc + i,
                                   semantic_name,
                                   semantic_index,
                                   i);
                }

                switch (semantic_name) {
                case TGSI_SEMANTIC_POSITION:
                        c->output_position_index = loc;
                        break;
                case TGSI_SEMANTIC_CLIPVERTEX:
                        c->output_clipvertex_index = loc;
                        break;
                case TGSI_SEMANTIC_COLOR:
                        c->output_color_index = loc;
                        break;
                case TGSI_SEMANTIC_PSIZE:
                        c->output_point_size_index = loc;
                        break;
                }
        }
}

static void
ntq_setup_uniforms(struct vc4_compile *c)
{
        foreach_list_typed(nir_variable, var, node, &c->s->uniforms) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned array_elem_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * array_elem_size,
                                      array_len * array_elem_size);
        }
}

/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = qir_uniform_ui(c, 0);
        }
}

static void
ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          instr->def.num_components);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, instr->value.u[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}

static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
        struct qreg *dest = NULL;

        if (info->has_dest) {
                dest = ntq_get_dest(c, instr->dest);
        }

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                for (int i = 0; i < instr->num_components; i++) {
                        dest[i] = qir_uniform(c, QUNIFORM_UNIFORM,
                                              instr->const_index[0] * 4 + i);
                }
                break;

        case nir_intrinsic_load_uniform_indirect:
                for (int i = 0; i < instr->num_components; i++) {
                        dest[i] = indirect_uniform_load(c,
                                                        ntq_get_src(c, instr->src[0], 0),
                                                        (instr->const_index[0] *
                                                         4 + i) * sizeof(float));
                }
                break;

        case nir_intrinsic_load_input:
                assert(instr->num_components == 1);
                *dest = c->inputs[instr->const_index[0]];
                break;

        case nir_intrinsic_store_output:
                assert(instr->num_components == 1);
                c->outputs[instr->const_index[0]] =
                        qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
                c->num_outputs = MAX2(c->num_outputs, instr->const_index[0] + 1);
                break;

        case nir_intrinsic_discard:
                c->discard = qir_uniform_ui(c, ~0);
                break;

        case nir_intrinsic_discard_if:
                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_ui(c, 0);
                c->discard = qir_OR(c, c->discard,
                                    ntq_get_src(c, instr->src[0], 0));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        fprintf(stderr, "general IF statements not handled.\n");
}

static void
ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
        nir_foreach_instr(block, instr) {
                ntq_emit_instr(c, instr);
        }
}

static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                /* case nir_cf_node_loop: */
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                default:
                        assert(0);
                }
        }
}

static void
ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}

static void
nir_to_qir(struct vc4_compile *c)
{
        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_overload(c->s, overload) {
                assert(strcmp(overload->function->name, "main") == 0);
                assert(overload->impl);
                ntq_emit_impl(c, overload->impl);
        }
}

static const nir_shader_compiler_options nir_options = {
        .lower_ffma = true,
        .lower_flrp = true,
        .lower_fpow = true,
        .lower_fsat = true,
        .lower_fsqrt = true,
        .lower_negate = true,
};

static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
        int *count = (int *) state;
        nir_foreach_instr(block, instr) {
                *count = *count + 1;
        }
        return true;
}

static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_overload(nir, overload) {
                if (!overload->impl)
                        continue;
                nir_foreach_block(overload->impl, count_nir_instrs_in_block, &count);
        }
        return count;
}

*
1993 vc4_shader_ntq(struct vc4_context
*vc4
, enum qstage stage
,
1994 struct vc4_key
*key
)
1996 struct vc4_compile
*c
= qir_compile_init();
1999 c
->shader_state
= &key
->shader_state
->base
;
2000 c
->program_id
= key
->shader_state
->program_id
;
2001 c
->variant_id
= key
->shader_state
->compiled_variant_count
++;
2006 c
->fs_key
= (struct vc4_fs_key
*)key
;
2007 if (c
->fs_key
->is_points
) {
2008 c
->point_x
= emit_fragment_varying(c
, ~0, ~0, 0);
2009 c
->point_y
= emit_fragment_varying(c
, ~0, ~0, 0);
2010 } else if (c
->fs_key
->is_lines
) {
2011 c
->line_x
= emit_fragment_varying(c
, ~0, ~0, 0);
2015 c
->vs_key
= (struct vc4_vs_key
*)key
;
2018 c
->vs_key
= (struct vc4_vs_key
*)key
;
2022 const struct tgsi_token
*tokens
= key
->shader_state
->base
.tokens
;
2023 if (c
->fs_key
&& c
->fs_key
->light_twoside
) {
2024 if (!key
->shader_state
->twoside_tokens
) {
2025 const struct tgsi_lowering_config lowering_config
= {
2026 .color_two_side
= true,
2028 struct tgsi_shader_info info
;
2029 key
->shader_state
->twoside_tokens
=
2030 tgsi_transform_lowering(&lowering_config
,
2031 key
->shader_state
->base
.tokens
,
2034 /* If no transformation occurred, then NULL is
2035 * returned and we just use our original tokens.
2037 if (!key
->shader_state
->twoside_tokens
) {
2038 key
->shader_state
->twoside_tokens
=
2039 key
->shader_state
->base
.tokens
;
2042 tokens
= key
->shader_state
->twoside_tokens
;
2045 if (vc4_debug
& VC4_DEBUG_TGSI
) {
2046 fprintf(stderr
, "%s prog %d/%d TGSI:\n",
2047 qir_get_stage_name(c
->stage
),
2048 c
->program_id
, c
->variant_id
);
2049 tgsi_dump(tokens
, 0);
2052 c
->s
= tgsi_to_nir(tokens
, &nir_options
);
2053 nir_opt_global_to_local(c
->s
);
2054 nir_convert_to_ssa(c
->s
);
2055 vc4_nir_lower_io(c
);
2056 nir_lower_idiv(c
->s
);
2058 vc4_optimize_nir(c
->s
);
2060 nir_remove_dead_variables(c
->s
);
2062 nir_convert_from_ssa(c
->s
, false);
2064 if (vc4_debug
& VC4_DEBUG_SHADERDB
) {
2065 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
2066 qir_get_stage_name(c
->stage
),
2067 c
->program_id
, c
->variant_id
,
2068 count_nir_instrs(c
->s
));
2071 if (vc4_debug
& VC4_DEBUG_NIR
) {
2072 fprintf(stderr
, "%s prog %d/%d NIR:\n",
2073 qir_get_stage_name(c
->stage
),
2074 c
->program_id
, c
->variant_id
);
2075 nir_print_shader(c
->s
, stderr
);
2086 vc4
->prog
.fs
->input_semantics
,
2087 vc4
->prog
.fs
->num_inputs
);
2094 if (vc4_debug
& VC4_DEBUG_QIR
) {
2095 fprintf(stderr
, "%s prog %d/%d pre-opt QIR:\n",
2096 qir_get_stage_name(c
->stage
),
2097 c
->program_id
, c
->variant_id
);
2102 qir_lower_uniforms(c
);
2104 if (vc4_debug
& VC4_DEBUG_QIR
) {
2105 fprintf(stderr
, "%s prog %d/%d QIR:\n",
2106 qir_get_stage_name(c
->stage
),
2107 c
->program_id
, c
->variant_id
);
2110 qir_reorder_uniforms(c
);
2111 vc4_generate_code(vc4
, c
);
2113 if (vc4_debug
& VC4_DEBUG_SHADERDB
) {
2114 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d instructions\n",
2115 qir_get_stage_name(c
->stage
),
2116 c
->program_id
, c
->variant_id
,
2118 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
2119 qir_get_stage_name(c
->stage
),
2120 c
->program_id
, c
->variant_id
,
static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->base.tokens = tgsi_dup_tokens(cso->tokens);
        so->program_id = vc4->next_uncompiled_program_id++;

        return so;
}

static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;

        vc4_set_shader_uniform_dirty_flags(shader);
}

static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                bool input_live[c->num_input_semantics];

                memset(input_live, 0, sizeof(input_live));
                list_for_each_entry(struct qinst, inst, &c->instructions, link) {
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_semantics = ralloc_array(shader,
                                                       struct vc4_varying_semantic,
                                                       c->num_input_semantics);

                for (int i = 0; i < c->num_input_semantics; i++) {
                        struct vc4_varying_semantic *sem = &c->input_semantics[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (sem->semantic == (uint8_t)~0)
                                continue;

                        if (sem->semantic == TGSI_SEMANTIC_COLOR ||
                            sem->semantic == TGSI_SEMANTIC_BCOLOR) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_semantics[shader->num_inputs] = *sem;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                         c->qpu_inst_count * sizeof(uint64_t));

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }
        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}

static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (sampler) {
                        key->tex[i].format = sampler->format;
                        key->tex[i].swizzle[0] = sampler->swizzle_r;
                        key->tex[i].swizzle[1] = sampler->swizzle_g;
                        key->tex[i].swizzle[2] = sampler->swizzle_b;
                        key->tex[i].swizzle[3] = sampler->swizzle_a;
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}

static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}

static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}

void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}

static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}

static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        if (so->twoside_tokens != so->base.tokens)
                free((void *)so->twoside_tokens);
        free((void *)so->base.tokens);
        free(so);
}

static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}

void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}

void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}