/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/format_srgb.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_lowering.h"
#include "tgsi/tgsi_parse.h"
#include "nir/tgsi_to_nir.h"

#include "vc4_context.h"

#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif
struct vc4_key {
        struct vc4_uncompiled_shader *shader_state;
        struct {
                enum pipe_format format;
                unsigned compare_mode:1;
                unsigned compare_func:3;
        } tex[VC4_MAX_TEXTURE_SAMPLERS];
};

struct vc4_fs_key {
        struct vc4_key base;
        enum pipe_format color_format;
        bool stencil_full_writemasks;
        bool point_coord_upper_left;
        uint8_t alpha_test_func;
        uint32_t point_sprite_mask;

        struct pipe_rt_blend_state blend;
};

struct vc4_vs_key {
        struct vc4_key base;

        /**
         * This is a proxy for the array of FS input semantics, which is
         * larger than we would want to put in the key.
         */
        uint64_t compiled_fs_id;

        enum pipe_format attr_formats[8];
        bool per_vertex_point_size;
};
static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
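
/* Note on the helper below: vc4 has no dedicated path for indirectly
 * addressed uniforms, so indirect_uniform_load() turns a UBO load into a
 * direct texture fetch (qir_TEX_DIRECT) from the buffer addressed by
 * QUNIFORM_UBO_ADDR, with the byte offset clamped to the declared uniform
 * range.
 */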
static struct qreg
indirect_uniform_load(struct vc4_compile *c,
                      struct qreg indirect_offset,
                      unsigned offset)
{
        struct vc4_compiler_ubo_range *range = NULL;
        unsigned i;
        for (i = 0; i < c->num_uniform_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }

        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(range);
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        offset -= range->src_offset;
        /* Translate the user's TGSI register index from the TGSI register
         * base to a byte offset.
         */
        indirect_offset = qir_SHL(c, indirect_offset, qir_uniform_ui(c, 4));

        /* Adjust for where we stored the TGSI register base. */
        indirect_offset = qir_ADD(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     offset)));
        indirect_offset = qir_MIN(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
        struct qreg r4 = qir_TEX_RESULT(c);
        c->num_texture_samples++;
        return qir_MOV(c, r4);
}
static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest dest)
{
        assert(!dest.is_ssa);
        nir_register *reg = dest.reg.reg;
        struct hash_entry *entry = _mesa_hash_table_search(c->def_ht, reg);
        assert(reg->num_array_elems == 0);
        assert(dest.reg.base_offset == 0);

        struct qreg *qregs = entry->data;
        return qregs;
}
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}
static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}
static struct qreg
get_swizzled_channel(struct vc4_compile *c,
                     struct qreg *srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case UTIL_FORMAT_SWIZZLE_0:
                return qir_uniform_f(c, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return qir_uniform_f(c, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}
static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}
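
/* The two helpers below refine the hardware's approximate RCP/RSQ results
 * with a single Newton-Raphson iteration: for 1/x the update is
 * r' = r * (2 - x * r), and for 1/sqrt(x) it is
 * r' = r * (1.5 - 0.5 * x * r * r), matching the constants used in the code.
 */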
static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}
static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}
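
/* sRGB conversion helpers: decode maps an sRGB-encoded channel to linear
 * light (srgb / 12.92 below the 0.04045 cutoff, ((srgb + 0.055) / 1.055)^2.4
 * above it), and encode applies the inverse transfer function with the
 * 0.0031308 cutoff on the linear value.  qir_SEL_X_Y_NS() picks between the
 * two branches using the sign flag set by the cutoff comparison.
 */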
static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c,
                                                     srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL_X_Y_NS(c, low, high);
}
static struct qreg
qir_srgb_encode(struct vc4_compile *c, struct qreg linear)
{
        struct qreg low = qir_FMUL(c, linear, qir_uniform_f(c, 12.92));
        struct qreg high = qir_FSUB(c,
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 1.055),
                                             qir_POW(c,
                                                     linear,
                                                     qir_uniform_f(c, 0.41666))),
                                    qir_uniform_f(c, 0.055));

        qir_SF(c, qir_FSUB(c, linear, qir_uniform_f(c, 0.0031308)));
        return qir_SEL_X_Y_NS(c, low, high);
}
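
/* The QPU multiplier only handles 24-bit operands, so a full 32-bit multiply
 * below is assembled from three MUL24s: with each source split as
 * hi * 2^24 + lo, the low 32 bits of the product are
 * lolo + ((hilo + lohi) << 24).
 */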
static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}
static struct qreg
ntq_idiv(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        return qir_FTOI(c, qir_FMUL(c,
                                    qir_ITOF(c, src0),
                                    qir_RCP(c, qir_ITOF(c, src1))));
}
323 ntq_emit_tex(struct vc4_compile
*c
, nir_tex_instr
*instr
)
325 struct qreg s
, t
, r
, lod
, proj
, compare
;
326 bool is_txb
= false, is_txl
= false, has_proj
= false;
327 unsigned unit
= instr
->sampler_index
;
329 for (unsigned i
= 0; i
< instr
->num_srcs
; i
++) {
330 switch (instr
->src
[i
].src_type
) {
331 case nir_tex_src_coord
:
332 s
= ntq_get_src(c
, instr
->src
[i
].src
, 0);
333 if (instr
->sampler_dim
!= GLSL_SAMPLER_DIM_1D
)
334 t
= ntq_get_src(c
, instr
->src
[i
].src
, 1);
335 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
)
336 r
= ntq_get_src(c
, instr
->src
[i
].src
, 2);
338 case nir_tex_src_bias
:
339 lod
= ntq_get_src(c
, instr
->src
[i
].src
, 0);
342 case nir_tex_src_lod
:
343 lod
= ntq_get_src(c
, instr
->src
[i
].src
, 0);
346 case nir_tex_src_comparitor
:
347 compare
= ntq_get_src(c
, instr
->src
[i
].src
, 0);
349 case nir_tex_src_projector
:
350 proj
= qir_RCP(c
, ntq_get_src(c
, instr
->src
[i
].src
, 0));
351 s
= qir_FMUL(c
, s
, proj
);
352 t
= qir_FMUL(c
, t
, proj
);
356 unreachable("unknown texture source");
360 struct qreg texture_u
[] = {
361 qir_uniform(c
, QUNIFORM_TEXTURE_CONFIG_P0
, unit
),
362 qir_uniform(c
, QUNIFORM_TEXTURE_CONFIG_P1
, unit
),
363 qir_uniform(c
, QUNIFORM_CONSTANT
, 0),
364 qir_uniform(c
, QUNIFORM_CONSTANT
, 0),
366 uint32_t next_texture_u
= 0;
368 /* There is no native support for GL texture rectangle coordinates, so
369 * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
372 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_RECT
) {
374 qir_uniform(c
, QUNIFORM_TEXRECT_SCALE_X
, unit
));
376 qir_uniform(c
, QUNIFORM_TEXRECT_SCALE_Y
, unit
));
379 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
|| is_txl
) {
380 texture_u
[2] = qir_uniform(c
, QUNIFORM_TEXTURE_CONFIG_P2
,
381 unit
| (is_txl
<< 16));
384 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
) {
385 struct qreg ma
= qir_FMAXABS(c
, qir_FMAXABS(c
, s
, t
), r
);
386 struct qreg rcp_ma
= qir_RCP(c
, ma
);
387 s
= qir_FMUL(c
, s
, rcp_ma
);
388 t
= qir_FMUL(c
, t
, rcp_ma
);
389 r
= qir_FMUL(c
, r
, rcp_ma
);
391 qir_TEX_R(c
, r
, texture_u
[next_texture_u
++]);
392 } else if (c
->key
->tex
[unit
].wrap_s
== PIPE_TEX_WRAP_CLAMP_TO_BORDER
||
393 c
->key
->tex
[unit
].wrap_s
== PIPE_TEX_WRAP_CLAMP
||
394 c
->key
->tex
[unit
].wrap_t
== PIPE_TEX_WRAP_CLAMP_TO_BORDER
||
395 c
->key
->tex
[unit
].wrap_t
== PIPE_TEX_WRAP_CLAMP
) {
396 qir_TEX_R(c
, qir_uniform(c
, QUNIFORM_TEXTURE_BORDER_COLOR
, unit
),
397 texture_u
[next_texture_u
++]);
400 if (c
->key
->tex
[unit
].wrap_s
== PIPE_TEX_WRAP_CLAMP
) {
404 if (c
->key
->tex
[unit
].wrap_t
== PIPE_TEX_WRAP_CLAMP
) {
408 qir_TEX_T(c
, t
, texture_u
[next_texture_u
++]);
410 if (is_txl
|| is_txb
)
411 qir_TEX_B(c
, lod
, texture_u
[next_texture_u
++]);
413 qir_TEX_S(c
, s
, texture_u
[next_texture_u
++]);
415 c
->num_texture_samples
++;
416 struct qreg r4
= qir_TEX_RESULT(c
);
418 enum pipe_format format
= c
->key
->tex
[unit
].format
;
420 struct qreg unpacked
[4];
421 if (util_format_is_depth_or_stencil(format
)) {
422 struct qreg depthf
= qir_ITOF(c
, qir_SHR(c
, r4
,
423 qir_uniform_ui(c
, 8)));
424 struct qreg normalized
= qir_FMUL(c
, depthf
,
425 qir_uniform_f(c
, 1.0f
/0xffffff));
427 struct qreg depth_output
;
429 struct qreg one
= qir_uniform_f(c
, 1.0f
);
430 if (c
->key
->tex
[unit
].compare_mode
) {
432 compare
= qir_FMUL(c
, compare
, proj
);
434 switch (c
->key
->tex
[unit
].compare_func
) {
435 case PIPE_FUNC_NEVER
:
436 depth_output
= qir_uniform_f(c
, 0.0f
);
438 case PIPE_FUNC_ALWAYS
:
441 case PIPE_FUNC_EQUAL
:
442 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
443 depth_output
= qir_SEL_X_0_ZS(c
, one
);
445 case PIPE_FUNC_NOTEQUAL
:
446 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
447 depth_output
= qir_SEL_X_0_ZC(c
, one
);
449 case PIPE_FUNC_GREATER
:
450 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
451 depth_output
= qir_SEL_X_0_NC(c
, one
);
453 case PIPE_FUNC_GEQUAL
:
454 qir_SF(c
, qir_FSUB(c
, normalized
, compare
));
455 depth_output
= qir_SEL_X_0_NS(c
, one
);
458 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
459 depth_output
= qir_SEL_X_0_NS(c
, one
);
461 case PIPE_FUNC_LEQUAL
:
462 qir_SF(c
, qir_FSUB(c
, normalized
, compare
));
463 depth_output
= qir_SEL_X_0_NC(c
, one
);
467 depth_output
= normalized
;
470 for (int i
= 0; i
< 4; i
++)
471 unpacked
[i
] = depth_output
;
473 for (int i
= 0; i
< 4; i
++)
474 unpacked
[i
] = qir_R4_UNPACK(c
, r4
, i
);
477 const uint8_t *format_swiz
= vc4_get_format_swizzle(format
);
478 struct qreg texture_output
[4];
479 for (int i
= 0; i
< 4; i
++) {
480 texture_output
[i
] = get_swizzled_channel(c
, unpacked
,
484 if (util_format_is_srgb(format
)) {
485 for (int i
= 0; i
< 3; i
++)
486 texture_output
[i
] = qir_srgb_decode(c
,
490 struct qreg
*dest
= ntq_get_dest(c
, instr
->dest
);
491 for (int i
= 0; i
< 4; i
++) {
492 dest
[i
] = get_swizzled_channel(c
, texture_output
,
493 c
->key
->tex
[unit
].swizzle
[i
]);
/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);
        return qir_SEL_X_Y_NS(c,
                              qir_FADD(c, diff, qir_uniform_f(c, 1.0)),
                              diff);
}

/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, trunc));

        return qir_SEL_X_Y_NS(c,
                              qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)),
                              trunc);
}

/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src));

        return qir_SEL_X_Y_NS(c,
                              qir_FADD(c, trunc, qir_uniform_f(c, 1.0)),
                              trunc);
}
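
/* sin/cos below are evaluated as Taylor-series polynomials of the argument
 * scaled to one period: x is wrapped to [-0.5, 0.5] of a 2*pi period via
 * ntq_ffract(), and the coeff[] tables hold the (2*pi)^n / n! series terms.
 */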
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -2.0 * M_PI,
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c,
                         src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));

        struct qreg x = qir_FADD(c,
                                 ntq_ffract(c, scaled_x),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}

static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -1.0f,
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c, src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      ntq_ffract(c, scaled_x),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i]));
                sum = qir_FADD(c, sum, mul);
        }
        return sum;
}

static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        qir_SF(c, src);
        return qir_SEL_X_Y_NC(c,
                              qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0)),
                              qir_uniform_f(c, -1.0));
}
629 get_channel_from_vpm(struct vc4_compile
*c
,
630 struct qreg
*vpm_reads
,
632 const struct util_format_description
*desc
)
634 const struct util_format_channel_description
*chan
=
635 &desc
->channel
[swiz
];
638 if (swiz
> UTIL_FORMAT_SWIZZLE_W
)
639 return get_swizzled_channel(c
, vpm_reads
, swiz
);
640 else if (chan
->size
== 32 &&
641 chan
->type
== UTIL_FORMAT_TYPE_FLOAT
) {
642 return get_swizzled_channel(c
, vpm_reads
, swiz
);
643 } else if (chan
->size
== 32 &&
644 chan
->type
== UTIL_FORMAT_TYPE_SIGNED
) {
645 if (chan
->normalized
) {
647 qir_ITOF(c
, vpm_reads
[swiz
]),
651 return qir_ITOF(c
, vpm_reads
[swiz
]);
653 } else if (chan
->size
== 8 &&
654 (chan
->type
== UTIL_FORMAT_TYPE_UNSIGNED
||
655 chan
->type
== UTIL_FORMAT_TYPE_SIGNED
)) {
656 struct qreg vpm
= vpm_reads
[0];
657 if (chan
->type
== UTIL_FORMAT_TYPE_SIGNED
) {
658 temp
= qir_XOR(c
, vpm
, qir_uniform_ui(c
, 0x80808080));
659 if (chan
->normalized
) {
660 return qir_FSUB(c
, qir_FMUL(c
,
661 qir_UNPACK_8_F(c
, temp
, swiz
),
662 qir_uniform_f(c
, 2.0)),
663 qir_uniform_f(c
, 1.0));
667 qir_UNPACK_8_I(c
, temp
,
669 qir_uniform_f(c
, -128.0));
672 if (chan
->normalized
) {
673 return qir_UNPACK_8_F(c
, vpm
, swiz
);
675 return qir_ITOF(c
, qir_UNPACK_8_I(c
, vpm
, swiz
));
678 } else if (chan
->size
== 16 &&
679 (chan
->type
== UTIL_FORMAT_TYPE_UNSIGNED
||
680 chan
->type
== UTIL_FORMAT_TYPE_SIGNED
)) {
681 struct qreg vpm
= vpm_reads
[swiz
/ 2];
683 /* Note that UNPACK_16F eats a half float, not ints, so we use
684 * UNPACK_16_I for all of these.
686 if (chan
->type
== UTIL_FORMAT_TYPE_SIGNED
) {
687 temp
= qir_ITOF(c
, qir_UNPACK_16_I(c
, vpm
, swiz
% 2));
688 if (chan
->normalized
) {
689 return qir_FMUL(c
, temp
,
690 qir_uniform_f(c
, 1/32768.0f
));
695 /* UNPACK_16I sign-extends, so we have to emit ANDs. */
697 if (swiz
== 1 || swiz
== 3)
698 temp
= qir_UNPACK_16_I(c
, temp
, 1);
699 temp
= qir_AND(c
, temp
, qir_uniform_ui(c
, 0xffff));
700 temp
= qir_ITOF(c
, temp
);
702 if (chan
->normalized
) {
703 return qir_FMUL(c
, temp
,
704 qir_uniform_f(c
, 1 / 65535.0));
715 emit_vertex_input(struct vc4_compile
*c
, int attr
)
717 enum pipe_format format
= c
->vs_key
->attr_formats
[attr
];
718 uint32_t attr_size
= util_format_get_blocksize(format
);
719 struct qreg vpm_reads
[4];
721 c
->vattr_sizes
[attr
] = align(attr_size
, 4);
722 for (int i
= 0; i
< align(attr_size
, 4) / 4; i
++) {
723 struct qreg vpm
= { QFILE_VPM
, attr
* 4 + i
};
724 vpm_reads
[i
] = qir_MOV(c
, vpm
);
728 bool format_warned
= false;
729 const struct util_format_description
*desc
=
730 util_format_description(format
);
732 for (int i
= 0; i
< 4; i
++) {
733 uint8_t swiz
= desc
->swizzle
[i
];
734 struct qreg result
= get_channel_from_vpm(c
, vpm_reads
,
737 if (result
.file
== QFILE_NULL
) {
738 if (!format_warned
) {
740 "vtx element %d unsupported type: %s\n",
741 attr
, util_format_name(format
));
742 format_warned
= true;
744 result
= qir_uniform_f(c
, 0.0);
746 c
->inputs
[attr
* 4 + i
] = result
;
static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
        c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}
static void
emit_point_coord_input(struct vc4_compile *c, int attr)
{
        if (c->point_x.file == QFILE_NULL) {
                c->point_x = qir_uniform_f(c, 0.0);
                c->point_y = qir_uniform_f(c, 0.0);
        }

        c->inputs[attr * 4 + 0] = c->point_x;
        if (c->fs_key->point_coord_upper_left) {
                c->inputs[attr * 4 + 1] = qir_FSUB(c,
                                                   qir_uniform_f(c, 1.0),
                                                   c->point_y);
        } else {
                c->inputs[attr * 4 + 1] = c->point_y;
        }
        c->inputs[attr * 4 + 2] = qir_uniform_f(c, 0.0);
        c->inputs[attr * 4 + 3] = qir_uniform_f(c, 1.0);
}
783 emit_fragment_varying(struct vc4_compile
*c
, uint8_t semantic
,
784 uint8_t index
, uint8_t swizzle
)
786 uint32_t i
= c
->num_input_semantics
++;
792 if (c
->num_input_semantics
>= c
->input_semantics_array_size
) {
793 c
->input_semantics_array_size
=
794 MAX2(4, c
->input_semantics_array_size
* 2);
796 c
->input_semantics
= reralloc(c
, c
->input_semantics
,
797 struct vc4_varying_semantic
,
798 c
->input_semantics_array_size
);
801 c
->input_semantics
[i
].semantic
= semantic
;
802 c
->input_semantics
[i
].index
= index
;
803 c
->input_semantics
[i
].swizzle
= swizzle
;
805 return qir_VARY_ADD_C(c
, qir_FMUL(c
, vary
, qir_FRAG_W(c
)));
809 emit_fragment_input(struct vc4_compile
*c
, int attr
,
810 unsigned semantic_name
, unsigned semantic_index
)
812 for (int i
= 0; i
< 4; i
++) {
813 c
->inputs
[attr
* 4 + i
] =
814 emit_fragment_varying(c
,
static void
emit_face_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_FSUB(c,
                                           qir_uniform_f(c, 1.0),
                                           qir_FMUL(c,
                                                    qir_ITOF(c, qir_FRAG_REV_FLAG(c)),
                                                    qir_uniform_f(c, 2.0)));
        c->inputs[attr * 4 + 1] = qir_uniform_f(c, 0.0);
        c->inputs[attr * 4 + 2] = qir_uniform_f(c, 0.0);
        c->inputs[attr * 4 + 3] = qir_uniform_f(c, 1.0);
}
836 add_output(struct vc4_compile
*c
,
837 uint32_t decl_offset
,
838 uint8_t semantic_name
,
839 uint8_t semantic_index
,
840 uint8_t semantic_swizzle
)
842 uint32_t old_array_size
= c
->outputs_array_size
;
843 resize_qreg_array(c
, &c
->outputs
, &c
->outputs_array_size
,
846 if (old_array_size
!= c
->outputs_array_size
) {
847 c
->output_semantics
= reralloc(c
,
849 struct vc4_varying_semantic
,
850 c
->outputs_array_size
);
853 c
->output_semantics
[decl_offset
].semantic
= semantic_name
;
854 c
->output_semantics
[decl_offset
].index
= semantic_index
;
855 c
->output_semantics
[decl_offset
].swizzle
= semantic_swizzle
;
859 declare_uniform_range(struct vc4_compile
*c
, uint32_t start
, uint32_t size
)
861 unsigned array_id
= c
->num_uniform_ranges
++;
862 if (array_id
>= c
->ubo_ranges_array_size
) {
863 c
->ubo_ranges_array_size
= MAX2(c
->ubo_ranges_array_size
* 2,
865 c
->ubo_ranges
= reralloc(c
, c
->ubo_ranges
,
866 struct vc4_compiler_ubo_range
,
867 c
->ubo_ranges_array_size
);
870 c
->ubo_ranges
[array_id
].dst_offset
= 0;
871 c
->ubo_ranges
[array_id
].src_offset
= start
;
872 c
->ubo_ranges
[array_id
].size
= size
;
873 c
->ubo_ranges
[array_id
].used
= false;
877 ntq_emit_alu(struct vc4_compile
*c
, nir_alu_instr
*instr
)
879 /* Vectors are special in that they have non-scalarized writemasks,
880 * and just take the first swizzle channel for each argument in order
881 * into each writemask channel.
883 if (instr
->op
== nir_op_vec2
||
884 instr
->op
== nir_op_vec3
||
885 instr
->op
== nir_op_vec4
) {
887 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
888 srcs
[i
] = ntq_get_src(c
, instr
->src
[i
].src
,
889 instr
->src
[i
].swizzle
[0]);
890 struct qreg
*dest
= ntq_get_dest(c
, instr
->dest
.dest
);
891 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
896 /* General case: We can just grab the one used channel per src. */
897 struct qreg src
[nir_op_infos
[instr
->op
].num_inputs
];
898 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++) {
899 src
[i
] = ntq_get_alu_src(c
, instr
, i
);
902 /* Pick the channel to store the output in. */
903 assert(!instr
->dest
.saturate
);
904 struct qreg
*dest
= ntq_get_dest(c
, instr
->dest
.dest
);
905 assert(util_is_power_of_two(instr
->dest
.write_mask
));
906 dest
+= ffs(instr
->dest
.write_mask
) - 1;
911 *dest
= qir_MOV(c
, src
[0]);
914 *dest
= qir_FMUL(c
, src
[0], src
[1]);
917 *dest
= qir_FADD(c
, src
[0], src
[1]);
920 *dest
= qir_FSUB(c
, src
[0], src
[1]);
923 *dest
= qir_FMIN(c
, src
[0], src
[1]);
926 *dest
= qir_FMAX(c
, src
[0], src
[1]);
931 *dest
= qir_FTOI(c
, src
[0]);
935 *dest
= qir_ITOF(c
, src
[0]);
938 *dest
= qir_AND(c
, src
[0], qir_uniform_f(c
, 1.0));
941 *dest
= qir_AND(c
, src
[0], qir_uniform_ui(c
, 1));
946 *dest
= qir_SEL_X_0_ZC(c
, qir_uniform_ui(c
, ~0));
950 *dest
= qir_ADD(c
, src
[0], src
[1]);
953 *dest
= qir_SHR(c
, src
[0], src
[1]);
956 *dest
= qir_SUB(c
, src
[0], src
[1]);
959 *dest
= qir_ASR(c
, src
[0], src
[1]);
962 *dest
= qir_SHL(c
, src
[0], src
[1]);
965 *dest
= qir_MIN(c
, src
[0], src
[1]);
968 *dest
= qir_MAX(c
, src
[0], src
[1]);
971 *dest
= qir_AND(c
, src
[0], src
[1]);
974 *dest
= qir_OR(c
, src
[0], src
[1]);
977 *dest
= qir_XOR(c
, src
[0], src
[1]);
980 *dest
= qir_NOT(c
, src
[0]);
984 *dest
= ntq_umul(c
, src
[0], src
[1]);
987 *dest
= ntq_idiv(c
, src
[0], src
[1]);
991 qir_SF(c
, qir_FSUB(c
, src
[0], src
[1]));
992 *dest
= qir_SEL_X_0_ZS(c
, qir_uniform_f(c
, 1.0));
995 qir_SF(c
, qir_FSUB(c
, src
[0], src
[1]));
996 *dest
= qir_SEL_X_0_ZC(c
, qir_uniform_f(c
, 1.0));
999 qir_SF(c
, qir_FSUB(c
, src
[0], src
[1]));
1000 *dest
= qir_SEL_X_0_NC(c
, qir_uniform_f(c
, 1.0));
1003 qir_SF(c
, qir_FSUB(c
, src
[0], src
[1]));
1004 *dest
= qir_SEL_X_0_NS(c
, qir_uniform_f(c
, 1.0));
1007 qir_SF(c
, qir_FSUB(c
, src
[0], src
[1]));
1008 *dest
= qir_SEL_X_0_ZS(c
, qir_uniform_ui(c
, ~0));
1011 qir_SF(c
, qir_FSUB(c
, src
[0], src
[1]));
1012 *dest
= qir_SEL_X_0_ZC(c
, qir_uniform_ui(c
, ~0));
1015 qir_SF(c
, qir_FSUB(c
, src
[0], src
[1]));
1016 *dest
= qir_SEL_X_0_NC(c
, qir_uniform_ui(c
, ~0));
1019 qir_SF(c
, qir_FSUB(c
, src
[0], src
[1]));
1020 *dest
= qir_SEL_X_0_NS(c
, qir_uniform_ui(c
, ~0));
1023 qir_SF(c
, qir_SUB(c
, src
[0], src
[1]));
1024 *dest
= qir_SEL_X_0_ZS(c
, qir_uniform_ui(c
, ~0));
1027 qir_SF(c
, qir_SUB(c
, src
[0], src
[1]));
1028 *dest
= qir_SEL_X_0_ZC(c
, qir_uniform_ui(c
, ~0));
1031 qir_SF(c
, qir_SUB(c
, src
[0], src
[1]));
1032 *dest
= qir_SEL_X_0_NC(c
, qir_uniform_ui(c
, ~0));
1035 qir_SF(c
, qir_SUB(c
, src
[0], src
[1]));
1036 *dest
= qir_SEL_X_0_NS(c
, qir_uniform_ui(c
, ~0));
1041 *dest
= qir_SEL_X_Y_NS(c
, src
[1], src
[2]);
1045 *dest
= qir_SEL_X_Y_ZC(c
, src
[1], src
[2]);
1049 *dest
= ntq_rcp(c
, src
[0]);
1052 *dest
= ntq_rsq(c
, src
[0]);
1055 *dest
= qir_EXP2(c
, src
[0]);
1058 *dest
= qir_LOG2(c
, src
[0]);
1062 *dest
= qir_ITOF(c
, qir_FTOI(c
, src
[0]));
1065 *dest
= ntq_fceil(c
, src
[0]);
1068 *dest
= ntq_ffract(c
, src
[0]);
1071 *dest
= ntq_ffloor(c
, src
[0]);
1075 *dest
= ntq_fsin(c
, src
[0]);
1078 *dest
= ntq_fcos(c
, src
[0]);
1082 *dest
= ntq_fsign(c
, src
[0]);
1086 *dest
= qir_FMAXABS(c
, src
[0], src
[0]);
1089 *dest
= qir_MAX(c
, src
[0],
1090 qir_SUB(c
, qir_uniform_ui(c
, 0), src
[0]));
1094 fprintf(stderr
, "unknown NIR ALU inst: ");
1095 nir_print_instr(&instr
->instr
, stderr
);
1096 fprintf(stderr
, "\n");
1102 vc4_blend_channel(struct vc4_compile
*c
,
1110 case PIPE_BLENDFACTOR_ONE
:
1112 case PIPE_BLENDFACTOR_SRC_COLOR
:
1113 return qir_FMUL(c
, val
, src
[channel
]);
1114 case PIPE_BLENDFACTOR_SRC_ALPHA
:
1115 return qir_FMUL(c
, val
, src
[3]);
1116 case PIPE_BLENDFACTOR_DST_ALPHA
:
1117 return qir_FMUL(c
, val
, dst
[3]);
1118 case PIPE_BLENDFACTOR_DST_COLOR
:
1119 return qir_FMUL(c
, val
, dst
[channel
]);
1120 case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE
:
1127 qir_uniform_f(c
, 1.0),
1132 case PIPE_BLENDFACTOR_CONST_COLOR
:
1133 return qir_FMUL(c
, val
,
1134 qir_uniform(c
, QUNIFORM_BLEND_CONST_COLOR
,
1136 case PIPE_BLENDFACTOR_CONST_ALPHA
:
1137 return qir_FMUL(c
, val
,
1138 qir_uniform(c
, QUNIFORM_BLEND_CONST_COLOR
, 3));
1139 case PIPE_BLENDFACTOR_ZERO
:
1140 return qir_uniform_f(c
, 0.0);
1141 case PIPE_BLENDFACTOR_INV_SRC_COLOR
:
1142 return qir_FMUL(c
, val
, qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1144 case PIPE_BLENDFACTOR_INV_SRC_ALPHA
:
1145 return qir_FMUL(c
, val
, qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1147 case PIPE_BLENDFACTOR_INV_DST_ALPHA
:
1148 return qir_FMUL(c
, val
, qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1150 case PIPE_BLENDFACTOR_INV_DST_COLOR
:
1151 return qir_FMUL(c
, val
, qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1153 case PIPE_BLENDFACTOR_INV_CONST_COLOR
:
1154 return qir_FMUL(c
, val
,
1155 qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1157 QUNIFORM_BLEND_CONST_COLOR
,
1159 case PIPE_BLENDFACTOR_INV_CONST_ALPHA
:
1160 return qir_FMUL(c
, val
,
1161 qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1163 QUNIFORM_BLEND_CONST_COLOR
,
1167 case PIPE_BLENDFACTOR_SRC1_COLOR
:
1168 case PIPE_BLENDFACTOR_SRC1_ALPHA
:
1169 case PIPE_BLENDFACTOR_INV_SRC1_COLOR
:
1170 case PIPE_BLENDFACTOR_INV_SRC1_ALPHA
:
1172 fprintf(stderr
, "Unknown blend factor %d\n", factor
);
static struct qreg
vc4_blend_func(struct vc4_compile *c,
               struct qreg src, struct qreg dst,
               unsigned func)
{
        switch (func) {
        case PIPE_BLEND_ADD:
                return qir_FADD(c, src, dst);
        case PIPE_BLEND_SUBTRACT:
                return qir_FSUB(c, src, dst);
        case PIPE_BLEND_REVERSE_SUBTRACT:
                return qir_FSUB(c, dst, src);
        case PIPE_BLEND_MIN:
                return qir_FMIN(c, src, dst);
        case PIPE_BLEND_MAX:
                return qir_FMAX(c, src, dst);

        default:
                /* Unsupported. */
                fprintf(stderr, "Unknown blend func %d\n", func);
                return src;
        }
}
1203 * Implements fixed function blending in shader code.
1205 * VC4 doesn't have any hardware support for blending. Instead, you read the
1206 * current contents of the destination from the tile buffer after having
1207 * waited for the scoreboard (which is handled by vc4_qpu_emit.c), then do
1208 * math using your output color and that destination value, and update the
1209 * output color appropriately.
1212 vc4_blend(struct vc4_compile
*c
, struct qreg
*result
,
1213 struct qreg
*dst_color
, struct qreg
*src_color
)
1215 struct pipe_rt_blend_state
*blend
= &c
->fs_key
->blend
;
1217 if (!blend
->blend_enable
) {
1218 for (int i
= 0; i
< 4; i
++)
1219 result
[i
] = src_color
[i
];
1223 struct qreg clamped_src
[4];
1224 struct qreg clamped_dst
[4];
1225 for (int i
= 0; i
< 4; i
++) {
1226 clamped_src
[i
] = qir_SAT(c
, src_color
[i
]);
1227 clamped_dst
[i
] = qir_SAT(c
, dst_color
[i
]);
1229 src_color
= clamped_src
;
1230 dst_color
= clamped_dst
;
1232 struct qreg src_blend
[4], dst_blend
[4];
1233 for (int i
= 0; i
< 3; i
++) {
1234 src_blend
[i
] = vc4_blend_channel(c
,
1235 dst_color
, src_color
,
1237 blend
->rgb_src_factor
, i
);
1238 dst_blend
[i
] = vc4_blend_channel(c
,
1239 dst_color
, src_color
,
1241 blend
->rgb_dst_factor
, i
);
1243 src_blend
[3] = vc4_blend_channel(c
,
1244 dst_color
, src_color
,
1246 blend
->alpha_src_factor
, 3);
1247 dst_blend
[3] = vc4_blend_channel(c
,
1248 dst_color
, src_color
,
1250 blend
->alpha_dst_factor
, 3);
1252 for (int i
= 0; i
< 3; i
++) {
1253 result
[i
] = vc4_blend_func(c
,
1254 src_blend
[i
], dst_blend
[i
],
1257 result
[3] = vc4_blend_func(c
,
1258 src_blend
[3], dst_blend
[3],
static void
clip_distance_discard(struct vc4_compile *c)
{
        for (int i = 0; i < PIPE_MAX_CLIP_PLANES; i++) {
                if (!(c->key->ucp_enables & (1 << i)))
                        continue;

                struct qreg dist = emit_fragment_varying(c,
                                                         TGSI_SEMANTIC_CLIPDIST,
                                                         i,
                                                         TGSI_SWIZZLE_X);

                qir_SF(c, dist);

                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_ui(c, 0);

                c->discard = qir_SEL_X_Y_NS(c, qir_uniform_ui(c, ~0),
                                            c->discard);
        }
}
1285 alpha_test_discard(struct vc4_compile
*c
)
1287 struct qreg src_alpha
;
1288 struct qreg alpha_ref
= qir_uniform(c
, QUNIFORM_ALPHA_REF
, 0);
1290 if (!c
->fs_key
->alpha_test
)
1293 if (c
->output_color_index
!= -1)
1294 src_alpha
= c
->outputs
[c
->output_color_index
+ 3];
1296 src_alpha
= qir_uniform_f(c
, 1.0);
1298 if (c
->discard
.file
== QFILE_NULL
)
1299 c
->discard
= qir_uniform_ui(c
, 0);
1301 switch (c
->fs_key
->alpha_test_func
) {
1302 case PIPE_FUNC_NEVER
:
1303 c
->discard
= qir_uniform_ui(c
, ~0);
1305 case PIPE_FUNC_ALWAYS
:
1307 case PIPE_FUNC_EQUAL
:
1308 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1309 c
->discard
= qir_SEL_X_Y_ZS(c
, c
->discard
,
1310 qir_uniform_ui(c
, ~0));
1312 case PIPE_FUNC_NOTEQUAL
:
1313 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1314 c
->discard
= qir_SEL_X_Y_ZC(c
, c
->discard
,
1315 qir_uniform_ui(c
, ~0));
1317 case PIPE_FUNC_GREATER
:
1318 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1319 c
->discard
= qir_SEL_X_Y_NC(c
, c
->discard
,
1320 qir_uniform_ui(c
, ~0));
1322 case PIPE_FUNC_GEQUAL
:
1323 qir_SF(c
, qir_FSUB(c
, alpha_ref
, src_alpha
));
1324 c
->discard
= qir_SEL_X_Y_NS(c
, c
->discard
,
1325 qir_uniform_ui(c
, ~0));
1327 case PIPE_FUNC_LESS
:
1328 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1329 c
->discard
= qir_SEL_X_Y_NS(c
, c
->discard
,
1330 qir_uniform_ui(c
, ~0));
1332 case PIPE_FUNC_LEQUAL
:
1333 qir_SF(c
, qir_FSUB(c
, alpha_ref
, src_alpha
));
1334 c
->discard
= qir_SEL_X_Y_NC(c
, c
->discard
,
1335 qir_uniform_ui(c
, ~0));
static struct qreg
vc4_logicop(struct vc4_compile *c, struct qreg src, struct qreg dst)
{
        switch (c->fs_key->logicop_func) {
        case PIPE_LOGICOP_CLEAR:
                return qir_uniform_f(c, 0.0);
        case PIPE_LOGICOP_NOR:
                return qir_NOT(c, qir_OR(c, src, dst));
        case PIPE_LOGICOP_AND_INVERTED:
                return qir_AND(c, qir_NOT(c, src), dst);
        case PIPE_LOGICOP_COPY_INVERTED:
                return qir_NOT(c, src);
        case PIPE_LOGICOP_AND_REVERSE:
                return qir_AND(c, src, qir_NOT(c, dst));
        case PIPE_LOGICOP_INVERT:
                return qir_NOT(c, dst);
        case PIPE_LOGICOP_XOR:
                return qir_XOR(c, src, dst);
        case PIPE_LOGICOP_NAND:
                return qir_NOT(c, qir_AND(c, src, dst));
        case PIPE_LOGICOP_AND:
                return qir_AND(c, src, dst);
        case PIPE_LOGICOP_EQUIV:
                return qir_NOT(c, qir_XOR(c, src, dst));
        case PIPE_LOGICOP_NOOP:
                return dst;
        case PIPE_LOGICOP_OR_INVERTED:
                return qir_OR(c, qir_NOT(c, src), dst);
        case PIPE_LOGICOP_OR_REVERSE:
                return qir_OR(c, src, qir_NOT(c, dst));
        case PIPE_LOGICOP_OR:
                return qir_OR(c, src, dst);
        case PIPE_LOGICOP_SET:
                return qir_uniform_ui(c, ~0);
        case PIPE_LOGICOP_COPY:
        default:
                return src;
        }
}
1381 emit_frag_end(struct vc4_compile
*c
)
1383 clip_distance_discard(c
);
1384 alpha_test_discard(c
);
1386 enum pipe_format color_format
= c
->fs_key
->color_format
;
1387 const uint8_t *format_swiz
= vc4_get_format_swizzle(color_format
);
1388 struct qreg tlb_read_color
[4] = { c
->undef
, c
->undef
, c
->undef
, c
->undef
};
1389 struct qreg dst_color
[4] = { c
->undef
, c
->undef
, c
->undef
, c
->undef
};
1390 struct qreg linear_dst_color
[4] = { c
->undef
, c
->undef
, c
->undef
, c
->undef
};
1391 struct qreg packed_dst_color
= c
->undef
;
1393 if (c
->fs_key
->blend
.blend_enable
||
1394 c
->fs_key
->blend
.colormask
!= 0xf ||
1395 c
->fs_key
->logicop_func
!= PIPE_LOGICOP_COPY
) {
1396 struct qreg r4
= qir_TLB_COLOR_READ(c
);
1397 for (int i
= 0; i
< 4; i
++)
1398 tlb_read_color
[i
] = qir_R4_UNPACK(c
, r4
, i
);
1399 for (int i
= 0; i
< 4; i
++) {
1400 dst_color
[i
] = get_swizzled_channel(c
,
1403 if (util_format_is_srgb(color_format
) && i
!= 3) {
1404 linear_dst_color
[i
] =
1405 qir_srgb_decode(c
, dst_color
[i
]);
1407 linear_dst_color
[i
] = dst_color
[i
];
1411 /* Save the packed value for logic ops. Can't reuse r4
1412 * becuase other things might smash it (like sRGB)
1414 packed_dst_color
= qir_MOV(c
, r4
);
1417 struct qreg blend_color
[4];
1418 struct qreg undef_array
[4] = {
1419 c
->undef
, c
->undef
, c
->undef
, c
->undef
1421 vc4_blend(c
, blend_color
, linear_dst_color
,
1422 (c
->output_color_index
!= -1 ?
1423 c
->outputs
+ c
->output_color_index
:
1426 if (util_format_is_srgb(color_format
)) {
1427 for (int i
= 0; i
< 3; i
++)
1428 blend_color
[i
] = qir_srgb_encode(c
, blend_color
[i
]);
1431 /* Debug: Sometimes you're getting a black output and just want to see
1432 * if the FS is getting executed at all. Spam magenta into the color
1436 blend_color
[0] = qir_uniform_f(c
, 1.0);
1437 blend_color
[1] = qir_uniform_f(c
, 0.0);
1438 blend_color
[2] = qir_uniform_f(c
, 1.0);
1439 blend_color
[3] = qir_uniform_f(c
, 0.5);
1442 struct qreg swizzled_outputs
[4];
1443 for (int i
= 0; i
< 4; i
++) {
1444 swizzled_outputs
[i
] = get_swizzled_channel(c
, blend_color
,
1448 if (c
->discard
.file
!= QFILE_NULL
)
1449 qir_TLB_DISCARD_SETUP(c
, c
->discard
);
1451 if (c
->fs_key
->stencil_enabled
) {
1452 qir_TLB_STENCIL_SETUP(c
, qir_uniform(c
, QUNIFORM_STENCIL
, 0));
1453 if (c
->fs_key
->stencil_twoside
) {
1454 qir_TLB_STENCIL_SETUP(c
, qir_uniform(c
, QUNIFORM_STENCIL
, 1));
1456 if (c
->fs_key
->stencil_full_writemasks
) {
1457 qir_TLB_STENCIL_SETUP(c
, qir_uniform(c
, QUNIFORM_STENCIL
, 2));
1461 if (c
->fs_key
->depth_enabled
) {
1463 if (c
->output_position_index
!= -1) {
1464 z
= qir_FTOI(c
, qir_FMUL(c
, c
->outputs
[c
->output_position_index
+ 2],
1465 qir_uniform_f(c
, 0xffffff)));
1469 qir_TLB_Z_WRITE(c
, z
);
1472 struct qreg packed_color
= c
->undef
;
1473 for (int i
= 0; i
< 4; i
++) {
1474 if (swizzled_outputs
[i
].file
== QFILE_NULL
)
1476 if (packed_color
.file
== QFILE_NULL
) {
1477 packed_color
= qir_PACK_8888_F(c
, swizzled_outputs
[i
]);
1479 packed_color
= qir_PACK_8_F(c
,
1481 swizzled_outputs
[i
],
1486 if (packed_color
.file
== QFILE_NULL
)
1487 packed_color
= qir_uniform_ui(c
, 0);
1489 if (c
->fs_key
->logicop_func
!= PIPE_LOGICOP_COPY
) {
1490 packed_color
= vc4_logicop(c
, packed_color
, packed_dst_color
);
1493 /* If the bit isn't set in the color mask, then just return the
1494 * original dst color, instead.
1496 uint32_t colormask
= 0xffffffff;
1497 for (int i
= 0; i
< 4; i
++) {
1498 if (format_swiz
[i
] < 4 &&
1499 !(c
->fs_key
->blend
.colormask
& (1 << format_swiz
[i
]))) {
1500 colormask
&= ~(0xff << (i
* 8));
1503 if (colormask
!= 0xffffffff) {
1504 packed_color
= qir_OR(c
,
1505 qir_AND(c
, packed_color
,
1506 qir_uniform_ui(c
, colormask
)),
1507 qir_AND(c
, packed_dst_color
,
1508 qir_uniform_ui(c
, ~colormask
)));
1511 qir_emit(c
, qir_inst(QOP_TLB_COLOR_WRITE
, c
->undef
,
1512 packed_color
, c
->undef
));
static void
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg xyi[2];

        for (int i = 0; i < 2; i++) {
                struct qreg scale =
                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);

                xyi[i] = qir_FTOI(c, qir_FMUL(c,
                                              qir_FMUL(c,
                                                       c->outputs[c->output_position_index + i],
                                                       scale),
                                              rcp_w));
        }

        qir_VPM_WRITE(c, qir_PACK_SCALED(c, xyi[0], xyi[1]));
}
static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],
                                                          zscale),
                                              rcp_w),
                                  zoffset));
}
static void
emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
{
        qir_VPM_WRITE(c, rcp_w);
}
static void
emit_point_size_write(struct vc4_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index + 3];
        else
                point_size = qir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));

        qir_VPM_WRITE(c, point_size);
}
/**
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise. The simulator also
 * insists that all vertex attributes loaded get read by the VS/CS, so we have
 * to consume it here.
 */
static void
emit_stub_vpm_read(struct vc4_compile *c)
{
        if (c->num_inputs)
                return;

        c->vattr_sizes[0] = 4;
        struct qreg vpm = { QFILE_VPM, 0 };
        (void)qir_MOV(c, vpm);
        c->num_inputs++;
}
1592 emit_ucp_clipdistance(struct vc4_compile
*c
)
1595 if (c
->output_clipvertex_index
!= -1)
1596 cv
= c
->output_clipvertex_index
;
1597 else if (c
->output_position_index
!= -1)
1598 cv
= c
->output_position_index
;
1602 for (int plane
= 0; plane
< PIPE_MAX_CLIP_PLANES
; plane
++) {
1603 if (!(c
->key
->ucp_enables
& (1 << plane
)))
1606 /* Pick the next outputs[] that hasn't been written to, since
1607 * there are no other program writes left to be processed at
1608 * this point. If something had been declared but not written
1609 * (like a w component), we'll just smash over the top of it.
1611 uint32_t output_index
= c
->num_outputs
++;
1612 add_output(c
, output_index
,
1613 TGSI_SEMANTIC_CLIPDIST
,
1618 struct qreg dist
= qir_uniform_f(c
, 0.0);
1619 for (int i
= 0; i
< 4; i
++) {
1620 struct qreg pos_chan
= c
->outputs
[cv
+ i
];
1622 qir_uniform(c
, QUNIFORM_USER_CLIP_PLANE
,
1624 dist
= qir_FADD(c
, dist
, qir_FMUL(c
, pos_chan
, ucp
));
1627 c
->outputs
[output_index
] = dist
;
1632 emit_vert_end(struct vc4_compile
*c
,
1633 struct vc4_varying_semantic
*fs_inputs
,
1634 uint32_t num_fs_inputs
)
1636 struct qreg rcp_w
= qir_RCP(c
, c
->outputs
[c
->output_position_index
+ 3]);
1638 emit_stub_vpm_read(c
);
1639 emit_ucp_clipdistance(c
);
1641 emit_scaled_viewport_write(c
, rcp_w
);
1642 emit_zs_write(c
, rcp_w
);
1643 emit_rcp_wc_write(c
, rcp_w
);
1644 if (c
->vs_key
->per_vertex_point_size
)
1645 emit_point_size_write(c
);
1647 for (int i
= 0; i
< num_fs_inputs
; i
++) {
1648 struct vc4_varying_semantic
*input
= &fs_inputs
[i
];
1651 for (j
= 0; j
< c
->num_outputs
; j
++) {
1652 struct vc4_varying_semantic
*output
=
1653 &c
->output_semantics
[j
];
1655 if (input
->semantic
== output
->semantic
&&
1656 input
->index
== output
->index
&&
1657 input
->swizzle
== output
->swizzle
) {
1658 qir_VPM_WRITE(c
, c
->outputs
[j
]);
1662 /* Emit padding if we didn't find a declared VS output for
1665 if (j
== c
->num_outputs
)
1666 qir_VPM_WRITE(c
, qir_uniform_f(c
, 0.0));
static void
emit_coord_end(struct vc4_compile *c)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
}
static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                nir_lower_vars_to_ssa(s);
                nir_lower_alu_to_scalar(s);

                progress = nir_copy_prop(s) || progress;
                progress = nir_opt_dce(s) || progress;
                progress = nir_opt_cse(s) || progress;
                progress = nir_opt_peephole_select(s) || progress;
                progress = nir_opt_algebraic(s) || progress;
                progress = nir_opt_constant_folding(s) || progress;
        } while (progress);
}
static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}
1717 ntq_setup_inputs(struct vc4_compile
*c
)
1719 unsigned num_entries
= 0;
1720 foreach_list_typed(nir_variable
, var
, node
, &c
->s
->inputs
)
1723 nir_variable
*vars
[num_entries
];
1726 foreach_list_typed(nir_variable
, var
, node
, &c
->s
->inputs
)
1729 /* Sort the variables so that we emit the input setup in
1730 * driver_location order. This is required for VPM reads, whose data
1731 * is fetched into the VPM in driver_location (TGSI register index)
1734 qsort(&vars
, num_entries
, sizeof(*vars
), driver_location_compare
);
1736 for (unsigned i
= 0; i
< num_entries
; i
++) {
1737 nir_variable
*var
= vars
[i
];
1738 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1739 /* XXX: map loc slots to semantics */
1740 unsigned semantic_name
= var
->data
.location
;
1741 unsigned semantic_index
= var
->data
.index
;
1742 unsigned loc
= var
->data
.driver_location
;
1744 assert(array_len
== 1);
1745 resize_qreg_array(c
, &c
->inputs
, &c
->inputs_array_size
,
1748 if (c
->stage
== QSTAGE_FRAG
) {
1749 if (semantic_name
== TGSI_SEMANTIC_POSITION
) {
1750 emit_fragcoord_input(c
, loc
);
1751 } else if (semantic_name
== TGSI_SEMANTIC_FACE
) {
1752 emit_face_input(c
, loc
);
1753 } else if (semantic_name
== TGSI_SEMANTIC_GENERIC
&&
1754 (c
->fs_key
->point_sprite_mask
&
1755 (1 << semantic_index
))) {
1756 emit_point_coord_input(c
, loc
);
1758 emit_fragment_input(c
, loc
,
1763 emit_vertex_input(c
, loc
);
1769 ntq_setup_outputs(struct vc4_compile
*c
)
1771 foreach_list_typed(nir_variable
, var
, node
, &c
->s
->outputs
) {
1772 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1773 /* XXX: map loc slots to semantics */
1774 unsigned semantic_name
= var
->data
.location
;
1775 unsigned semantic_index
= var
->data
.index
;
1776 unsigned loc
= var
->data
.driver_location
* 4;
1778 assert(array_len
== 1);
1780 for (int i
= 0; i
< 4; i
++) {
1788 switch (semantic_name
) {
1789 case TGSI_SEMANTIC_POSITION
:
1790 c
->output_position_index
= loc
;
1792 case TGSI_SEMANTIC_CLIPVERTEX
:
1793 c
->output_clipvertex_index
= loc
;
1795 case TGSI_SEMANTIC_COLOR
:
1796 c
->output_color_index
= loc
;
1798 case TGSI_SEMANTIC_PSIZE
:
1799 c
->output_point_size_index
= loc
;
static void
ntq_setup_uniforms(struct vc4_compile *c)
{
        foreach_list_typed(nir_variable, var, node, &c->s->uniforms) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned array_elem_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * array_elem_size,
                                      array_len * array_elem_size);
        }
}
/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = qir_uniform_ui(c, 0);
        }
}
static void
ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          instr->def.num_components);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, instr->value.u[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
1852 ntq_emit_intrinsic(struct vc4_compile
*c
, nir_intrinsic_instr
*instr
)
1854 const nir_intrinsic_info
*info
= &nir_intrinsic_infos
[instr
->intrinsic
];
1855 struct qreg
*dest
= NULL
;
1857 if (info
->has_dest
) {
1858 dest
= ntq_get_dest(c
, instr
->dest
);
1861 switch (instr
->intrinsic
) {
1862 case nir_intrinsic_load_uniform
:
1863 assert(instr
->const_index
[1] == 1);
1865 for (int i
= 0; i
< instr
->num_components
; i
++) {
1866 dest
[i
] = qir_uniform(c
, QUNIFORM_UNIFORM
,
1867 instr
->const_index
[0] * 4 + i
);
1871 case nir_intrinsic_load_uniform_indirect
:
1872 assert(instr
->const_index
[1] == 1);
1874 for (int i
= 0; i
< instr
->num_components
; i
++) {
1875 dest
[i
] = indirect_uniform_load(c
,
1876 ntq_get_src(c
, instr
->src
[0], 0),
1877 (instr
->const_index
[0] *
1878 4 + i
) * sizeof(float));
1883 case nir_intrinsic_load_input
:
1884 assert(instr
->const_index
[1] == 1);
1886 for (int i
= 0; i
< instr
->num_components
; i
++)
1887 dest
[i
] = c
->inputs
[instr
->const_index
[0] * 4 + i
];
1891 case nir_intrinsic_store_output
:
1892 for (int i
= 0; i
< instr
->num_components
; i
++) {
1893 c
->outputs
[instr
->const_index
[0] * 4 + i
] =
1894 qir_MOV(c
, ntq_get_src(c
, instr
->src
[0], i
));
1896 c
->num_outputs
= MAX2(c
->num_outputs
,
1897 instr
->const_index
[0] * 4 +
1898 instr
->num_components
+ 1);
1901 case nir_intrinsic_discard
:
1902 c
->discard
= qir_uniform_ui(c
, ~0);
1905 case nir_intrinsic_discard_if
:
1906 if (c
->discard
.file
== QFILE_NULL
)
1907 c
->discard
= qir_uniform_ui(c
, 0);
1908 c
->discard
= qir_OR(c
, c
->discard
,
1909 ntq_get_src(c
, instr
->src
[0], 0));
1913 fprintf(stderr
, "Unknown intrinsic: ");
1914 nir_print_instr(&instr
->instr
, stderr
);
1915 fprintf(stderr
, "\n");
static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        fprintf(stderr, "general IF statements not handled.\n");
}
static void
ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
        }
}
static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
        nir_foreach_instr(block, instr) {
                ntq_emit_instr(c, instr);
        }
}
static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                /* case nir_cf_node_loop: */
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;
                }
        }
}
static void
ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}
static void
nir_to_qir(struct vc4_compile *c)
{
        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_overload(c->s, overload) {
                assert(strcmp(overload->function->name, "main") == 0);
                assert(overload->impl);
                ntq_emit_impl(c, overload->impl);
        }
}
static const nir_shader_compiler_options nir_options = {
        .lower_fsqrt = true,
        .lower_negate = true,
};
static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
        int *count = (int *) state;
        nir_foreach_instr(block, instr) {
                *count = *count + 1;
        }
        return true;
}

static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_overload(nir, overload) {
                if (!overload->impl)
                        continue;
                nir_foreach_block(overload->impl, count_nir_instrs_in_block, &count);
        }
        return count;
}
2036 static struct vc4_compile
*
2037 vc4_shader_ntq(struct vc4_context
*vc4
, enum qstage stage
,
2038 struct vc4_key
*key
)
2040 struct vc4_compile
*c
= qir_compile_init();
2043 c
->shader_state
= &key
->shader_state
->base
;
2044 c
->program_id
= key
->shader_state
->program_id
;
2045 c
->variant_id
= key
->shader_state
->compiled_variant_count
++;
2050 c
->fs_key
= (struct vc4_fs_key
*)key
;
2051 if (c
->fs_key
->is_points
) {
2052 c
->point_x
= emit_fragment_varying(c
, ~0, ~0, 0);
2053 c
->point_y
= emit_fragment_varying(c
, ~0, ~0, 0);
2054 } else if (c
->fs_key
->is_lines
) {
2055 c
->line_x
= emit_fragment_varying(c
, ~0, ~0, 0);
2059 c
->vs_key
= (struct vc4_vs_key
*)key
;
2062 c
->vs_key
= (struct vc4_vs_key
*)key
;
2066 const struct tgsi_token
*tokens
= key
->shader_state
->base
.tokens
;
2067 if (c
->fs_key
&& c
->fs_key
->light_twoside
) {
2068 if (!key
->shader_state
->twoside_tokens
) {
2069 const struct tgsi_lowering_config lowering_config
= {
2070 .color_two_side
= true,
2072 struct tgsi_shader_info info
;
2073 key
->shader_state
->twoside_tokens
=
2074 tgsi_transform_lowering(&lowering_config
,
2075 key
->shader_state
->base
.tokens
,
2078 /* If no transformation occurred, then NULL is
2079 * returned and we just use our original tokens.
2081 if (!key
->shader_state
->twoside_tokens
) {
2082 key
->shader_state
->twoside_tokens
=
2083 key
->shader_state
->base
.tokens
;
2086 tokens
= key
->shader_state
->twoside_tokens
;
2089 if (vc4_debug
& VC4_DEBUG_TGSI
) {
2090 fprintf(stderr
, "%s prog %d/%d TGSI:\n",
2091 qir_get_stage_name(c
->stage
),
2092 c
->program_id
, c
->variant_id
);
2093 tgsi_dump(tokens
, 0);
2096 c
->s
= tgsi_to_nir(tokens
, &nir_options
);
2097 nir_opt_global_to_local(c
->s
);
2098 nir_convert_to_ssa(c
->s
);
2100 vc4_optimize_nir(c
->s
);
2102 nir_remove_dead_variables(c
->s
);
2104 nir_convert_from_ssa(c
->s
);
2106 if (vc4_debug
& VC4_DEBUG_SHADERDB
) {
2107 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
2108 qir_get_stage_name(c
->stage
),
2109 c
->program_id
, c
->variant_id
,
2110 count_nir_instrs(c
->s
));
2113 if (vc4_debug
& VC4_DEBUG_NIR
) {
2114 fprintf(stderr
, "%s prog %d/%d NIR:\n",
2115 qir_get_stage_name(c
->stage
),
2116 c
->program_id
, c
->variant_id
);
2117 nir_print_shader(c
->s
, stderr
);
2128 vc4
->prog
.fs
->input_semantics
,
2129 vc4
->prog
.fs
->num_inputs
);
2136 if (vc4_debug
& VC4_DEBUG_QIR
) {
2137 fprintf(stderr
, "%s prog %d/%d pre-opt QIR:\n",
2138 qir_get_stage_name(c
->stage
),
2139 c
->program_id
, c
->variant_id
);
2144 qir_lower_uniforms(c
);
2146 if (vc4_debug
& VC4_DEBUG_QIR
) {
2147 fprintf(stderr
, "%s prog %d/%d QIR:\n",
2148 qir_get_stage_name(c
->stage
),
2149 c
->program_id
, c
->variant_id
);
2152 qir_reorder_uniforms(c
);
2153 vc4_generate_code(vc4
, c
);
2155 if (vc4_debug
& VC4_DEBUG_SHADERDB
) {
2156 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d instructions\n",
2157 qir_get_stage_name(c
->stage
),
2158 c
->program_id
, c
->variant_id
,
2160 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
2161 qir_get_stage_name(c
->stage
),
2162 c
->program_id
, c
->variant_id
,
static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->base.tokens = tgsi_dup_tokens(cso->tokens);
        so->program_id = vc4->next_uncompiled_program_id++;

        return so;
}
static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;
}
2203 static struct vc4_compiled_shader
*
2204 vc4_get_compiled_shader(struct vc4_context
*vc4
, enum qstage stage
,
2205 struct vc4_key
*key
)
2207 struct hash_table
*ht
;
2209 if (stage
== QSTAGE_FRAG
) {
2211 key_size
= sizeof(struct vc4_fs_key
);
2214 key_size
= sizeof(struct vc4_vs_key
);
2217 struct vc4_compiled_shader
*shader
;
2218 struct hash_entry
*entry
= _mesa_hash_table_search(ht
, key
);
2222 struct vc4_compile
*c
= vc4_shader_ntq(vc4
, stage
, key
);
2223 shader
= rzalloc(NULL
, struct vc4_compiled_shader
);
2225 shader
->program_id
= vc4
->next_compiled_program_id
++;
2226 if (stage
== QSTAGE_FRAG
) {
2227 bool input_live
[c
->num_input_semantics
];
2228 struct simple_node
*node
;
2230 memset(input_live
, 0, sizeof(input_live
));
2231 foreach(node
, &c
->instructions
) {
2232 struct qinst
*inst
= (struct qinst
*)node
;
2233 for (int i
= 0; i
< qir_get_op_nsrc(inst
->op
); i
++) {
2234 if (inst
->src
[i
].file
== QFILE_VARY
)
2235 input_live
[inst
->src
[i
].index
] = true;
2239 shader
->input_semantics
= ralloc_array(shader
,
2240 struct vc4_varying_semantic
,
2241 c
->num_input_semantics
);
2243 for (int i
= 0; i
< c
->num_input_semantics
; i
++) {
2244 struct vc4_varying_semantic
*sem
= &c
->input_semantics
[i
];
2249 /* Skip non-VS-output inputs. */
2250 if (sem
->semantic
== (uint8_t)~0)
2253 if (sem
->semantic
== TGSI_SEMANTIC_COLOR
||
2254 sem
->semantic
== TGSI_SEMANTIC_BCOLOR
) {
2255 shader
->color_inputs
|= (1 << shader
->num_inputs
);
2258 shader
->input_semantics
[shader
->num_inputs
] = *sem
;
2259 shader
->num_inputs
++;
2262 shader
->num_inputs
= c
->num_inputs
;
2264 shader
->vattr_offsets
[0] = 0;
2265 for (int i
= 0; i
< 8; i
++) {
2266 shader
->vattr_offsets
[i
+ 1] =
2267 shader
->vattr_offsets
[i
] + c
->vattr_sizes
[i
];
2269 if (c
->vattr_sizes
[i
])
2270 shader
->vattrs_live
|= (1 << i
);
2274 copy_uniform_state_to_shader(shader
, c
);
2275 shader
->bo
= vc4_bo_alloc_mem(vc4
->screen
, c
->qpu_insts
,
2276 c
->qpu_inst_count
* sizeof(uint64_t),
2279 /* Copy the compiler UBO range state to the compiled shader, dropping
2280 * out arrays that were never referenced by an indirect load.
2282 * (Note that QIR dead code elimination of an array access still
2283 * leaves that array alive, though)
2285 if (c
->num_ubo_ranges
) {
2286 shader
->num_ubo_ranges
= c
->num_ubo_ranges
;
2287 shader
->ubo_ranges
= ralloc_array(shader
, struct vc4_ubo_range
,
2290 for (int i
= 0; i
< c
->num_uniform_ranges
; i
++) {
2291 struct vc4_compiler_ubo_range
*range
=
2296 shader
->ubo_ranges
[j
].dst_offset
= range
->dst_offset
;
2297 shader
->ubo_ranges
[j
].src_offset
= range
->src_offset
;
2298 shader
->ubo_ranges
[j
].size
= range
->size
;
2299 shader
->ubo_size
+= c
->ubo_ranges
[i
].size
;
2303 if (shader
->ubo_size
) {
2304 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
2305 qir_get_stage_name(c
->stage
),
2306 c
->program_id
, c
->variant_id
,
2307 shader
->ubo_size
/ 4);
2310 qir_compile_destroy(c
);
2312 struct vc4_key
*dup_key
;
2313 dup_key
= ralloc_size(shader
, key_size
);
2314 memcpy(dup_key
, key
, key_size
);
2315 _mesa_hash_table_insert(ht
, dup_key
, shader
);
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (sampler) {
                        key->tex[i].format = sampler->format;
                        key->tex[i].swizzle[0] = sampler->swizzle_r;
                        key->tex[i].swizzle[1] = sampler->swizzle_g;
                        key->tex[i].swizzle[2] = sampler->swizzle_b;
                        key->tex[i].swizzle[3] = sampler->swizzle_a;
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}

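/* Build the fragment shader key from current state and fetch (or compile)
 * the matching variant.  Every piece of state folded into the key needs a
 * corresponding flag in the dirty check below, or a stale variant could be
 * reused.
 */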
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}

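/* The vertex side builds one vc4_vs_key but compiles it twice: once as the
 * full vertex shader (QSTAGE_VERT) and once with is_coord set as the
 * coordinate shader (QSTAGE_COORD) used by the binning pass.
 */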
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        vc4->prog.vs = vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        key->is_coord = true;
        vc4->prog.cs = vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
}

void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}

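/* The variant caches hash and compare the raw key structs, which is why
 * vc4_update_compiled_fs()/_vs() memset() their keys before filling them
 * in: uninitialized padding would make otherwise-equal keys miss.
 */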
static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

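/* When an uncompiled shader CSO is deleted, every compiled variant that was
 * keyed off of it has to be dropped from the caches along with its BO.
 */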
static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}

static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        if (so->twoside_tokens != so->base.tokens)
                free((void *)so->twoside_tokens);
        free((void *)so->base.tokens);
        free(so);
}

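/* Translate a pipe_tex_wrap mode to the texture P1 wrap field.  Legacy
 * GL_CLAMP behaves like clamp-to-edge with nearest filtering and like
 * clamp-to-border with linear filtering, hence the using_nearest argument.
 */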
static uint32_t translate_wrap(uint32_t p_wrap, bool using_nearest)
{
        switch (p_wrap) {
        case PIPE_TEX_WRAP_REPEAT:
                return 0;
        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return 1;
        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return 2;
        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return 3;
        case PIPE_TEX_WRAP_CLAMP:
                return (using_nearest ? 1 : 3);
        default:
                fprintf(stderr, "Unknown wrap mode %d\n", p_wrap);
                assert(!"not reached");
                return 0;
        }
}

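/* Texture config parameter 0: the texture's base offset (as a reloc against
 * its BO), mipmap level count, cube-map mode, and the low bits of the vc4
 * texture type.
 */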
static void
write_texture_p0(struct vc4_context *vc4,
                 struct vc4_texture_stateobj *texstate,
                 uint32_t unit)
{
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);

        cl_reloc(vc4, &vc4->uniforms, rsc->bo,
                 VC4_SET_FIELD(rsc->slices[0].offset >> 12, VC4_TEX_P0_OFFSET) |
                 VC4_SET_FIELD(texture->u.tex.last_level -
                               texture->u.tex.first_level, VC4_TEX_P0_MIPLVLS) |
                 VC4_SET_FIELD(texture->target == PIPE_TEXTURE_CUBE,
                               VC4_TEX_P0_CMMODE) |
                 VC4_SET_FIELD(rsc->vc4_format & 15, VC4_TEX_P0_TYPE));
}

static void
write_texture_p1(struct vc4_context *vc4,
                 struct vc4_texture_stateobj *texstate,
                 uint32_t unit)
{
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);
        struct pipe_sampler_state *sampler = texstate->samplers[unit];
        static const uint8_t minfilter_map[6] = {
                VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR,
                VC4_TEX_P1_MINFILT_LIN_MIP_NEAR,
                VC4_TEX_P1_MINFILT_NEAR_MIP_LIN,
                VC4_TEX_P1_MINFILT_LIN_MIP_LIN,
                VC4_TEX_P1_MINFILT_NEAREST,
                VC4_TEX_P1_MINFILT_LINEAR,
        };
        static const uint32_t magfilter_map[] = {
                [PIPE_TEX_FILTER_NEAREST] = VC4_TEX_P1_MAGFILT_NEAREST,
                [PIPE_TEX_FILTER_LINEAR] = VC4_TEX_P1_MAGFILT_LINEAR,
        };

        bool either_nearest =
                (sampler->mag_img_filter == PIPE_TEX_MIPFILTER_NEAREST ||
                 sampler->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST);

        cl_aligned_u32(&vc4->uniforms,
                       VC4_SET_FIELD(rsc->vc4_format >> 4, VC4_TEX_P1_TYPE4) |
                       VC4_SET_FIELD(texture->texture->height0 & 2047,
                                     VC4_TEX_P1_HEIGHT) |
                       VC4_SET_FIELD(texture->texture->width0 & 2047,
                                     VC4_TEX_P1_WIDTH) |
                       VC4_SET_FIELD(magfilter_map[sampler->mag_img_filter],
                                     VC4_TEX_P1_MAGFILT) |
                       VC4_SET_FIELD(minfilter_map[sampler->min_mip_filter * 2 +
                                                   sampler->min_img_filter],
                                     VC4_TEX_P1_MINFILT) |
                       VC4_SET_FIELD(translate_wrap(sampler->wrap_s, either_nearest),
                                     VC4_TEX_P1_WRAP_S) |
                       VC4_SET_FIELD(translate_wrap(sampler->wrap_t, either_nearest),
                                     VC4_TEX_P1_WRAP_T));
}

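/* Texture config parameter 2 carries the cube map stride; the sampler unit
 * is packed into the low 16 bits of "data" and bit 16 selects explicit-LOD
 * (BSLOD) sampling.
 */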
static void
write_texture_p2(struct vc4_context *vc4,
                 struct vc4_texture_stateobj *texstate,
                 uint32_t data)
{
        uint32_t unit = data & 0xffff;
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);

        cl_aligned_u32(&vc4->uniforms,
                       VC4_SET_FIELD(VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE,
                                     VC4_TEX_P2_PTYPE) |
                       VC4_SET_FIELD(rsc->cube_map_stride >> 12, VC4_TEX_P2_CMST) |
                       VC4_SET_FIELD((data >> 16) & 1, VC4_TEX_P2_BSLOD));
}

#define SWIZ(x,y,z,w) {          \
        UTIL_FORMAT_SWIZZLE_##x, \
        UTIL_FORMAT_SWIZZLE_##y, \
        UTIL_FORMAT_SWIZZLE_##z, \
        UTIL_FORMAT_SWIZZLE_##w  \
}

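/* The border color is stored as if it were texel data, so it has to be
 * converted into the texture's own layout: sRGB-encode if needed, undo the
 * format swizzle, then pack to the resource's vc4 texture type.
 */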
static void
write_texture_border_color(struct vc4_context *vc4,
                           struct vc4_texture_stateobj *texstate,
                           uint32_t unit)
{
        struct pipe_sampler_state *sampler = texstate->samplers[unit];
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);
        union util_color uc;

        const struct util_format_description *tex_format_desc =
                util_format_description(texture->format);

        float border_color[4];
        for (int i = 0; i < 4; i++)
                border_color[i] = sampler->border_color.f[i];
        if (util_format_is_srgb(texture->format)) {
                for (int i = 0; i < 3; i++)
                        border_color[i] =
                                util_format_linear_to_srgb_float(border_color[i]);
        }

        /* Turn the border color into the layout of channels that it would
         * have when stored as texture contents.
         */
        float storage_color[4];
        util_format_unswizzle_4f(storage_color,
                                 border_color,
                                 tex_format_desc->swizzle);

        /* Now, pack so that when the vc4_format-sampled texture contents are
         * replaced with our border color, the vc4_get_format_swizzle()
         * swizzling will get the right channels.
         */
        if (util_format_is_depth_or_stencil(texture->format)) {
                uc.ui[0] = util_pack_z(PIPE_FORMAT_Z24X8_UNORM,
                                       sampler->border_color.f[0]) << 8;
        } else {
                switch (rsc->vc4_format) {
                default:
                case VC4_TEXTURE_TYPE_RGBA8888:
                        util_pack_color(storage_color,
                                        PIPE_FORMAT_R8G8B8A8_UNORM, &uc);
                        break;
                case VC4_TEXTURE_TYPE_RGBA4444:
                        util_pack_color(storage_color,
                                        PIPE_FORMAT_A8B8G8R8_UNORM, &uc);
                        break;
                case VC4_TEXTURE_TYPE_RGB565:
                        util_pack_color(storage_color,
                                        PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
                        break;
                case VC4_TEXTURE_TYPE_ALPHA:
                        uc.ui[0] = float_to_ubyte(storage_color[0]) << 24;
                        break;
                case VC4_TEXTURE_TYPE_LUMALPHA:
                        uc.ui[0] = ((float_to_ubyte(storage_color[1]) << 24) |
                                    (float_to_ubyte(storage_color[0]) << 0));
                        break;
                }
        }

        cl_aligned_u32(&vc4->uniforms, uc.ui[0]);
}

static uint32_t
get_texrect_scale(struct vc4_texture_stateobj *texstate,
                  enum quniform_contents contents,
                  uint32_t data)
{
        struct pipe_sampler_view *texture = texstate->textures[data];
        uint32_t dim;

        if (contents == QUNIFORM_TEXRECT_SCALE_X)
                dim = texture->texture->width0;
        else
                dim = texture->texture->height0;

        return fui(1.0f / dim);
}

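/* Uniform ranges that the shader addresses indirectly are copied out of the
 * gallium constant buffer into their own BO, which the shader then reads
 * through QUNIFORM_UBO_ADDR plus a computed offset.
 */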
static struct vc4_bo *
vc4_upload_ubo(struct vc4_context *vc4, struct vc4_compiled_shader *shader,
               const uint32_t *gallium_uniforms)
{
        if (!shader->ubo_size)
                return NULL;

        struct vc4_bo *ubo = vc4_bo_alloc(vc4->screen, shader->ubo_size, "ubo");
        uint32_t *data = vc4_bo_map(ubo);
        for (uint32_t i = 0; i < shader->num_ubo_ranges; i++) {
                memcpy(data + shader->ubo_ranges[i].dst_offset,
                       gallium_uniforms + shader->ubo_ranges[i].src_offset,
                       shader->ubo_ranges[i].size);
        }

        return ubo;
}

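/* Emit the shader's uniform stream for this draw.  Entries must be written
 * in exactly the order recorded in uinfo->contents[]/data[], since the QPU
 * consumes uniforms sequentially as the program executes.
 */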
void
vc4_write_uniforms(struct vc4_context *vc4, struct vc4_compiled_shader *shader,
                   struct vc4_constbuf_stateobj *cb,
                   struct vc4_texture_stateobj *texstate)
{
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
        const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;
        struct vc4_bo *ubo = vc4_upload_ubo(vc4, shader, gallium_uniforms);

        cl_ensure_space(&vc4->uniforms, (uinfo->count +
                                         uinfo->num_texture_samples) * 4);

        cl_start_shader_reloc(&vc4->uniforms, uinfo->num_texture_samples);

        for (int i = 0; i < uinfo->count; i++) {

                switch (uinfo->contents[i]) {
                case QUNIFORM_CONSTANT:
                        cl_aligned_u32(&vc4->uniforms, uinfo->data[i]);
                        break;
                case QUNIFORM_UNIFORM:
                        cl_aligned_u32(&vc4->uniforms,
                                       gallium_uniforms[uinfo->data[i]]);
                        break;
                case QUNIFORM_VIEWPORT_X_SCALE:
                        cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[0] * 16.0f);
                        break;
                case QUNIFORM_VIEWPORT_Y_SCALE:
                        cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[1] * 16.0f);
                        break;

                case QUNIFORM_VIEWPORT_Z_OFFSET:
                        cl_aligned_f(&vc4->uniforms, vc4->viewport.translate[2]);
                        break;
                case QUNIFORM_VIEWPORT_Z_SCALE:
                        cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[2]);
                        break;

                case QUNIFORM_USER_CLIP_PLANE:
                        cl_aligned_f(&vc4->uniforms,
                                     vc4->clip.ucp[uinfo->data[i] / 4][uinfo->data[i] % 4]);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P0:
                        write_texture_p0(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P1:
                        write_texture_p1(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P2:
                        write_texture_p2(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_UBO_ADDR:
                        cl_aligned_reloc(vc4, &vc4->uniforms, ubo, 0);
                        break;

                case QUNIFORM_TEXTURE_BORDER_COLOR:
                        write_texture_border_color(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_TEXRECT_SCALE_X:
                case QUNIFORM_TEXRECT_SCALE_Y:
                        cl_aligned_u32(&vc4->uniforms,
                                       get_texrect_scale(texstate,
                                                         uinfo->contents[i],
                                                         uinfo->data[i]));
                        break;

                case QUNIFORM_BLEND_CONST_COLOR:
                        cl_aligned_f(&vc4->uniforms,
                                     CLAMP(vc4->blend_color.color[uinfo->data[i]], 0, 1));
                        break;

                case QUNIFORM_STENCIL:
                        cl_aligned_u32(&vc4->uniforms,
                                       vc4->zsa->stencil_uniforms[uinfo->data[i]] |
                                       (uinfo->data[i] <= 1 ?
                                        (vc4->stencil_ref.ref_value[uinfo->data[i]] << 8) :
                                        0));
                        break;

                case QUNIFORM_ALPHA_REF:
                        cl_aligned_f(&vc4->uniforms,
                                     vc4->zsa->base.alpha.ref_value);
                        break;
                }
#if 0
                uint32_t written_val = *(uint32_t *)(vc4->uniforms.next - 4);
                fprintf(stderr, "%p: %d / 0x%08x (%f)\n",
                        shader, i, written_val, uif(written_val));
#endif
        }
}

static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}

void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}

void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}