2 * Copyright (c) 2014 Scott Mansell
3 * Copyright © 2014 Broadcom
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 #include "util/u_format.h"
27 #include "util/u_hash.h"
28 #include "util/u_math.h"
29 #include "util/u_memory.h"
30 #include "util/ralloc.h"
31 #include "util/hash_table.h"
32 #include "tgsi/tgsi_dump.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "compiler/nir/nir.h"
35 #include "compiler/nir/nir_builder.h"
36 #include "nir/tgsi_to_nir.h"
37 #include "vc4_context.h"
40 #include "mesa/state_tracker/st_glsl_types.h"
41 #ifdef USE_VC4_SIMULATOR
42 #include "simpenrose/simpenrose.h"
46 ntq_get_src(struct vc4_compile
*c
, nir_src src
, int i
);
48 ntq_emit_cf_list(struct vc4_compile
*c
, struct exec_list
*list
);
51 resize_qreg_array(struct vc4_compile
*c
,
56 if (*size
>= decl_size
)
59 uint32_t old_size
= *size
;
60 *size
= MAX2(*size
* 2, decl_size
);
61 *regs
= reralloc(c
, *regs
, struct qreg
, *size
);
63 fprintf(stderr
, "Malloc failure\n");
67 for (uint32_t i
= old_size
; i
< *size
; i
++)
68 (*regs
)[i
] = c
->undef
;
72 indirect_uniform_load(struct vc4_compile
*c
, nir_intrinsic_instr
*intr
)
74 struct qreg indirect_offset
= ntq_get_src(c
, intr
->src
[0], 0);
75 uint32_t offset
= nir_intrinsic_base(intr
);
76 struct vc4_compiler_ubo_range
*range
= NULL
;
78 for (i
= 0; i
< c
->num_uniform_ranges
; i
++) {
79 range
= &c
->ubo_ranges
[i
];
80 if (offset
>= range
->src_offset
&&
81 offset
< range
->src_offset
+ range
->size
) {
85 /* The driver-location-based offset always has to be within a declared
91 range
->dst_offset
= c
->next_ubo_dst_offset
;
92 c
->next_ubo_dst_offset
+= range
->size
;
96 offset
-= range
->src_offset
;
98 /* Adjust for where we stored the TGSI register base. */
99 indirect_offset
= qir_ADD(c
, indirect_offset
,
100 qir_uniform_ui(c
, (range
->dst_offset
+
103 /* Clamp to [0, array size). Note that MIN/MAX are signed. */
104 indirect_offset
= qir_MAX(c
, indirect_offset
, qir_uniform_ui(c
, 0));
105 indirect_offset
= qir_MIN(c
, indirect_offset
,
106 qir_uniform_ui(c
, (range
->dst_offset
+
109 qir_TEX_DIRECT(c
, indirect_offset
, qir_uniform(c
, QUNIFORM_UBO_ADDR
, 0));
110 c
->num_texture_samples
++;
111 return qir_TEX_RESULT(c
);
115 vc4_nir_get_swizzled_channel(nir_builder
*b
, nir_ssa_def
**srcs
, int swiz
)
119 case PIPE_SWIZZLE_NONE
:
120 fprintf(stderr
, "warning: unknown swizzle\n");
123 return nir_imm_float(b
, 0.0);
125 return nir_imm_float(b
, 1.0);
135 ntq_init_ssa_def(struct vc4_compile
*c
, nir_ssa_def
*def
)
137 struct qreg
*qregs
= ralloc_array(c
->def_ht
, struct qreg
,
138 def
->num_components
);
139 _mesa_hash_table_insert(c
->def_ht
, def
, qregs
);
144 ntq_store_dest(struct vc4_compile
*c
, nir_dest
*dest
, int chan
,
148 assert(chan
< dest
->ssa
.num_components
);
151 struct hash_entry
*entry
=
152 _mesa_hash_table_search(c
->def_ht
, &dest
->ssa
);
157 qregs
= ntq_init_ssa_def(c
, &dest
->ssa
);
159 qregs
[chan
] = result
;
161 nir_register
*reg
= dest
->reg
.reg
;
162 assert(dest
->reg
.base_offset
== 0);
163 assert(reg
->num_array_elems
== 0);
164 struct hash_entry
*entry
=
165 _mesa_hash_table_search(c
->def_ht
, reg
);
166 struct qreg
*qregs
= entry
->data
;
168 /* Conditionally move the result to the destination if the
171 if (c
->execute
.file
!= QFILE_NULL
) {
172 qir_SF(c
, c
->execute
);
173 qir_MOV_cond(c
, QPU_COND_ZS
, qregs
[chan
], result
);
175 qir_MOV_dest(c
, qregs
[chan
], result
);
181 ntq_get_dest(struct vc4_compile
*c
, nir_dest
*dest
)
184 struct qreg
*qregs
= ntq_init_ssa_def(c
, &dest
->ssa
);
185 for (int i
= 0; i
< dest
->ssa
.num_components
; i
++)
189 nir_register
*reg
= dest
->reg
.reg
;
190 assert(dest
->reg
.base_offset
== 0);
191 assert(reg
->num_array_elems
== 0);
192 struct hash_entry
*entry
=
193 _mesa_hash_table_search(c
->def_ht
, reg
);
199 ntq_get_src(struct vc4_compile
*c
, nir_src src
, int i
)
201 struct hash_entry
*entry
;
203 entry
= _mesa_hash_table_search(c
->def_ht
, src
.ssa
);
204 assert(i
< src
.ssa
->num_components
);
206 nir_register
*reg
= src
.reg
.reg
;
207 entry
= _mesa_hash_table_search(c
->def_ht
, reg
);
208 assert(reg
->num_array_elems
== 0);
209 assert(src
.reg
.base_offset
== 0);
210 assert(i
< reg
->num_components
);
213 struct qreg
*qregs
= entry
->data
;
218 ntq_get_alu_src(struct vc4_compile
*c
, nir_alu_instr
*instr
,
221 assert(util_is_power_of_two(instr
->dest
.write_mask
));
222 unsigned chan
= ffs(instr
->dest
.write_mask
) - 1;
223 struct qreg r
= ntq_get_src(c
, instr
->src
[src
].src
,
224 instr
->src
[src
].swizzle
[chan
]);
226 assert(!instr
->src
[src
].abs
);
227 assert(!instr
->src
[src
].negate
);
232 static inline struct qreg
233 qir_SAT(struct vc4_compile
*c
, struct qreg val
)
236 qir_FMIN(c
, val
, qir_uniform_f(c
, 1.0)),
237 qir_uniform_f(c
, 0.0));
241 ntq_rcp(struct vc4_compile
*c
, struct qreg x
)
243 struct qreg r
= qir_RCP(c
, x
);
245 /* Apply a Newton-Raphson step to improve the accuracy. */
246 r
= qir_FMUL(c
, r
, qir_FSUB(c
,
247 qir_uniform_f(c
, 2.0),
254 ntq_rsq(struct vc4_compile
*c
, struct qreg x
)
256 struct qreg r
= qir_RSQ(c
, x
);
258 /* Apply a Newton-Raphson step to improve the accuracy. */
259 r
= qir_FMUL(c
, r
, qir_FSUB(c
,
260 qir_uniform_f(c
, 1.5),
262 qir_uniform_f(c
, 0.5),
264 qir_FMUL(c
, r
, r
)))));
270 ntq_umul(struct vc4_compile
*c
, struct qreg src0
, struct qreg src1
)
272 struct qreg src0_hi
= qir_SHR(c
, src0
,
273 qir_uniform_ui(c
, 24));
274 struct qreg src1_hi
= qir_SHR(c
, src1
,
275 qir_uniform_ui(c
, 24));
277 struct qreg hilo
= qir_MUL24(c
, src0_hi
, src1
);
278 struct qreg lohi
= qir_MUL24(c
, src0
, src1_hi
);
279 struct qreg lolo
= qir_MUL24(c
, src0
, src1
);
281 return qir_ADD(c
, lolo
, qir_SHL(c
,
282 qir_ADD(c
, hilo
, lohi
),
283 qir_uniform_ui(c
, 24)));
287 ntq_scale_depth_texture(struct vc4_compile
*c
, struct qreg src
)
289 struct qreg depthf
= qir_ITOF(c
, qir_SHR(c
, src
,
290 qir_uniform_ui(c
, 8)));
291 return qir_FMUL(c
, depthf
, qir_uniform_f(c
, 1.0f
/0xffffff));
295 * Emits a lowered TXF_MS from an MSAA texture.
297 * The addressing math has been lowered in NIR, and now we just need to read
301 ntq_emit_txf(struct vc4_compile
*c
, nir_tex_instr
*instr
)
303 uint32_t tile_width
= 32;
304 uint32_t tile_height
= 32;
305 uint32_t tile_size
= (tile_height
* tile_width
*
306 VC4_MAX_SAMPLES
* sizeof(uint32_t));
308 unsigned unit
= instr
->texture_index
;
309 uint32_t w
= align(c
->key
->tex
[unit
].msaa_width
, tile_width
);
310 uint32_t w_tiles
= w
/ tile_width
;
311 uint32_t h
= align(c
->key
->tex
[unit
].msaa_height
, tile_height
);
312 uint32_t h_tiles
= h
/ tile_height
;
313 uint32_t size
= w_tiles
* h_tiles
* tile_size
;
316 assert(instr
->num_srcs
== 1);
317 assert(instr
->src
[0].src_type
== nir_tex_src_coord
);
318 addr
= ntq_get_src(c
, instr
->src
[0].src
, 0);
320 /* Perform the clamping required by kernel validation. */
321 addr
= qir_MAX(c
, addr
, qir_uniform_ui(c
, 0));
322 addr
= qir_MIN(c
, addr
, qir_uniform_ui(c
, size
- 4));
324 qir_TEX_DIRECT(c
, addr
, qir_uniform(c
, QUNIFORM_TEXTURE_MSAA_ADDR
, unit
));
326 struct qreg tex
= qir_TEX_RESULT(c
);
327 c
->num_texture_samples
++;
330 enum pipe_format format
= c
->key
->tex
[unit
].format
;
331 if (util_format_is_depth_or_stencil(format
)) {
332 struct qreg scaled
= ntq_scale_depth_texture(c
, tex
);
333 for (int i
= 0; i
< 4; i
++)
336 for (int i
= 0; i
< 4; i
++)
337 dest
[i
] = qir_UNPACK_8_F(c
, tex
, i
);
340 for (int i
= 0; i
< 4; i
++)
341 ntq_store_dest(c
, &instr
->dest
, i
, dest
[i
]);
345 ntq_emit_tex(struct vc4_compile
*c
, nir_tex_instr
*instr
)
347 struct qreg s
, t
, r
, lod
, compare
;
348 bool is_txb
= false, is_txl
= false;
349 unsigned unit
= instr
->texture_index
;
351 if (instr
->op
== nir_texop_txf
) {
352 ntq_emit_txf(c
, instr
);
356 for (unsigned i
= 0; i
< instr
->num_srcs
; i
++) {
357 switch (instr
->src
[i
].src_type
) {
358 case nir_tex_src_coord
:
359 s
= ntq_get_src(c
, instr
->src
[i
].src
, 0);
360 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_1D
)
361 t
= qir_uniform_f(c
, 0.5);
363 t
= ntq_get_src(c
, instr
->src
[i
].src
, 1);
364 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
)
365 r
= ntq_get_src(c
, instr
->src
[i
].src
, 2);
367 case nir_tex_src_bias
:
368 lod
= ntq_get_src(c
, instr
->src
[i
].src
, 0);
371 case nir_tex_src_lod
:
372 lod
= ntq_get_src(c
, instr
->src
[i
].src
, 0);
375 case nir_tex_src_comparitor
:
376 compare
= ntq_get_src(c
, instr
->src
[i
].src
, 0);
379 unreachable("unknown texture source");
383 if (c
->key
->tex
[unit
].force_first_level
) {
384 lod
= qir_uniform(c
, QUNIFORM_TEXTURE_FIRST_LEVEL
, unit
);
389 struct qreg texture_u
[] = {
390 qir_uniform(c
, QUNIFORM_TEXTURE_CONFIG_P0
, unit
),
391 qir_uniform(c
, QUNIFORM_TEXTURE_CONFIG_P1
, unit
),
392 qir_uniform(c
, QUNIFORM_CONSTANT
, 0),
393 qir_uniform(c
, QUNIFORM_CONSTANT
, 0),
395 uint32_t next_texture_u
= 0;
397 /* There is no native support for GL texture rectangle coordinates, so
398 * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
401 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_RECT
) {
403 qir_uniform(c
, QUNIFORM_TEXRECT_SCALE_X
, unit
));
405 qir_uniform(c
, QUNIFORM_TEXRECT_SCALE_Y
, unit
));
408 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
|| is_txl
) {
409 texture_u
[2] = qir_uniform(c
, QUNIFORM_TEXTURE_CONFIG_P2
,
410 unit
| (is_txl
<< 16));
413 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
) {
414 qir_TEX_R(c
, r
, texture_u
[next_texture_u
++]);
415 } else if (c
->key
->tex
[unit
].wrap_s
== PIPE_TEX_WRAP_CLAMP_TO_BORDER
||
416 c
->key
->tex
[unit
].wrap_s
== PIPE_TEX_WRAP_CLAMP
||
417 c
->key
->tex
[unit
].wrap_t
== PIPE_TEX_WRAP_CLAMP_TO_BORDER
||
418 c
->key
->tex
[unit
].wrap_t
== PIPE_TEX_WRAP_CLAMP
) {
419 qir_TEX_R(c
, qir_uniform(c
, QUNIFORM_TEXTURE_BORDER_COLOR
, unit
),
420 texture_u
[next_texture_u
++]);
423 if (c
->key
->tex
[unit
].wrap_s
== PIPE_TEX_WRAP_CLAMP
) {
427 if (c
->key
->tex
[unit
].wrap_t
== PIPE_TEX_WRAP_CLAMP
) {
431 qir_TEX_T(c
, t
, texture_u
[next_texture_u
++]);
433 if (is_txl
|| is_txb
)
434 qir_TEX_B(c
, lod
, texture_u
[next_texture_u
++]);
436 qir_TEX_S(c
, s
, texture_u
[next_texture_u
++]);
438 c
->num_texture_samples
++;
439 struct qreg tex
= qir_TEX_RESULT(c
);
441 enum pipe_format format
= c
->key
->tex
[unit
].format
;
443 struct qreg
*dest
= ntq_get_dest(c
, &instr
->dest
);
444 if (util_format_is_depth_or_stencil(format
)) {
445 struct qreg normalized
= ntq_scale_depth_texture(c
, tex
);
446 struct qreg depth_output
;
448 struct qreg u0
= qir_uniform_f(c
, 0.0f
);
449 struct qreg u1
= qir_uniform_f(c
, 1.0f
);
450 if (c
->key
->tex
[unit
].compare_mode
) {
451 switch (c
->key
->tex
[unit
].compare_func
) {
452 case PIPE_FUNC_NEVER
:
453 depth_output
= qir_uniform_f(c
, 0.0f
);
455 case PIPE_FUNC_ALWAYS
:
458 case PIPE_FUNC_EQUAL
:
459 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
460 depth_output
= qir_SEL(c
, QPU_COND_ZS
, u1
, u0
);
462 case PIPE_FUNC_NOTEQUAL
:
463 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
464 depth_output
= qir_SEL(c
, QPU_COND_ZC
, u1
, u0
);
466 case PIPE_FUNC_GREATER
:
467 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
468 depth_output
= qir_SEL(c
, QPU_COND_NC
, u1
, u0
);
470 case PIPE_FUNC_GEQUAL
:
471 qir_SF(c
, qir_FSUB(c
, normalized
, compare
));
472 depth_output
= qir_SEL(c
, QPU_COND_NS
, u1
, u0
);
475 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
476 depth_output
= qir_SEL(c
, QPU_COND_NS
, u1
, u0
);
478 case PIPE_FUNC_LEQUAL
:
479 qir_SF(c
, qir_FSUB(c
, normalized
, compare
));
480 depth_output
= qir_SEL(c
, QPU_COND_NC
, u1
, u0
);
484 depth_output
= normalized
;
487 for (int i
= 0; i
< 4; i
++)
488 dest
[i
] = depth_output
;
490 for (int i
= 0; i
< 4; i
++)
491 dest
[i
] = qir_UNPACK_8_F(c
, tex
, i
);
496 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
500 ntq_ffract(struct vc4_compile
*c
, struct qreg src
)
502 struct qreg trunc
= qir_ITOF(c
, qir_FTOI(c
, src
));
503 struct qreg diff
= qir_FSUB(c
, src
, trunc
);
505 return qir_SEL(c
, QPU_COND_NS
,
506 qir_FADD(c
, diff
, qir_uniform_f(c
, 1.0)), diff
);
510 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
514 ntq_ffloor(struct vc4_compile
*c
, struct qreg src
)
516 struct qreg trunc
= qir_ITOF(c
, qir_FTOI(c
, src
));
518 /* This will be < 0 if we truncated and the truncation was of a value
519 * that was < 0 in the first place.
521 qir_SF(c
, qir_FSUB(c
, src
, trunc
));
523 return qir_SEL(c
, QPU_COND_NS
,
524 qir_FSUB(c
, trunc
, qir_uniform_f(c
, 1.0)), trunc
);
528 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
532 ntq_fceil(struct vc4_compile
*c
, struct qreg src
)
534 struct qreg trunc
= qir_ITOF(c
, qir_FTOI(c
, src
));
536 /* This will be < 0 if we truncated and the truncation was of a value
537 * that was > 0 in the first place.
539 qir_SF(c
, qir_FSUB(c
, trunc
, src
));
541 return qir_SEL(c
, QPU_COND_NS
,
542 qir_FADD(c
, trunc
, qir_uniform_f(c
, 1.0)), trunc
);
546 ntq_fsin(struct vc4_compile
*c
, struct qreg src
)
550 pow(2.0 * M_PI
, 3) / (3 * 2 * 1),
551 -pow(2.0 * M_PI
, 5) / (5 * 4 * 3 * 2 * 1),
552 pow(2.0 * M_PI
, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
553 -pow(2.0 * M_PI
, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
556 struct qreg scaled_x
=
559 qir_uniform_f(c
, 1.0 / (M_PI
* 2.0)));
561 struct qreg x
= qir_FADD(c
,
562 ntq_ffract(c
, scaled_x
),
563 qir_uniform_f(c
, -0.5));
564 struct qreg x2
= qir_FMUL(c
, x
, x
);
565 struct qreg sum
= qir_FMUL(c
, x
, qir_uniform_f(c
, coeff
[0]));
566 for (int i
= 1; i
< ARRAY_SIZE(coeff
); i
++) {
567 x
= qir_FMUL(c
, x
, x2
);
572 qir_uniform_f(c
, coeff
[i
])));
578 ntq_fcos(struct vc4_compile
*c
, struct qreg src
)
582 pow(2.0 * M_PI
, 2) / (2 * 1),
583 -pow(2.0 * M_PI
, 4) / (4 * 3 * 2 * 1),
584 pow(2.0 * M_PI
, 6) / (6 * 5 * 4 * 3 * 2 * 1),
585 -pow(2.0 * M_PI
, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
586 pow(2.0 * M_PI
, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
589 struct qreg scaled_x
=
591 qir_uniform_f(c
, 1.0f
/ (M_PI
* 2.0f
)));
592 struct qreg x_frac
= qir_FADD(c
,
593 ntq_ffract(c
, scaled_x
),
594 qir_uniform_f(c
, -0.5));
596 struct qreg sum
= qir_uniform_f(c
, coeff
[0]);
597 struct qreg x2
= qir_FMUL(c
, x_frac
, x_frac
);
598 struct qreg x
= x2
; /* Current x^2, x^4, or x^6 */
599 for (int i
= 1; i
< ARRAY_SIZE(coeff
); i
++) {
601 x
= qir_FMUL(c
, x
, x2
);
603 struct qreg mul
= qir_FMUL(c
,
605 qir_uniform_f(c
, coeff
[i
]));
609 sum
= qir_FADD(c
, sum
, mul
);
615 ntq_fsign(struct vc4_compile
*c
, struct qreg src
)
617 struct qreg t
= qir_get_temp(c
);
620 qir_MOV_dest(c
, t
, qir_uniform_f(c
, 0.0));
621 qir_MOV_dest(c
, t
, qir_uniform_f(c
, 1.0))->cond
= QPU_COND_ZC
;
622 qir_MOV_dest(c
, t
, qir_uniform_f(c
, -1.0))->cond
= QPU_COND_NS
;
627 emit_vertex_input(struct vc4_compile
*c
, int attr
)
629 enum pipe_format format
= c
->vs_key
->attr_formats
[attr
];
630 uint32_t attr_size
= util_format_get_blocksize(format
);
632 c
->vattr_sizes
[attr
] = align(attr_size
, 4);
633 for (int i
= 0; i
< align(attr_size
, 4) / 4; i
++) {
634 c
->inputs
[attr
* 4 + i
] =
635 qir_MOV(c
, qir_reg(QFILE_VPM
, attr
* 4 + i
));
641 emit_fragcoord_input(struct vc4_compile
*c
, int attr
)
643 c
->inputs
[attr
* 4 + 0] = qir_ITOF(c
, qir_reg(QFILE_FRAG_X
, 0));
644 c
->inputs
[attr
* 4 + 1] = qir_ITOF(c
, qir_reg(QFILE_FRAG_Y
, 0));
645 c
->inputs
[attr
* 4 + 2] =
647 qir_ITOF(c
, qir_FRAG_Z(c
)),
648 qir_uniform_f(c
, 1.0 / 0xffffff));
649 c
->inputs
[attr
* 4 + 3] = qir_RCP(c
, qir_FRAG_W(c
));
653 emit_fragment_varying(struct vc4_compile
*c
, gl_varying_slot slot
,
656 uint32_t i
= c
->num_input_slots
++;
662 if (c
->num_input_slots
>= c
->input_slots_array_size
) {
663 c
->input_slots_array_size
=
664 MAX2(4, c
->input_slots_array_size
* 2);
666 c
->input_slots
= reralloc(c
, c
->input_slots
,
667 struct vc4_varying_slot
,
668 c
->input_slots_array_size
);
671 c
->input_slots
[i
].slot
= slot
;
672 c
->input_slots
[i
].swizzle
= swizzle
;
674 return qir_VARY_ADD_C(c
, qir_FMUL(c
, vary
, qir_FRAG_W(c
)));
678 emit_fragment_input(struct vc4_compile
*c
, int attr
, gl_varying_slot slot
)
680 for (int i
= 0; i
< 4; i
++) {
681 c
->inputs
[attr
* 4 + i
] =
682 emit_fragment_varying(c
, slot
, i
);
688 add_output(struct vc4_compile
*c
,
689 uint32_t decl_offset
,
693 uint32_t old_array_size
= c
->outputs_array_size
;
694 resize_qreg_array(c
, &c
->outputs
, &c
->outputs_array_size
,
697 if (old_array_size
!= c
->outputs_array_size
) {
698 c
->output_slots
= reralloc(c
,
700 struct vc4_varying_slot
,
701 c
->outputs_array_size
);
704 c
->output_slots
[decl_offset
].slot
= slot
;
705 c
->output_slots
[decl_offset
].swizzle
= swizzle
;
709 declare_uniform_range(struct vc4_compile
*c
, uint32_t start
, uint32_t size
)
711 unsigned array_id
= c
->num_uniform_ranges
++;
712 if (array_id
>= c
->ubo_ranges_array_size
) {
713 c
->ubo_ranges_array_size
= MAX2(c
->ubo_ranges_array_size
* 2,
715 c
->ubo_ranges
= reralloc(c
, c
->ubo_ranges
,
716 struct vc4_compiler_ubo_range
,
717 c
->ubo_ranges_array_size
);
720 c
->ubo_ranges
[array_id
].dst_offset
= 0;
721 c
->ubo_ranges
[array_id
].src_offset
= start
;
722 c
->ubo_ranges
[array_id
].size
= size
;
723 c
->ubo_ranges
[array_id
].used
= false;
727 ntq_src_is_only_ssa_def_user(nir_src
*src
)
732 if (!list_empty(&src
->ssa
->if_uses
))
735 return (src
->ssa
->uses
.next
== &src
->use_link
&&
736 src
->ssa
->uses
.next
->next
== &src
->ssa
->uses
);
740 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
743 * However, as an optimization, it tries to find the instructions generating
744 * the sources to be packed and just emit the pack flag there, if possible.
747 ntq_emit_pack_unorm_4x8(struct vc4_compile
*c
, nir_alu_instr
*instr
)
749 struct qreg result
= qir_get_temp(c
);
750 struct nir_alu_instr
*vec4
= NULL
;
752 /* If packing from a vec4 op (as expected), identify it so that we can
753 * peek back at what generated its sources.
755 if (instr
->src
[0].src
.is_ssa
&&
756 instr
->src
[0].src
.ssa
->parent_instr
->type
== nir_instr_type_alu
&&
757 nir_instr_as_alu(instr
->src
[0].src
.ssa
->parent_instr
)->op
==
759 vec4
= nir_instr_as_alu(instr
->src
[0].src
.ssa
->parent_instr
);
762 /* If the pack is replicating the same channel 4 times, use the 8888
763 * pack flag. This is common for blending using the alpha
766 if (instr
->src
[0].swizzle
[0] == instr
->src
[0].swizzle
[1] &&
767 instr
->src
[0].swizzle
[0] == instr
->src
[0].swizzle
[2] &&
768 instr
->src
[0].swizzle
[0] == instr
->src
[0].swizzle
[3]) {
769 struct qreg rep
= ntq_get_src(c
,
771 instr
->src
[0].swizzle
[0]);
772 ntq_store_dest(c
, &instr
->dest
.dest
, 0, qir_PACK_8888_F(c
, rep
));
776 for (int i
= 0; i
< 4; i
++) {
777 int swiz
= instr
->src
[0].swizzle
[i
];
780 src
= ntq_get_src(c
, vec4
->src
[swiz
].src
,
781 vec4
->src
[swiz
].swizzle
[0]);
783 src
= ntq_get_src(c
, instr
->src
[0].src
, swiz
);
787 ntq_src_is_only_ssa_def_user(&vec4
->src
[swiz
].src
) &&
788 src
.file
== QFILE_TEMP
&&
789 c
->defs
[src
.index
] &&
790 qir_is_mul(c
->defs
[src
.index
]) &&
791 !c
->defs
[src
.index
]->dst
.pack
) {
792 struct qinst
*rewrite
= c
->defs
[src
.index
];
793 c
->defs
[src
.index
] = NULL
;
794 rewrite
->dst
= result
;
795 rewrite
->dst
.pack
= QPU_PACK_MUL_8A
+ i
;
799 qir_PACK_8_F(c
, result
, src
, i
);
802 ntq_store_dest(c
, &instr
->dest
.dest
, 0, result
);
805 /** Handles sign-extended bitfield extracts for 16 bits. */
807 ntq_emit_ibfe(struct vc4_compile
*c
, struct qreg base
, struct qreg offset
,
810 assert(bits
.file
== QFILE_UNIF
&&
811 c
->uniform_contents
[bits
.index
] == QUNIFORM_CONSTANT
&&
812 c
->uniform_data
[bits
.index
] == 16);
814 assert(offset
.file
== QFILE_UNIF
&&
815 c
->uniform_contents
[offset
.index
] == QUNIFORM_CONSTANT
);
816 int offset_bit
= c
->uniform_data
[offset
.index
];
817 assert(offset_bit
% 16 == 0);
819 return qir_UNPACK_16_I(c
, base
, offset_bit
/ 16);
822 /** Handles unsigned bitfield extracts for 8 bits. */
824 ntq_emit_ubfe(struct vc4_compile
*c
, struct qreg base
, struct qreg offset
,
827 assert(bits
.file
== QFILE_UNIF
&&
828 c
->uniform_contents
[bits
.index
] == QUNIFORM_CONSTANT
&&
829 c
->uniform_data
[bits
.index
] == 8);
831 assert(offset
.file
== QFILE_UNIF
&&
832 c
->uniform_contents
[offset
.index
] == QUNIFORM_CONSTANT
);
833 int offset_bit
= c
->uniform_data
[offset
.index
];
834 assert(offset_bit
% 8 == 0);
836 return qir_UNPACK_8_I(c
, base
, offset_bit
/ 8);
840 * If compare_instr is a valid comparison instruction, emits the
841 * compare_instr's comparison and returns the sel_instr's return value based
842 * on the compare_instr's result.
845 ntq_emit_comparison(struct vc4_compile
*c
, struct qreg
*dest
,
846 nir_alu_instr
*compare_instr
,
847 nir_alu_instr
*sel_instr
)
851 switch (compare_instr
->op
) {
877 struct qreg src0
= ntq_get_alu_src(c
, compare_instr
, 0);
878 struct qreg src1
= ntq_get_alu_src(c
, compare_instr
, 1);
880 unsigned unsized_type
=
881 nir_alu_type_get_base_type(nir_op_infos
[compare_instr
->op
].input_types
[0]);
882 if (unsized_type
== nir_type_float
)
883 qir_SF(c
, qir_FSUB(c
, src0
, src1
));
885 qir_SF(c
, qir_SUB(c
, src0
, src1
));
887 switch (sel_instr
->op
) {
892 *dest
= qir_SEL(c
, cond
,
893 qir_uniform_f(c
, 1.0), qir_uniform_f(c
, 0.0));
897 *dest
= qir_SEL(c
, cond
,
898 ntq_get_alu_src(c
, sel_instr
, 1),
899 ntq_get_alu_src(c
, sel_instr
, 2));
903 *dest
= qir_SEL(c
, cond
,
904 qir_uniform_ui(c
, ~0), qir_uniform_ui(c
, 0));
912 * Attempts to fold a comparison generating a boolean result into the
913 * condition code for selecting between two values, instead of comparing the
914 * boolean result against 0 to generate the condition code.
916 static struct qreg
ntq_emit_bcsel(struct vc4_compile
*c
, nir_alu_instr
*instr
,
919 if (!instr
->src
[0].src
.is_ssa
)
921 nir_alu_instr
*compare
=
922 nir_instr_as_alu(instr
->src
[0].src
.ssa
->parent_instr
);
927 if (ntq_emit_comparison(c
, &dest
, compare
, instr
))
932 return qir_SEL(c
, QPU_COND_NS
, src
[1], src
[2]);
936 ntq_fddx(struct vc4_compile
*c
, struct qreg src
)
938 /* Make sure that we have a bare temp to use for MUL rotation, so it
939 * can be allocated to an accumulator.
941 if (src
.pack
|| src
.file
!= QFILE_TEMP
)
942 src
= qir_MOV(c
, src
);
944 struct qreg from_left
= qir_ROT_MUL(c
, src
, 1);
945 struct qreg from_right
= qir_ROT_MUL(c
, src
, 15);
947 /* Distinguish left/right pixels of the quad. */
948 qir_SF(c
, qir_AND(c
, qir_reg(QFILE_QPU_ELEMENT
, 0),
949 qir_uniform_ui(c
, 1)));
951 return qir_SEL(c
, QPU_COND_ZS
,
952 qir_FSUB(c
, from_right
, src
),
953 qir_FSUB(c
, src
, from_left
));
957 ntq_fddy(struct vc4_compile
*c
, struct qreg src
)
959 if (src
.pack
|| src
.file
!= QFILE_TEMP
)
960 src
= qir_MOV(c
, src
);
962 struct qreg from_bottom
= qir_ROT_MUL(c
, src
, 2);
963 struct qreg from_top
= qir_ROT_MUL(c
, src
, 14);
965 /* Distinguish top/bottom pixels of the quad. */
967 qir_reg(QFILE_QPU_ELEMENT
, 0),
968 qir_uniform_ui(c
, 2)));
970 return qir_SEL(c
, QPU_COND_ZS
,
971 qir_FSUB(c
, from_top
, src
),
972 qir_FSUB(c
, src
, from_bottom
));
976 ntq_emit_alu(struct vc4_compile
*c
, nir_alu_instr
*instr
)
978 /* This should always be lowered to ALU operations for VC4. */
979 assert(!instr
->dest
.saturate
);
981 /* Vectors are special in that they have non-scalarized writemasks,
982 * and just take the first swizzle channel for each argument in order
983 * into each writemask channel.
985 if (instr
->op
== nir_op_vec2
||
986 instr
->op
== nir_op_vec3
||
987 instr
->op
== nir_op_vec4
) {
989 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
990 srcs
[i
] = ntq_get_src(c
, instr
->src
[i
].src
,
991 instr
->src
[i
].swizzle
[0]);
992 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
993 ntq_store_dest(c
, &instr
->dest
.dest
, i
, srcs
[i
]);
997 if (instr
->op
== nir_op_pack_unorm_4x8
) {
998 ntq_emit_pack_unorm_4x8(c
, instr
);
1002 if (instr
->op
== nir_op_unpack_unorm_4x8
) {
1003 struct qreg src
= ntq_get_src(c
, instr
->src
[0].src
,
1004 instr
->src
[0].swizzle
[0]);
1005 for (int i
= 0; i
< 4; i
++) {
1006 if (instr
->dest
.write_mask
& (1 << i
))
1007 ntq_store_dest(c
, &instr
->dest
.dest
, i
,
1008 qir_UNPACK_8_F(c
, src
, i
));
1013 /* General case: We can just grab the one used channel per src. */
1014 struct qreg src
[nir_op_infos
[instr
->op
].num_inputs
];
1015 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++) {
1016 src
[i
] = ntq_get_alu_src(c
, instr
, i
);
1021 switch (instr
->op
) {
1024 result
= qir_MOV(c
, src
[0]);
1027 result
= qir_FMUL(c
, src
[0], src
[1]);
1030 result
= qir_FADD(c
, src
[0], src
[1]);
1033 result
= qir_FSUB(c
, src
[0], src
[1]);
1036 result
= qir_FMIN(c
, src
[0], src
[1]);
1039 result
= qir_FMAX(c
, src
[0], src
[1]);
1044 result
= qir_FTOI(c
, src
[0]);
1048 result
= qir_ITOF(c
, src
[0]);
1051 result
= qir_AND(c
, src
[0], qir_uniform_f(c
, 1.0));
1054 result
= qir_AND(c
, src
[0], qir_uniform_ui(c
, 1));
1059 result
= qir_SEL(c
, QPU_COND_ZC
,
1060 qir_uniform_ui(c
, ~0),
1061 qir_uniform_ui(c
, 0));
1065 result
= qir_ADD(c
, src
[0], src
[1]);
1068 result
= qir_SHR(c
, src
[0], src
[1]);
1071 result
= qir_SUB(c
, src
[0], src
[1]);
1074 result
= qir_ASR(c
, src
[0], src
[1]);
1077 result
= qir_SHL(c
, src
[0], src
[1]);
1080 result
= qir_MIN(c
, src
[0], src
[1]);
1083 result
= qir_MAX(c
, src
[0], src
[1]);
1086 result
= qir_AND(c
, src
[0], src
[1]);
1089 result
= qir_OR(c
, src
[0], src
[1]);
1092 result
= qir_XOR(c
, src
[0], src
[1]);
1095 result
= qir_NOT(c
, src
[0]);
1099 result
= ntq_umul(c
, src
[0], src
[1]);
1115 if (!ntq_emit_comparison(c
, &result
, instr
, instr
)) {
1116 fprintf(stderr
, "Bad comparison instruction\n");
1121 result
= ntq_emit_bcsel(c
, instr
, src
);
1125 result
= qir_SEL(c
, QPU_COND_ZC
, src
[1], src
[2]);
1129 result
= ntq_rcp(c
, src
[0]);
1132 result
= ntq_rsq(c
, src
[0]);
1135 result
= qir_EXP2(c
, src
[0]);
1138 result
= qir_LOG2(c
, src
[0]);
1142 result
= qir_ITOF(c
, qir_FTOI(c
, src
[0]));
1145 result
= ntq_fceil(c
, src
[0]);
1148 result
= ntq_ffract(c
, src
[0]);
1151 result
= ntq_ffloor(c
, src
[0]);
1155 result
= ntq_fsin(c
, src
[0]);
1158 result
= ntq_fcos(c
, src
[0]);
1162 result
= ntq_fsign(c
, src
[0]);
1166 result
= qir_FMAXABS(c
, src
[0], src
[0]);
1169 result
= qir_MAX(c
, src
[0],
1170 qir_SUB(c
, qir_uniform_ui(c
, 0), src
[0]));
1173 case nir_op_ibitfield_extract
:
1174 result
= ntq_emit_ibfe(c
, src
[0], src
[1], src
[2]);
1177 case nir_op_ubitfield_extract
:
1178 result
= ntq_emit_ubfe(c
, src
[0], src
[1], src
[2]);
1181 case nir_op_usadd_4x8
:
1182 result
= qir_V8ADDS(c
, src
[0], src
[1]);
1185 case nir_op_ussub_4x8
:
1186 result
= qir_V8SUBS(c
, src
[0], src
[1]);
1189 case nir_op_umin_4x8
:
1190 result
= qir_V8MIN(c
, src
[0], src
[1]);
1193 case nir_op_umax_4x8
:
1194 result
= qir_V8MAX(c
, src
[0], src
[1]);
1197 case nir_op_umul_unorm_4x8
:
1198 result
= qir_V8MULD(c
, src
[0], src
[1]);
1202 case nir_op_fddx_coarse
:
1203 case nir_op_fddx_fine
:
1204 result
= ntq_fddx(c
, src
[0]);
1208 case nir_op_fddy_coarse
:
1209 case nir_op_fddy_fine
:
1210 result
= ntq_fddy(c
, src
[0]);
1214 fprintf(stderr
, "unknown NIR ALU inst: ");
1215 nir_print_instr(&instr
->instr
, stderr
);
1216 fprintf(stderr
, "\n");
1220 /* We have a scalar result, so the instruction should only have a
1221 * single channel written to.
1223 assert(util_is_power_of_two(instr
->dest
.write_mask
));
1224 ntq_store_dest(c
, &instr
->dest
.dest
,
1225 ffs(instr
->dest
.write_mask
) - 1, result
);
1229 emit_frag_end(struct vc4_compile
*c
)
1232 if (c
->output_color_index
!= -1) {
1233 color
= c
->outputs
[c
->output_color_index
];
1235 color
= qir_uniform_ui(c
, 0);
1238 uint32_t discard_cond
= QPU_COND_ALWAYS
;
1239 if (c
->s
->info
.fs
.uses_discard
) {
1240 qir_SF(c
, c
->discard
);
1241 discard_cond
= QPU_COND_ZS
;
1244 if (c
->fs_key
->stencil_enabled
) {
1245 qir_MOV_dest(c
, qir_reg(QFILE_TLB_STENCIL_SETUP
, 0),
1246 qir_uniform(c
, QUNIFORM_STENCIL
, 0));
1247 if (c
->fs_key
->stencil_twoside
) {
1248 qir_MOV_dest(c
, qir_reg(QFILE_TLB_STENCIL_SETUP
, 0),
1249 qir_uniform(c
, QUNIFORM_STENCIL
, 1));
1251 if (c
->fs_key
->stencil_full_writemasks
) {
1252 qir_MOV_dest(c
, qir_reg(QFILE_TLB_STENCIL_SETUP
, 0),
1253 qir_uniform(c
, QUNIFORM_STENCIL
, 2));
1257 if (c
->output_sample_mask_index
!= -1) {
1258 qir_MS_MASK(c
, c
->outputs
[c
->output_sample_mask_index
]);
1261 if (c
->fs_key
->depth_enabled
) {
1262 if (c
->output_position_index
!= -1) {
1263 qir_FTOI_dest(c
, qir_reg(QFILE_TLB_Z_WRITE
, 0),
1265 c
->outputs
[c
->output_position_index
],
1266 qir_uniform_f(c
, 0xffffff)))->cond
= discard_cond
;
1268 qir_MOV_dest(c
, qir_reg(QFILE_TLB_Z_WRITE
, 0),
1269 qir_FRAG_Z(c
))->cond
= discard_cond
;
1273 if (!c
->msaa_per_sample_output
) {
1274 qir_MOV_dest(c
, qir_reg(QFILE_TLB_COLOR_WRITE
, 0),
1275 color
)->cond
= discard_cond
;
1277 for (int i
= 0; i
< VC4_MAX_SAMPLES
; i
++) {
1278 qir_MOV_dest(c
, qir_reg(QFILE_TLB_COLOR_WRITE_MS
, 0),
1279 c
->sample_colors
[i
])->cond
= discard_cond
;
1285 emit_scaled_viewport_write(struct vc4_compile
*c
, struct qreg rcp_w
)
1287 struct qreg packed
= qir_get_temp(c
);
1289 for (int i
= 0; i
< 2; i
++) {
1291 qir_uniform(c
, QUNIFORM_VIEWPORT_X_SCALE
+ i
, 0);
1293 struct qreg packed_chan
= packed
;
1294 packed_chan
.pack
= QPU_PACK_A_16A
+ i
;
1296 qir_FTOI_dest(c
, packed_chan
,
1299 c
->outputs
[c
->output_position_index
+ i
],
1304 qir_VPM_WRITE(c
, packed
);
1308 emit_zs_write(struct vc4_compile
*c
, struct qreg rcp_w
)
1310 struct qreg zscale
= qir_uniform(c
, QUNIFORM_VIEWPORT_Z_SCALE
, 0);
1311 struct qreg zoffset
= qir_uniform(c
, QUNIFORM_VIEWPORT_Z_OFFSET
, 0);
1313 qir_VPM_WRITE(c
, qir_FADD(c
, qir_FMUL(c
, qir_FMUL(c
,
1314 c
->outputs
[c
->output_position_index
+ 2],
1321 emit_rcp_wc_write(struct vc4_compile
*c
, struct qreg rcp_w
)
1323 qir_VPM_WRITE(c
, rcp_w
);
1327 emit_point_size_write(struct vc4_compile
*c
)
1329 struct qreg point_size
;
1331 if (c
->output_point_size_index
!= -1)
1332 point_size
= c
->outputs
[c
->output_point_size_index
];
1334 point_size
= qir_uniform_f(c
, 1.0);
1336 /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
1339 point_size
= qir_FMAX(c
, point_size
, qir_uniform_f(c
, .125));
1341 qir_VPM_WRITE(c
, point_size
);
1345 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1347 * The simulator insists that there be at least one vertex attribute, so
1348 * vc4_draw.c will emit one if it wouldn't have otherwise. The simulator also
1349 * insists that all vertex attributes loaded get read by the VS/CS, so we have
1350 * to consume it here.
1353 emit_stub_vpm_read(struct vc4_compile
*c
)
1358 c
->vattr_sizes
[0] = 4;
1359 (void)qir_MOV(c
, qir_reg(QFILE_VPM
, 0));
1364 emit_vert_end(struct vc4_compile
*c
,
1365 struct vc4_varying_slot
*fs_inputs
,
1366 uint32_t num_fs_inputs
)
1368 struct qreg rcp_w
= qir_RCP(c
, c
->outputs
[c
->output_position_index
+ 3]);
1370 emit_stub_vpm_read(c
);
1372 emit_scaled_viewport_write(c
, rcp_w
);
1373 emit_zs_write(c
, rcp_w
);
1374 emit_rcp_wc_write(c
, rcp_w
);
1375 if (c
->vs_key
->per_vertex_point_size
)
1376 emit_point_size_write(c
);
1378 for (int i
= 0; i
< num_fs_inputs
; i
++) {
1379 struct vc4_varying_slot
*input
= &fs_inputs
[i
];
1382 for (j
= 0; j
< c
->num_outputs
; j
++) {
1383 struct vc4_varying_slot
*output
=
1384 &c
->output_slots
[j
];
1386 if (input
->slot
== output
->slot
&&
1387 input
->swizzle
== output
->swizzle
) {
1388 qir_VPM_WRITE(c
, c
->outputs
[j
]);
1392 /* Emit padding if we didn't find a declared VS output for
1395 if (j
== c
->num_outputs
)
1396 qir_VPM_WRITE(c
, qir_uniform_f(c
, 0.0));
1401 emit_coord_end(struct vc4_compile
*c
)
1403 struct qreg rcp_w
= qir_RCP(c
, c
->outputs
[c
->output_position_index
+ 3]);
1405 emit_stub_vpm_read(c
);
1407 for (int i
= 0; i
< 4; i
++)
1408 qir_VPM_WRITE(c
, c
->outputs
[c
->output_position_index
+ i
]);
1410 emit_scaled_viewport_write(c
, rcp_w
);
1411 emit_zs_write(c
, rcp_w
);
1412 emit_rcp_wc_write(c
, rcp_w
);
1413 if (c
->vs_key
->per_vertex_point_size
)
1414 emit_point_size_write(c
);
1418 vc4_optimize_nir(struct nir_shader
*s
)
1425 NIR_PASS_V(s
, nir_lower_vars_to_ssa
);
1426 NIR_PASS_V(s
, nir_lower_alu_to_scalar
);
1427 NIR_PASS_V(s
, nir_lower_phis_to_scalar
);
1429 NIR_PASS(progress
, s
, nir_copy_prop
);
1430 NIR_PASS(progress
, s
, nir_opt_remove_phis
);
1431 NIR_PASS(progress
, s
, nir_opt_dce
);
1432 NIR_PASS(progress
, s
, nir_opt_dead_cf
);
1433 NIR_PASS(progress
, s
, nir_opt_cse
);
1434 NIR_PASS(progress
, s
, nir_opt_peephole_select
);
1435 NIR_PASS(progress
, s
, nir_opt_algebraic
);
1436 NIR_PASS(progress
, s
, nir_opt_constant_folding
);
1437 NIR_PASS(progress
, s
, nir_opt_undef
);
1442 driver_location_compare(const void *in_a
, const void *in_b
)
1444 const nir_variable
*const *a
= in_a
;
1445 const nir_variable
*const *b
= in_b
;
1447 return (*a
)->data
.driver_location
- (*b
)->data
.driver_location
;
1451 ntq_setup_inputs(struct vc4_compile
*c
)
1453 unsigned num_entries
= 0;
1454 nir_foreach_variable(var
, &c
->s
->inputs
)
1457 nir_variable
*vars
[num_entries
];
1460 nir_foreach_variable(var
, &c
->s
->inputs
)
1463 /* Sort the variables so that we emit the input setup in
1464 * driver_location order. This is required for VPM reads, whose data
1465 * is fetched into the VPM in driver_location (TGSI register index)
1468 qsort(&vars
, num_entries
, sizeof(*vars
), driver_location_compare
);
1470 for (unsigned i
= 0; i
< num_entries
; i
++) {
1471 nir_variable
*var
= vars
[i
];
1472 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1473 unsigned loc
= var
->data
.driver_location
;
1475 assert(array_len
== 1);
1477 resize_qreg_array(c
, &c
->inputs
, &c
->inputs_array_size
,
1480 if (c
->stage
== QSTAGE_FRAG
) {
1481 if (var
->data
.location
== VARYING_SLOT_POS
) {
1482 emit_fragcoord_input(c
, loc
);
1483 } else if (var
->data
.location
== VARYING_SLOT_PNTC
||
1484 (var
->data
.location
>= VARYING_SLOT_VAR0
&&
1485 (c
->fs_key
->point_sprite_mask
&
1486 (1 << (var
->data
.location
-
1487 VARYING_SLOT_VAR0
))))) {
1488 c
->inputs
[loc
* 4 + 0] = c
->point_x
;
1489 c
->inputs
[loc
* 4 + 1] = c
->point_y
;
1491 emit_fragment_input(c
, loc
, var
->data
.location
);
1494 emit_vertex_input(c
, loc
);
1500 ntq_setup_outputs(struct vc4_compile
*c
)
1502 nir_foreach_variable(var
, &c
->s
->outputs
) {
1503 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1504 unsigned loc
= var
->data
.driver_location
* 4;
1506 assert(array_len
== 1);
1509 for (int i
= 0; i
< 4; i
++)
1510 add_output(c
, loc
+ i
, var
->data
.location
, i
);
1512 if (c
->stage
== QSTAGE_FRAG
) {
1513 switch (var
->data
.location
) {
1514 case FRAG_RESULT_COLOR
:
1515 case FRAG_RESULT_DATA0
:
1516 c
->output_color_index
= loc
;
1518 case FRAG_RESULT_DEPTH
:
1519 c
->output_position_index
= loc
;
1521 case FRAG_RESULT_SAMPLE_MASK
:
1522 c
->output_sample_mask_index
= loc
;
1526 switch (var
->data
.location
) {
1527 case VARYING_SLOT_POS
:
1528 c
->output_position_index
= loc
;
1530 case VARYING_SLOT_PSIZ
:
1531 c
->output_point_size_index
= loc
;
1539 ntq_setup_uniforms(struct vc4_compile
*c
)
1541 nir_foreach_variable(var
, &c
->s
->uniforms
) {
1542 uint32_t vec4_count
= st_glsl_type_size(var
->type
);
1543 unsigned vec4_size
= 4 * sizeof(float);
1545 declare_uniform_range(c
, var
->data
.driver_location
* vec4_size
,
1546 vec4_count
* vec4_size
);
1552 * Sets up the mapping from nir_register to struct qreg *.
1554 * Each nir_register gets a struct qreg per 32-bit component being stored.
1557 ntq_setup_registers(struct vc4_compile
*c
, struct exec_list
*list
)
1559 foreach_list_typed(nir_register
, nir_reg
, node
, list
) {
1560 unsigned array_len
= MAX2(nir_reg
->num_array_elems
, 1);
1561 struct qreg
*qregs
= ralloc_array(c
->def_ht
, struct qreg
,
1563 nir_reg
->num_components
);
1565 _mesa_hash_table_insert(c
->def_ht
, nir_reg
, qregs
);
1567 for (int i
= 0; i
< array_len
* nir_reg
->num_components
; i
++)
1568 qregs
[i
] = qir_get_temp(c
);
1573 ntq_emit_load_const(struct vc4_compile
*c
, nir_load_const_instr
*instr
)
1575 struct qreg
*qregs
= ntq_init_ssa_def(c
, &instr
->def
);
1576 for (int i
= 0; i
< instr
->def
.num_components
; i
++)
1577 qregs
[i
] = qir_uniform_ui(c
, instr
->value
.u32
[i
]);
1579 _mesa_hash_table_insert(c
->def_ht
, &instr
->def
, qregs
);
1583 ntq_emit_ssa_undef(struct vc4_compile
*c
, nir_ssa_undef_instr
*instr
)
1585 struct qreg
*qregs
= ntq_init_ssa_def(c
, &instr
->def
);
1587 /* QIR needs there to be *some* value, so pick 0 (same as for
1588 * ntq_setup_registers().
1590 for (int i
= 0; i
< instr
->def
.num_components
; i
++)
1591 qregs
[i
] = qir_uniform_ui(c
, 0);
1595 ntq_emit_intrinsic(struct vc4_compile
*c
, nir_intrinsic_instr
*instr
)
1597 nir_const_value
*const_offset
;
1600 switch (instr
->intrinsic
) {
1601 case nir_intrinsic_load_uniform
:
1602 assert(instr
->num_components
== 1);
1603 const_offset
= nir_src_as_const_value(instr
->src
[0]);
1605 offset
= nir_intrinsic_base(instr
) + const_offset
->u32
[0];
1606 assert(offset
% 4 == 0);
1607 /* We need dwords */
1608 offset
= offset
/ 4;
1609 ntq_store_dest(c
, &instr
->dest
, 0,
1610 qir_uniform(c
, QUNIFORM_UNIFORM
,
1613 ntq_store_dest(c
, &instr
->dest
, 0,
1614 indirect_uniform_load(c
, instr
));
1618 case nir_intrinsic_load_user_clip_plane
:
1619 for (int i
= 0; i
< instr
->num_components
; i
++) {
1620 ntq_store_dest(c
, &instr
->dest
, i
,
1621 qir_uniform(c
, QUNIFORM_USER_CLIP_PLANE
,
1622 nir_intrinsic_ucp_id(instr
) *
1627 case nir_intrinsic_load_blend_const_color_r_float
:
1628 case nir_intrinsic_load_blend_const_color_g_float
:
1629 case nir_intrinsic_load_blend_const_color_b_float
:
1630 case nir_intrinsic_load_blend_const_color_a_float
:
1631 ntq_store_dest(c
, &instr
->dest
, 0,
1632 qir_uniform(c
, QUNIFORM_BLEND_CONST_COLOR_X
+
1634 nir_intrinsic_load_blend_const_color_r_float
),
1638 case nir_intrinsic_load_blend_const_color_rgba8888_unorm
:
1639 ntq_store_dest(c
, &instr
->dest
, 0,
1640 qir_uniform(c
, QUNIFORM_BLEND_CONST_COLOR_RGBA
,
1644 case nir_intrinsic_load_blend_const_color_aaaa8888_unorm
:
1645 ntq_store_dest(c
, &instr
->dest
, 0,
1646 qir_uniform(c
, QUNIFORM_BLEND_CONST_COLOR_AAAA
,
1650 case nir_intrinsic_load_alpha_ref_float
:
1651 ntq_store_dest(c
, &instr
->dest
, 0,
1652 qir_uniform(c
, QUNIFORM_ALPHA_REF
, 0));
1655 case nir_intrinsic_load_sample_mask_in
:
1656 ntq_store_dest(c
, &instr
->dest
, 0,
1657 qir_uniform(c
, QUNIFORM_SAMPLE_MASK
, 0));
1660 case nir_intrinsic_load_front_face
:
1661 /* The register contains 0 (front) or 1 (back), and we need to
1662 * turn it into a NIR bool where true means front.
1664 ntq_store_dest(c
, &instr
->dest
, 0,
1666 qir_uniform_ui(c
, -1),
1667 qir_reg(QFILE_FRAG_REV_FLAG
, 0)));
1670 case nir_intrinsic_load_input
:
1671 assert(instr
->num_components
== 1);
1672 const_offset
= nir_src_as_const_value(instr
->src
[0]);
1673 assert(const_offset
&& "vc4 doesn't support indirect inputs");
1674 if (c
->stage
== QSTAGE_FRAG
&&
1675 nir_intrinsic_base(instr
) >= VC4_NIR_TLB_COLOR_READ_INPUT
) {
1676 assert(const_offset
->u32
[0] == 0);
1677 /* Reads of the per-sample color need to be done in
1680 int sample_index
= (nir_intrinsic_base(instr
) -
1681 VC4_NIR_TLB_COLOR_READ_INPUT
);
1682 for (int i
= 0; i
<= sample_index
; i
++) {
1683 if (c
->color_reads
[i
].file
== QFILE_NULL
) {
1685 qir_TLB_COLOR_READ(c
);
1688 ntq_store_dest(c
, &instr
->dest
, 0,
1689 c
->color_reads
[sample_index
]);
1691 offset
= nir_intrinsic_base(instr
) + const_offset
->u32
[0];
1692 int comp
= nir_intrinsic_component(instr
);
1693 ntq_store_dest(c
, &instr
->dest
, 0,
1694 c
->inputs
[offset
* 4 + comp
]);
1698 case nir_intrinsic_store_output
:
1699 const_offset
= nir_src_as_const_value(instr
->src
[1]);
1700 assert(const_offset
&& "vc4 doesn't support indirect outputs");
1701 offset
= nir_intrinsic_base(instr
) + const_offset
->u32
[0];
1703 /* MSAA color outputs are the only case where we have an
1704 * output that's not lowered to being a store of a single 32
1707 if (c
->stage
== QSTAGE_FRAG
&& instr
->num_components
== 4) {
1708 assert(offset
== c
->output_color_index
);
1709 for (int i
= 0; i
< 4; i
++) {
1710 c
->sample_colors
[i
] =
1711 qir_MOV(c
, ntq_get_src(c
, instr
->src
[0],
1715 offset
= offset
* 4 + nir_intrinsic_component(instr
);
1716 assert(instr
->num_components
== 1);
1717 c
->outputs
[offset
] =
1718 qir_MOV(c
, ntq_get_src(c
, instr
->src
[0], 0));
1719 c
->num_outputs
= MAX2(c
->num_outputs
, offset
+ 1);
1723 case nir_intrinsic_discard
:
1724 if (c
->execute
.file
!= QFILE_NULL
) {
1725 qir_SF(c
, c
->execute
);
1726 qir_MOV_cond(c
, QPU_COND_ZS
, c
->discard
,
1727 qir_uniform_ui(c
, ~0));
1729 qir_MOV_dest(c
, c
->discard
, qir_uniform_ui(c
, ~0));
1733 case nir_intrinsic_discard_if
: {
1734 /* true (~0) if we're discarding */
1735 struct qreg cond
= ntq_get_src(c
, instr
->src
[0], 0);
1737 if (c
->execute
.file
!= QFILE_NULL
) {
1738 /* execute == 0 means the channel is active. Invert
1739 * the condition so that we can use zero as "executing
1742 qir_SF(c
, qir_AND(c
, c
->execute
, qir_NOT(c
, cond
)));
1743 qir_MOV_cond(c
, QPU_COND_ZS
, c
->discard
, cond
);
1745 qir_OR_dest(c
, c
->discard
, c
->discard
,
1746 ntq_get_src(c
, instr
->src
[0], 0));
1753 fprintf(stderr
, "Unknown intrinsic: ");
1754 nir_print_instr(&instr
->instr
, stderr
);
1755 fprintf(stderr
, "\n");
1760 /* Clears (activates) the execute flags for any channels whose jump target
1761 * matches this block.
1764 ntq_activate_execute_for_block(struct vc4_compile
*c
)
1766 qir_SF(c
, qir_SUB(c
,
1768 qir_uniform_ui(c
, c
->cur_block
->index
)));
1769 qir_MOV_cond(c
, QPU_COND_ZS
, c
->execute
, qir_uniform_ui(c
, 0));
1773 ntq_emit_if(struct vc4_compile
*c
, nir_if
*if_stmt
)
1775 if (!c
->vc4
->screen
->has_control_flow
) {
1777 "IF statement support requires updated kernel.\n");
1781 nir_cf_node
*nir_first_else_node
= nir_if_first_else_node(if_stmt
);
1782 nir_cf_node
*nir_last_else_node
= nir_if_last_else_node(if_stmt
);
1783 nir_block
*nir_else_block
= nir_cf_node_as_block(nir_first_else_node
);
1784 bool empty_else_block
=
1785 (nir_first_else_node
== nir_last_else_node
&&
1786 exec_list_is_empty(&nir_else_block
->instr_list
));
1788 struct qblock
*then_block
= qir_new_block(c
);
1789 struct qblock
*after_block
= qir_new_block(c
);
1790 struct qblock
*else_block
;
1791 if (empty_else_block
)
1792 else_block
= after_block
;
1794 else_block
= qir_new_block(c
);
1796 bool was_top_level
= false;
1797 if (c
->execute
.file
== QFILE_NULL
) {
1798 c
->execute
= qir_MOV(c
, qir_uniform_ui(c
, 0));
1799 was_top_level
= true;
1802 /* Set ZS for executing (execute == 0) and jumping (if->condition ==
1803 * 0) channels, and then update execute flags for those to point to
1808 ntq_get_src(c
, if_stmt
->condition
, 0)));
1809 qir_MOV_cond(c
, QPU_COND_ZS
, c
->execute
,
1810 qir_uniform_ui(c
, else_block
->index
));
1812 /* Jump to ELSE if nothing is active for THEN, otherwise fall
1815 qir_SF(c
, c
->execute
);
1816 qir_BRANCH(c
, QPU_COND_BRANCH_ALL_ZC
);
1817 qir_link_blocks(c
->cur_block
, else_block
);
1818 qir_link_blocks(c
->cur_block
, then_block
);
1820 /* Process the THEN block. */
1821 qir_set_emit_block(c
, then_block
);
1822 ntq_emit_cf_list(c
, &if_stmt
->then_list
);
1824 if (!empty_else_block
) {
1825 /* Handle the end of the THEN block. First, all currently
1826 * active channels update their execute flags to point to
1829 qir_SF(c
, c
->execute
);
1830 qir_MOV_cond(c
, QPU_COND_ZS
, c
->execute
,
1831 qir_uniform_ui(c
, after_block
->index
));
1833 /* If everything points at ENDIF, then jump there immediately. */
1834 qir_SF(c
, qir_SUB(c
, c
->execute
, qir_uniform_ui(c
, after_block
->index
)));
1835 qir_BRANCH(c
, QPU_COND_BRANCH_ALL_ZS
);
1836 qir_link_blocks(c
->cur_block
, after_block
);
1837 qir_link_blocks(c
->cur_block
, else_block
);
1839 qir_set_emit_block(c
, else_block
);
1840 ntq_activate_execute_for_block(c
);
1841 ntq_emit_cf_list(c
, &if_stmt
->else_list
);
1844 qir_link_blocks(c
->cur_block
, after_block
);
1846 qir_set_emit_block(c
, after_block
);
1848 c
->execute
= c
->undef
;
1850 ntq_activate_execute_for_block(c
);
1855 ntq_emit_jump(struct vc4_compile
*c
, nir_jump_instr
*jump
)
1857 switch (jump
->type
) {
1858 case nir_jump_break
:
1859 qir_SF(c
, c
->execute
);
1860 qir_MOV_cond(c
, QPU_COND_ZS
, c
->execute
,
1861 qir_uniform_ui(c
, c
->loop_break_block
->index
));
1864 case nir_jump_continue
:
1865 qir_SF(c
, c
->execute
);
1866 qir_MOV_cond(c
, QPU_COND_ZS
, c
->execute
,
1867 qir_uniform_ui(c
, c
->loop_cont_block
->index
));
1870 case nir_jump_return
:
1871 unreachable("All returns shouold be lowered\n");
1876 ntq_emit_instr(struct vc4_compile
*c
, nir_instr
*instr
)
1878 switch (instr
->type
) {
1879 case nir_instr_type_alu
:
1880 ntq_emit_alu(c
, nir_instr_as_alu(instr
));
1883 case nir_instr_type_intrinsic
:
1884 ntq_emit_intrinsic(c
, nir_instr_as_intrinsic(instr
));
1887 case nir_instr_type_load_const
:
1888 ntq_emit_load_const(c
, nir_instr_as_load_const(instr
));
1891 case nir_instr_type_ssa_undef
:
1892 ntq_emit_ssa_undef(c
, nir_instr_as_ssa_undef(instr
));
1895 case nir_instr_type_tex
:
1896 ntq_emit_tex(c
, nir_instr_as_tex(instr
));
1899 case nir_instr_type_jump
:
1900 ntq_emit_jump(c
, nir_instr_as_jump(instr
));
1904 fprintf(stderr
, "Unknown NIR instr type: ");
1905 nir_print_instr(instr
, stderr
);
1906 fprintf(stderr
, "\n");
1912 ntq_emit_block(struct vc4_compile
*c
, nir_block
*block
)
1914 nir_foreach_instr(instr
, block
) {
1915 ntq_emit_instr(c
, instr
);
1919 static void ntq_emit_cf_list(struct vc4_compile
*c
, struct exec_list
*list
);
1922 ntq_emit_loop(struct vc4_compile
*c
, nir_loop
*loop
)
1924 if (!c
->vc4
->screen
->has_control_flow
) {
1926 "loop support requires updated kernel.\n");
1927 ntq_emit_cf_list(c
, &loop
->body
);
1931 bool was_top_level
= false;
1932 if (c
->execute
.file
== QFILE_NULL
) {
1933 c
->execute
= qir_MOV(c
, qir_uniform_ui(c
, 0));
1934 was_top_level
= true;
1937 struct qblock
*save_loop_cont_block
= c
->loop_cont_block
;
1938 struct qblock
*save_loop_break_block
= c
->loop_break_block
;
1940 c
->loop_cont_block
= qir_new_block(c
);
1941 c
->loop_break_block
= qir_new_block(c
);
1943 qir_link_blocks(c
->cur_block
, c
->loop_cont_block
);
1944 qir_set_emit_block(c
, c
->loop_cont_block
);
1945 ntq_activate_execute_for_block(c
);
1947 ntq_emit_cf_list(c
, &loop
->body
);
1949 /* If anything had explicitly continued, or is here at the end of the
1950 * loop, then we need to loop again. SF updates are masked by the
1951 * instruction's condition, so we can do the OR of the two conditions
1954 qir_SF(c
, c
->execute
);
1955 struct qinst
*cont_check
=
1959 qir_uniform_ui(c
, c
->loop_cont_block
->index
));
1960 cont_check
->cond
= QPU_COND_ZC
;
1961 cont_check
->sf
= true;
1963 qir_BRANCH(c
, QPU_COND_BRANCH_ANY_ZS
);
1964 qir_link_blocks(c
->cur_block
, c
->loop_cont_block
);
1965 qir_link_blocks(c
->cur_block
, c
->loop_break_block
);
1967 qir_set_emit_block(c
, c
->loop_break_block
);
1969 c
->execute
= c
->undef
;
1971 ntq_activate_execute_for_block(c
);
1973 c
->loop_break_block
= save_loop_break_block
;
1974 c
->loop_cont_block
= save_loop_cont_block
;
1978 ntq_emit_function(struct vc4_compile
*c
, nir_function_impl
*func
)
1980 fprintf(stderr
, "FUNCTIONS not handled.\n");
1985 ntq_emit_cf_list(struct vc4_compile
*c
, struct exec_list
*list
)
1987 foreach_list_typed(nir_cf_node
, node
, node
, list
) {
1988 switch (node
->type
) {
1989 case nir_cf_node_block
:
1990 ntq_emit_block(c
, nir_cf_node_as_block(node
));
1993 case nir_cf_node_if
:
1994 ntq_emit_if(c
, nir_cf_node_as_if(node
));
1997 case nir_cf_node_loop
:
1998 ntq_emit_loop(c
, nir_cf_node_as_loop(node
));
2001 case nir_cf_node_function
:
2002 ntq_emit_function(c
, nir_cf_node_as_function(node
));
2006 fprintf(stderr
, "Unknown NIR node type\n");
2013 ntq_emit_impl(struct vc4_compile
*c
, nir_function_impl
*impl
)
2015 ntq_setup_registers(c
, &impl
->registers
);
2016 ntq_emit_cf_list(c
, &impl
->body
);
2020 nir_to_qir(struct vc4_compile
*c
)
2022 if (c
->stage
== QSTAGE_FRAG
&& c
->s
->info
.fs
.uses_discard
)
2023 c
->discard
= qir_MOV(c
, qir_uniform_ui(c
, 0));
2025 ntq_setup_inputs(c
);
2026 ntq_setup_outputs(c
);
2027 ntq_setup_uniforms(c
);
2028 ntq_setup_registers(c
, &c
->s
->registers
);
2030 /* Find the main function and emit the body. */
2031 nir_foreach_function(function
, c
->s
) {
2032 assert(strcmp(function
->name
, "main") == 0);
2033 assert(function
->impl
);
2034 ntq_emit_impl(c
, function
->impl
);
2038 static const nir_shader_compiler_options nir_options
= {
2039 .lower_extract_byte
= true,
2040 .lower_extract_word
= true,
2042 .lower_flrp32
= true,
2045 .lower_fsqrt
= true,
2046 .lower_negate
= true,
2047 .native_integers
= true,
2051 vc4_screen_get_compiler_options(struct pipe_screen
*pscreen
,
2052 enum pipe_shader_ir ir
, unsigned shader
)
2054 return &nir_options
;
2058 count_nir_instrs(nir_shader
*nir
)
2061 nir_foreach_function(function
, nir
) {
2062 if (!function
->impl
)
2064 nir_foreach_block(block
, function
->impl
) {
2065 nir_foreach_instr(instr
, block
)
2072 static struct vc4_compile
*
2073 vc4_shader_ntq(struct vc4_context
*vc4
, enum qstage stage
,
2074 struct vc4_key
*key
)
2076 struct vc4_compile
*c
= qir_compile_init();
2080 c
->shader_state
= &key
->shader_state
->base
;
2081 c
->program_id
= key
->shader_state
->program_id
;
2083 p_atomic_inc_return(&key
->shader_state
->compiled_variant_count
);
2088 c
->fs_key
= (struct vc4_fs_key
*)key
;
2089 if (c
->fs_key
->is_points
) {
2090 c
->point_x
= emit_fragment_varying(c
, ~0, 0);
2091 c
->point_y
= emit_fragment_varying(c
, ~0, 0);
2092 } else if (c
->fs_key
->is_lines
) {
2093 c
->line_x
= emit_fragment_varying(c
, ~0, 0);
2097 c
->vs_key
= (struct vc4_vs_key
*)key
;
2100 c
->vs_key
= (struct vc4_vs_key
*)key
;
2104 c
->s
= nir_shader_clone(c
, key
->shader_state
->base
.ir
.nir
);
2106 if (stage
== QSTAGE_FRAG
)
2107 NIR_PASS_V(c
->s
, vc4_nir_lower_blend
, c
);
2109 struct nir_lower_tex_options tex_options
= {
2110 /* We would need to implement txs, but we don't want the
2111 * int/float conversions
2113 .lower_rect
= false,
2117 /* Apply swizzles to all samplers. */
2118 .swizzle_result
= ~0,
2121 /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
2122 * The format swizzling applies before sRGB decode, and
2123 * ARB_texture_swizzle is the last thing before returning the sample.
2125 for (int i
= 0; i
< ARRAY_SIZE(key
->tex
); i
++) {
2126 enum pipe_format format
= c
->key
->tex
[i
].format
;
2131 const uint8_t *format_swizzle
= vc4_get_format_swizzle(format
);
2133 for (int j
= 0; j
< 4; j
++) {
2134 uint8_t arb_swiz
= c
->key
->tex
[i
].swizzle
[j
];
2136 if (arb_swiz
<= 3) {
2137 tex_options
.swizzles
[i
][j
] =
2138 format_swizzle
[arb_swiz
];
2140 tex_options
.swizzles
[i
][j
] = arb_swiz
;
2144 if (util_format_is_srgb(format
))
2145 tex_options
.lower_srgb
|= (1 << i
);
2148 NIR_PASS_V(c
->s
, nir_lower_tex
, &tex_options
);
2150 if (c
->fs_key
&& c
->fs_key
->light_twoside
)
2151 NIR_PASS_V(c
->s
, nir_lower_two_sided_color
);
2153 if (c
->vs_key
&& c
->vs_key
->clamp_color
)
2154 NIR_PASS_V(c
->s
, nir_lower_clamp_color_outputs
);
2156 if (c
->key
->ucp_enables
) {
2157 if (stage
== QSTAGE_FRAG
) {
2158 NIR_PASS_V(c
->s
, nir_lower_clip_fs
, c
->key
->ucp_enables
);
2160 NIR_PASS_V(c
->s
, nir_lower_clip_vs
, c
->key
->ucp_enables
);
2161 NIR_PASS_V(c
->s
, nir_lower_io_to_scalar
,
2162 nir_var_shader_out
);
2166 /* FS input scalarizing must happen after nir_lower_two_sided_color,
2167 * which only handles a vec4 at a time. Similarly, VS output
2168 * scalarizing must happen after nir_lower_clip_vs.
2170 if (c
->stage
== QSTAGE_FRAG
)
2171 NIR_PASS_V(c
->s
, nir_lower_io_to_scalar
, nir_var_shader_in
);
2173 NIR_PASS_V(c
->s
, nir_lower_io_to_scalar
, nir_var_shader_out
);
2175 NIR_PASS_V(c
->s
, vc4_nir_lower_io
, c
);
2176 NIR_PASS_V(c
->s
, vc4_nir_lower_txf_ms
, c
);
2177 NIR_PASS_V(c
->s
, nir_lower_idiv
);
2179 vc4_optimize_nir(c
->s
);
2181 NIR_PASS_V(c
->s
, nir_convert_from_ssa
, true);
2183 if (vc4_debug
& VC4_DEBUG_SHADERDB
) {
2184 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
2185 qir_get_stage_name(c
->stage
),
2186 c
->program_id
, c
->variant_id
,
2187 count_nir_instrs(c
->s
));
2190 if (vc4_debug
& VC4_DEBUG_NIR
) {
2191 fprintf(stderr
, "%s prog %d/%d NIR:\n",
2192 qir_get_stage_name(c
->stage
),
2193 c
->program_id
, c
->variant_id
);
2194 nir_print_shader(c
->s
, stderr
);
2205 c
->vs_key
->fs_inputs
->input_slots
,
2206 c
->vs_key
->fs_inputs
->num_inputs
);
2213 if (vc4_debug
& VC4_DEBUG_QIR
) {
2214 fprintf(stderr
, "%s prog %d/%d pre-opt QIR:\n",
2215 qir_get_stage_name(c
->stage
),
2216 c
->program_id
, c
->variant_id
);
2218 fprintf(stderr
, "\n");
2222 qir_lower_uniforms(c
);
2224 qir_schedule_instructions(c
);
2225 qir_emit_uniform_stream_resets(c
);
2227 if (vc4_debug
& VC4_DEBUG_QIR
) {
2228 fprintf(stderr
, "%s prog %d/%d QIR:\n",
2229 qir_get_stage_name(c
->stage
),
2230 c
->program_id
, c
->variant_id
);
2232 fprintf(stderr
, "\n");
2235 qir_reorder_uniforms(c
);
2236 vc4_generate_code(vc4
, c
);
2238 if (vc4_debug
& VC4_DEBUG_SHADERDB
) {
2239 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d instructions\n",
2240 qir_get_stage_name(c
->stage
),
2241 c
->program_id
, c
->variant_id
,
2243 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
2244 qir_get_stage_name(c
->stage
),
2245 c
->program_id
, c
->variant_id
,
2255 vc4_shader_state_create(struct pipe_context
*pctx
,
2256 const struct pipe_shader_state
*cso
)
2258 struct vc4_context
*vc4
= vc4_context(pctx
);
2259 struct vc4_uncompiled_shader
*so
= CALLOC_STRUCT(vc4_uncompiled_shader
);
2263 so
->program_id
= vc4
->next_uncompiled_program_id
++;
2267 if (cso
->type
== PIPE_SHADER_IR_NIR
) {
2268 /* The backend takes ownership of the NIR shader on state
2273 assert(cso
->type
== PIPE_SHADER_IR_TGSI
);
2275 if (vc4_debug
& VC4_DEBUG_TGSI
) {
2276 fprintf(stderr
, "prog %d TGSI:\n",
2278 tgsi_dump(cso
->tokens
, 0);
2279 fprintf(stderr
, "\n");
2281 s
= tgsi_to_nir(cso
->tokens
, &nir_options
);
2284 NIR_PASS_V(s
, nir_opt_global_to_local
);
2285 NIR_PASS_V(s
, nir_convert_to_ssa
);
2286 NIR_PASS_V(s
, nir_normalize_cubemap_coords
);
2288 NIR_PASS_V(s
, nir_lower_load_const_to_scalar
);
2290 vc4_optimize_nir(s
);
2292 NIR_PASS_V(s
, nir_remove_dead_variables
, nir_var_local
);
2294 /* Garbage collect dead instructions */
2297 so
->base
.type
= PIPE_SHADER_IR_NIR
;
2298 so
->base
.ir
.nir
= s
;
2300 if (vc4_debug
& VC4_DEBUG_NIR
) {
2301 fprintf(stderr
, "%s prog %d NIR:\n",
2302 gl_shader_stage_name(s
->stage
),
2304 nir_print_shader(s
, stderr
);
2305 fprintf(stderr
, "\n");
2312 copy_uniform_state_to_shader(struct vc4_compiled_shader
*shader
,
2313 struct vc4_compile
*c
)
2315 int count
= c
->num_uniforms
;
2316 struct vc4_shader_uniform_info
*uinfo
= &shader
->uniforms
;
2318 uinfo
->count
= count
;
2319 uinfo
->data
= ralloc_array(shader
, uint32_t, count
);
2320 memcpy(uinfo
->data
, c
->uniform_data
,
2321 count
* sizeof(*uinfo
->data
));
2322 uinfo
->contents
= ralloc_array(shader
, enum quniform_contents
, count
);
2323 memcpy(uinfo
->contents
, c
->uniform_contents
,
2324 count
* sizeof(*uinfo
->contents
));
2325 uinfo
->num_texture_samples
= c
->num_texture_samples
;
2327 vc4_set_shader_uniform_dirty_flags(shader
);
static void
vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c,
                             struct vc4_compiled_shader *shader)
{
        struct vc4_fs_inputs inputs;

        memset(&inputs, 0, sizeof(inputs));
        inputs.input_slots = ralloc_array(shader,
                                          struct vc4_varying_slot,
                                          c->num_input_slots);

        bool input_live[c->num_input_slots];

        memset(input_live, 0, sizeof(input_live));
        qir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                        if (inst->src[i].file == QFILE_VARY)
                                input_live[inst->src[i].index] = true;
                }
        }

        for (int i = 0; i < c->num_input_slots; i++) {
                struct vc4_varying_slot *slot = &c->input_slots[i];

                if (!input_live[i])
                        continue;

                /* Skip non-VS-output inputs. */
                if (slot->slot == (uint8_t)~0)
                        continue;

                if (slot->slot == VARYING_SLOT_COL0 ||
                    slot->slot == VARYING_SLOT_COL1 ||
                    slot->slot == VARYING_SLOT_BFC0 ||
                    slot->slot == VARYING_SLOT_BFC1) {
                        shader->color_inputs |= (1 << inputs.num_inputs);
                }

                inputs.input_slots[inputs.num_inputs] = *slot;
                inputs.num_inputs++;
        }
        shader->num_inputs = inputs.num_inputs;

        /* Add our set of inputs to the set of all inputs seen.  This way, we
         * can have a single pointer that identifies an FS inputs set,
         * allowing VS to avoid recompiling when the FS is recompiled (or a
         * new one is bound using separate shader objects) but the inputs
         * remain the same.
         */
        struct set_entry *entry = _mesa_set_search(vc4->fs_inputs_set, &inputs);
        if (entry) {
                shader->fs_inputs = entry->key;
                ralloc_free(inputs.input_slots);
        } else {
                struct vc4_fs_inputs *alloc_inputs;

                alloc_inputs = rzalloc(vc4->fs_inputs_set, struct vc4_fs_inputs);
                memcpy(alloc_inputs, &inputs, sizeof(inputs));
                ralloc_steal(alloc_inputs, inputs.input_slots);
                _mesa_set_add(vc4->fs_inputs_set, alloc_inputs);

                shader->fs_inputs = alloc_inputs;
        }
}
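
/* Looks up (or compiles on miss) the variant for the given key.  Keys are
 * hashed and compared as raw bytes, so callers must memset() them first to
 * keep padding bytes from producing spurious misses.
 */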
static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                vc4_setup_compiled_fs_inputs(vc4, c, shader);

                /* Note: the temporary clone in c->s has been freed. */
                nir_shader *orig_shader = key->shader_state->base.ir.nir;
                if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH))
                        shader->disable_early_z = true;
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                         c->qpu_inst_count * sizeof(uint64_t));

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though.)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }

        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}
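
/* Fills in the key fields shared between the FS and VS keys: per-sampler
 * texture state and the user clip plane enables.
 */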
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct vc4_sampler_view *vc4_sampler = vc4_sampler_view(sampler);
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;

                if (sampler->texture->nr_samples > 1) {
                        key->tex[i].msaa_width = sampler->texture->width0;
                        key->tex[i].msaa_height = sampler->texture->height0;
                } else if (sampler_state) {
                        /* The guard here must be on sampler_state (not the
                         * already-dereferenced sampler view), since that's
                         * what the branch reads.
                         */
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                        key->tex[i].force_first_level =
                                vc4_sampler->force_first_level;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}
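
/* Brings vc4->prog.fs up to date with the current state, and flags
 * dependent state (flat shading, FS inputs) dirty only if the compiled
 * shader actually changed.
 */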
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_job *job = vc4->job;
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_SAMPLE_MASK |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (job->msaa) {
                key->msaa = vc4->rasterizer->base.multisample;
                key->sample_coverage = (vc4->rasterizer->base.multisample &&
                                        vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
                key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
                key->sample_alpha_to_one = vc4->blend->alpha_to_one;
        }

        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;

        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }

        if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
                vc4->dirty |= VC4_DIRTY_FS_INPUTS;
}
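
/* The VS key includes the FS's input set, so this must run after
 * vc4_update_compiled_fs().  Each VS state compiles two variants: the full
 * vertex shader and the coord shader used during binning.
 */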
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_FS_INPUTS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->fs_inputs = vc4->prog.fs->fs_inputs;
        key->clamp_color = vc4->rasterizer->base.clamp_vertex_color;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        /* Coord shaders don't care what the FS inputs are. */
        key->fs_inputs = NULL;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}
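
/* Note the ordering: the VS key reads vc4->prog.fs->fs_inputs, so the FS
 * has to be updated first.
 */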
void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}
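
/* Cache callbacks.  The FS/VS variant keys are hashed and compared
 * bytewise; the FS inputs set hashes only the slot array, sized by
 * num_inputs.
 */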
static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

static uint32_t
fs_inputs_hash(const void *key)
{
        const struct vc4_fs_inputs *inputs = key;

        return _mesa_hash_data(inputs->input_slots,
                               sizeof(*inputs->input_slots) *
                               inputs->num_inputs);
}

static bool
fs_inputs_compare(const void *key1, const void *key2)
{
        const struct vc4_fs_inputs *inputs1 = key1;
        const struct vc4_fs_inputs *inputs2 = key2;

        return (inputs1->num_inputs == inputs2->num_inputs &&
                memcmp(inputs1->input_slots,
                       inputs2->input_slots,
                       sizeof(*inputs1->input_slots) *
                       inputs1->num_inputs) == 0);
}
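
/* Evicts any compiled variants that were generated from an uncompiled
 * shader that is being destroyed.
 */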
static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}

static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        ralloc_free(so->base.ir.nir);
        free(so);
}

static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}
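
/* Wires up the shader CSO hooks and creates the variant caches and the
 * FS-inputs interning set.
 */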
void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
        vc4->fs_inputs_set = _mesa_set_create(pctx, fs_inputs_hash,
                                              fs_inputs_compare);
}

void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}