/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
26 #include "util/format/u_format.h"
27 #include "util/crc32.h"
28 #include "util/u_math.h"
29 #include "util/u_memory.h"
30 #include "util/ralloc.h"
31 #include "util/hash_table.h"
32 #include "tgsi/tgsi_dump.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "compiler/nir/nir.h"
35 #include "compiler/nir/nir_builder.h"
36 #include "compiler/nir_types.h"
37 #include "nir/tgsi_to_nir.h"
38 #include "vc4_context.h"
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);
static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
static int
type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}
static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
static void
ntq_emit_thrsw(struct vc4_compile *c)
{
        if (!c->fs_threaded)
                return;

        /* Always thread switch after each texture operation for now.
         *
         * We could do better by batching a bunch of texture fetches up and
         * then doing one thread switch and collecting all their results
         * afterward.
         */
        qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef,
                                    c->undef, c->undef));
        c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
}
static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        uint32_t range = nir_intrinsic_range(intr);
        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
        indirect_offset = qir_MIN_NOIMM(c, indirect_offset,
                                        qir_uniform_ui(c, range - 4));

        qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
                     indirect_offset,
                     qir_uniform(c, QUNIFORM_UBO0_ADDR,
                                 nir_intrinsic_base(intr)));

        c->num_texture_samples++;

        ntq_emit_thrsw(c);

        return qir_TEX_RESULT(c);
}
static struct qreg
vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
        int buffer_index = nir_src_as_uint(intr->src[0]);
        assert(buffer_index == 1);
        assert(c->stage == QSTAGE_FRAG);

        struct qreg offset = ntq_get_src(c, intr->src[1], 0);

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        offset = qir_MAX(c, offset, qir_uniform_ui(c, 0));
        offset = qir_MIN_NOIMM(c, offset,
                               qir_uniform_ui(c, c->fs_key->ubo_1_size - 4));

        qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
                     offset,
                     qir_uniform(c, QUNIFORM_UBO1_ADDR, 0));

        c->num_texture_samples++;

        ntq_emit_thrsw(c);

        return qir_TEX_RESULT(c);
}
nir_ssa_def *
vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
{
        switch (swiz) {
        default:
        case PIPE_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case PIPE_SWIZZLE_0:
                return nir_imm_float(b, 0.0);
        case PIPE_SWIZZLE_1:
                return nir_imm_float(b, 1.0);
        case PIPE_SWIZZLE_X:
        case PIPE_SWIZZLE_Y:
        case PIPE_SWIZZLE_Z:
        case PIPE_SWIZZLE_W:
                return srcs[swiz];
        }
}
static struct qreg *
ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}
/**
 * This function is responsible for getting QIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value.  To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous QIR instruction in the block and rewritable by
 * this function.  That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination.
 */
static void
ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        struct qinst *last_inst = NULL;
        if (!list_is_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        assert(result.file == QFILE_UNIF ||
               (result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* Insert a MOV if the source wasn't an SSA def in the
                 * previous instruction.
                 */
                if (result.file == QFILE_UNIF) {
                        result = qir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (c->execute.file != QFILE_NULL) {
                        last_inst->dst.index = qregs[chan].index;

                        /* Set the flags to the current exec mask.  To insert
                         * the SF, we temporarily remove our SSA instruction.
                         */
                        list_del(&last_inst->link);
                        qir_SF(c, c->execute);
                        list_addtail(&last_inst->link,
                                     &c->cur_block->instructions);

                        last_inst->cond = QPU_COND_ZS;
                        last_inst->cond_is_exec_mask = true;
                }
        }
}
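/* In straight-line code the rewrite above is free: the previous instruction
 * simply retargets its destination.  Inside control flow it becomes a
 * predicated write, e.g.
 *
 *     MOV.sf null, execute
 *     FADD.zs reg, a, b
 *
 * so only channels whose execute value is 0 (active) commit the store.
 */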
static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
{
        if (dest->is_ssa) {
                struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
                for (int i = 0; i < dest->ssa.num_components; i++)
                        qregs[i] = c->undef;
                return qregs;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                return entry->data;
        }
}
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}
static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
};
static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}
static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}
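/* The Newton-Raphson step above computes r1 = r0 * (2 - x * r0).  If the
 * hardware estimate is r0 = (1/x) * (1 + e) for some small relative error e,
 * then r1 = (1/x) * (1 - e^2), so one step roughly doubles the number of
 * correct bits in the result.
 */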
static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}
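/* Same idea as in ntq_rcp(): with r0 = x^-1/2 * (1 + e), the step
 * r1 = r0 * (1.5 - 0.5 * x * r0 * r0) leaves a residual error on the order
 * of e^2, refining the hardware RSQ estimate by one Newton-Raphson
 * iteration.
 */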
static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}
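/* The identity used above: with a = a_lo + (a_hi << 24) and
 * b = b_lo + (b_hi << 24),
 *
 *     a * b = a_lo * b_lo + ((a_hi * b_lo + a_lo * b_hi) << 24)  (mod 2^32)
 *
 * The a_hi * b_hi term shifts entirely past bit 31, so three MUL24s (which
 * multiply only the low 24 bits of their operands) cover the full 32-bit
 * product.
 */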
static struct qreg
ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
{
        struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
                                                 qir_uniform_ui(c, 8)));
        return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
}
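/* The TMU returns depth in the top 24 bits of the 32-bit sample, so the
 * shift above leaves an integer in [0, 0xffffff] and the multiply rescales
 * it to a float in [0.0, 1.0].
 */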
/**
 * Emits a lowered TXF_MS from an MSAA texture.
 *
 * The addressing math has been lowered in NIR, and now we just need to read
 * it like a UBO.
 */
static void
ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
{
        uint32_t tile_width = 32;
        uint32_t tile_height = 32;
        uint32_t tile_size = (tile_height * tile_width *
                              VC4_MAX_SAMPLES * sizeof(uint32_t));

        unsigned unit = instr->texture_index;
        uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
        uint32_t w_tiles = w / tile_width;
        uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
        uint32_t h_tiles = h / tile_height;
        uint32_t size = w_tiles * h_tiles * tile_size;

        struct qreg addr;
        assert(instr->num_srcs == 1);
        assert(instr->src[0].src_type == nir_tex_src_coord);
        addr = ntq_get_src(c, instr->src[0].src, 0);

        /* Perform the clamping required by kernel validation. */
        addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
        addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4));

        qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
                     addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));

        ntq_emit_thrsw(c);

        struct qreg tex = qir_TEX_RESULT(c);
        c->num_texture_samples++;

        enum pipe_format format = c->key->tex[unit].format;
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg scaled = ntq_scale_depth_texture(c, tex);
                for (int i = 0; i < 4; i++)
                        ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled));
        } else {
                for (int i = 0; i < 4; i++)
                        ntq_store_dest(c, &instr->dest, i,
                                       qir_UNPACK_8_F(c, tex, i));
        }
}
static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        struct qreg s, t, r, lod, compare;
        bool is_txb = false, is_txl = false;
        unsigned unit = instr->texture_index;

        if (instr->op == nir_texop_txf) {
                ntq_emit_txf(c, instr);
                return;
        }

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparator:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        if (c->stage != QSTAGE_FRAG && !is_txl) {
                /* From the GLSL 1.20 spec:
                 *
                 *     "If it is mip-mapped and running on the vertex shader,
                 *      then the base texture is used."
                 */
                is_txl = true;
                lod = qir_uniform_ui(c, 0);
        }

        if (c->key->tex[unit].force_first_level) {
                lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit);
                is_txl = true;
                is_txb = false;
        }

        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        struct qinst *tmu;
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r);
                tmu->src[qir_get_tex_uniform_src(tmu)] =
                        texture_u[next_texture_u++];
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0),
                                   qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR,
                                               unit));
                tmu->src[qir_get_tex_uniform_src(tmu)] =
                        texture_u[next_texture_u++];
        }

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t);
        tmu->src[qir_get_tex_uniform_src(tmu)] =
                texture_u[next_texture_u++];

        if (is_txl || is_txb) {
                tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod);
                tmu->src[qir_get_tex_uniform_src(tmu)] =
                        texture_u[next_texture_u++];
        }

        tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s);
        tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++];

        c->num_texture_samples++;

        ntq_emit_thrsw(c);

        struct qreg tex = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg normalized = ntq_scale_depth_texture(c, tex);
                struct qreg depth_output;

                struct qreg u0 = qir_uniform_f(c, 0.0f);
                struct qreg u1 = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        /* From the GL_ARB_shadow spec:
                         *
                         *      "Let Dt (D subscript t) be the depth texture
                         *       value, in the range [0, 1].  Let R be the
                         *       interpolated texture coordinate clamped to the
                         *       range [0, 1]."
                         */
                        compare = qir_SAT(c, compare);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = u1;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                for (int i = 0; i < 4; i++)
                        dest[i] = depth_output;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }
}
/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);

        qir_FADD_dest(c, diff,
                      diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;

        return qir_MOV(c, diff);
}
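/* Worked example: src = -1.25 gives trunc = -1.0 and diff = -0.25.  The
 * negative difference sets the N flag, so the conditional FADD brings it
 * back into range: -0.25 + 1.0 = 0.75 = fract(-1.25).
 */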
/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg result = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, result));

        struct qinst *sub = qir_FSUB_dest(c, result,
                                          result, qir_uniform_f(c, 1.0));
        sub->cond = QPU_COND_NS;

        return qir_MOV(c, result);
}
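/* Worked example: src = -1.25 truncates to -1.0, and src - result = -0.25
 * sets the N flag, so the conditional FSUB corrects the result to
 * -2.0 = floor(-1.25).  For positive inputs truncation already is the floor
 * and the FSUB stays disabled.
 */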
/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg result = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, result, src));

        qir_FADD_dest(c, result,
                      result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;

        return qir_MOV(c, result);
}
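/* Worked example: src = 1.25 truncates to 1.0, and result - src = -0.25
 * sets the N flag, so the conditional FADD corrects the result to
 * 2.0 = ceil(1.25).  For negative inputs truncation already rounds up and
 * the FADD stays disabled.
 */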
static struct qreg
ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x)
{
        /* Since we're using a Taylor approximation, we want to have a small
         * number of coefficients and take advantage of sin/cos repeating
         * every 2pi.  We keep our x as close to 0 as we can, since the series
         * will be less accurate as |x| increases.  (Also, be careful of
         * shifting the input x value to be tricky with sin/cos relations,
         * because getting accurate values for x==0 is very important for SDL
         * rendering)
         */
        struct qreg scaled_x =
                qir_FMUL(c, x,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        /* Note: FTOI truncates toward 0. */
        struct qreg x_frac = qir_FSUB(c, scaled_x,
                                      qir_ITOF(c, qir_FTOI(c, scaled_x)));
        /* Map [0.5, 1] to [-0.5, 0] */
        qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5)));
        qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC;
        /* Map [-1, -0.5] to [0, 0.5] */
        qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5)));
        qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;

        return x_frac;
}
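/* The result is the input angle expressed in turns (x / 2pi), wrapped into
 * [-0.5, 0.5].  The Taylor coefficients in ntq_fsin()/ntq_fcos() below carry
 * the matching powers of 2pi, so sin(x) is evaluated as sin(2pi * x_frac)
 * without ever reconstructing the radian value.
 */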
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                2.0 * M_PI,
                -pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg x = ntq_shrink_sincos_input_range(c, src);
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}
static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                1.0f,
                -pow(2.0 * M_PI, 2) / (2 * 1),
                pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg x_frac = ntq_shrink_sincos_input_range(c, src);
        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                sum = qir_FADD(c, qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i])),
                               sum);
        }
        return sum;
}
static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        struct qreg t = qir_get_temp(c);

        qir_SF(c, src);
        qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
        qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
        qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
        return qir_MOV(c, t);
}
static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        c->vattr_sizes[attr] = align(attr_size, 4);
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                c->inputs[attr * 4 + i] =
                        qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
                c->num_inputs++;
        }
}
static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
        c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}
static struct qreg
emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
                      uint8_t swizzle)
{
        uint32_t i = c->num_input_slots++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        if (c->num_input_slots >= c->input_slots_array_size) {
                c->input_slots_array_size =
                        MAX2(4, c->input_slots_array_size * 2);

                c->input_slots = reralloc(c, c->input_slots,
                                          struct vc4_varying_slot,
                                          c->input_slots_array_size);
        }

        c->input_slots[i].slot = slot;
        c->input_slots[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}
static void
emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
{
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c, slot, i);
                c->num_inputs++;
        }
}
static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct vc4_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset].slot = slot;
        c->output_slots[decl_offset].swizzle = swizzle;
}
static bool
ntq_src_is_only_ssa_def_user(nir_src *src)
{
        if (!src->is_ssa)
                return false;

        if (!list_is_empty(&src->ssa->if_uses))
                return false;

        return (src->ssa->uses.next == &src->use_link &&
                src->ssa->uses.next->next == &src->ssa->uses);
}
/**
 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
 * bit set.
 *
 * However, as an optimization, it tries to find the instructions generating
 * the sources to be packed and just emit the pack flag there, if possible.
 */
static void
ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
{
        struct qreg result = qir_get_temp(c);
        struct nir_alu_instr *vec4 = NULL;

        /* If packing from a vec4 op (as expected), identify it so that we can
         * peek back at what generated its sources.
         */
        if (instr->src[0].src.is_ssa &&
            instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
            nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
            nir_op_vec4) {
                vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        }

        /* If the pack is replicating the same channel 4 times, use the 8888
         * pack flag.  This is common for blending using the alpha
         * channel.
         */
        if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
                struct qreg rep = ntq_get_src(c,
                                              instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                ntq_store_dest(c, &instr->dest.dest, 0, qir_PACK_8888_F(c, rep));
                return;
        }

        for (int i = 0; i < 4; i++) {
                int swiz = instr->src[0].swizzle[i];
                struct qreg src;
                if (vec4) {
                        src = ntq_get_src(c, vec4->src[swiz].src,
                                          vec4->src[swiz].swizzle[0]);
                } else {
                        src = ntq_get_src(c, instr->src[0].src, swiz);
                }

                if (vec4 &&
                    ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
                    src.file == QFILE_TEMP &&
                    c->defs[src.index] &&
                    qir_is_mul(c->defs[src.index]) &&
                    !c->defs[src.index]->dst.pack) {
                        struct qinst *rewrite = c->defs[src.index];
                        c->defs[src.index] = NULL;
                        rewrite->dst = result;
                        rewrite->dst.pack = QPU_PACK_MUL_8A + i;
                } else {
                        qir_PACK_8_F(c, result, src, i);
                }
        }

        ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result));
}
/** Handles sign-extended bitfield extracts for 16 bits. */
static struct qreg
ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 16);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 16 == 0);

        return qir_UNPACK_16_I(c, base, offset_bit / 16);
}
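/* Example: ibfe(base, 16, 16) becomes an UNPACK_16_I of half-word 1, so
 * base = 0x8001abcd returns 0xffff8001 (the top 16 bits, sign-extended).
 * NIR reaching here must already use only 16-bit-aligned, 16-bit-wide
 * extracts; anything else would fail the asserts above.
 */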
/** Handles unsigned bitfield extracts for 8 bits. */
static struct qreg
ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 8);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 8 == 0);

        return qir_UNPACK_8_I(c, base, offset_bit / 8);
}
/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        enum qpu_cond cond;

        switch (compare_instr->op) {
        case nir_op_feq32:
        case nir_op_ieq32:
        case nir_op_seq:
                cond = QPU_COND_ZS;
                break;
        case nir_op_fne32:
        case nir_op_ine32:
        case nir_op_sne:
                cond = QPU_COND_ZC;
                break;
        case nir_op_fge32:
        case nir_op_ige32:
        case nir_op_uge32:
        case nir_op_sge:
                cond = QPU_COND_NC;
                break;
        case nir_op_flt32:
        case nir_op_ilt32:
        case nir_op_slt:
                cond = QPU_COND_NS;
                break;
        default:
                return false;
        }

        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);

        unsigned unsized_type =
                nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
        if (unsized_type == nir_type_float)
                qir_SF(c, qir_FSUB(c, src0, src1));
        else
                qir_SF(c, qir_SUB(c, src0, src1));

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = qir_SEL(c, cond,
                                qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
                break;

        case nir_op_b32csel:
                *dest = qir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = qir_SEL(c, cond,
                                qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
                break;
        }

        /* Make the temporary for nir_store_dest(). */
        *dest = qir_MOV(c, *dest);

        return true;
}
/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
                                  struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        qir_SF(c, src[0]);
        return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2]));
}
static struct qreg
ntq_fddx(struct vc4_compile *c, struct qreg src)
{
        /* Make sure that we have a bare temp to use for MUL rotation, so it
         * can be allocated to an accumulator.
         */
        if (src.pack || src.file != QFILE_TEMP)
                src = qir_MOV(c, src);

        struct qreg from_left = qir_ROT_MUL(c, src, 1);
        struct qreg from_right = qir_ROT_MUL(c, src, 15);

        /* Distinguish left/right pixels of the quad. */
        qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0),
                          qir_uniform_ui(c, 1)));

        return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
                                  qir_FSUB(c, from_right, src),
                                  qir_FSUB(c, src, from_left)));
}
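/* The QPU shades fragments in 2x2 quads within its 16-element SIMD group, so
 * a MUL rotation by 1 (or by 15, i.e. -1) pulls in the horizontal neighbor's
 * value.  Selecting on element bit 0 makes both pixels of a row compute the
 * same left-to-right difference, matching coarse derivative semantics.
 */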
static struct qreg
ntq_fddy(struct vc4_compile *c, struct qreg src)
{
        if (src.pack || src.file != QFILE_TEMP)
                src = qir_MOV(c, src);

        struct qreg from_bottom = qir_ROT_MUL(c, src, 2);
        struct qreg from_top = qir_ROT_MUL(c, src, 14);

        /* Distinguish top/bottom pixels of the quad. */
        qir_SF(c, qir_AND(c,
                          qir_reg(QFILE_QPU_ELEMENT, 0),
                          qir_uniform_ui(c, 2)));

        return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
                                  qir_FSUB(c, from_top, src),
                                  qir_FSUB(c, src, from_bottom)));
}
static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
        /* This should always be lowered to ALU operations for VC4. */
        assert(!instr->dest.saturate);

        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        ntq_store_dest(c, &instr->dest.dest, i,
                                       qir_MOV(c, srcs[i]));
                return;
        }

        if (instr->op == nir_op_pack_unorm_4x8) {
                ntq_emit_pack_unorm_4x8(c, instr);
                return;
        }

        if (instr->op == nir_op_unpack_unorm_4x8) {
                struct qreg src = ntq_get_src(c, instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                for (int i = 0; i < 4; i++) {
                        if (instr->dest.write_mask & (1 << i))
                                ntq_store_dest(c, &instr->dest.dest, i,
                                               qir_UNPACK_8_F(c, src, i));
                }
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        struct qreg result;

        switch (instr->op) {
        case nir_op_mov:
                result = qir_MOV(c, src[0]);
                break;
        case nir_op_fmul:
                result = qir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                result = qir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                result = qir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                result = qir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                result = qir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i32:
        case nir_op_f2u32:
                result = qir_FTOI(c, src[0]);
                break;
        case nir_op_i2f32:
        case nir_op_u2f32:
                result = qir_ITOF(c, src[0]);
                break;
        case nir_op_b2f32:
                result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i32:
                result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                break;
        case nir_op_i2b32:
        case nir_op_f2b32:
                qir_SF(c, src[0]);
                result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
                                            qir_uniform_ui(c, ~0),
                                            qir_uniform_ui(c, 0)));
                break;

        case nir_op_iadd:
                result = qir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                result = qir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                result = qir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                result = qir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                result = qir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                result = qir_MIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                result = qir_MAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                result = qir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                result = qir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                result = qir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                result = qir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                result = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq32:
        case nir_op_fne32:
        case nir_op_fge32:
        case nir_op_flt32:
        case nir_op_ieq32:
        case nir_op_ine32:
        case nir_op_ige32:
        case nir_op_uge32:
        case nir_op_ilt32:
                if (!ntq_emit_comparison(c, &result, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;

        case nir_op_b32csel:
                result = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                qir_SF(c, src[0]);
                result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2]));
                break;

        case nir_op_frcp:
                result = ntq_rcp(c, src[0]);
                break;
        case nir_op_frsq:
                result = ntq_rsq(c, src[0]);
                break;
        case nir_op_fexp2:
                result = qir_EXP2(c, src[0]);
                break;
        case nir_op_flog2:
                result = qir_LOG2(c, src[0]);
                break;

        case nir_op_ftrunc:
                result = qir_ITOF(c, qir_FTOI(c, src[0]));
                break;
        case nir_op_fceil:
                result = ntq_fceil(c, src[0]);
                break;
        case nir_op_ffract:
                result = ntq_ffract(c, src[0]);
                break;
        case nir_op_ffloor:
                result = ntq_ffloor(c, src[0]);
                break;

        case nir_op_fsin:
                result = ntq_fsin(c, src[0]);
                break;
        case nir_op_fcos:
                result = ntq_fcos(c, src[0]);
                break;

        case nir_op_fsign:
                result = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                result = qir_FMAXABS(c, src[0], src[0]);
                break;
        case nir_op_iabs:
                result = qir_MAX(c, src[0],
                                 qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_ibitfield_extract:
                result = ntq_emit_ibfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_ubitfield_extract:
                result = ntq_emit_ubfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_usadd_4x8:
                result = qir_V8ADDS(c, src[0], src[1]);
                break;

        case nir_op_ussub_4x8:
                result = qir_V8SUBS(c, src[0], src[1]);
                break;

        case nir_op_umin_4x8:
                result = qir_V8MIN(c, src[0], src[1]);
                break;

        case nir_op_umax_4x8:
                result = qir_V8MAX(c, src[0], src[1]);
                break;

        case nir_op_umul_unorm_4x8:
                result = qir_V8MULD(c, src[0], src[1]);
                break;

        case nir_op_fddx:
        case nir_op_fddx_coarse:
        case nir_op_fddx_fine:
                result = ntq_fddx(c, src[0]);
                break;

        case nir_op_fddy:
        case nir_op_fddy_coarse:
        case nir_op_fddy_fine:
                result = ntq_fddy(c, src[0]);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }

        /* We have a scalar result, so the instruction should only have a
         * single channel written to.
         */
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        ntq_store_dest(c, &instr->dest.dest,
                       ffs(instr->dest.write_mask) - 1, result);
}
static void
emit_frag_end(struct vc4_compile *c)
{
        struct qreg color;
        if (c->output_color_index != -1) {
                color = c->outputs[c->output_color_index];
        } else {
                color = qir_uniform_ui(c, 0);
        }

        uint32_t discard_cond = QPU_COND_ALWAYS;
        if (c->s->info.fs.uses_discard) {
                qir_SF(c, c->discard);
                discard_cond = QPU_COND_ZS;
        }

        if (c->fs_key->stencil_enabled) {
                qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                             qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                                     qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                                     qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        if (c->output_sample_mask_index != -1) {
                qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }

        if (c->fs_key->depth_enabled) {
                if (c->output_position_index != -1) {
                        qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
                                      qir_FMUL(c,
                                               c->outputs[c->output_position_index],
                                               qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
                } else {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
                                     qir_FRAG_Z(c))->cond = discard_cond;
                }
        }

        if (!c->msaa_per_sample_output) {
                qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
                             color)->cond = discard_cond;
        } else {
                for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
                                     c->sample_colors[i])->cond = discard_cond;
                }
        }
}
static void
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg packed = qir_get_temp(c);

        for (int i = 0; i < 2; i++) {
                struct qreg scale =
                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);

                struct qreg packed_chan = packed;
                packed_chan.pack = QPU_PACK_A_16A + i;

                qir_FTOI_dest(c, packed_chan,
                              qir_FMUL(c,
                                       qir_FMUL(c,
                                                c->outputs[c->output_position_index + i],
                                                scale),
                                       rcp_w));
        }

        qir_VPM_WRITE(c, packed);
}
static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],
                                                          zscale),
                                              rcp_w),
                                  zoffset));
}
static void
emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
{
        qir_VPM_WRITE(c, rcp_w);
}
static void
emit_point_size_write(struct vc4_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = qir_uniform_f(c, 1.0);

        qir_VPM_WRITE(c, point_size);
}
/**
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator also
 * insists that all vertex attributes loaded get read by the VS/CS, so we have
 * to consume it here.
 */
static void
emit_stub_vpm_read(struct vc4_compile *c)
{
        if (c->num_inputs)
                return;

        c->vattr_sizes[0] = 4;
        (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
        c->num_inputs++;
}
static void
emit_vert_end(struct vc4_compile *c,
              struct vc4_varying_slot *fs_inputs,
              uint32_t num_fs_inputs)
{
        struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);

        for (int i = 0; i < num_fs_inputs; i++) {
                struct vc4_varying_slot *input = &fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct vc4_varying_slot *output =
                                &c->output_slots[j];

                        if (input->slot == output->slot &&
                            input->swizzle == output->swizzle) {
                                qir_VPM_WRITE(c, c->outputs[j]);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
        }
}
static void
emit_coord_end(struct vc4_compile *c)
{
        struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
}
static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;
        unsigned lower_flrp =
                (s->options->lower_flrp16 ? 16 : 0) |
                (s->options->lower_flrp32 ? 32 : 0) |
                (s->options->lower_flrp64 ? 64 : 0);

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;

                        NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 s->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, s, nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, s, nir_opt_undef);
                NIR_PASS(progress, s, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);
}
static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}
*c
)
1581 unsigned num_entries
= 0;
1582 nir_foreach_variable(var
, &c
->s
->inputs
)
1585 nir_variable
*vars
[num_entries
];
1588 nir_foreach_variable(var
, &c
->s
->inputs
)
1591 /* Sort the variables so that we emit the input setup in
1592 * driver_location order. This is required for VPM reads, whose data
1593 * is fetched into the VPM in driver_location (TGSI register index)
1596 qsort(&vars
, num_entries
, sizeof(*vars
), driver_location_compare
);
1598 for (unsigned i
= 0; i
< num_entries
; i
++) {
1599 nir_variable
*var
= vars
[i
];
1600 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1601 unsigned loc
= var
->data
.driver_location
;
1603 assert(array_len
== 1);
1605 resize_qreg_array(c
, &c
->inputs
, &c
->inputs_array_size
,
1608 if (c
->stage
== QSTAGE_FRAG
) {
1609 if (var
->data
.location
== VARYING_SLOT_POS
) {
1610 emit_fragcoord_input(c
, loc
);
1611 } else if (var
->data
.location
== VARYING_SLOT_PNTC
||
1612 (var
->data
.location
>= VARYING_SLOT_VAR0
&&
1613 (c
->fs_key
->point_sprite_mask
&
1614 (1 << (var
->data
.location
-
1615 VARYING_SLOT_VAR0
))))) {
1616 c
->inputs
[loc
* 4 + 0] = c
->point_x
;
1617 c
->inputs
[loc
* 4 + 1] = c
->point_y
;
1619 emit_fragment_input(c
, loc
, var
->data
.location
);
1622 emit_vertex_input(c
, loc
);
1628 ntq_setup_outputs(struct vc4_compile
*c
)
1630 nir_foreach_variable(var
, &c
->s
->outputs
) {
1631 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1632 unsigned loc
= var
->data
.driver_location
* 4;
1634 assert(array_len
== 1);
1637 for (int i
= 0; i
< 4; i
++)
1638 add_output(c
, loc
+ i
, var
->data
.location
, i
);
1640 if (c
->stage
== QSTAGE_FRAG
) {
1641 switch (var
->data
.location
) {
1642 case FRAG_RESULT_COLOR
:
1643 case FRAG_RESULT_DATA0
:
1644 c
->output_color_index
= loc
;
1646 case FRAG_RESULT_DEPTH
:
1647 c
->output_position_index
= loc
;
1649 case FRAG_RESULT_SAMPLE_MASK
:
1650 c
->output_sample_mask_index
= loc
;
1654 switch (var
->data
.location
) {
1655 case VARYING_SLOT_POS
:
1656 c
->output_position_index
= loc
;
1658 case VARYING_SLOT_PSIZ
:
1659 c
->output_point_size_index
= loc
;
/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = qir_get_temp(c);
        }
}
static void
ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, instr->value[i].u32);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
static void
ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* QIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers().
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, 0);
}
static void
ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        assert(nir_src_as_uint(instr->src[0]) == 0);

        /* Reads of the per-sample color need to be done in
         * order.
         */
        int sample_index = (nir_intrinsic_base(instr) -
                            VC4_NIR_TLB_COLOR_READ_INPUT);
        for (int i = 0; i <= sample_index; i++) {
                if (c->color_reads[i].file == QFILE_NULL) {
                        c->color_reads[i] =
                                qir_TLB_COLOR_READ(c);
                }
        }
        ntq_store_dest(c, &instr->dest, 0,
                       qir_MOV(c, c->color_reads[sample_index]));
}
static void
ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        assert(instr->num_components == 1);
        assert(nir_src_is_const(instr->src[0]) &&
               "vc4 doesn't support indirect inputs");

        if (c->stage == QSTAGE_FRAG &&
            nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
                ntq_emit_color_read(c, instr);
                return;
        }

        uint32_t offset = nir_intrinsic_base(instr) +
                          nir_src_as_uint(instr->src[0]);
        int comp = nir_intrinsic_component(instr);
        ntq_store_dest(c, &instr->dest, 0,
                       qir_MOV(c, c->inputs[offset * 4 + comp]));
}
static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        unsigned offset;

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                if (nir_src_is_const(instr->src[0])) {
                        offset = nir_intrinsic_base(instr) +
                                 nir_src_as_uint(instr->src[0]);
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        ntq_store_dest(c, &instr->dest, 0,
                                       qir_uniform(c, QUNIFORM_UNIFORM,
                                                   offset));
                } else {
                        ntq_store_dest(c, &instr->dest, 0,
                                       indirect_uniform_load(c, instr));
                }
                break;

        case nir_intrinsic_load_ubo:
                assert(instr->num_components == 1);
                ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr));
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                                   nir_intrinsic_ucp_id(instr) *
                                                   4 + i));
                }
                break;

        case nir_intrinsic_load_blend_const_color_r_float:
        case nir_intrinsic_load_blend_const_color_g_float:
        case nir_intrinsic_load_blend_const_color_b_float:
        case nir_intrinsic_load_blend_const_color_a_float:
                ntq_store_dest(c, &instr->dest, 0,
                               qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X +
                                           (instr->intrinsic -
                                            nir_intrinsic_load_blend_const_color_r_float),
                                           0));
                break;

        case nir_intrinsic_load_blend_const_color_rgba8888_unorm:
                ntq_store_dest(c, &instr->dest, 0,
                               qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA,
                                           0));
                break;

        case nir_intrinsic_load_blend_const_color_aaaa8888_unorm:
                ntq_store_dest(c, &instr->dest, 0,
                               qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA,
                                           0));
                break;

        case nir_intrinsic_load_alpha_ref_float:
                ntq_store_dest(c, &instr->dest, 0,
                               qir_uniform(c, QUNIFORM_ALPHA_REF, 0));
                break;

        case nir_intrinsic_load_sample_mask_in:
                ntq_store_dest(c, &instr->dest, 0,
                               qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
                break;

        case nir_intrinsic_load_front_face:
                /* The register contains 0 (front) or 1 (back), and we need to
                 * turn it into a NIR bool where true means front.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               qir_ADD(c,
                                       qir_uniform_ui(c, -1),
                                       qir_reg(QFILE_FRAG_REV_FLAG, 0)));
                break;

        case nir_intrinsic_load_input:
                ntq_emit_load_input(c, instr);
                break;

        case nir_intrinsic_store_output:
                assert(nir_src_is_const(instr->src[1]) &&
                       "vc4 doesn't support indirect outputs");
                offset = nir_intrinsic_base(instr) +
                         nir_src_as_uint(instr->src[1]);

                /* MSAA color outputs are the only case where we have an
                 * output that's not lowered to being a store of a single 32
                 * bit value.
                 */
                if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
                        assert(offset == c->output_color_index);
                        for (int i = 0; i < 4; i++) {
                                c->sample_colors[i] =
                                        qir_MOV(c, ntq_get_src(c, instr->src[0],
                                                               i));
                        }
                } else {
                        offset = offset * 4 + nir_intrinsic_component(instr);
                        assert(instr->num_components == 1);
                        c->outputs[offset] =
                                qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
                        c->num_outputs = MAX2(c->num_outputs, offset + 1);
                }
                break;

        case nir_intrinsic_discard:
                if (c->execute.file != QFILE_NULL) {
                        qir_SF(c, c->execute);
                        qir_MOV_cond(c, QPU_COND_ZS, c->discard,
                                     qir_uniform_ui(c, ~0));
                } else {
                        qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0));
                }
                break;

        case nir_intrinsic_discard_if: {
                /* true (~0) if we're discarding */
                struct qreg cond = ntq_get_src(c, instr->src[0], 0);

                if (c->execute.file != QFILE_NULL) {
                        /* execute == 0 means the channel is active.  Invert
                         * the condition so that we can use zero as "executing
                         * and discarding."
                         */
                        qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond)));
                        qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond);
                } else {
                        qir_OR_dest(c, c->discard, c->discard,
                                    ntq_get_src(c, instr->src[0], 0));
                }

                break;
        }

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}
/* Clears (activates) the execute flags for any channels whose jump target
 * matches this block.
 */
static void
ntq_activate_execute_for_block(struct vc4_compile *c)
{
        qir_SF(c, qir_SUB(c,
                          c->execute,
                          qir_uniform_ui(c, c->cur_block->index)));
        qir_MOV_cond(c, QPU_COND_ZS, c->execute, qir_uniform_ui(c, 0));
}
static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        if (!c->vc4->screen->has_control_flow) {
                fprintf(stderr,
                        "IF statement support requires updated kernel.\n");
                return;
        }

        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = qir_new_block(c);
        struct qblock *after_block = qir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = qir_new_block(c);

        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
                was_top_level = true;
        }

        /* Set ZS for executing (execute == 0) and jumping (if->condition ==
         * 0) channels, and then update execute flags for those to point to
         * the ELSE block.
         */
        qir_SF(c, qir_OR(c,
                         c->execute,
                         ntq_get_src(c, if_stmt->condition, 0)));
        qir_MOV_cond(c, QPU_COND_ZS, c->execute,
                     qir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        qir_SF(c, c->execute);
        qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZC);
        qir_link_blocks(c->cur_block, else_block);
        qir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        qir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block.  First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF.
                 */
                qir_SF(c, c->execute);
                qir_MOV_cond(c, QPU_COND_ZS, c->execute,
                             qir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, after_block->index)));
                qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
                qir_link_blocks(c->cur_block, after_block);
                qir_link_blocks(c->cur_block, else_block);

                qir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        qir_link_blocks(c->cur_block, after_block);

        qir_set_emit_block(c, after_block);
        if (was_top_level) {
                c->execute = c->undef;
                c->last_top_block = c->cur_block;
        } else {
                ntq_activate_execute_for_block(c);
        }
}
static void
ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
{
        struct qblock *jump_block;
        switch (jump->type) {
        case nir_jump_break:
                jump_block = c->loop_break_block;
                break;
        case nir_jump_continue:
                jump_block = c->loop_cont_block;
                break;
        default:
                unreachable("Unsupported jump type\n");
        }

        qir_SF(c, c->execute);
        qir_MOV_cond(c, QPU_COND_ZS, c->execute,
                     qir_uniform_ui(c, jump_block->index));

        /* Jump to the destination block if everyone has taken the jump. */
        qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index)));
        qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
        struct qblock *new_block = qir_new_block(c);
        qir_link_blocks(c->cur_block, jump_block);
        qir_link_blocks(c->cur_block, new_block);
        qir_set_emit_block(c, new_block);
}
static void
ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                ntq_emit_jump(c, nir_instr_as_jump(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}

static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
static void
ntq_emit_loop(struct vc4_compile *c, nir_loop *loop)
{
        if (!c->vc4->screen->has_control_flow) {
                fprintf(stderr,
                        "loop support requires updated kernel.\n");
                ntq_emit_cf_list(c, &loop->body);
                return;
        }

        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
                was_top_level = true;
        }

        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = qir_new_block(c);
        c->loop_break_block = qir_new_block(c);

        qir_link_blocks(c->cur_block, c->loop_cont_block);
        qir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* If anything had explicitly continued, or is here at the end of the
         * loop, then we need to loop again.  SF updates are masked by the
         * instruction's condition, so we can do the OR of the two conditions
         * within SF.
         */
        qir_SF(c, c->execute);
        struct qinst *cont_check =
                qir_SUB_dest(c,
                             c->undef,
                             c->execute,
                             qir_uniform_ui(c, c->loop_cont_block->index));
        cont_check->cond = QPU_COND_ZC;
        cont_check->sf = true;

        qir_BRANCH(c, QPU_COND_BRANCH_ANY_ZS);
        qir_link_blocks(c->cur_block, c->loop_cont_block);
        qir_link_blocks(c->cur_block, c->loop_break_block);

        qir_set_emit_block(c, c->loop_break_block);
        if (was_top_level) {
                c->execute = c->undef;
                c->last_top_block = c->cur_block;
        } else {
                ntq_activate_execute_for_block(c);
        }

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;
}
static void
ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}
static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}
*c
, nir_function_impl
*impl
)
2159 ntq_setup_registers(c
, &impl
->registers
);
2160 ntq_emit_cf_list(c
, &impl
->body
);
static void
nir_to_qir(struct vc4_compile *c)
{
        if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard)
                c->discard = qir_MOV(c, qir_uniform_ui(c, 0));

        ntq_setup_inputs(c);
        ntq_setup_outputs(c);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}
= {
2181 .lower_all_io_to_temps
= true,
2182 .lower_extract_byte
= true,
2183 .lower_extract_word
= true,
2186 .lower_flrp32
= true,
2190 .lower_fsqrt
= true,
2191 .lower_ldexp
= true,
2192 .lower_negate
= true,
2193 .lower_rotate
= true,
2194 .lower_to_scalar
= true,
2195 .max_unroll_iterations
= 32,
const void *
vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
                                enum pipe_shader_ir ir,
                                enum pipe_shader_type shader)
{
        return &nir_options;
}
static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_function(function, nir) {
                if (!function->impl)
                        continue;
                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr(instr, block)
                                count++;
                }
        }
        return count;
}
*
2222 vc4_shader_ntq(struct vc4_context
*vc4
, enum qstage stage
,
2223 struct vc4_key
*key
, bool fs_threaded
)
2225 struct vc4_compile
*c
= qir_compile_init();
2229 c
->shader_state
= &key
->shader_state
->base
;
2230 c
->program_id
= key
->shader_state
->program_id
;
2232 p_atomic_inc_return(&key
->shader_state
->compiled_variant_count
);
2233 c
->fs_threaded
= fs_threaded
;
2238 c
->fs_key
= (struct vc4_fs_key
*)key
;
2239 if (c
->fs_key
->is_points
) {
2240 c
->point_x
= emit_fragment_varying(c
, ~0, 0);
2241 c
->point_y
= emit_fragment_varying(c
, ~0, 0);
2242 } else if (c
->fs_key
->is_lines
) {
2243 c
->line_x
= emit_fragment_varying(c
, ~0, 0);
2247 c
->vs_key
= (struct vc4_vs_key
*)key
;
2250 c
->vs_key
= (struct vc4_vs_key
*)key
;
2254 c
->s
= nir_shader_clone(c
, key
->shader_state
->base
.ir
.nir
);
2256 if (stage
== QSTAGE_FRAG
) {
2257 if (c
->fs_key
->alpha_test_func
!= COMPARE_FUNC_ALWAYS
) {
2258 NIR_PASS_V(c
->s
, nir_lower_alpha_test
,
2259 c
->fs_key
->alpha_test_func
,
2260 c
->fs_key
->sample_alpha_to_one
&&
2264 NIR_PASS_V(c
->s
, vc4_nir_lower_blend
, c
);
        struct nir_lower_tex_options tex_options = {
                /* We would need to implement txs, but we don't want the
                 * int/float conversions.
                 */
                .lower_rect = false,

                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
         * The format swizzling applies before sRGB decode, and
         * ARB_texture_swizzle is the last thing before returning the sample.
         */
        for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
                enum pipe_format format = c->key->tex[i].format;
                if (!format)
                        continue;

                const uint8_t *format_swizzle = vc4_get_format_swizzle(format);

                for (int j = 0; j < 4; j++) {
                        uint8_t arb_swiz = c->key->tex[i].swizzle[j];

                        if (arb_swiz <= 3) {
                                tex_options.swizzles[i][j] =
                                        format_swizzle[arb_swiz];
                        } else {
                                tex_options.swizzles[i][j] = arb_swiz;
                        }
                }

                if (util_format_is_srgb(format))
                        tex_options.lower_srgb |= (1 << i);
        }

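        /* As a worked example: a PIPE_FORMAT_L8_UNORM texture has the
         * format swizzle (x, x, x, 1), so an ARB swizzle of GREEN
         * (arb_swiz == 1) composes to format_swizzle[1] == x, while ONE or
         * ZERO (arb_swiz > 3) passes through unchanged.
         */
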
        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);

        if (c->fs_key && c->fs_key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (c->vs_key && c->vs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->key->ucp_enables) {
                if (stage == QSTAGE_FRAG) {
                        NIR_PASS_V(c->s, nir_lower_clip_fs,
                                   c->key->ucp_enables, false);
                } else {
                        NIR_PASS_V(c->s, nir_lower_clip_vs,
                                   c->key->ucp_enables, false, false, NULL);
                        NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                                   nir_var_shader_out);
                }
        }

        /* FS input scalarizing must happen after nir_lower_two_sided_color,
         * which only handles a vec4 at a time. Similarly, VS output
         * scalarizing must happen after nir_lower_clip_vs.
         */
        if (c->stage == QSTAGE_FRAG)
                NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
        else
                NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);

        NIR_PASS_V(c->s, vc4_nir_lower_io, c);
        NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, nir_lower_idiv, nir_lower_idiv_fast);

        vc4_optimize_nir(c->s);

        /* Do late algebraic optimization to turn add(a, neg(b)) back into
         * subs, then the mandatory cleanup after algebraic. Note that it may
         * produce fnegs, and if so then we need to keep running to squash
         * fneg(fneg(a)).
         */
        bool more_late_algebraic = true;
        while (more_late_algebraic) {
                more_late_algebraic = false;
                NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
                NIR_PASS_V(c->s, nir_opt_constant_folding);
                NIR_PASS_V(c->s, nir_copy_prop);
                NIR_PASS_V(c->s, nir_opt_dce);
                NIR_PASS_V(c->s, nir_opt_cse);
        }

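        /* For example, nir_opt_algebraic_late rewrites fadd(a, fneg(b))
         * as fsub(a, b); if that leaves an fneg(fneg(a)) behind, NIR_PASS()
         * reports progress through more_late_algebraic and the next
         * iteration's cleanup passes fold it away.
         */
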
        NIR_PASS_V(c->s, nir_lower_bool_to_int32);

        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        count_nir_instrs(c->s));
        }

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_qir(c);

        switch (stage) {
        case QSTAGE_FRAG:
                /* FS threading requires that the thread execute
                 * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating
                 * (with no other THRSW afterwards, obviously). If we didn't
                 * fetch a texture at a top level block, this wouldn't be
                 * true.
                 */
                if (c->fs_threaded && !c->last_thrsw_at_top_level) {
                        c->failed = true;
                        return c;
                }

                emit_frag_end(c);
                break;
        case QSTAGE_VERT:
                emit_vert_end(c,
                              c->vs_key->fs_inputs->input_slots,
                              c->vs_key->fs_inputs->num_inputs);
                break;
        case QSTAGE_COORD:
                emit_coord_end(c);
                break;
        }

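        /* Failing the compile here is cheap to recover from:
         * vc4_get_compiled_shader() notices a failed threaded compile and
         * simply reruns vc4_shader_ntq() with fs_threaded = false.
         */
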
        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
                fprintf(stderr, "\n");
        }

        qir_optimize(c);
        qir_lower_uniforms(c);

        qir_schedule_instructions(c);
        qir_emit_uniform_stream_resets(c);

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
                fprintf(stderr, "\n");
        }

        qir_reorder_uniforms(c);
        vc4_generate_code(vc4, c);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->qpu_inst_count);
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->num_uniforms);
        }

        return c;
}

static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->program_id = vc4->next_uncompiled_program_id++;

        nir_shader *s;

        if (cso->type == PIPE_SHADER_IR_NIR) {
                /* The backend takes ownership of the NIR shader on state
                 * creation.
                 */
                s = cso->ir.nir;
        } else {
                assert(cso->type == PIPE_SHADER_IR_TGSI);

                if (vc4_debug & VC4_DEBUG_TGSI) {
                        fprintf(stderr, "prog %d TGSI:\n",
                                so->program_id);
                        tgsi_dump(cso->tokens, 0);
                        fprintf(stderr, "\n");
                }
                s = tgsi_to_nir(cso->tokens, pctx->screen, false);
        }

        if (s->info.stage == MESA_SHADER_VERTEX)
                NIR_PASS_V(s, nir_lower_point_size, 1.0f, 0.0f);

        NIR_PASS_V(s, nir_lower_io, nir_var_all, type_size,
                   (nir_lower_io_options)0);

        NIR_PASS_V(s, nir_lower_regs_to_ssa);
        NIR_PASS_V(s, nir_normalize_cubemap_coords);

        NIR_PASS_V(s, nir_lower_load_const_to_scalar);

        vc4_optimize_nir(s);

        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);

        /* Garbage collect dead instructions */
        nir_sweep(s);

        so->base.type = PIPE_SHADER_IR_NIR;
        so->base.ir.nir = s;

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d NIR:\n",
                        gl_shader_stage_name(s->info.stage),
                        so->program_id);
                nir_print_shader(s, stderr);
                fprintf(stderr, "\n");
        }

        return so;
}

static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;

        vc4_set_shader_uniform_dirty_flags(shader);
}

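/* uinfo->data and uinfo->contents are parallel arrays: contents[i] says how
 * to interpret data[i] when the uniform stream is written at draw time
 * (e.g. a plain constant uses data[i] verbatim, while texture-related
 * entries use data[i] as the texture unit to read state from).
 */
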
static void
vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c,
                             struct vc4_compiled_shader *shader)
{
        struct vc4_fs_inputs inputs;

        memset(&inputs, 0, sizeof(inputs));
        inputs.input_slots = ralloc_array(shader,
                                          struct vc4_varying_slot,
                                          c->num_input_slots);

        bool input_live[c->num_input_slots];

        memset(input_live, 0, sizeof(input_live));
        qir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < qir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_VARY)
                                input_live[inst->src[i].index] = true;
                }
        }

        for (int i = 0; i < c->num_input_slots; i++) {
                struct vc4_varying_slot *slot = &c->input_slots[i];

                if (!input_live[i])
                        continue;

                /* Skip non-VS-output inputs. */
                if (slot->slot == (uint8_t)~0)
                        continue;

                if (slot->slot == VARYING_SLOT_COL0 ||
                    slot->slot == VARYING_SLOT_COL1 ||
                    slot->slot == VARYING_SLOT_BFC0 ||
                    slot->slot == VARYING_SLOT_BFC1) {
                        shader->color_inputs |= (1 << inputs.num_inputs);
                }

                inputs.input_slots[inputs.num_inputs] = *slot;
                inputs.num_inputs++;
        }
        shader->num_inputs = inputs.num_inputs;

        /* Add our set of inputs to the set of all inputs seen. This way, we
         * can have a single pointer that identifies an FS inputs set,
         * allowing VS to avoid recompiling when the FS is recompiled (or a
         * new one is bound using separate shader objects) but the inputs
         * don't change.
         */
        struct set_entry *entry = _mesa_set_search(vc4->fs_inputs_set, &inputs);
        if (entry) {
                shader->fs_inputs = entry->key;
                ralloc_free(inputs.input_slots);
        } else {
                struct vc4_fs_inputs *alloc_inputs;

                alloc_inputs = rzalloc(vc4->fs_inputs_set, struct vc4_fs_inputs);
                memcpy(alloc_inputs, &inputs, sizeof(inputs));
                ralloc_steal(alloc_inputs, inputs.input_slots);
                _mesa_set_add(vc4->fs_inputs_set, alloc_inputs);

                shader->fs_inputs = alloc_inputs;
        }
}

static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        bool try_threading;

        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
                try_threading = vc4->screen->has_threaded_fs;
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
                try_threading = false;
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading);
        /* If the FS failed to compile threaded, fall back to single threaded. */
        if (try_threading && c->failed) {
                qir_compile_destroy(c);
                c = vc4_shader_ntq(vc4, stage, key, false);
        }

        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                vc4_setup_compiled_fs_inputs(vc4, c, shader);

                /* Note: the temporary clone in c->s has been freed. */
                nir_shader *orig_shader = key->shader_state->base.ir.nir;
                if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH))
                        shader->disable_early_z = true;
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        shader->failed = c->failed;
        if (c->failed) {
                shader->failed = true;
        } else {
                copy_uniform_state_to_shader(shader, c);
                shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                                 c->qpu_inst_count *
                                                 sizeof(uint64_t));
        }

        shader->fs_threaded = c->fs_threaded;

        if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        1 + shader->fs_threaded);
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}

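/* Note the lifetime trick with the cache key: dup_key is allocated with the
 * compiled shader as its ralloc parent, so freeing the shader (as
 * delete_from_cache_if_matches() does) releases the hash table key with it.
 */
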
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct vc4_sampler_view *vc4_sampler = vc4_sampler_view(sampler);
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;

                if (sampler->texture->nr_samples > 1) {
                        key->tex[i].msaa_width = sampler->texture->width0;
                        key->tex[i].msaa_height = sampler->texture->height0;
                } else if (sampler) {
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                        key->tex[i].force_first_level =
                                vc4_sampler->force_first_level;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}

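/* MSAA textures only record their dimensions here, since they are not
 * sampled through the normal sampler path: vc4_nir_lower_txf_ms() rewrites
 * txf_ms into direct memory lookups computed from msaa_width/msaa_height.
 */
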
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_job *job = vc4->job;
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_SAMPLE_MASK |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_UNCOMPILED_FS |
                            VC4_DIRTY_UBO_1_SIZE))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (job->msaa) {
                key->msaa = vc4->rasterizer->base.multisample;
                key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
                key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
                key->sample_alpha_to_one = vc4->blend->alpha_to_one;
        }

        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled)
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        else
                key->alpha_test_func = COMPARE_FUNC_ALWAYS;

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size;
        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;

        if (vc4->rasterizer->base.flatshade &&
            (!old_fs || vc4->prog.fs->color_inputs != old_fs->color_inputs)) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }

        if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
                vc4->dirty |= VC4_DIRTY_FS_INPUTS;
}

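/* Ordering matters between the two update functions:
 * vc4_update_compiled_fs() must run first, because vc4_update_compiled_vs()
 * below captures vc4->prog.fs->fs_inputs from the freshly compiled FS.
 */
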
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_FS_INPUTS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->fs_inputs = vc4->prog.fs->fs_inputs;
        key->clamp_color = vc4->rasterizer->base.clamp_vertex_color;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        /* Coord shaders don't care what the FS inputs are. */
        key->fs_inputs = NULL;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}

bool
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);

        return !(vc4->prog.cs->failed ||
                 vc4->prog.vs->failed ||
                 vc4->prog.fs->failed);
}

static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

static uint32_t
fs_inputs_hash(const void *key)
{
        const struct vc4_fs_inputs *inputs = key;

        return _mesa_hash_data(inputs->input_slots,
                               sizeof(*inputs->input_slots) *
                               inputs->num_inputs);
}

static bool
fs_inputs_compare(const void *key1, const void *key2)
{
        const struct vc4_fs_inputs *inputs1 = key1;
        const struct vc4_fs_inputs *inputs2 = key2;

        return (inputs1->num_inputs == inputs2->num_inputs &&
                memcmp(inputs1->input_slots,
                       inputs2->input_slots,
                       sizeof(*inputs1->input_slots) *
                       inputs1->num_inputs) == 0);
}

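/* Because every vc4_fs_inputs is interned in fs_inputs_set using the deep
 * hash/compare callbacks above, two shaders with identical input layouts
 * share a single pointer. That is what makes the pointer test in
 * vc4_update_compiled_fs() valid, and lets vs_cache_compare() treat the
 * fs_inputs pointer embedded in the VS key as part of a flat memcmp().
 */
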
static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct vc4_compiled_shader **last_compile,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);

                if (shader == *last_compile)
                        *last_compile = NULL;

                ralloc_free(shader);
        }
}

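/* Clearing *last_compile keeps vc4->prog.fs/vs from dangling when the
 * compiled shader being destroyed is the currently bound one.
 */
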
static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        hash_table_foreach(vc4->fs_cache, entry) {
                delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs,
                                             entry, so);
        }
        hash_table_foreach(vc4->vs_cache, entry) {
                delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs,
                                             entry, so);
        }

        ralloc_free(so->base.ir.nir);
        free(so);
}

static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}

void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
        vc4->fs_inputs_set = _mesa_set_create(pctx, fs_inputs_hash,
                                              fs_inputs_compare);
}

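/* The caches are ralloc-parented to pctx (the mem_ctx argument of
 * _mesa_hash_table_create() and _mesa_set_create()), so the tables
 * themselves go away with the context; vc4_program_fini() below only has
 * to release the per-shader BOs and entries.
 */
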
void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}