/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

#define GENERAL_TMU_LOOKUP_PER_QUAD                 (0 << 7)
#define GENERAL_TMU_LOOKUP_PER_PIXEL                (1 << 7)
#define GENERAL_TMU_READ_OP_PREFETCH                (0 << 3)
#define GENERAL_TMU_READ_OP_CACHE_CLEAR             (1 << 3)
#define GENERAL_TMU_READ_OP_CACHE_FLUSH             (3 << 3)
#define GENERAL_TMU_READ_OP_CACHE_CLEAN             (3 << 3)
#define GENERAL_TMU_READ_OP_CACHE_L1T_CLEAR         (4 << 3)
#define GENERAL_TMU_READ_OP_CACHE_L1T_FLUSH_AGGREGATION (5 << 3)
#define GENERAL_TMU_READ_OP_ATOMIC_INC              (8 << 3)
#define GENERAL_TMU_READ_OP_ATOMIC_DEC              (9 << 3)
#define GENERAL_TMU_READ_OP_ATOMIC_NOT              (10 << 3)
#define GENERAL_TMU_READ_OP_READ                    (15 << 3)
#define GENERAL_TMU_LOOKUP_TYPE_8BIT_I              (0 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_16BIT_I             (1 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_VEC2                (2 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_VEC3                (3 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_VEC4                (4 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_8BIT_UI             (5 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_16BIT_UI            (6 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_32BIT_UI            (7 << 0)

#define GENERAL_TMU_WRITE_OP_ATOMIC_ADD_WRAP        (0 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_SUB_WRAP        (1 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_XCHG            (2 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_CMPXCHG         (3 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_UMIN            (4 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_UMAX            (5 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_SMIN            (6 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_SMAX            (7 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_AND             (8 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_OR              (9 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_XOR             (10 << 3)
#define GENERAL_TMU_WRITE_OP_WRITE                  (15 << 3)
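
/* For reference: ntq_emit_tmu_general() below composes one of these config
 * bytes by ORing an op, a per-quad/per-pixel flag, and a lookup type into a
 * 0xffffff00 base.  As an illustrative sketch (not a value emitted verbatim
 * anywhere), a single-component 32-bit SSBO read would use:
 *
 *         0xffffff00 |
 *         GENERAL_TMU_READ_OP_READ |
 *         GENERAL_TMU_LOOKUP_PER_PIXEL |
 *         GENERAL_TMU_LOOKUP_TYPE_32BIT_UI
 */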

#define V3D_TSY_SET_QUORUM          0
#define V3D_TSY_INC_WAITERS         1
#define V3D_TSY_DEC_WAITERS         2
#define V3D_TSY_INC_QUORUM          3
#define V3D_TSY_DEC_QUORUM          4
#define V3D_TSY_FREE_ALL            5
#define V3D_TSY_RELEASE             6
#define V3D_TSY_ACQUIRE             7
#define V3D_TSY_WAIT                8
#define V3D_TSY_WAIT_INC            9
#define V3D_TSY_WAIT_CHECK          10
#define V3D_TSY_WAIT_INC_CHECK      11
#define V3D_TSY_WAIT_CV             12
#define V3D_TSY_INC_SEMAPHORE       13
#define V3D_TSY_DEC_SEMAPHORE       14
#define V3D_TSY_SET_QUORUM_FREE_ALL 15
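
/* Of these TSY (sync) ops, only V3D_TSY_WAIT_INC_CHECK is actually used
 * below (for nir_intrinsic_barrier on pre-4.2 hardware); the rest are listed
 * for completeness of the encoding.
 */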

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
resize_qreg_array(struct v3d_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}

static void
vir_emit_thrsw(struct v3d_compile *c)
{
        if (c->threads == 1)
                return;

        /* Always thread switch after each texture operation for now.
         *
         * We could do better by batching a bunch of texture fetches up and
         * then doing one thread switch and collecting all their results
         * afterward.
         */
        c->last_thrsw = vir_NOP(c);
        c->last_thrsw->qpu.sig.thrsw = true;
        c->last_thrsw_at_top_level = !c->in_control_flow;
}

static uint32_t
v3d_general_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_ssbo:
        case nir_intrinsic_load_ubo:
        case nir_intrinsic_load_uniform:
        case nir_intrinsic_load_shared:
                return GENERAL_TMU_READ_OP_READ;
        case nir_intrinsic_store_ssbo:
        case nir_intrinsic_store_shared:
                return GENERAL_TMU_WRITE_OP_WRITE;
        case nir_intrinsic_ssbo_atomic_add:
        case nir_intrinsic_shared_atomic_add:
                return GENERAL_TMU_WRITE_OP_ATOMIC_ADD_WRAP;
        case nir_intrinsic_ssbo_atomic_imin:
        case nir_intrinsic_shared_atomic_imin:
                return GENERAL_TMU_WRITE_OP_ATOMIC_SMIN;
        case nir_intrinsic_ssbo_atomic_umin:
        case nir_intrinsic_shared_atomic_umin:
                return GENERAL_TMU_WRITE_OP_ATOMIC_UMIN;
        case nir_intrinsic_ssbo_atomic_imax:
        case nir_intrinsic_shared_atomic_imax:
                return GENERAL_TMU_WRITE_OP_ATOMIC_SMAX;
        case nir_intrinsic_ssbo_atomic_umax:
        case nir_intrinsic_shared_atomic_umax:
                return GENERAL_TMU_WRITE_OP_ATOMIC_UMAX;
        case nir_intrinsic_ssbo_atomic_and:
        case nir_intrinsic_shared_atomic_and:
                return GENERAL_TMU_WRITE_OP_ATOMIC_AND;
        case nir_intrinsic_ssbo_atomic_or:
        case nir_intrinsic_shared_atomic_or:
                return GENERAL_TMU_WRITE_OP_ATOMIC_OR;
        case nir_intrinsic_ssbo_atomic_xor:
        case nir_intrinsic_shared_atomic_xor:
                return GENERAL_TMU_WRITE_OP_ATOMIC_XOR;
        case nir_intrinsic_ssbo_atomic_exchange:
        case nir_intrinsic_shared_atomic_exchange:
                return GENERAL_TMU_WRITE_OP_ATOMIC_XCHG;
        case nir_intrinsic_ssbo_atomic_comp_swap:
        case nir_intrinsic_shared_atomic_comp_swap:
                return GENERAL_TMU_WRITE_OP_ATOMIC_CMPXCHG;
        default:
                unreachable("unknown intrinsic op");
        }
}

/**
 * Implements indirect uniform loads and SSBO accesses through the TMU general
 * memory access interface.
 */
static void
ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                     bool is_shared)
{
        /* XXX perf: We should turn add/sub of 1 to inc/dec.  Perhaps NIR
         * wants to have support for inc/dec?
         */

        uint32_t tmu_op = v3d_general_tmu_op(instr);
        bool is_store = (instr->intrinsic == nir_intrinsic_store_ssbo ||
                         instr->intrinsic == nir_intrinsic_store_shared);
        bool has_index = !is_shared;

        int offset_src;
        int tmu_writes = 1; /* address */
        if (instr->intrinsic == nir_intrinsic_load_uniform) {
                offset_src = 0;
        } else if (instr->intrinsic == nir_intrinsic_load_ssbo ||
                   instr->intrinsic == nir_intrinsic_load_ubo ||
                   instr->intrinsic == nir_intrinsic_load_shared) {
                offset_src = 0 + has_index;
        } else if (is_store) {
                offset_src = 1 + has_index;
                for (int i = 0; i < instr->num_components; i++) {
                        vir_MOV_dest(c,
                                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
                                     ntq_get_src(c, instr->src[0], i));
                        tmu_writes++;
                }
        } else {
                offset_src = 0 + has_index;
                vir_MOV_dest(c,
                             vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
                             ntq_get_src(c, instr->src[1 + has_index], 0));
                tmu_writes++;
                if (tmu_op == GENERAL_TMU_WRITE_OP_ATOMIC_CMPXCHG) {
                        vir_MOV_dest(c,
                                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
                                     ntq_get_src(c, instr->src[2 + has_index],
                                                 0));
                        tmu_writes++;
                }
        }

        bool dynamic_src = !nir_src_is_const(instr->src[offset_src]);
        uint32_t const_offset = 0;
        if (!dynamic_src)
                const_offset = nir_src_as_uint(instr->src[offset_src]);

        /* Make sure we won't exceed the 16-entry TMU fifo if each thread is
         * storing at the same time.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        struct qreg offset;
        if (instr->intrinsic == nir_intrinsic_load_uniform) {
                const_offset += nir_intrinsic_base(instr);
                offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
                                     v3d_unit_data_create(0, const_offset));
                const_offset = 0;
        } else if (instr->intrinsic == nir_intrinsic_load_ubo) {
                uint32_t index = nir_src_as_uint(instr->src[0]) + 1;
                /* Note that QUNIFORM_UBO_ADDR takes a UBO index shifted up by
                 * 1 (0 is gallium's constant buffer 0).
                 */
                offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
                                     v3d_unit_data_create(index, const_offset));
                const_offset = 0;
        } else if (is_shared) {
                const_offset += nir_intrinsic_base(instr);

                /* Shared variables have no buffer index, and all start from a
                 * common base that we set up at the start of dispatch.
                 */
                offset = c->cs_shared_offset;
        } else {
                offset = vir_uniform(c, QUNIFORM_SSBO_OFFSET,
                                     nir_src_as_uint(instr->src[is_store ?
                                                                1 : 0]));
        }

        uint32_t config = (0xffffff00 |
                           tmu_op |
                           GENERAL_TMU_LOOKUP_PER_PIXEL);
        if (instr->num_components == 1) {
                config |= GENERAL_TMU_LOOKUP_TYPE_32BIT_UI;
        } else {
                config |= (GENERAL_TMU_LOOKUP_TYPE_VEC2 +
                           instr->num_components - 2);
        }

        if (vir_in_nonuniform_control_flow(c)) {
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        struct qreg tmua;
        if (config == ~0)
                tmua = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA);
        else
                tmua = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUAU);

        struct qinst *tmu;
        if (dynamic_src) {
                if (const_offset != 0) {
                        offset = vir_ADD(c, offset,
                                         vir_uniform_ui(c, const_offset));
                }
                tmu = vir_ADD_dest(c, tmua, offset,
                                   ntq_get_src(c, instr->src[offset_src], 0));
        } else {
                if (const_offset != 0) {
                        tmu = vir_ADD_dest(c, tmua, offset,
                                           vir_uniform_ui(c, const_offset));
                } else {
                        tmu = vir_MOV_dest(c, tmua, offset);
                }
        }

        if (config != ~0) {
                tmu->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
                                                     config);
        }

        if (vir_in_nonuniform_control_flow(c))
                vir_set_cond(tmu, V3D_QPU_COND_IFA);

        vir_emit_thrsw(c);

        /* Read the result, or wait for the TMU op to complete. */
        for (int i = 0; i < nir_intrinsic_dest_components(instr); i++)
                ntq_store_dest(c, &instr->dest, i, vir_MOV(c, vir_LDTMU(c)));

        if (nir_intrinsic_dest_components(instr) == 0)
                vir_TMUWT(c);
}
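
/* As a rough sketch of what the above emits for a simple dynamic SSBO store
 * of one component (assuming a non-zero constant offset and no nonuniform
 * control flow):
 *
 *         MOV   tmud, data           ; queue the store data
 *         ADD   tmuau, offset, addr  ; address write fires the op, with the
 *                                    ;   config byte carried in .uniform
 *         THRSW                      ; switch threads while the TMU works
 *         TMUWT                      ; wait for the write to land
 */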

static struct qreg *
ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}

/**
 * This function is responsible for getting VIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value.  To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous VIR instruction in the block and rewritable by
 * this function.  That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination.
 */
void
ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        struct qinst *last_inst = NULL;
        if (!list_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        assert((result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* Insert a MOV if the source wasn't an SSA def in the
                 * previous instruction.
                 */
                if ((vir_in_nonuniform_control_flow(c) &&
                     c->defs[last_inst->dst.index]->qpu.sig.ldunif)) {
                        result = vir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (vir_in_nonuniform_control_flow(c)) {
                        last_inst->dst.index = qregs[chan].index;

                        /* Set the flags to the current exec mask.
                         */
                        c->cursor = vir_before_inst(last_inst);
                        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                                   V3D_QPU_PF_PUSHZ);
                        c->cursor = vir_after_inst(last_inst);

                        vir_set_cond(last_inst, V3D_QPU_COND_IFA);
                }
        }
}

struct qreg
ntq_get_src(struct v3d_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

static struct qreg
ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static struct qreg
ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
{
        return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
}
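
/* E.g. for a 16-wide texture at LOD 2 this computes MAX(16 >> 2, 1) = 4, and
 * keeps returning 1 for any LOD past the end of the mip chain.
 */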

static void
ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
        int dest_size = nir_tex_instr_dest_size(instr);

        struct qreg lod = c->undef;
        if (lod_index != -1)
                lod = ntq_get_src(c, instr->src[lod_index].src, 0);

        for (int i = 0; i < dest_size; i++) {
                assert(i < 3);
                enum quniform_contents contents;

                if (instr->is_array && i == dest_size - 1)
                        contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
                else
                        contents = QUNIFORM_TEXTURE_WIDTH + i;

                struct qreg size = vir_uniform(c, contents, unit);

                switch (instr->sampler_dim) {
                case GLSL_SAMPLER_DIM_1D:
                case GLSL_SAMPLER_DIM_2D:
                case GLSL_SAMPLER_DIM_MS:
                case GLSL_SAMPLER_DIM_3D:
                case GLSL_SAMPLER_DIM_CUBE:
                        /* Don't minify the array size. */
                        if (!(instr->is_array && i == dest_size - 1)) {
                                size = ntq_minify(c, size, lod);
                        }
                        break;

                case GLSL_SAMPLER_DIM_RECT:
                        /* There's no LOD field for rects */
                        break;

                default:
                        unreachable("Bad sampler type");
                }

                ntq_store_dest(c, &instr->dest, i, size);
        }
}

static void
ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        /* Since each texture sampling op requires uploading uniforms to
         * reference the texture, there's no HW support for texture size and
         * you just upload uniforms containing the size.
         */
        switch (instr->op) {
        case nir_texop_query_levels:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
                return;
        case nir_texop_txs:
                ntq_emit_txs(c, instr);
                return;
        default:
                break;
        }

        if (c->devinfo->ver >= 40)
                v3d40_vir_emit_tex(c, instr);
        else
                v3d33_vir_emit_tex(c, instr);
}

static struct qreg
ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
{
        struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
        if (is_cos)
                input = vir_FADD(c, input, vir_uniform_f(c, 0.5));

        struct qreg periods = vir_FROUND(c, input);
        struct qreg sin_output = vir_SIN(c, vir_FSUB(c, input, periods));
        return vir_XOR(c, sin_output, vir_SHL(c,
                                              vir_FTOIN(c, periods),
                                              vir_uniform_ui(c, -1)));
}
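
/* A sketch of the math above: with x scaled by 1/pi (plus 0.5 for cosine),
 * "periods" is the nearest integer number of half-turns, so the SIN input is
 * folded into [-0.5, 0.5].  The final XOR flips the result's sign bit on odd
 * half-turns: shifting the integer period count left by -1 presumably wraps
 * to a shift of 31, moving its low bit into the sign position.
 */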

static struct qreg
ntq_fsign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
        vir_set_pf(vir_FMOV_dest(c, vir_nop_reg(), src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
        vir_set_pf(vir_FMOV_dest(c, vir_nop_reg(), src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
        return vir_MOV(c, t);
}

static void
emit_fragcoord_input(struct v3d_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = vir_FXCD(c);
        c->inputs[attr * 4 + 1] = vir_FYCD(c);
        c->inputs[attr * 4 + 2] = c->payload_z;
        c->inputs[attr * 4 + 3] = vir_RECIP(c, c->payload_w);
}

static struct qreg
emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
                      uint8_t swizzle, int array_index)
{
        struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
        struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);

        struct qreg vary;
        if (c->devinfo->ver >= 41) {
                struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                                    c->undef, c->undef);
                ldvary->qpu.sig.ldvary = true;
                vary = vir_emit_def(c, ldvary);
        } else {
                vir_NOP(c)->qpu.sig.ldvary = true;
                vary = r3;
        }

        /* For gl_PointCoord input or distance along a line, we'll be called
         * with no nir_variable, and we don't count toward VPM size so we
         * don't track an input slot.
         */
        if (!var)
                return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);

        int i = c->num_inputs++;
        c->input_slots[i] =
                v3d_slot_from_slot_and_component(var->data.location +
                                                 array_index, swizzle);

        switch (var->data.interpolation) {
        case INTERP_MODE_NONE:
                /* If a gl_FrontColor or gl_BackColor input has no interp
                 * qualifier, then if we're using glShadeModel(GL_FLAT) it
                 * needs to be flat shaded.
                 */
                switch (var->data.location + array_index) {
                case VARYING_SLOT_COL0:
                case VARYING_SLOT_COL1:
                case VARYING_SLOT_BFC0:
                case VARYING_SLOT_BFC1:
                        if (c->fs_key->shade_model_flat) {
                                BITSET_SET(c->flat_shade_flags, i);
                                vir_MOV_dest(c, c->undef, vary);
                                return vir_MOV(c, r5);
                        } else {
                                return vir_FADD(c, vir_FMUL(c, vary,
                                                            c->payload_w), r5);
                        }
                default:
                        break;
                }
                /* FALLTHROUGH */
        case INTERP_MODE_SMOOTH:
                if (var->data.centroid) {
                        BITSET_SET(c->centroid_flags, i);
                        return vir_FADD(c, vir_FMUL(c, vary,
                                                    c->payload_w_centroid), r5);
                } else {
                        return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
                }
        case INTERP_MODE_NOPERSPECTIVE:
                BITSET_SET(c->noperspective_flags, i);
                return vir_FADD(c, vir_MOV(c, vary), r5);
        case INTERP_MODE_FLAT:
                BITSET_SET(c->flat_shade_flags, i);
                vir_MOV_dest(c, c->undef, vary);
                return vir_MOV(c, r5);
        default:
                unreachable("Bad interp mode");
        }
}

static void
emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var,
                    int array_index)
{
        for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                int chan = var->data.location_frac + i;
                c->inputs[attr * 4 + chan] =
                        emit_fragment_varying(c, var, chan, array_index);
        }
}

static void
add_output(struct v3d_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct v3d_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset] =
                v3d_slot_from_slot_and_component(slot, swizzle);
}

/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct v3d_compile *c,
                    nir_alu_instr *compare_instr,
                    enum v3d_qpu_cond *out_cond)
{
        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1;
        if (nir_op_infos[compare_instr->op].num_inputs > 1)
                src1 = ntq_get_alu_src(c, compare_instr, 1);
        bool cond_invert = false;
        struct qreg nop = vir_nop_reg();

        switch (compare_instr->op) {
        case nir_op_feq32:
        case nir_op_seq:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                break;
        case nir_op_ieq32:
                vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                break;

        case nir_op_fne32:
        case nir_op_sne:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;
        case nir_op_ine32:
                vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_fge32:
        case nir_op_sge:
                vir_set_pf(vir_FCMP_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ige32:
                vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;
        case nir_op_uge32:
                vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;

        case nir_op_slt:
        case nir_op_flt32:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHN);
                break;
        case nir_op_ilt32:
                vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ult32:
                vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
                break;

        case nir_op_i2b32:
                vir_set_pf(vir_MOV_dest(c, nop, src0), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_f2b32:
                vir_set_pf(vir_FMOV_dest(c, nop, src0), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        default:
                return false;
        }

        *out_cond = cond_invert ? V3D_QPU_COND_IFNA : V3D_QPU_COND_IFA;

        return true;
}

/* Finds an ALU instruction that generates our src value that could
 * (potentially) be greedily emitted in the consuming instruction.
 */
static struct nir_alu_instr *
ntq_get_alu_parent(nir_src src)
{
        if (!src.is_ssa || src.ssa->parent_instr->type != nir_instr_type_alu)
                return NULL;

        nir_alu_instr *instr = nir_instr_as_alu(src.ssa->parent_instr);
        if (!instr)
                return NULL;

        /* If the ALU instr's srcs are non-SSA, then we would have to avoid
         * moving emission of the ALU instr down past another write of the
         * src.
         */
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                if (!instr->src[i].src.is_ssa)
                        return NULL;
        }

        return instr;
}

/* Turns a NIR bool into a condition code to predicate on. */
static enum v3d_qpu_cond
ntq_emit_bool_to_cond(struct v3d_compile *c, nir_src src)
{
        nir_alu_instr *compare = ntq_get_alu_parent(src);
        if (!compare)
                goto out;

        enum v3d_qpu_cond cond;
        if (ntq_emit_comparison(c, compare, &cond))
                return cond;

out:
        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), ntq_get_src(c, src, 0)),
                   V3D_QPU_PF_PUSHZ);
        return V3D_QPU_COND_IFNA;
}

static void
ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
{
        /* This should always be lowered to ALU operations for V3D. */
        assert(!instr->dest.saturate);

        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        ntq_store_dest(c, &instr->dest.dest, i,
                                       vir_MOV(c, srcs[i]));
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        struct qreg result;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                result = vir_MOV(c, src[0]);
                break;

        case nir_op_fneg:
                result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
                break;
        case nir_op_ineg:
                result = vir_NEG(c, src[0]);
                break;

        case nir_op_fmul:
                result = vir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                result = vir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                result = vir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                result = vir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                result = vir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i32: {
                nir_alu_instr *src0_alu = ntq_get_alu_parent(instr->src[0].src);
                if (src0_alu && src0_alu->op == nir_op_fround_even) {
                        result = vir_FTOIN(c, ntq_get_alu_src(c, src0_alu, 0));
                } else {
                        result = vir_FTOIZ(c, src[0]);
                }
                break;
        }

        case nir_op_f2u32:
                result = vir_FTOUZ(c, src[0]);
                break;
        case nir_op_i2f32:
                result = vir_ITOF(c, src[0]);
                break;
        case nir_op_u2f32:
                result = vir_UTOF(c, src[0]);
                break;
        case nir_op_b2f32:
                result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i32:
                result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
                break;

        case nir_op_iadd:
                result = vir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                result = vir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                result = vir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                result = vir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                result = vir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                result = vir_MIN(c, src[0], src[1]);
                break;
        case nir_op_umin:
                result = vir_UMIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                result = vir_MAX(c, src[0], src[1]);
                break;
        case nir_op_umax:
                result = vir_UMAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                result = vir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                result = vir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                result = vir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                result = vir_NOT(c, src[0]);
                break;

        case nir_op_ufind_msb:
                result = vir_SUB(c, vir_uniform_ui(c, 31), vir_CLZ(c, src[0]));
                break;

        case nir_op_imul:
                result = vir_UMUL(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt: {
                enum v3d_qpu_cond cond;
                MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
                assert(ok);
                result = vir_MOV(c, vir_SEL(c, cond,
                                            vir_uniform_f(c, 1.0),
                                            vir_uniform_f(c, 0.0)));
                break;
        }

        case nir_op_i2b32:
        case nir_op_f2b32:
        case nir_op_feq32:
        case nir_op_fne32:
        case nir_op_fge32:
        case nir_op_flt32:
        case nir_op_ieq32:
        case nir_op_ine32:
        case nir_op_ige32:
        case nir_op_uge32:
        case nir_op_ilt32:
        case nir_op_ult32: {
                enum v3d_qpu_cond cond;
                MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
                assert(ok);
                result = vir_MOV(c, vir_SEL(c, cond,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;
        }

        case nir_op_b32csel:
                result = vir_MOV(c,
                                 vir_SEL(c,
                                         ntq_emit_bool_to_cond(c, instr->src[0].src),
                                         src[1], src[2]));
                break;

        case nir_op_fcsel:
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), src[0]),
                           V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            src[1], src[2]));
                break;

        case nir_op_frcp:
                result = vir_RECIP(c, src[0]);
                break;
        case nir_op_frsq:
                result = vir_RSQRT(c, src[0]);
                break;
        case nir_op_fexp2:
                result = vir_EXP(c, src[0]);
                break;
        case nir_op_flog2:
                result = vir_LOG(c, src[0]);
                break;

        case nir_op_fceil:
                result = vir_FCEIL(c, src[0]);
                break;
        case nir_op_ffloor:
                result = vir_FFLOOR(c, src[0]);
                break;
        case nir_op_fround_even:
                result = vir_FROUND(c, src[0]);
                break;
        case nir_op_ftrunc:
                result = vir_FTRUNC(c, src[0]);
                break;

        case nir_op_fsin:
                result = ntq_fsincos(c, src[0], false);
                break;
        case nir_op_fcos:
                result = ntq_fsincos(c, src[0], true);
                break;

        case nir_op_fsign:
                result = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
                break;

        case nir_op_iabs:
                result = vir_MAX(c, src[0], vir_NEG(c, src[0]));
                break;

        case nir_op_fddx:
        case nir_op_fddx_coarse:
        case nir_op_fddx_fine:
                result = vir_FDX(c, src[0]);
                break;

        case nir_op_fddy:
        case nir_op_fddy_coarse:
        case nir_op_fddy_fine:
                result = vir_FDY(c, src[0]);
                break;

        case nir_op_uadd_carry:
                vir_set_pf(vir_ADD_dest(c, vir_nop_reg(), src[0], src[1]),
                           V3D_QPU_PF_PUSHC);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        case nir_op_pack_half_2x16_split:
                result = vir_VFPACK(c, src[0], src[1]);
                break;

        case nir_op_unpack_half_2x16_split_x:
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_L);
                break;

        case nir_op_unpack_half_2x16_split_y:
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_H);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }

        /* We have a scalar result, so the instruction should only have a
         * single channel written to.
         */
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        ntq_store_dest(c, &instr->dest.dest,
                       ffs(instr->dest.write_mask) - 1, result);
}

/* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
 * specifier.  They come from a register that's preloaded with 0xffffffff
 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
 */
#define TLB_TYPE_F16_COLOR           (3 << 6)
#define TLB_TYPE_I32_COLOR           (1 << 6)
#define TLB_TYPE_F32_COLOR           (0 << 6)
#define TLB_RENDER_TARGET_SHIFT      3 /* Reversed!  7 = RT 0, 0 = RT 7. */
#define TLB_SAMPLE_MODE_PER_SAMPLE   (0 << 2)
#define TLB_SAMPLE_MODE_PER_PIXEL    (1 << 2)
#define TLB_F16_SWAP_HI_LO           (1 << 1)
#define TLB_VEC_SIZE_4_F16           (1 << 0)
#define TLB_VEC_SIZE_2_F16           (0 << 0)
#define TLB_VEC_SIZE_MINUS_1_SHIFT   0

/* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
 * flag is set.
 */
#define TLB_TYPE_DEPTH               ((2 << 6) | (0 << 4))
#define TLB_DEPTH_TYPE_INVARIANT     (0 << 2) /* Unmodified sideband input used */
#define TLB_DEPTH_TYPE_PER_PIXEL     (1 << 2) /* QPU result used */
#define TLB_V42_DEPTH_TYPE_INVARIANT (0 << 3) /* Unmodified sideband input used */
#define TLB_V42_DEPTH_TYPE_PER_PIXEL (1 << 3) /* QPU result used */

/* Stencil is a single 32-bit write. */
#define TLB_TYPE_STENCIL_ALPHA       ((2 << 6) | (1 << 4))
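
/* As a worked example of the scheme described above: the register's 0xff
 * reset value decodes as TLB_TYPE_F16_COLOR (3 << 6) | RT 0 (7 << 3) |
 * TLB_SAMPLE_MODE_PER_PIXEL | TLB_F16_SWAP_HI_LO | TLB_VEC_SIZE_4_F16 =
 * 0xc0 | 0x38 | 0x4 | 0x2 | 0x1, i.e. exactly the "normal vec4 f16 RT0
 * write" mentioned in the comment.
 */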

static void
emit_frag_end(struct v3d_compile *c)
{
        /* XXX
        if (c->output_sample_mask_index != -1) {
                vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }
        */

        bool has_any_tlb_color_write = false;
        for (int rt = 0; rt < V3D_MAX_DRAW_BUFFERS; rt++) {
                if (c->fs_key->cbufs & (1 << rt) && c->output_color_var[rt])
                        has_any_tlb_color_write = true;
        }

        if (c->fs_key->sample_alpha_to_coverage && c->output_color_var[0]) {
                struct nir_variable *var = c->output_color_var[0];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];

                vir_SETMSF_dest(c, vir_nop_reg(),
                                vir_AND(c,
                                        vir_MSF(c),
                                        vir_FTOC(c, color[3])));
        }

        struct qreg tlb_reg = vir_magic_reg(V3D_QPU_WADDR_TLB);
        struct qreg tlbu_reg = vir_magic_reg(V3D_QPU_WADDR_TLBU);
        if (c->output_position_index != -1) {
                struct qinst *inst = vir_MOV_dest(c, tlbu_reg,
                                                  c->outputs[c->output_position_index]);
                uint8_t tlb_specifier = TLB_TYPE_DEPTH;

                if (c->devinfo->ver >= 42) {
                        tlb_specifier |= (TLB_V42_DEPTH_TYPE_PER_PIXEL |
                                          TLB_SAMPLE_MODE_PER_PIXEL);
                } else {
                        tlb_specifier |= TLB_DEPTH_TYPE_PER_PIXEL;
                }

                inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
                                                      tlb_specifier |
                                                      0xffffff00);
        } else if (c->s->info.fs.uses_discard ||
                   !c->s->info.fs.early_fragment_tests ||
                   c->fs_key->sample_alpha_to_coverage ||
                   !has_any_tlb_color_write) {
                /* Emit passthrough Z if it needed to be delayed until shader
                 * end due to potential discards.
                 *
                 * Since (single-threaded) fragment shaders always need a TLB
                 * write, emit passthrough Z if we didn't have any color
                 * buffers and flag us as potentially discarding, so that we
                 * can use Z as the TLB write.
                 */
                c->s->info.fs.uses_discard = true;

                struct qinst *inst = vir_MOV_dest(c, tlbu_reg,
                                                  vir_nop_reg());
                uint8_t tlb_specifier = TLB_TYPE_DEPTH;

                if (c->devinfo->ver >= 42) {
                        /* The spec says the PER_PIXEL flag is ignored for
                         * invariant writes, but the simulator demands it.
                         */
                        tlb_specifier |= (TLB_V42_DEPTH_TYPE_INVARIANT |
                                          TLB_SAMPLE_MODE_PER_PIXEL);
                } else {
                        tlb_specifier |= TLB_DEPTH_TYPE_INVARIANT;
                }

                inst->uniform = vir_get_uniform_index(c,
                                                      QUNIFORM_CONSTANT,
                                                      tlb_specifier |
                                                      0xffffff00);
        }

        /* XXX: Performance improvement: Merge Z write and color writes TLB
         * uniform setup
         */

        for (int rt = 0; rt < V3D_MAX_DRAW_BUFFERS; rt++) {
                if (!(c->fs_key->cbufs & (1 << rt)) || !c->output_color_var[rt])
                        continue;

                nir_variable *var = c->output_color_var[rt];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];
                int num_components = glsl_get_vector_elements(var->type);
                uint32_t conf = 0xffffff00;
                struct qinst *inst;

                conf |= TLB_SAMPLE_MODE_PER_PIXEL;
                conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;

                if (c->fs_key->swap_color_rb & (1 << rt))
                        num_components = MAX2(num_components, 3);

                assert(num_components != 0);
                switch (glsl_get_base_type(var->type)) {
                case GLSL_TYPE_UINT:
                case GLSL_TYPE_INT:
                        /* The F32 vs I32 distinction was dropped in 4.2. */
                        if (c->devinfo->ver < 42)
                                conf |= TLB_TYPE_I32_COLOR;
                        else
                                conf |= TLB_TYPE_F32_COLOR;
                        conf |= ((num_components - 1) <<
                                 TLB_VEC_SIZE_MINUS_1_SHIFT);

                        inst = vir_MOV_dest(c, tlbu_reg, color[0]);
                        inst->uniform = vir_get_uniform_index(c,
                                                              QUNIFORM_CONSTANT,
                                                              conf);

                        for (int i = 1; i < num_components; i++) {
                                inst = vir_MOV_dest(c, tlb_reg, color[i]);
                        }
                        break;

                default: {
                        struct qreg r = color[0];
                        struct qreg g = color[1];
                        struct qreg b = color[2];
                        struct qreg a = color[3];

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                conf |= TLB_TYPE_F32_COLOR;
                                conf |= ((num_components - 1) <<
                                         TLB_VEC_SIZE_MINUS_1_SHIFT);
                        } else {
                                conf |= TLB_TYPE_F16_COLOR;
                                conf |= TLB_F16_SWAP_HI_LO;
                                if (num_components >= 3)
                                        conf |= TLB_VEC_SIZE_4_F16;
                                else
                                        conf |= TLB_VEC_SIZE_2_F16;
                        }

                        if (c->fs_key->swap_color_rb & (1 << rt)) {
                                r = color[2];
                                b = color[0];
                        }

                        if (c->fs_key->sample_alpha_to_one)
                                a = vir_uniform_f(c, 1.0);

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                inst = vir_MOV_dest(c, tlbu_reg, r);
                                inst->uniform = vir_get_uniform_index(c,
                                                                      QUNIFORM_CONSTANT,
                                                                      conf);

                                if (num_components >= 2)
                                        vir_MOV_dest(c, tlb_reg, g);
                                if (num_components >= 3)
                                        vir_MOV_dest(c, tlb_reg, b);
                                if (num_components >= 4)
                                        vir_MOV_dest(c, tlb_reg, a);
                        } else {
                                inst = vir_VFPACK_dest(c, tlb_reg, r, g);
                                if (conf != ~0) {
                                        inst->dst = tlbu_reg;
                                        inst->uniform = vir_get_uniform_index(c,
                                                                              QUNIFORM_CONSTANT,
                                                                              conf);
                                }

                                if (num_components >= 3)
                                        inst = vir_VFPACK_dest(c, tlb_reg, b, a);
                        }
                        break;
                }
                }
        }
}

void
vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t vpm_index)
{
        if (c->devinfo->ver >= 40) {
                vir_STVPMV(c, vir_uniform_ui(c, vpm_index), val);
        } else {
                /* XXX: v3d33_vir_vpm_write_setup(c); */
                vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
        }
}

static void
emit_vert_end(struct v3d_compile *c)
{
        /* GFXH-1684: VPM writes need to be complete by the end of the shader.
         */
        if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
                vir_VPMWT(c);
}

void
v3d_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);

        NIR_PASS(progress, s, nir_opt_move_load_ubo);
}

static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}

static struct qreg
ntq_emit_vpm_read(struct v3d_compile *c,
                  uint32_t *num_components_queued,
                  uint32_t *remaining,
                  uint32_t vpm_index)
{
        struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);

        if (c->devinfo->ver >= 40) {
                return vir_LDVPMV_IN(c,
                                     vir_uniform_ui(c,
                                                    (*num_components_queued)++));
        }

        if (*num_components_queued != 0) {
                (*num_components_queued)--;
                return vir_MOV(c, vpm);
        }

        uint32_t num_components = MIN2(*remaining, 32);

        v3d33_vir_vpm_read_setup(c, num_components);

        *num_components_queued = num_components - 1;
        *remaining -= num_components;

        return vir_MOV(c, vpm);
}
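
/* On V3D 4.x the read above turns directly into an LDVPMV with a sequential
 * index, while on 3.3 a vpm_read_setup programs a batch of up to 32
 * sequential components that the following QFILE_VPM reads then consume.
 */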

static void
ntq_setup_vpm_inputs(struct v3d_compile *c)
{
        /* Figure out how many components of each vertex attribute the shader
         * uses.  Each variable should have been split to individual
         * components and unused ones DCEed.  The vertex fetcher will load
         * from the start of the attribute to the number of components we
         * declare we need in c->vattr_sizes[].
         */
        nir_foreach_variable(var, &c->s->inputs) {
                /* No VS attribute array support. */
                assert(MAX2(glsl_get_length(var->type), 1) == 1);

                unsigned loc = var->data.driver_location;
                int start_component = var->data.location_frac;
                int num_components = glsl_get_components(var->type);

                c->vattr_sizes[loc] = MAX2(c->vattr_sizes[loc],
                                           start_component + num_components);
        }

        unsigned num_components = 0;
        uint32_t vpm_components_queued = 0;
        bool uses_iid = c->s->info.system_values_read &
                (1ull << SYSTEM_VALUE_INSTANCE_ID);
        bool uses_vid = c->s->info.system_values_read &
                (1ull << SYSTEM_VALUE_VERTEX_ID);
        num_components += uses_iid;
        num_components += uses_vid;

        for (int i = 0; i < ARRAY_SIZE(c->vattr_sizes); i++)
                num_components += c->vattr_sizes[i];

        if (uses_iid) {
                c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                           &num_components, ~0);
        }

        if (uses_vid) {
                c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                           &num_components, ~0);
        }

        /* The actual loads will happen directly in nir_intrinsic_load_input
         * on newer versions.
         */
        if (c->devinfo->ver >= 40)
                return;

        for (int loc = 0; loc < ARRAY_SIZE(c->vattr_sizes); loc++) {
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                for (int i = 0; i < c->vattr_sizes[loc]; i++) {
                        c->inputs[loc * 4 + i] =
                                ntq_emit_vpm_read(c,
                                                  &vpm_components_queued,
                                                  &num_components,
                                                  loc * 4 + i);
                }
        }

        if (c->devinfo->ver >= 40) {
                assert(vpm_components_queued == num_components);
        } else {
                assert(vpm_components_queued == 0);
                assert(num_components == 0);
        }
}

static void
ntq_setup_fs_inputs(struct v3d_compile *c)
{
        unsigned num_entries = 0;
        unsigned num_components = 0;
        nir_foreach_variable(var, &c->s->inputs) {
                num_entries++;
                num_components += glsl_get_components(var->type);
        }

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + array_len) * 4);

                if (var->data.location == VARYING_SLOT_POS) {
                        emit_fragcoord_input(c, loc);
                } else if (var->data.location == VARYING_SLOT_PNTC ||
                           (var->data.location >= VARYING_SLOT_VAR0 &&
                            (c->fs_key->point_sprite_mask &
                             (1 << (var->data.location -
                                    VARYING_SLOT_VAR0))))) {
                        c->inputs[loc * 4 + 0] = c->point_x;
                        c->inputs[loc * 4 + 1] = c->point_y;
                } else {
                        for (int j = 0; j < array_len; j++)
                                emit_fragment_input(c, loc + j, var, j);
                }
        }
}

static void
ntq_setup_outputs(struct v3d_compile *c)
{
        if (c->s->info.stage != MESA_SHADER_FRAGMENT)
                return;

        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4 - var->data.location_frac; i++) {
                        add_output(c, loc + var->data.location_frac + i,
                                   var->data.location,
                                   var->data.location_frac + i);
                }

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        c->output_color_var[0] = var;
                        c->output_color_var[1] = var;
                        c->output_color_var[2] = var;
                        c->output_color_var[3] = var;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        c->output_color_var[var->data.location -
                                            FRAG_RESULT_DATA0] = var;
                        break;
                case FRAG_RESULT_DEPTH:
                        c->output_position_index = loc;
                        break;
                case FRAG_RESULT_SAMPLE_MASK:
                        c->output_sample_mask_index = loc;
                        break;
                }
        }
}

/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = vir_get_temp(c);
        }
}

static void
ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
{
        /* XXX perf: Experiment with using immediate loads to avoid having
         * these end up in the uniform stream.  Watch out for breaking the
         * small immediates optimization in the process!
         */
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}

static void
ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* VIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers()).
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, 0);
}

static void
ntq_emit_image_size(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        assert(instr->intrinsic == nir_intrinsic_image_deref_size);
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        unsigned image_index = var->data.driver_location;
        const struct glsl_type *sampler_type = glsl_without_array(var->type);
        bool is_array = glsl_sampler_type_is_array(sampler_type);

        ntq_store_dest(c, &instr->dest, 0,
                       vir_uniform(c, QUNIFORM_IMAGE_WIDTH, image_index));
        if (instr->num_components > 1) {
                ntq_store_dest(c, &instr->dest, 1,
                               vir_uniform(c, QUNIFORM_IMAGE_HEIGHT,
                                           image_index));
        }
        if (instr->num_components > 2) {
                ntq_store_dest(c, &instr->dest, 2,
                               vir_uniform(c,
                                           is_array ?
                                           QUNIFORM_IMAGE_ARRAY_SIZE :
                                           QUNIFORM_IMAGE_DEPTH,
                                           image_index));
        }
}

static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        unsigned offset;

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                if (nir_src_is_const(instr->src[0])) {
                        int offset = (nir_intrinsic_base(instr) +
                                      nir_src_as_uint(instr->src[0]));
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        for (int i = 0; i < instr->num_components; i++) {
                                ntq_store_dest(c, &instr->dest, i,
                                               vir_uniform(c, QUNIFORM_UNIFORM,
                                                           offset + i));
                        }
                } else {
                        ntq_emit_tmu_general(c, instr, false);
                }
                break;

        case nir_intrinsic_load_ubo:
                ntq_emit_tmu_general(c, instr, false);
                break;

        case nir_intrinsic_ssbo_atomic_add:
        case nir_intrinsic_ssbo_atomic_imin:
        case nir_intrinsic_ssbo_atomic_umin:
        case nir_intrinsic_ssbo_atomic_imax:
        case nir_intrinsic_ssbo_atomic_umax:
        case nir_intrinsic_ssbo_atomic_and:
        case nir_intrinsic_ssbo_atomic_or:
        case nir_intrinsic_ssbo_atomic_xor:
        case nir_intrinsic_ssbo_atomic_exchange:
        case nir_intrinsic_ssbo_atomic_comp_swap:
        case nir_intrinsic_load_ssbo:
        case nir_intrinsic_store_ssbo:
                ntq_emit_tmu_general(c, instr, false);
                break;

        case nir_intrinsic_shared_atomic_add:
        case nir_intrinsic_shared_atomic_imin:
        case nir_intrinsic_shared_atomic_umin:
        case nir_intrinsic_shared_atomic_imax:
        case nir_intrinsic_shared_atomic_umax:
        case nir_intrinsic_shared_atomic_and:
        case nir_intrinsic_shared_atomic_or:
        case nir_intrinsic_shared_atomic_xor:
        case nir_intrinsic_shared_atomic_exchange:
        case nir_intrinsic_shared_atomic_comp_swap:
        case nir_intrinsic_load_shared:
        case nir_intrinsic_store_shared:
                ntq_emit_tmu_general(c, instr, true);
                break;

        case nir_intrinsic_image_deref_load:
        case nir_intrinsic_image_deref_store:
        case nir_intrinsic_image_deref_atomic_add:
        case nir_intrinsic_image_deref_atomic_min:
        case nir_intrinsic_image_deref_atomic_max:
        case nir_intrinsic_image_deref_atomic_and:
        case nir_intrinsic_image_deref_atomic_or:
        case nir_intrinsic_image_deref_atomic_xor:
        case nir_intrinsic_image_deref_atomic_exchange:
        case nir_intrinsic_image_deref_atomic_comp_swap:
                v3d40_vir_emit_image_load_store(c, instr);
                break;

        case nir_intrinsic_get_buffer_size:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_GET_BUFFER_SIZE,
                                           nir_src_as_uint(instr->src[0])));
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                                   nir_intrinsic_ucp_id(instr) *
                                                   4 + i));
                }
                break;

        case nir_intrinsic_load_viewport_x_scale:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE, 0));
                break;

        case nir_intrinsic_load_viewport_y_scale:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_Y_SCALE, 0));
                break;

        case nir_intrinsic_load_viewport_z_scale:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0));
                break;

        case nir_intrinsic_load_viewport_z_offset:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0));
                break;

        case nir_intrinsic_load_alpha_ref_float:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
                break;

        case nir_intrinsic_load_sample_mask_in:
                ntq_store_dest(c, &instr->dest, 0, vir_MSF(c));
                break;

        case nir_intrinsic_load_helper_invocation:
                vir_set_pf(vir_MSF_dest(c, vir_nop_reg()), V3D_QPU_PF_PUSHZ);
                ntq_store_dest(c, &instr->dest, 0,
                               vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
                                                  vir_uniform_ui(c, ~0),
                                                  vir_uniform_ui(c, 0))));
                break;

        case nir_intrinsic_load_front_face:
                /* The register contains 0 (front) or 1 (back), and we need to
                 * turn it into a NIR bool where true means front.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               vir_ADD(c,
                                       vir_uniform_ui(c, -1),
                                       vir_REVF(c)));
                break;

        case nir_intrinsic_load_instance_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
                break;

        case nir_intrinsic_load_vertex_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
                break;

        case nir_intrinsic_load_input:
                offset = (nir_intrinsic_base(instr) +
                          nir_src_as_uint(instr->src[0]));
                if (c->s->info.stage != MESA_SHADER_FRAGMENT &&
                    c->devinfo->ver >= 40) {
                        /* Emit the LDVPM directly now, rather than at the top
                         * of the shader like we did for V3D 3.x (which needs
                         * vpmsetup when not just taking the next offset).
                         *
                         * Note that delaying like this may introduce stalls,
                         * as LDVPMV takes a minimum of 1 instruction but may
                         * be slower if the VPM unit is busy with another QPU.
                         */
                        int index = 0;
                        if (c->s->info.system_values_read &
                            (1ull << SYSTEM_VALUE_INSTANCE_ID)) {
                                index++;
                        }
                        if (c->s->info.system_values_read &
                            (1ull << SYSTEM_VALUE_VERTEX_ID)) {
                                index++;
                        }
                        for (int i = 0; i < offset; i++)
                                index += c->vattr_sizes[i];
                        index += nir_intrinsic_component(instr);
                        for (int i = 0; i < instr->num_components; i++) {
                                struct qreg vpm_offset =
                                        vir_uniform_ui(c, index++);
                                ntq_store_dest(c, &instr->dest, i,
                                               vir_LDVPMV_IN(c, vpm_offset));
                        }
                } else {
                        for (int i = 0; i < instr->num_components; i++) {
                                int comp = nir_intrinsic_component(instr) + i;
                                ntq_store_dest(c, &instr->dest, i,
                                               vir_MOV(c, c->inputs[offset * 4 +
                                                                    comp]));
                        }
                }
                break;

        case nir_intrinsic_store_output:
                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        offset = ((nir_intrinsic_base(instr) +
                                   nir_src_as_uint(instr->src[1])) * 4 +
                                  nir_intrinsic_component(instr));
                        for (int i = 0; i < instr->num_components; i++) {
                                c->outputs[offset + i] =
                                        vir_MOV(c,
                                                ntq_get_src(c,
                                                            instr->src[0], i));
                        }
                } else {
                        assert(instr->num_components == 1);

                        vir_VPM_WRITE(c,
                                      ntq_get_src(c, instr->src[0], 0),
                                      nir_intrinsic_base(instr));
                }
                break;

        case nir_intrinsic_image_deref_size:
                ntq_emit_image_size(c, instr);
                break;

        case nir_intrinsic_discard:
                if (vir_in_nonuniform_control_flow(c)) {
                        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                                   V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_nop_reg(),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_SETMSF_dest(c, vir_nop_reg(),
                                        vir_uniform_ui(c, 0));
                }
                break;

        case nir_intrinsic_discard_if: {
                enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, instr->src[0]);

                if (vir_in_nonuniform_control_flow(c)) {
                        struct qinst *exec_flag = vir_MOV_dest(c, vir_nop_reg(),
                                                               c->execute);
                        if (cond == V3D_QPU_COND_IFA) {
                                vir_set_uf(exec_flag, V3D_QPU_UF_ANDZ);
                        } else {
                                vir_set_uf(exec_flag, V3D_QPU_UF_NORNZ);
                                cond = V3D_QPU_COND_IFA;
                        }
                }

                vir_set_cond(vir_SETMSF_dest(c, vir_nop_reg(),
                                             vir_uniform_ui(c, 0)), cond);

                break;
        }

        case nir_intrinsic_memory_barrier:
        case nir_intrinsic_memory_barrier_atomic_counter:
        case nir_intrinsic_memory_barrier_buffer:
        case nir_intrinsic_memory_barrier_image:
        case nir_intrinsic_group_memory_barrier:
                /* We don't do any instruction scheduling of these NIR
                 * instructions between each other, so we just need to make
                 * sure that the TMU operations before the barrier are flushed
                 * before the ones after the barrier.  That is currently
                 * handled by having a THRSW in each of them and a LDTMU
                 * series or a TMUWT after.
                 */
                break;

        case nir_intrinsic_barrier:
                /* Emit a TSY op to get all invocations in the workgroup
                 * (actually supergroup) to block until the last invocation
                 * reaches the TSY op.
                 */
                if (c->devinfo->ver >= 42) {
                        vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
                                                      V3D_QPU_WADDR_SYNCB));
                } else {
                        struct qinst *sync =
                                vir_BARRIERID_dest(c,
                                                   vir_reg(QFILE_MAGIC,
                                                           V3D_QPU_WADDR_SYNCU));
                        sync->uniform =
                                vir_get_uniform_index(c, QUNIFORM_CONSTANT,
                                                      0xffffff00 |
                                                      V3D_TSY_WAIT_INC_CHECK);
                }

                /* The blocking of a TSY op only happens at the next thread
                 * switch.  No texturing may be outstanding at the time of a
                 * TSY blocking operation.
                 */
                vir_emit_thrsw(c);
                break;

        case nir_intrinsic_load_num_work_groups:
                for (int i = 0; i < 3; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_NUM_WORK_GROUPS,
                                                   i));
                }
                break;

        case nir_intrinsic_load_local_invocation_index:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_SHR(c, c->cs_payload[1],
                                       vir_uniform_ui(c, 32 - c->local_invocation_index_bits)));
                break;

        case nir_intrinsic_load_work_group_id:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_AND(c, c->cs_payload[0],
                                       vir_uniform_ui(c, 0xffff)));
                ntq_store_dest(c, &instr->dest, 1,
                               vir_SHR(c, c->cs_payload[0],
                                       vir_uniform_ui(c, 16)));
                ntq_store_dest(c, &instr->dest, 2,
                               vir_AND(c, c->cs_payload[1],
                                       vir_uniform_ui(c, 0xffff)));
                break;

        case nir_intrinsic_load_subgroup_id:
                ntq_store_dest(c, &instr->dest, 0, vir_EIDX(c));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

/* Clears (activates) the execute flags for any channels whose jump target
 * matches this block.
 *
 * XXX perf: Could we be using flpush/flpop somehow for our execution channel
 * enabling?
 *
 * XXX perf: For uniform control flow, we should be able to skip c->execute
 * handling entirely.
 */
static void
ntq_activate_execute_for_block(struct v3d_compile *c)
{
        vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
                                c->execute, vir_uniform_ui(c, c->cur_block->index)),
                   V3D_QPU_PF_PUSHZ);

        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
}
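
/* A sketch of the execute-mask convention used by the control flow code
 * below: c->execute holds, per channel, the index of the block that channel
 * wants to run next (0 meaning "active now").  Each block entry calls this
 * function to re-activate channels whose target is the current block, while
 * branches, breaks, and continues just rewrite their channels' target index.
 */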

static void
ntq_emit_uniform_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        /* Set up the flags for the IF condition (taking the THEN branch). */
        enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);

        /* Jump to ELSE. */
        vir_BRANCH(c, cond == V3D_QPU_COND_IFA ?
                   V3D_QPU_BRANCH_COND_ALLNA :
                   V3D_QPU_BRANCH_COND_ALLA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* At the end of the THEN block, jump to ENDIF */
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALWAYS);
                vir_link_blocks(c->cur_block, after_block);

                /* Emit the else block. */
                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
}

static void
ntq_emit_nonuniform_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        bool was_uniform_control_flow = false;
        if (!vir_in_nonuniform_control_flow(c)) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_uniform_control_flow = true;
        }

        /* Set up the flags for the IF condition (taking the THEN branch). */
        enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);

        /* Update the flags+cond to mean "Taking the ELSE branch (!cond) and
         * was previously active (execute Z) for updating the exec flags.
         */
        if (was_uniform_control_flow) {
                cond = v3d_qpu_cond_invert(cond);
        } else {
                struct qinst *inst = vir_MOV_dest(c, vir_nop_reg(), c->execute);
                if (cond == V3D_QPU_COND_IFA) {
                        vir_set_uf(inst, V3D_QPU_UF_NORNZ);
                } else {
                        vir_set_uf(inst, V3D_QPU_UF_ANDZ);
                        cond = V3D_QPU_COND_IFA;
                }
        }

        vir_MOV_cond(c, cond,
                     c->execute,
                     vir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);
        vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block.  First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF
                 */
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
                                        c->execute,
                                        vir_uniform_ui(c, after_block->index)),
                           V3D_QPU_PF_PUSHZ);
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
                vir_link_blocks(c->cur_block, after_block);
                vir_link_blocks(c->cur_block, else_block);

                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
        if (was_uniform_control_flow)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);
}

static void
ntq_emit_if(struct v3d_compile *c, nir_if *nif)
{
        bool was_in_control_flow = c->in_control_flow;
        c->in_control_flow = true;
        if (!vir_in_nonuniform_control_flow(c) &&
            nir_src_is_dynamically_uniform(nif->condition)) {
                ntq_emit_uniform_if(c, nif);
        } else {
                ntq_emit_nonuniform_if(c, nif);
        }
        c->in_control_flow = was_in_control_flow;
}

static void
ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
{
        switch (jump->type) {
        case nir_jump_break:
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_break_block->index));
                break;

        case nir_jump_continue:
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_cont_block->index));
                break;

        case nir_jump_return:
                unreachable("All returns should be lowered\n");
        }
}

static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_deref:
                /* ignored, will be walked by the intrinsic using it. */
                break;

        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                ntq_emit_jump(c, nir_instr_as_jump(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
ntq_emit_block(struct v3d_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}

static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
{
        bool was_in_control_flow = c->in_control_flow;
        c->in_control_flow = true;

        bool was_uniform_control_flow = false;
        if (!vir_in_nonuniform_control_flow(c)) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_uniform_control_flow = true;
        }

        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = vir_new_block(c);
        c->loop_break_block = vir_new_block(c);

        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* Re-enable any previous continues now, so our ANYA check below
         * works.
         *
         * XXX: Use the .ORZ flags update, instead.
         */
        vir_set_pf(vir_XOR_dest(c,
                                vir_nop_reg(),
                                c->execute,
                                vir_uniform_ui(c, c->loop_cont_block->index)),
                   V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));

        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);

        struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
        /* Pixels that were not dispatched or have been discarded should not
         * contribute to looping again.
         */
        branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_link_blocks(c->cur_block, c->loop_break_block);

        vir_set_emit_block(c, c->loop_break_block);
        if (was_uniform_control_flow)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;

        c->loops++;

        c->in_control_flow = was_in_control_flow;
}
static void
ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}
static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}
static void
ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}
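
/* Top level of NIR-to-VIR conversion: sets up the stage-specific payload
 * registers (fragment W/Z, compute workgroup info), inputs and outputs,
 * then emits the body of the shader's main function.
 */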
static void
nir_to_vir(struct v3d_compile *c)
{
        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
                c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));

                /* XXX perf: We could set the "disable implicit point/line
                 * varyings" field in the shader record and not emit these, if
                 * they're not going to be used.
                 */
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, NULL, 0, 0);
                        c->point_y = emit_fragment_varying(c, NULL, 0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, NULL, 0, 0);
                }
                break;
        case MESA_SHADER_COMPUTE:
                /* Set up the TSO for barriers, assuming we do some. */
                if (c->devinfo->ver < 42) {
                        vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
                                                      V3D_QPU_WADDR_SYNC));
                }

                if (c->s->info.system_values_read &
                    ((1ull << SYSTEM_VALUE_LOCAL_INVOCATION_INDEX) |
                     (1ull << SYSTEM_VALUE_WORK_GROUP_ID))) {
                        c->cs_payload[0] = vir_MOV(c, vir_reg(QFILE_REG, 0));
                }
                if ((c->s->info.system_values_read &
                     ((1ull << SYSTEM_VALUE_WORK_GROUP_ID))) ||
                    c->s->info.cs.shared_size) {
                        c->cs_payload[1] = vir_MOV(c, vir_reg(QFILE_REG, 2));
                }

                /* Set up the division between gl_LocalInvocationIndex and
                 * wg_in_mem in the payload reg.
                 */
                int wg_size = (c->s->info.cs.local_size[0] *
                               c->s->info.cs.local_size[1] *
                               c->s->info.cs.local_size[2]);
                c->local_invocation_index_bits =
                        ffs(util_next_power_of_two(MAX2(wg_size, 64))) - 1;
                assert(c->local_invocation_index_bits <= 8);
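
                /* Worked example (illustrative): an 8x8x1 workgroup gives
                 * wg_size = 64, util_next_power_of_two(MAX2(64, 64)) = 64,
                 * and ffs(64) - 1 = 6 bits of local invocation index.  The
                 * assert above bounds workgroups at 8 bits (256 invocations).
                 */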

                if (c->s->info.cs.shared_size) {
                        struct qreg wg_in_mem = vir_SHR(c, c->cs_payload[1],
                                                        vir_uniform_ui(c, 16));
                        if (c->s->info.cs.local_size[0] != 1 ||
                            c->s->info.cs.local_size[1] != 1 ||
                            c->s->info.cs.local_size[2] != 1) {
                                int wg_bits = (16 -
                                               c->local_invocation_index_bits);
                                int wg_mask = (1 << wg_bits) - 1;
                                wg_in_mem = vir_AND(c, wg_in_mem,
                                                    vir_uniform_ui(c, wg_mask));
                        }
                        struct qreg shared_per_wg =
                                vir_uniform_ui(c, c->s->info.cs.shared_size);

                        c->cs_shared_offset =
                                vir_ADD(c,
                                        vir_uniform(c, QUNIFORM_SHARED_OFFSET, 0),
                                        vir_UMUL(c, wg_in_mem, shared_per_wg));
                }
                break;
        default:
                break;
        }

        if (c->s->info.stage == MESA_SHADER_FRAGMENT)
                ntq_setup_fs_inputs(c);
        else
                ntq_setup_vpm_inputs(c);

        ntq_setup_outputs(c);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}
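
/* NIR compiler options for V3D: each .lower_* flag asks NIR to rewrite an
 * operation the QPU has no native support for into simpler ops before
 * conversion.
 */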
const nir_shader_compiler_options v3d_nir_options = {
        .lower_all_io_to_temps = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_bitfield_insert_to_shifts = true,
        .lower_bitfield_extract_to_shifts = true,
        .lower_bitfield_reverse = true,
        .lower_bit_count = true,
        .lower_cs_local_id_from_index = true,
        .lower_ffract = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_unpack_snorm_4x8 = true,
        .lower_pack_half_2x16 = true,
        .lower_unpack_half_2x16 = true,
        .lower_find_lsb = true,
        .lower_flrp32 = true,
        .lower_fsqrt = true,
        .lower_ifind_msb = true,
        .lower_isign = true,
        .lower_ldexp = true,
        .lower_mul_high = true,
        .lower_wpos_pntc = true,
        .native_integers = true,
};
/**
 * When demoting a shader down to single-threaded, removes the THRSW
 * instructions (one will still be inserted at v3d_vir_to_qpu() for the
 * program end).
 */
static void
vir_remove_thrsw(struct v3d_compile *c)
{
        vir_for_each_block(block, c) {
                vir_for_each_inst_safe(inst, block) {
                        if (inst->qpu.sig.thrsw)
                                vir_remove_instruction(c, inst);
                }
        }

        c->last_thrsw = NULL;
}
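
/* Makes sure the program ends with a well-formed last thread switch:
 * pre-4.1 hardware is demoted to single-threaded when no TMU op covers
 * the switch, and otherwise the last THRSW (emitting a new one if the
 * previous was in conditional code) is flagged for pairing at QPU emit
 * time.
 */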
static void
vir_emit_last_thrsw(struct v3d_compile *c)
{
        /* On V3D before 4.1, we need a TMU op to be outstanding when thread
         * switching, so disable threads if we didn't do any TMU ops (each of
         * which would have emitted a THRSW).
         */
        if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
                c->threads = 1;
                if (c->last_thrsw)
                        vir_remove_thrsw(c);
                return;
        }

        /* If we're threaded and the last THRSW was in conditional code, then
         * we need to emit another one so that we can flag it as the last
         * thrsw.
         */
        if (c->last_thrsw && !c->last_thrsw_at_top_level) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        /* If we're threaded, then we need to mark the last THRSW instruction
         * so we can emit a pair of them at QPU emit time.
         *
         * For V3D 4.x, we can spawn the non-fragment shaders already in the
         * post-last-THRSW state, so we can skip this.
         */
        if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        if (c->last_thrsw)
                c->last_thrsw->is_last_thrsw = true;
}
/* There's a flag in the shader for "center W is needed for reasons other than
 * non-centroid varyings", so we just walk the program after VIR optimization
 * to see if it's used.  It should be harmless to set even if we only use
 * center W for varyings.
 */
static void
vir_check_payload_w(struct v3d_compile *c)
{
        if (c->s->info.stage != MESA_SHADER_FRAGMENT)
                return;

        vir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_REG &&
                            inst->src[i].index == 0) {
                                c->uses_center_w = true;
                                return;
                        }
                }
        }
}
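
/* Entry point for compiling one shader variant: dump NIR if requested,
 * convert to VIR, optimize, register allocate (halving the thread count
 * on failure), and hand the result to QPU emission.
 */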
void
v3d_nir_to_vir(struct v3d_compile *c)
{
        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_vir(c);

        /* Emit the last THRSW before STVPM and TLB writes. */
        vir_emit_last_thrsw(c);

        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                emit_frag_end(c);
                break;
        case MESA_SHADER_VERTEX:
                emit_vert_end(c);
                break;
        case MESA_SHADER_COMPUTE:
                break;
        default:
                unreachable("bad stage");
        }

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        vir_optimize(c);

        vir_check_payload_w(c);

        /* XXX perf: On VC4, we do a VIR-level instruction scheduling here.
         * We used that on that platform to pipeline TMU writes and reduce the
         * number of thread switches, as well as try (mostly successfully) to
         * reduce maximum register pressure to allow more threads.  We should
         * do something of that sort for V3D -- either instruction scheduling
         * here, or delay the THRSW and LDTMUs from our texture instructions
         * until the results are needed.
         */

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }
        /* Attempt to allocate registers for the temporaries.  If we fail,
         * reduce thread count and try again.
         */
        int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
        struct qpu_reg *temp_registers;
        while (true) {
                bool spilled;
                temp_registers = v3d_register_allocate(c, &spilled);
                if (spilled)
                        continue;

                if (temp_registers)
                        break;

                if (c->threads == min_threads) {
                        fprintf(stderr, "Failed to register allocate at %d threads:\n",
                                c->threads);
                        vir_dump(c);
                        c->failed = true;
                        return;
                }

                c->threads /= 2;

                if (c->threads == 1)
                        vir_remove_thrsw(c);
        }

        if (c->spill_size &&
            (V3D_DEBUG & (V3D_DEBUG_VIR |
                          v3d_debug_flag_for_shader_stage(c->s->info.stage)))) {
                fprintf(stderr, "%s prog %d/%d spilled VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        v3d_vir_to_qpu(c, temp_registers);
}