/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"
#define GENERAL_TMU_LOOKUP_PER_QUAD                 (0 << 7)
#define GENERAL_TMU_LOOKUP_PER_PIXEL                (1 << 7)
#define GENERAL_TMU_READ_OP_PREFETCH                (0 << 3)
#define GENERAL_TMU_READ_OP_CACHE_CLEAR             (1 << 3)
#define GENERAL_TMU_READ_OP_CACHE_FLUSH             (3 << 3)
#define GENERAL_TMU_READ_OP_CACHE_CLEAN             (3 << 3)
#define GENERAL_TMU_READ_OP_CACHE_L1T_CLEAR         (4 << 3)
#define GENERAL_TMU_READ_OP_CACHE_L1T_FLUSH_AGGREGATION (5 << 3)
#define GENERAL_TMU_READ_OP_ATOMIC_INC              (8 << 3)
#define GENERAL_TMU_READ_OP_ATOMIC_DEC              (9 << 3)
#define GENERAL_TMU_READ_OP_ATOMIC_NOT              (10 << 3)
#define GENERAL_TMU_READ_OP_READ                    (15 << 3)
#define GENERAL_TMU_LOOKUP_TYPE_8BIT_I              (0 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_16BIT_I             (1 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_VEC2                (2 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_VEC3                (3 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_VEC4                (4 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_8BIT_UI             (5 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_16BIT_UI            (6 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_32BIT_UI            (7 << 0)
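/* For illustration: a single-component, per-pixel read through the general
 * path ORs together GENERAL_TMU_LOOKUP_PER_PIXEL |
 * GENERAL_TMU_READ_OP_READ | GENERAL_TMU_LOOKUP_TYPE_32BIT_UI on top of the
 * 0xffffff00 config word built in ntq_emit_tmu_general() below.
 */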
#define GENERAL_TMU_WRITE_OP_ATOMIC_ADD_WRAP        (0 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_SUB_WRAP        (1 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_XCHG            (2 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_CMPXCHG         (3 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_UMIN            (4 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_UMAX            (5 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_SMIN            (6 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_SMAX            (7 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_AND             (8 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_OR              (9 << 3)
#define GENERAL_TMU_WRITE_OP_ATOMIC_XOR             (10 << 3)
#define GENERAL_TMU_WRITE_OP_WRITE                  (15 << 3)
#define V3D_TSY_SET_QUORUM          0
#define V3D_TSY_INC_WAITERS         1
#define V3D_TSY_DEC_WAITERS         2
#define V3D_TSY_INC_QUORUM          3
#define V3D_TSY_DEC_QUORUM          4
#define V3D_TSY_FREE_ALL            5
#define V3D_TSY_RELEASE             6
#define V3D_TSY_ACQUIRE             7
#define V3D_TSY_WAIT                8
#define V3D_TSY_WAIT_INC            9
#define V3D_TSY_WAIT_CHECK          10
#define V3D_TSY_WAIT_INC_CHECK      11
#define V3D_TSY_WAIT_CV             12
#define V3D_TSY_INC_SEMAPHORE       13
#define V3D_TSY_DEC_SEMAPHORE       14
#define V3D_TSY_SET_QUORUM_FREE_ALL 15
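/* These TSY opcodes go in the low bits of the 0xffffff00 sync uniform; see
 * the nir_intrinsic_barrier handling in ntq_emit_intrinsic(), which uses
 * V3D_TSY_WAIT_INC_CHECK on pre-4.2 hardware.
 */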
static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
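/* Grows a qreg array (doubling its size) to hold at least decl_size
 * entries, initializing any newly-added entries to c->undef.
 */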
static void
resize_qreg_array(struct v3d_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
void
vir_emit_thrsw(struct v3d_compile *c)
{
        if (c->threads == 1)
                return;

        /* Always thread switch after each texture operation for now.
         *
         * We could do better by batching a bunch of texture fetches up and
         * then doing one thread switch and collecting all their results
         * afterward.
         */
        c->last_thrsw = vir_NOP(c);
        c->last_thrsw->qpu.sig.thrsw = true;
        c->last_thrsw_at_top_level = !c->in_control_flow;
}
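/* Maps a NIR UBO/SSBO/shared-memory intrinsic to the general TMU lookup op
 * encoding defined above.
 */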
static uint32_t
v3d_general_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_ssbo:
        case nir_intrinsic_load_ubo:
        case nir_intrinsic_load_uniform:
        case nir_intrinsic_load_shared:
                return GENERAL_TMU_READ_OP_READ;
        case nir_intrinsic_store_ssbo:
        case nir_intrinsic_store_shared:
                return GENERAL_TMU_WRITE_OP_WRITE;
        case nir_intrinsic_ssbo_atomic_add:
        case nir_intrinsic_shared_atomic_add:
                return GENERAL_TMU_WRITE_OP_ATOMIC_ADD_WRAP;
        case nir_intrinsic_ssbo_atomic_imin:
        case nir_intrinsic_shared_atomic_imin:
                return GENERAL_TMU_WRITE_OP_ATOMIC_SMIN;
        case nir_intrinsic_ssbo_atomic_umin:
        case nir_intrinsic_shared_atomic_umin:
                return GENERAL_TMU_WRITE_OP_ATOMIC_UMIN;
        case nir_intrinsic_ssbo_atomic_imax:
        case nir_intrinsic_shared_atomic_imax:
                return GENERAL_TMU_WRITE_OP_ATOMIC_SMAX;
        case nir_intrinsic_ssbo_atomic_umax:
        case nir_intrinsic_shared_atomic_umax:
                return GENERAL_TMU_WRITE_OP_ATOMIC_UMAX;
        case nir_intrinsic_ssbo_atomic_and:
        case nir_intrinsic_shared_atomic_and:
                return GENERAL_TMU_WRITE_OP_ATOMIC_AND;
        case nir_intrinsic_ssbo_atomic_or:
        case nir_intrinsic_shared_atomic_or:
                return GENERAL_TMU_WRITE_OP_ATOMIC_OR;
        case nir_intrinsic_ssbo_atomic_xor:
        case nir_intrinsic_shared_atomic_xor:
                return GENERAL_TMU_WRITE_OP_ATOMIC_XOR;
        case nir_intrinsic_ssbo_atomic_exchange:
        case nir_intrinsic_shared_atomic_exchange:
                return GENERAL_TMU_WRITE_OP_ATOMIC_XCHG;
        case nir_intrinsic_ssbo_atomic_comp_swap:
        case nir_intrinsic_shared_atomic_comp_swap:
                return GENERAL_TMU_WRITE_OP_ATOMIC_CMPXCHG;
        default:
                unreachable("unknown intrinsic op");
        }
}
/**
 * Implements indirect uniform loads and SSBO accesses through the TMU general
 * memory access interface.
 */
static void
ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                     bool is_shared)
{
        /* XXX perf: We should turn add/sub of 1 to inc/dec.  Perhaps NIR
         * wants to have support for inc/dec?
         */

        uint32_t tmu_op = v3d_general_tmu_op(instr);
        bool is_store = (instr->intrinsic == nir_intrinsic_store_ssbo ||
                         instr->intrinsic == nir_intrinsic_store_shared);
        bool has_index = !is_shared;

        int offset_src;
        int tmu_writes = 1; /* address */
        if (instr->intrinsic == nir_intrinsic_load_uniform) {
                offset_src = 0;
        } else if (instr->intrinsic == nir_intrinsic_load_ssbo ||
                   instr->intrinsic == nir_intrinsic_load_ubo ||
                   instr->intrinsic == nir_intrinsic_load_shared) {
                offset_src = 0 + has_index;
        } else if (is_store) {
                offset_src = 1 + has_index;
                for (int i = 0; i < instr->num_components; i++) {
                        vir_MOV_dest(c,
                                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
                                     ntq_get_src(c, instr->src[0], i));
                        tmu_writes++;
                }
        } else {
                offset_src = 0 + has_index;
                vir_MOV_dest(c,
                             vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
                             ntq_get_src(c, instr->src[1 + has_index], 0));
                tmu_writes++;
                if (tmu_op == GENERAL_TMU_WRITE_OP_ATOMIC_CMPXCHG) {
                        vir_MOV_dest(c,
                                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
                                     ntq_get_src(c, instr->src[2 + has_index],
                                                 0));
                        tmu_writes++;
                }
        }

        uint32_t const_offset = 0;
        if (nir_src_is_const(instr->src[offset_src]))
                const_offset = nir_src_as_uint(instr->src[offset_src]);

        /* Make sure we won't exceed the 16-entry TMU fifo if each thread is
         * storing at the same time.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        struct qreg offset;
        if (instr->intrinsic == nir_intrinsic_load_uniform) {
                /* Find what variable in the default uniform block this
                 * uniform load is coming from.
                 */
                uint32_t base = nir_intrinsic_base(instr);
                int i;
                struct v3d_ubo_range *range = NULL;
                for (i = 0; i < c->num_ubo_ranges; i++) {
                        range = &c->ubo_ranges[i];
                        if (base >= range->src_offset &&
                            base < range->src_offset + range->size) {
                                break;
                        }
                }
                /* The driver-location-based offset always has to be within a
                 * declared uniform range.
                 */
                assert(i != c->num_ubo_ranges);
                if (!c->ubo_range_used[i]) {
                        c->ubo_range_used[i] = true;
                        range->dst_offset = c->next_ubo_dst_offset;
                        c->next_ubo_dst_offset += range->size;
                }

                const_offset += base - range->src_offset + range->dst_offset;

                offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
                                     v3d_unit_data_create(0, const_offset));
                const_offset = 0;
        } else if (instr->intrinsic == nir_intrinsic_load_ubo) {
                uint32_t index = nir_src_as_uint(instr->src[0]) + 1;
                /* Note that QUNIFORM_UBO_ADDR takes a UBO index shifted up by
                 * 1 (0 is gallium's constant buffer 0).
                 */
                offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
                                     v3d_unit_data_create(index, const_offset));
                const_offset = 0;
        } else if (is_shared) {
                /* Shared variables have no buffer index, and all start from a
                 * common base that we set up at the start of dispatch.
                 */
                offset = c->cs_shared_offset;
        } else {
                offset = vir_uniform(c, QUNIFORM_SSBO_OFFSET,
                                     nir_src_as_uint(instr->src[is_store ?
                                                                1 : 0]));
        }

        uint32_t config = (0xffffff00 |
                           tmu_op |
                           GENERAL_TMU_LOOKUP_PER_PIXEL);
        if (instr->num_components == 1) {
                config |= GENERAL_TMU_LOOKUP_TYPE_32BIT_UI;
        } else {
                config |= (GENERAL_TMU_LOOKUP_TYPE_VEC2 +
                           instr->num_components - 2);
        }

        if (vir_in_nonuniform_control_flow(c)) {
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        struct qreg dest;
        if (config == ~0)
                dest = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA);
        else
                dest = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUAU);

        struct qinst *tmu;
        if (nir_src_is_const(instr->src[offset_src]) && const_offset == 0) {
                tmu = vir_MOV_dest(c, dest, offset);
        } else {
                tmu = vir_ADD_dest(c, dest,
                                   offset,
                                   ntq_get_src(c, instr->src[offset_src], 0));
        }

        if (config != ~0) {
                tmu->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
                                                     config);
        }

        if (vir_in_nonuniform_control_flow(c))
                vir_set_cond(tmu, V3D_QPU_COND_IFA);

        vir_emit_thrsw(c);

        /* Read the result, or wait for the TMU op to complete. */
        for (int i = 0; i < nir_intrinsic_dest_components(instr); i++)
                ntq_store_dest(c, &instr->dest, i, vir_MOV(c, vir_LDTMU(c)));

        if (nir_intrinsic_dest_components(instr) == 0)
                vir_TMUWT(c);
}
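/* Allocates the array of qregs backing a NIR SSA def and registers it in the
 * def hash table; the caller fills in the per-component values.
 */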
static struct qreg *
ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}
/**
 * This function is responsible for getting VIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value.  To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous VIR instruction in the block and rewritable by
 * this function.  That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination.
 */
void
ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        struct qinst *last_inst = NULL;
        if (!list_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        assert((result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* Insert a MOV if the source wasn't an SSA def in the
                 * previous instruction.
                 */
                if ((vir_in_nonuniform_control_flow(c) &&
                     c->defs[last_inst->dst.index]->qpu.sig.ldunif)) {
                        result = vir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (vir_in_nonuniform_control_flow(c)) {
                        last_inst->dst.index = qregs[chan].index;

                        /* Set the flags to the current exec mask.
                         */
                        c->cursor = vir_before_inst(last_inst);
                        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                                   V3D_QPU_PF_PUSHZ);
                        c->cursor = vir_after_inst(last_inst);

                        vir_set_cond(last_inst, V3D_QPU_COND_IFA);
                }
        }
}
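/* Returns the qreg holding component i of a NIR source, whether it comes
 * from an SSA def or a nir_register.
 */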
struct qreg
ntq_get_src(struct v3d_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}
static struct qreg
ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}
static struct qreg
ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
{
        return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
}
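/* Implements nir_texop_txs: the texture sizes come in as uniforms and get
 * minified by the LOD for everything except array sizes and rects.
 */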
static void
ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
        int dest_size = nir_tex_instr_dest_size(instr);

        struct qreg lod = c->undef;
        if (lod_index != -1)
                lod = ntq_get_src(c, instr->src[lod_index].src, 0);

        for (int i = 0; i < dest_size; i++) {
                assert(i < 3);
                enum quniform_contents contents;

                if (instr->is_array && i == dest_size - 1)
                        contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
                else
                        contents = QUNIFORM_TEXTURE_WIDTH + i;

                struct qreg size = vir_uniform(c, contents, unit);

                switch (instr->sampler_dim) {
                case GLSL_SAMPLER_DIM_1D:
                case GLSL_SAMPLER_DIM_2D:
                case GLSL_SAMPLER_DIM_MS:
                case GLSL_SAMPLER_DIM_3D:
                case GLSL_SAMPLER_DIM_CUBE:
                        /* Don't minify the array size. */
                        if (!(instr->is_array && i == dest_size - 1)) {
                                size = ntq_minify(c, size, lod);
                        }
                        break;

                case GLSL_SAMPLER_DIM_RECT:
                        /* There's no LOD field for rects */
                        break;

                default:
                        unreachable("Bad sampler type");
                }

                ntq_store_dest(c, &instr->dest, i, size);
        }
}
static void
ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        /* Since each texture sampling op requires uploading uniforms to
         * reference the texture, there's no HW support for texture size and
         * you just upload uniforms containing the size.
         */
        switch (instr->op) {
        case nir_texop_query_levels:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
                return;
        case nir_texop_txs:
                ntq_emit_txs(c, instr);
                return;
        default:
                break;
        }

        if (c->devinfo->ver >= 40)
                v3d40_vir_emit_tex(c, instr);
        else
                v3d33_vir_emit_tex(c, instr);
}
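/* Implements sin/cos in terms of vir_SIN(), which handles sin(pi * x) for x
 * in [-0.5, 0.5]: the input is folded into that range, and the result's sign
 * bit is XOR'd back in for odd half-periods.
 */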
static struct qreg
ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
{
        struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
        if (is_cos)
                input = vir_FADD(c, input, vir_uniform_f(c, 0.5));

        struct qreg periods = vir_FROUND(c, input);
        struct qreg sin_output = vir_SIN(c, vir_FSUB(c, input, periods));
        return vir_XOR(c, sin_output, vir_SHL(c,
                                              vir_FTOIN(c, periods),
                                              vir_uniform_ui(c, -1)));
}
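/* Implements sign(x) for floats with two flag pushes and conditional MOVs:
 * start with 0.0, set 1.0 for any non-zero value, then overwrite with -1.0
 * for negative values.
 */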
static struct qreg
ntq_fsign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
        vir_set_pf(vir_FMOV_dest(c, vir_nop_reg(), src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
        vir_set_pf(vir_FMOV_dest(c, vir_nop_reg(), src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
        return vir_MOV(c, t);
}
static void
emit_fragcoord_input(struct v3d_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = vir_FXCD(c);
        c->inputs[attr * 4 + 1] = vir_FYCD(c);
        c->inputs[attr * 4 + 2] = c->payload_z;
        c->inputs[attr * 4 + 3] = vir_RECIP(c, c->payload_w);
}
static struct qreg
emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
                      uint8_t swizzle, int array_index)
{
        struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
        struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);

        struct qreg vary;
        if (c->devinfo->ver >= 41) {
                struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                                    c->undef, c->undef);
                ldvary->qpu.sig.ldvary = true;
                vary = vir_emit_def(c, ldvary);
        } else {
                vir_NOP(c)->qpu.sig.ldvary = true;
                vary = r3;
        }

        /* For gl_PointCoord input or distance along a line, we'll be called
         * with no nir_variable, and we don't count toward VPM size so we
         * don't track an input slot.
         */
        if (!var) {
                return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
        }

        int i = c->num_inputs++;
        c->input_slots[i] =
                v3d_slot_from_slot_and_component(var->data.location +
                                                 array_index, swizzle);

        switch (var->data.interpolation) {
        case INTERP_MODE_NONE:
                /* If a gl_FrontColor or gl_BackColor input has no interp
                 * qualifier, then if we're using glShadeModel(GL_FLAT) it
                 * needs to be flat shaded.
                 */
                switch (var->data.location + array_index) {
                case VARYING_SLOT_COL0:
                case VARYING_SLOT_COL1:
                case VARYING_SLOT_BFC0:
                case VARYING_SLOT_BFC1:
                        if (c->fs_key->shade_model_flat) {
                                BITSET_SET(c->flat_shade_flags, i);
                                vir_MOV_dest(c, c->undef, vary);
                                return vir_MOV(c, r5);
                        } else {
                                return vir_FADD(c, vir_FMUL(c, vary,
                                                            c->payload_w), r5);
                        }
                default:
                        break;
                }
                /* FALLTHROUGH */
        case INTERP_MODE_SMOOTH:
                if (var->data.centroid) {
                        BITSET_SET(c->centroid_flags, i);
                        return vir_FADD(c, vir_FMUL(c, vary,
                                                    c->payload_w_centroid), r5);
                } else {
                        return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
                }
        case INTERP_MODE_NOPERSPECTIVE:
                BITSET_SET(c->noperspective_flags, i);
                return vir_FADD(c, vir_MOV(c, vary), r5);
        case INTERP_MODE_FLAT:
                BITSET_SET(c->flat_shade_flags, i);
                vir_MOV_dest(c, c->undef, vary);
                return vir_MOV(c, r5);
        default:
                unreachable("Bad interp mode");
        }
}
static void
emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var,
                    int array_index)
{
        for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                int chan = var->data.location_frac + i;
                c->inputs[attr * 4 + chan] =
                        emit_fragment_varying(c, var, chan, array_index);
        }
}
static void
add_output(struct v3d_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct v3d_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset] =
                v3d_slot_from_slot_and_component(slot, swizzle);
}
static void
declare_uniform_range(struct v3d_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_ubo_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct v3d_ubo_range,
                                         c->ubo_ranges_array_size);
                c->ubo_range_used = reralloc(c, c->ubo_range_used,
                                             bool,
                                             c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_range_used[array_id] = false;
}
/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct v3d_compile *c,
                    nir_alu_instr *compare_instr,
                    enum v3d_qpu_cond *out_cond)
{
        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1;
        if (nir_op_infos[compare_instr->op].num_inputs > 1)
                src1 = ntq_get_alu_src(c, compare_instr, 1);
        bool cond_invert = false;
        struct qreg nop = vir_nop_reg();

        switch (compare_instr->op) {
        case nir_op_feq32:
        case nir_op_seq:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                break;
        case nir_op_ieq32:
                vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                break;

        case nir_op_fne32:
        case nir_op_sne:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;
        case nir_op_ine32:
                vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_fge32:
        case nir_op_sge:
                vir_set_pf(vir_FCMP_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ige32:
                vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;
        case nir_op_uge32:
                vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;

        case nir_op_slt:
        case nir_op_flt32:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHN);
                break;
        case nir_op_ilt32:
                vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ult32:
                vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
                break;

        case nir_op_i2b32:
                vir_set_pf(vir_MOV_dest(c, nop, src0), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_f2b32:
                vir_set_pf(vir_FMOV_dest(c, nop, src0), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        default:
                return false;
        }

        *out_cond = cond_invert ? V3D_QPU_COND_IFNA : V3D_QPU_COND_IFA;

        return true;
}
/* Finds an ALU instruction that generates our src value that could
 * (potentially) be greedily emitted in the consuming instruction.
 */
static struct nir_alu_instr *
ntq_get_alu_parent(nir_src src)
{
        if (!src.is_ssa || src.ssa->parent_instr->type != nir_instr_type_alu)
                return NULL;

        nir_alu_instr *instr = nir_instr_as_alu(src.ssa->parent_instr);
        if (!instr)
                return NULL;

        /* If the ALU instr's srcs are non-SSA, then we would have to avoid
         * moving emission of the ALU instr down past another write of the
         * src reg.
         */
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                if (!instr->src[i].src.is_ssa)
                        return NULL;
        }

        return instr;
}
/* Turns a NIR bool into a condition code to predicate on. */
static enum v3d_qpu_cond
ntq_emit_bool_to_cond(struct v3d_compile *c, nir_src src)
{
        nir_alu_instr *compare = ntq_get_alu_parent(src);
        if (!compare)
                goto out;

        enum v3d_qpu_cond cond;
        if (ntq_emit_comparison(c, compare, &cond))
                return cond;

out:
        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), ntq_get_src(c, src, 0)),
                   V3D_QPU_PF_PUSHZ);
        return V3D_QPU_COND_IFNA;
}
static void
ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
{
        /* This should always be lowered to ALU operations for V3D. */
        assert(!instr->dest.saturate);

        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        ntq_store_dest(c, &instr->dest.dest, i,
                                       vir_MOV(c, srcs[i]));
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        struct qreg result;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                result = vir_MOV(c, src[0]);
                break;

        case nir_op_fneg:
                result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
                break;
        case nir_op_ineg:
                result = vir_NEG(c, src[0]);
                break;

        case nir_op_fmul:
                result = vir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                result = vir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                result = vir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                result = vir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                result = vir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i32: {
                nir_alu_instr *src0_alu = ntq_get_alu_parent(instr->src[0].src);
                if (src0_alu && src0_alu->op == nir_op_fround_even) {
                        result = vir_FTOIN(c, ntq_get_alu_src(c, src0_alu, 0));
                } else {
                        result = vir_FTOIZ(c, src[0]);
                }
                break;
        }

        case nir_op_f2u32:
                result = vir_FTOUZ(c, src[0]);
                break;
        case nir_op_i2f32:
                result = vir_ITOF(c, src[0]);
                break;
        case nir_op_u2f32:
                result = vir_UTOF(c, src[0]);
                break;
        case nir_op_b2f32:
                result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i32:
                result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
                break;

        case nir_op_iadd:
                result = vir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                result = vir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                result = vir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                result = vir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                result = vir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                result = vir_MIN(c, src[0], src[1]);
                break;
        case nir_op_umin:
                result = vir_UMIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                result = vir_MAX(c, src[0], src[1]);
                break;
        case nir_op_umax:
                result = vir_UMAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                result = vir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                result = vir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                result = vir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                result = vir_NOT(c, src[0]);
                break;

        case nir_op_ufind_msb:
                result = vir_SUB(c, vir_uniform_ui(c, 31), vir_CLZ(c, src[0]));
                break;

        case nir_op_imul:
                result = vir_UMUL(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt: {
                enum v3d_qpu_cond cond;
                MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
                assert(ok);
                result = vir_MOV(c, vir_SEL(c, cond,
                                            vir_uniform_f(c, 1.0),
                                            vir_uniform_f(c, 0.0)));
                break;
        }

        case nir_op_i2b32:
        case nir_op_f2b32:
        case nir_op_feq32:
        case nir_op_fne32:
        case nir_op_fge32:
        case nir_op_flt32:
        case nir_op_ieq32:
        case nir_op_ine32:
        case nir_op_ige32:
        case nir_op_uge32:
        case nir_op_ilt32:
        case nir_op_ult32: {
                enum v3d_qpu_cond cond;
                MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
                assert(ok);
                result = vir_MOV(c, vir_SEL(c, cond,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;
        }

        case nir_op_b32csel:
                result = vir_MOV(c,
                                 vir_SEL(c,
                                         ntq_emit_bool_to_cond(c, instr->src[0].src),
                                         src[1], src[2]));
                break;

        case nir_op_fcsel:
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), src[0]),
                           V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            src[1], src[2]));
                break;

        case nir_op_frcp:
                result = vir_RECIP(c, src[0]);
                break;
        case nir_op_frsq:
                result = vir_RSQRT(c, src[0]);
                break;
        case nir_op_fexp2:
                result = vir_EXP(c, src[0]);
                break;
        case nir_op_flog2:
                result = vir_LOG(c, src[0]);
                break;

        case nir_op_fceil:
                result = vir_FCEIL(c, src[0]);
                break;
        case nir_op_ffloor:
                result = vir_FFLOOR(c, src[0]);
                break;
        case nir_op_fround_even:
                result = vir_FROUND(c, src[0]);
                break;
        case nir_op_ftrunc:
                result = vir_FTRUNC(c, src[0]);
                break;

        case nir_op_fsin:
                result = ntq_fsincos(c, src[0], false);
                break;
        case nir_op_fcos:
                result = ntq_fsincos(c, src[0], true);
                break;

        case nir_op_fsign:
                result = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
                break;

        case nir_op_iabs:
                result = vir_MAX(c, src[0], vir_NEG(c, src[0]));
                break;

        case nir_op_fddx:
        case nir_op_fddx_coarse:
        case nir_op_fddx_fine:
                result = vir_FDX(c, src[0]);
                break;

        case nir_op_fddy:
        case nir_op_fddy_coarse:
        case nir_op_fddy_fine:
                result = vir_FDY(c, src[0]);
                break;

        case nir_op_uadd_carry:
                vir_set_pf(vir_ADD_dest(c, vir_nop_reg(), src[0], src[1]),
                           V3D_QPU_PF_PUSHC);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        case nir_op_pack_half_2x16_split:
                result = vir_VFPACK(c, src[0], src[1]);
                break;

        case nir_op_unpack_half_2x16_split_x:
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_L);
                break;

        case nir_op_unpack_half_2x16_split_y:
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_H);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }

        /* We have a scalar result, so the instruction should only have a
         * single channel written to.
         */
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        ntq_store_dest(c, &instr->dest.dest,
                       ffs(instr->dest.write_mask) - 1, result);
}
/* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
 * specifier.  They come from a register that's preloaded with 0xffffffff
 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
 */
#define TLB_TYPE_F16_COLOR           (3 << 6)
#define TLB_TYPE_I32_COLOR           (1 << 6)
#define TLB_TYPE_F32_COLOR           (0 << 6)
#define TLB_RENDER_TARGET_SHIFT      3 /* Reversed!  7 = RT 0, 0 = RT 7. */
#define TLB_SAMPLE_MODE_PER_SAMPLE   (0 << 2)
#define TLB_SAMPLE_MODE_PER_PIXEL    (1 << 2)
#define TLB_F16_SWAP_HI_LO           (1 << 1)
#define TLB_VEC_SIZE_4_F16           (1 << 0)
#define TLB_VEC_SIZE_2_F16           (0 << 0)
#define TLB_VEC_SIZE_MINUS_1_SHIFT   0

/* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
 * flag is set.
 */
#define TLB_TYPE_DEPTH               ((2 << 6) | (0 << 4))
#define TLB_DEPTH_TYPE_INVARIANT     (0 << 2) /* Unmodified sideband input used */
#define TLB_DEPTH_TYPE_PER_PIXEL     (1 << 2) /* QPU result used */
#define TLB_V42_DEPTH_TYPE_INVARIANT (0 << 3) /* Unmodified sideband input used */
#define TLB_V42_DEPTH_TYPE_PER_PIXEL (1 << 3) /* QPU result used */

/* Stencil is a single 32-bit write. */
#define TLB_TYPE_STENCIL_ALPHA       ((2 << 6) | (1 << 4))
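/* For illustration, the "0xff gets you normal vec4 f16 RT0 writes" case
 * above decomposes as TLB_TYPE_F16_COLOR (0xc0) |
 * ((7 - 0) << TLB_RENDER_TARGET_SHIFT) (0x38) |
 * TLB_SAMPLE_MODE_PER_PIXEL (0x04) | TLB_F16_SWAP_HI_LO (0x02) |
 * TLB_VEC_SIZE_4_F16 (0x01) == 0xff.
 */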
static void
emit_frag_end(struct v3d_compile *c)
{
        /* XXX
        if (c->output_sample_mask_index != -1) {
                vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }
        */

        bool has_any_tlb_color_write = false;
        for (int rt = 0; rt < V3D_MAX_DRAW_BUFFERS; rt++) {
                if (c->fs_key->cbufs & (1 << rt) && c->output_color_var[rt])
                        has_any_tlb_color_write = true;
        }

        if (c->fs_key->sample_alpha_to_coverage && c->output_color_var[0]) {
                struct nir_variable *var = c->output_color_var[0];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];

                vir_SETMSF_dest(c, vir_nop_reg(),
                                vir_AND(c,
                                        vir_MSF(c),
                                        vir_FTOC(c, color[3])));
        }

        struct qreg tlb_reg = vir_magic_reg(V3D_QPU_WADDR_TLB);
        struct qreg tlbu_reg = vir_magic_reg(V3D_QPU_WADDR_TLBU);
        if (c->output_position_index != -1) {
                struct qinst *inst = vir_MOV_dest(c, tlbu_reg,
                                                  c->outputs[c->output_position_index]);
                uint8_t tlb_specifier = TLB_TYPE_DEPTH;

                if (c->devinfo->ver >= 42) {
                        tlb_specifier |= (TLB_V42_DEPTH_TYPE_PER_PIXEL |
                                          TLB_SAMPLE_MODE_PER_PIXEL);
                } else {
                        tlb_specifier |= TLB_DEPTH_TYPE_PER_PIXEL;
                }

                inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
                                                      tlb_specifier |
                                                      0xffffff00);
                c->writes_z = true;
        } else if (c->s->info.fs.uses_discard ||
                   !c->s->info.fs.early_fragment_tests ||
                   c->fs_key->sample_alpha_to_coverage ||
                   !has_any_tlb_color_write) {
                /* Emit passthrough Z if it needed to be delayed until shader
                 * end due to potential discards.
                 *
                 * Since (single-threaded) fragment shaders always need a TLB
                 * write, emit passthrough Z if we didn't have any color
                 * buffers and flag us as potentially discarding, so that we
                 * can use Z as the TLB write.
                 */
                c->s->info.fs.uses_discard = true;

                struct qinst *inst = vir_MOV_dest(c, tlbu_reg,
                                                  vir_nop_reg());
                uint8_t tlb_specifier = TLB_TYPE_DEPTH;

                if (c->devinfo->ver >= 42) {
                        /* The spec says the PER_PIXEL flag is ignored for
                         * invariant writes, but the simulator demands it.
                         */
                        tlb_specifier |= (TLB_V42_DEPTH_TYPE_INVARIANT |
                                          TLB_SAMPLE_MODE_PER_PIXEL);
                } else {
                        tlb_specifier |= TLB_DEPTH_TYPE_INVARIANT;
                }

                inst->uniform = vir_get_uniform_index(c,
                                                      QUNIFORM_CONSTANT,
                                                      tlb_specifier |
                                                      0xffffff00);
                c->writes_z = true;
        }

        /* XXX: Performance improvement: Merge Z write and color writes TLB
         * uniform setup
         */

        for (int rt = 0; rt < V3D_MAX_DRAW_BUFFERS; rt++) {
                if (!(c->fs_key->cbufs & (1 << rt)) || !c->output_color_var[rt])
                        continue;

                nir_variable *var = c->output_color_var[rt];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];
                int num_components = glsl_get_vector_elements(var->type);
                uint32_t conf = 0xffffff00;
                struct qinst *inst;

                conf |= TLB_SAMPLE_MODE_PER_PIXEL;
                conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;

                if (c->fs_key->swap_color_rb & (1 << rt))
                        num_components = MAX2(num_components, 3);

                assert(num_components != 0);
                switch (glsl_get_base_type(var->type)) {
                case GLSL_TYPE_UINT:
                case GLSL_TYPE_INT:
                        /* The F32 vs I32 distinction was dropped in 4.2. */
                        if (c->devinfo->ver < 42)
                                conf |= TLB_TYPE_I32_COLOR;
                        else
                                conf |= TLB_TYPE_F32_COLOR;
                        conf |= ((num_components - 1) <<
                                 TLB_VEC_SIZE_MINUS_1_SHIFT);

                        inst = vir_MOV_dest(c, tlbu_reg, color[0]);
                        inst->uniform = vir_get_uniform_index(c,
                                                              QUNIFORM_CONSTANT,
                                                              conf);

                        for (int i = 1; i < num_components; i++) {
                                inst = vir_MOV_dest(c, tlb_reg, color[i]);
                        }
                        break;

                default: {
                        struct qreg r = color[0];
                        struct qreg g = color[1];
                        struct qreg b = color[2];
                        struct qreg a = color[3];

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                conf |= TLB_TYPE_F32_COLOR;
                                conf |= ((num_components - 1) <<
                                         TLB_VEC_SIZE_MINUS_1_SHIFT);
                        } else {
                                conf |= TLB_TYPE_F16_COLOR;
                                conf |= TLB_F16_SWAP_HI_LO;
                                if (num_components >= 3)
                                        conf |= TLB_VEC_SIZE_4_F16;
                                else
                                        conf |= TLB_VEC_SIZE_2_F16;
                        }

                        if (c->fs_key->swap_color_rb & (1 << rt)) {
                                r = color[2];
                                b = color[0];
                        }

                        if (c->fs_key->sample_alpha_to_one)
                                a = vir_uniform_f(c, 1.0);

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                inst = vir_MOV_dest(c, tlbu_reg, r);
                                inst->uniform = vir_get_uniform_index(c,
                                                                      QUNIFORM_CONSTANT,
                                                                      conf);

                                if (num_components >= 2)
                                        vir_MOV_dest(c, tlb_reg, g);
                                if (num_components >= 3)
                                        vir_MOV_dest(c, tlb_reg, b);
                                if (num_components >= 4)
                                        vir_MOV_dest(c, tlb_reg, a);
                        } else {
                                inst = vir_VFPACK_dest(c, tlb_reg, r, g);
                                if (conf != ~0) {
                                        inst->dst = tlbu_reg;
                                        inst->uniform = vir_get_uniform_index(c,
                                                                              QUNIFORM_CONSTANT,
                                                                              conf);
                                }

                                if (num_components >= 3)
                                        inst = vir_VFPACK_dest(c, tlb_reg, b, a);
                        }
                        break;
                }
                }
        }
}
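/* Writes val to the next VPM output slot.  On V3D 4.x the slot index is
 * given explicitly through STVPMV; on 3.3 writes go to the VPM magic
 * register in setup-defined order.
 */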
void
vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t vpm_index)
{
        if (c->devinfo->ver >= 40) {
                vir_STVPMV(c, vir_uniform_ui(c, vpm_index), val);
        } else {
                /* XXX: v3d33_vir_vpm_write_setup(c); */
                vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
        }
}
static void
emit_vert_end(struct v3d_compile *c)
{
        /* GFXH-1684: VPM writes need to be complete by the end of the shader.
         */
        if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
                vir_VPMWT(c);
}
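/* Runs the NIR optimization loop until no pass makes progress. */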
void
v3d_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);

        NIR_PASS(progress, s, nir_opt_move_load_ubo);
}
static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}
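/* Reads one VPM input component.  On V3D 4.x this is a direct LDVPMV with an
 * incrementing index; on 3.3 reads are batched up to 32 components per
 * vpmsetup and then consumed from the VPM register.
 */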
static struct qreg
ntq_emit_vpm_read(struct v3d_compile *c,
                  uint32_t *num_components_queued,
                  uint32_t *remaining,
                  uint32_t vpm_index)
{
        struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);

        if (c->devinfo->ver >= 40) {
                return vir_LDVPMV_IN(c,
                                     vir_uniform_ui(c,
                                                    (*num_components_queued)++));
        }

        if (*num_components_queued != 0) {
                (*num_components_queued)--;
                return vir_MOV(c, vpm);
        }

        uint32_t num_components = MIN2(*remaining, 32);

        v3d33_vir_vpm_read_setup(c, num_components);

        *num_components_queued = num_components - 1;
        *remaining -= num_components;

        return vir_MOV(c, vpm);
}
static void
ntq_setup_vpm_inputs(struct v3d_compile *c)
{
        /* Figure out how many components of each vertex attribute the shader
         * uses.  Each variable should have been split to individual
         * components and unused ones DCEed.  The vertex fetcher will load
         * from the start of the attribute to the number of components we
         * declare we need in c->vattr_sizes[].
         */
        nir_foreach_variable(var, &c->s->inputs) {
                /* No VS attribute array support. */
                assert(MAX2(glsl_get_length(var->type), 1) == 1);

                unsigned loc = var->data.driver_location;
                int start_component = var->data.location_frac;
                int num_components = glsl_get_components(var->type);

                c->vattr_sizes[loc] = MAX2(c->vattr_sizes[loc],
                                           start_component + num_components);
        }

        unsigned num_components = 0;
        uint32_t vpm_components_queued = 0;
        bool uses_iid = c->s->info.system_values_read &
                (1ull << SYSTEM_VALUE_INSTANCE_ID);
        bool uses_vid = c->s->info.system_values_read &
                (1ull << SYSTEM_VALUE_VERTEX_ID);
        num_components += uses_iid;
        num_components += uses_vid;

        for (int i = 0; i < ARRAY_SIZE(c->vattr_sizes); i++)
                num_components += c->vattr_sizes[i];

        if (uses_iid) {
                c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                           &num_components, ~0);
        }

        if (uses_vid) {
                c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                           &num_components, ~0);
        }

        /* The actual loads will happen directly in nir_intrinsic_load_input
         * on newer versions.
         */
        if (c->devinfo->ver >= 40)
                return;

        for (int loc = 0; loc < ARRAY_SIZE(c->vattr_sizes); loc++) {
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                for (int i = 0; i < c->vattr_sizes[loc]; i++) {
                        c->inputs[loc * 4 + i] =
                                ntq_emit_vpm_read(c,
                                                  &vpm_components_queued,
                                                  &num_components,
                                                  loc * 4 + i);
                }
        }

        if (c->devinfo->ver >= 40) {
                assert(vpm_components_queued == num_components);
        } else {
                assert(vpm_components_queued == 0);
                assert(num_components == 0);
        }
}
static void
ntq_setup_fs_inputs(struct v3d_compile *c)
{
        unsigned num_entries = 0;
        unsigned num_components = 0;
        nir_foreach_variable(var, &c->s->inputs) {
                num_entries++;
                num_components += glsl_get_components(var->type);
        }

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + array_len) * 4);

                if (var->data.location == VARYING_SLOT_POS) {
                        emit_fragcoord_input(c, loc);
                } else if (var->data.location == VARYING_SLOT_PNTC ||
                           (var->data.location >= VARYING_SLOT_VAR0 &&
                            (c->fs_key->point_sprite_mask &
                             (1 << (var->data.location -
                                    VARYING_SLOT_VAR0))))) {
                        c->inputs[loc * 4 + 0] = c->point_x;
                        c->inputs[loc * 4 + 1] = c->point_y;
                } else {
                        for (int j = 0; j < array_len; j++)
                                emit_fragment_input(c, loc + j, var, j);
                }
        }
}
static void
ntq_setup_outputs(struct v3d_compile *c)
{
        if (c->s->info.stage != MESA_SHADER_FRAGMENT)
                return;

        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4 - var->data.location_frac; i++) {
                        add_output(c, loc + var->data.location_frac + i,
                                   var->data.location,
                                   var->data.location_frac + i);
                }

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        c->output_color_var[0] = var;
                        c->output_color_var[1] = var;
                        c->output_color_var[2] = var;
                        c->output_color_var[3] = var;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        c->output_color_var[var->data.location -
                                            FRAG_RESULT_DATA0] = var;
                        break;
                case FRAG_RESULT_DEPTH:
                        c->output_position_index = loc;
                        break;
                case FRAG_RESULT_SAMPLE_MASK:
                        c->output_sample_mask_index = loc;
                        break;
                }
        }
}
*c
)
1542 nir_foreach_variable(var
, &c
->s
->uniforms
) {
1543 uint32_t vec4_count
= glsl_count_attribute_slots(var
->type
,
1545 unsigned vec4_size
= 4 * sizeof(float);
1547 if (var
->data
.mode
!= nir_var_uniform
)
1550 declare_uniform_range(c
, var
->data
.driver_location
* vec4_size
,
1551 vec4_count
* vec4_size
);
/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = vir_get_temp(c);
        }
}
static void
ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
{
        /* XXX perf: Experiment with using immediate loads to avoid having
         * these end up in the uniform stream.  Watch out for breaking the
         * small immediates optimization in the process!
         */
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
static void
ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* VIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers()).
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, 0);
}
static void
ntq_emit_image_size(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        assert(instr->intrinsic == nir_intrinsic_image_deref_size);
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        unsigned image_index = var->data.driver_location;
        const struct glsl_type *sampler_type = glsl_without_array(var->type);
        bool is_array = glsl_sampler_type_is_array(sampler_type);

        ntq_store_dest(c, &instr->dest, 0,
                       vir_uniform(c, QUNIFORM_IMAGE_WIDTH, image_index));
        if (instr->num_components > 1) {
                ntq_store_dest(c, &instr->dest, 1,
                               vir_uniform(c, QUNIFORM_IMAGE_HEIGHT,
                                           image_index));
        }
        if (instr->num_components > 2) {
                ntq_store_dest(c, &instr->dest, 2,
                               vir_uniform(c,
                                           is_array ?
                                           QUNIFORM_IMAGE_ARRAY_SIZE :
                                           QUNIFORM_IMAGE_DEPTH,
                                           image_index));
        }
}
static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        unsigned offset;

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                if (nir_src_is_const(instr->src[0])) {
                        int offset = (nir_intrinsic_base(instr) +
                                      nir_src_as_uint(instr->src[0]));
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        for (int i = 0; i < instr->num_components; i++) {
                                ntq_store_dest(c, &instr->dest, i,
                                               vir_uniform(c, QUNIFORM_UNIFORM,
                                                           offset + i));
                        }
                } else {
                        ntq_emit_tmu_general(c, instr, false);
                }
                break;

        case nir_intrinsic_load_ubo:
                ntq_emit_tmu_general(c, instr, false);
                break;

        case nir_intrinsic_ssbo_atomic_add:
        case nir_intrinsic_ssbo_atomic_imin:
        case nir_intrinsic_ssbo_atomic_umin:
        case nir_intrinsic_ssbo_atomic_imax:
        case nir_intrinsic_ssbo_atomic_umax:
        case nir_intrinsic_ssbo_atomic_and:
        case nir_intrinsic_ssbo_atomic_or:
        case nir_intrinsic_ssbo_atomic_xor:
        case nir_intrinsic_ssbo_atomic_exchange:
        case nir_intrinsic_ssbo_atomic_comp_swap:
        case nir_intrinsic_load_ssbo:
        case nir_intrinsic_store_ssbo:
                ntq_emit_tmu_general(c, instr, false);
                break;

        case nir_intrinsic_shared_atomic_add:
        case nir_intrinsic_shared_atomic_imin:
        case nir_intrinsic_shared_atomic_umin:
        case nir_intrinsic_shared_atomic_imax:
        case nir_intrinsic_shared_atomic_umax:
        case nir_intrinsic_shared_atomic_and:
        case nir_intrinsic_shared_atomic_or:
        case nir_intrinsic_shared_atomic_xor:
        case nir_intrinsic_shared_atomic_exchange:
        case nir_intrinsic_shared_atomic_comp_swap:
        case nir_intrinsic_load_shared:
        case nir_intrinsic_store_shared:
                ntq_emit_tmu_general(c, instr, true);
                break;

        case nir_intrinsic_image_deref_load:
        case nir_intrinsic_image_deref_store:
        case nir_intrinsic_image_deref_atomic_add:
        case nir_intrinsic_image_deref_atomic_min:
        case nir_intrinsic_image_deref_atomic_max:
        case nir_intrinsic_image_deref_atomic_and:
        case nir_intrinsic_image_deref_atomic_or:
        case nir_intrinsic_image_deref_atomic_xor:
        case nir_intrinsic_image_deref_atomic_exchange:
        case nir_intrinsic_image_deref_atomic_comp_swap:
                v3d40_vir_emit_image_load_store(c, instr);
                break;

        case nir_intrinsic_get_buffer_size:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_GET_BUFFER_SIZE,
                                           nir_src_as_uint(instr->src[0])));
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                                   nir_intrinsic_ucp_id(instr) *
                                                   4 + i));
                }
                break;

        case nir_intrinsic_load_viewport_x_scale:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE, 0));
                break;

        case nir_intrinsic_load_viewport_y_scale:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_Y_SCALE, 0));
                break;

        case nir_intrinsic_load_viewport_z_scale:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0));
                break;

        case nir_intrinsic_load_viewport_z_offset:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0));
                break;

        case nir_intrinsic_load_alpha_ref_float:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
                break;

        case nir_intrinsic_load_sample_mask_in:
                ntq_store_dest(c, &instr->dest, 0, vir_MSF(c));
                break;

        case nir_intrinsic_load_helper_invocation:
                vir_set_pf(vir_MSF_dest(c, vir_nop_reg()), V3D_QPU_PF_PUSHZ);
                ntq_store_dest(c, &instr->dest, 0,
                               vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
                                                  vir_uniform_ui(c, ~0),
                                                  vir_uniform_ui(c, 0))));
                break;

        case nir_intrinsic_load_front_face:
                /* The register contains 0 (front) or 1 (back), and we need to
                 * turn it into a NIR bool where true means front.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               vir_ADD(c,
                                       vir_uniform_ui(c, -1),
                                       vir_REVF(c)));
                break;

        case nir_intrinsic_load_instance_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
                break;

        case nir_intrinsic_load_vertex_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
                break;

        case nir_intrinsic_load_input:
                offset = (nir_intrinsic_base(instr) +
                          nir_src_as_uint(instr->src[0]));
                if (c->s->info.stage != MESA_SHADER_FRAGMENT &&
                    c->devinfo->ver >= 40) {
                        /* Emit the LDVPM directly now, rather than at the top
                         * of the shader like we did for V3D 3.x (which needs
                         * vpmsetup when not just taking the next offset).
                         *
                         * Note that delaying like this may introduce stalls,
                         * as LDVPMV takes a minimum of 1 instruction but may
                         * be slower if the VPM unit is busy with another QPU.
                         */
                        int index = 0;
                        if (c->s->info.system_values_read &
                            (1ull << SYSTEM_VALUE_INSTANCE_ID)) {
                                index++;
                        }
                        if (c->s->info.system_values_read &
                            (1ull << SYSTEM_VALUE_VERTEX_ID)) {
                                index++;
                        }
                        for (int i = 0; i < offset; i++)
                                index += c->vattr_sizes[i];
                        index += nir_intrinsic_component(instr);
                        for (int i = 0; i < instr->num_components; i++) {
                                struct qreg vpm_offset =
                                        vir_uniform_ui(c, index++);
                                ntq_store_dest(c, &instr->dest, i,
                                               vir_LDVPMV_IN(c, vpm_offset));
                        }
                } else {
                        for (int i = 0; i < instr->num_components; i++) {
                                int comp = nir_intrinsic_component(instr) + i;
                                ntq_store_dest(c, &instr->dest, i,
                                               vir_MOV(c, c->inputs[offset * 4 +
                                                                    comp]));
                        }
                }
                break;

        case nir_intrinsic_store_output:
                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        offset = ((nir_intrinsic_base(instr) +
                                   nir_src_as_uint(instr->src[1])) * 4 +
                                  nir_intrinsic_component(instr));
                        for (int i = 0; i < instr->num_components; i++) {
                                c->outputs[offset + i] =
                                        vir_MOV(c,
                                                ntq_get_src(c, instr->src[0], i));
                        }
                } else {
                        assert(instr->num_components == 1);

                        vir_VPM_WRITE(c,
                                      ntq_get_src(c, instr->src[0], 0),
                                      nir_intrinsic_base(instr));
                }
                break;

        case nir_intrinsic_image_deref_size:
                ntq_emit_image_size(c, instr);
                break;

        case nir_intrinsic_discard:
                if (vir_in_nonuniform_control_flow(c)) {
                        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                                   V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_nop_reg(),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_SETMSF_dest(c, vir_nop_reg(),
                                        vir_uniform_ui(c, 0));
                }
                break;

        case nir_intrinsic_discard_if: {
                enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, instr->src[0]);

                if (vir_in_nonuniform_control_flow(c)) {
                        struct qinst *exec_flag = vir_MOV_dest(c, vir_nop_reg(),
                                                               c->execute);
                        if (cond == V3D_QPU_COND_IFA) {
                                vir_set_uf(exec_flag, V3D_QPU_UF_ANDZ);
                        } else {
                                vir_set_uf(exec_flag, V3D_QPU_UF_NORNZ);
                                cond = V3D_QPU_COND_IFA;
                        }
                }

                vir_set_cond(vir_SETMSF_dest(c, vir_nop_reg(),
                                             vir_uniform_ui(c, 0)), cond);

                break;
        }

        case nir_intrinsic_memory_barrier:
        case nir_intrinsic_memory_barrier_atomic_counter:
        case nir_intrinsic_memory_barrier_buffer:
        case nir_intrinsic_memory_barrier_image:
        case nir_intrinsic_memory_barrier_shared:
                /* We don't do any instruction scheduling of these NIR
                 * instructions between each other, so we just need to make
                 * sure that the TMU operations before the barrier are flushed
                 * before the ones after the barrier.  That is currently
                 * handled by having a THRSW in each of them and a LDTMU
                 * series or a TMUWT after.
                 */
                break;

        case nir_intrinsic_barrier:
                /* Emit a TSY op to get all invocations in the workgroup
                 * (actually supergroup) to block until the last invocation
                 * reaches the TSY op.
                 */
                if (c->devinfo->ver >= 42) {
                        vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
                                                      V3D_QPU_WADDR_SYNCB));
                } else {
                        struct qinst *sync =
                                vir_BARRIERID_dest(c,
                                                   vir_reg(QFILE_MAGIC,
                                                           V3D_QPU_WADDR_SYNCU));
                        sync->uniform =
                                vir_get_uniform_index(c, QUNIFORM_CONSTANT,
                                                      0xffffff00 |
                                                      V3D_TSY_WAIT_INC_CHECK);
                }

                /* The blocking of a TSY op only happens at the next thread
                 * switch.  No texturing may be outstanding at the time of a
                 * TSY blocking operation.
                 */
                vir_emit_thrsw(c);
                break;

        case nir_intrinsic_load_num_work_groups:
                for (int i = 0; i < 3; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_NUM_WORK_GROUPS,
                                                   i));
                }
                break;

        case nir_intrinsic_load_local_invocation_index:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_SHR(c, c->cs_payload[1],
                                       vir_uniform_ui(c, 32 - c->local_invocation_index_bits)));
                break;

        case nir_intrinsic_load_work_group_id:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_AND(c, c->cs_payload[0],
                                       vir_uniform_ui(c, 0xffff)));
                ntq_store_dest(c, &instr->dest, 1,
                               vir_SHR(c, c->cs_payload[0],
                                       vir_uniform_ui(c, 16)));
                ntq_store_dest(c, &instr->dest, 2,
                               vir_AND(c, c->cs_payload[1],
                                       vir_uniform_ui(c, 0xffff)));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}
/* Clears (activates) the execute flags for any channels whose jump target
 * matches this block.
 *
 * XXX perf: Could we be using flpush/flpop somehow for our execution channel
 * enabling?
 *
 * XXX perf: For uniform control flow, we should be able to skip c->execute
 * handling entirely.
 */
static void
ntq_activate_execute_for_block(struct v3d_compile *c)
{
        vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
                                c->execute, vir_uniform_ui(c, c->cur_block->index)),
                   V3D_QPU_PF_PUSHZ);

        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
}
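/* Emits an if statement with a dynamically uniform condition, using real
 * branches rather than updating the per-channel execute mask.
 */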
static void
ntq_emit_uniform_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        /* Set up the flags for the IF condition (taking the THEN branch). */
        enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);

        /* Jump to ELSE. */
        vir_BRANCH(c, cond == V3D_QPU_COND_IFA ?
                   V3D_QPU_BRANCH_COND_ALLNA :
                   V3D_QPU_BRANCH_COND_ALLA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* At the end of the THEN block, jump to ENDIF */
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALWAYS);
                vir_link_blocks(c->cur_block, after_block);

                /* Emit the else block. */
                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
}
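/* Emits an if statement with a divergent condition.  c->execute tracks, per
 * channel, the index of the block that channel is waiting to execute;
 * taking a branch means pointing inactive channels at the ELSE or ENDIF
 * block.
 */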
static void
ntq_emit_nonuniform_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        bool was_uniform_control_flow = false;
        if (!vir_in_nonuniform_control_flow(c)) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_uniform_control_flow = true;
        }

        /* Set up the flags for the IF condition (taking the THEN branch). */
        enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);

        /* Update the flags+cond to mean "Taking the ELSE branch (!cond) and
         * was previously active (execute Z)" for updating the exec flags.
         */
        if (was_uniform_control_flow) {
                cond = v3d_qpu_cond_invert(cond);
        } else {
                struct qinst *inst = vir_MOV_dest(c, vir_nop_reg(), c->execute);
                if (cond == V3D_QPU_COND_IFA) {
                        vir_set_uf(inst, V3D_QPU_UF_NORNZ);
                } else {
                        vir_set_uf(inst, V3D_QPU_UF_ANDZ);
                        cond = V3D_QPU_COND_IFA;
                }
        }

        vir_MOV_cond(c, cond,
                     c->execute,
                     vir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);
        vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block.  First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF
                 */
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
                                        c->execute,
                                        vir_uniform_ui(c, after_block->index)),
                           V3D_QPU_PF_PUSHZ);
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
                vir_link_blocks(c->cur_block, after_block);
                vir_link_blocks(c->cur_block, else_block);

                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
        if (was_uniform_control_flow)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);
}
static void
ntq_emit_if(struct v3d_compile *c, nir_if *nif)
{
        bool was_in_control_flow = c->in_control_flow;
        c->in_control_flow = true;
        if (!vir_in_nonuniform_control_flow(c) &&
            nir_src_is_dynamically_uniform(nif->condition)) {
                ntq_emit_uniform_if(c, nif);
        } else {
                ntq_emit_nonuniform_if(c, nif);
        }
        c->in_control_flow = was_in_control_flow;
}

static void
ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
{
        switch (jump->type) {
        case nir_jump_break:
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_break_block->index));
                break;

        case nir_jump_continue:
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_cont_block->index));
                break;

        case nir_jump_return:
                unreachable("All returns should be lowered\n");
        }
}
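
/* break and continue emit no branch at all: they just park the channel's
 * execute value at the break/continue block's index, leaving the channel
 * inactive for the rest of the loop body.  The actual transfer of control
 * happens at the loop's back-edge branch in ntq_emit_loop() below.
 */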

static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_deref:
                /* ignored, will be walked by the intrinsic using it. */
                break;

        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                ntq_emit_jump(c, nir_instr_as_jump(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
ntq_emit_block(struct v3d_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}

static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
{
        bool was_in_control_flow = c->in_control_flow;
        c->in_control_flow = true;

        bool was_uniform_control_flow = false;
        if (!vir_in_nonuniform_control_flow(c)) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_uniform_control_flow = true;
        }

        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = vir_new_block(c);
        c->loop_break_block = vir_new_block(c);

        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* Re-enable any previous continues now, so our ANYA check below
         * won't block.
         *
         * XXX: Use the .ORZ flags update, instead.
         */
        vir_set_pf(vir_XOR_dest(c,
                                vir_nop_reg(),
                                c->execute,
                                vir_uniform_ui(c, c->loop_cont_block->index)),
                   V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));

        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);

        struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
        /* Pixels that were not dispatched or have been discarded should not
         * contribute to looping again.
         */
        branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_link_blocks(c->cur_block, c->loop_break_block);

        vir_set_emit_block(c, c->loop_break_block);
        if (was_uniform_control_flow)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;

        c->loops++;

        c->in_control_flow = was_in_control_flow;
}
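
/* The loop above compiles to a single back-edge: the body is emitted into
 * loop_cont_block, and the ANYA branch retakes it as long as at least one
 * channel still has execute == 0.  Channels that hit "continue" parked
 * their execute at loop_cont_block->index and are re-zeroed just before
 * the branch so they join the next trip; channels that hit "break" parked
 * at loop_break_block->index and stay inactive until the loop exits.
 */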

static void
ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}

static void
ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}

static void
nir_to_vir(struct v3d_compile *c)
{
        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
                c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));

                /* XXX perf: We could set the "disable implicit point/line
                 * varyings" field in the shader record and not emit these, if
                 * they're not going to be used.
                 */
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, NULL, 0, 0);
                        c->point_y = emit_fragment_varying(c, NULL, 0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, NULL, 0, 0);
                }
                break;
        case MESA_SHADER_COMPUTE:
                /* Set up the TSO for barriers, assuming we do some. */
                if (c->devinfo->ver < 42) {
                        vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
                                                      V3D_QPU_WADDR_SYNC));
                }

                if (c->s->info.system_values_read &
                    ((1ull << SYSTEM_VALUE_LOCAL_INVOCATION_INDEX) |
                     (1ull << SYSTEM_VALUE_WORK_GROUP_ID))) {
                        c->cs_payload[0] = vir_MOV(c, vir_reg(QFILE_REG, 0));
                }
                if ((c->s->info.system_values_read &
                     ((1ull << SYSTEM_VALUE_WORK_GROUP_ID))) ||
                    c->s->info.cs.shared_size) {
                        c->cs_payload[1] = vir_MOV(c, vir_reg(QFILE_REG, 2));
                }

                /* Set up the division between gl_LocalInvocationIndex and
                 * wg_in_mem in the payload reg.
                 */
                int wg_size = (c->s->info.cs.local_size[0] *
                               c->s->info.cs.local_size[1] *
                               c->s->info.cs.local_size[2]);
                c->local_invocation_index_bits =
                        ffs(util_next_power_of_two(MAX2(wg_size, 64))) - 1;
                assert(c->local_invocation_index_bits <= 8);

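                /* Worked example of the packing above (illustrative only):
                 * a local_size of 8x8x1 gives wg_size = 64, and
                 * ffs(util_next_power_of_two(MAX2(64, 64))) - 1 = ffs(64) - 1
                 * = 6, so the low 6 bits of the payload carry
                 * gl_LocalInvocationIndex.  For wg_size = 100 the next power
                 * of two is 128, giving 7 bits.  The MAX2 with 64 keeps a
                 * floor of 6 bits, and the assert encodes the assumption
                 * that a group has at most 256 invocations (8 bits).
                 */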
                if (c->s->info.cs.shared_size) {
                        struct qreg wg_in_mem = vir_SHR(c, c->cs_payload[1],
                                                        vir_uniform_ui(c, 16));
                        if (c->s->info.cs.local_size[0] != 1 ||
                            c->s->info.cs.local_size[1] != 1 ||
                            c->s->info.cs.local_size[2] != 1) {
                                int wg_bits = (16 -
                                               c->local_invocation_index_bits);
                                int wg_mask = (1 << wg_bits) - 1;
                                wg_in_mem = vir_AND(c, wg_in_mem,
                                                    vir_uniform_ui(c, wg_mask));
                        }
                        struct qreg shared_per_wg =
                                vir_uniform_ui(c, c->s->info.cs.shared_size);

                        c->cs_shared_offset =
                                vir_ADD(c,
                                        vir_uniform(c, QUNIFORM_SHARED_OFFSET, 0),
                                        vir_UMUL(c, wg_in_mem, shared_per_wg));
                }
                break;
        default:
                break;
        }

        if (c->s->info.stage == MESA_SHADER_FRAGMENT)
                ntq_setup_fs_inputs(c);
        else
                ntq_setup_vpm_inputs(c);

        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}

const nir_shader_compiler_options v3d_nir_options = {
        .lower_all_io_to_temps = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_bitfield_insert_to_shifts = true,
        .lower_bitfield_extract_to_shifts = true,
        .lower_bitfield_reverse = true,
        .lower_bit_count = true,
        .lower_cs_local_id_from_index = true,
        .lower_ffract = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_unpack_snorm_4x8 = true,
        .lower_pack_half_2x16 = true,
        .lower_unpack_half_2x16 = true,
        .lower_find_lsb = true,
        .lower_flrp32 = true,
        .lower_fsqrt = true,
        .lower_ifind_msb = true,
        .lower_isign = true,
        .lower_ldexp = true,
        .lower_mul_high = true,
        .lower_wpos_pntc = true,
        .native_integers = true,
};
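
/* Each .lower_* flag above asks NIR's lowering passes to rewrite that
 * operation into simpler ops before nir_to_vir() runs, so the backend only
 * has to handle what the V3D ALU can express directly (e.g. bitfield
 * insert/extract become shift/mask sequences, and
 * .lower_cs_local_id_from_index reconstructs gl_LocalInvocationID from the
 * single packed index that the compute payload provides).
 */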

/**
 * When demoting a shader down to single-threaded, removes the THRSW
 * instructions (one will still be inserted at v3d_vir_to_qpu() for the
 * program end).
 */
static void
vir_remove_thrsw(struct v3d_compile *c)
{
        vir_for_each_block(block, c) {
                vir_for_each_inst_safe(inst, block) {
                        if (inst->qpu.sig.thrsw)
                                vir_remove_instruction(c, inst);
                }
        }

        c->last_thrsw = NULL;
}

static void
vir_emit_last_thrsw(struct v3d_compile *c)
{
        /* On V3D before 4.1, we need a TMU op to be outstanding when thread
         * switching, so disable threads if we didn't do any TMU ops (each of
         * which would have emitted a THRSW).
         */
        if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
                c->threads = 1;
                if (c->last_thrsw)
                        vir_remove_thrsw(c);
                return;
        }

        /* If we're threaded and the last THRSW was in conditional code, then
         * we need to emit another one so that we can flag it as the last
         * thrsw.
         */
        if (c->last_thrsw && !c->last_thrsw_at_top_level) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        /* If we're threaded, then we need to mark the last THRSW instruction
         * so we can emit a pair of them at QPU emit time.
         *
         * For V3D 4.x, we can spawn the non-fragment shaders already in the
         * post-last-THRSW state, so we can skip this.
         */
        if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        if (c->last_thrsw)
                c->last_thrsw->is_last_thrsw = true;
}

/* There's a flag in the shader for "center W is needed for reasons other than
 * non-centroid varyings", so we just walk the program after VIR optimization
 * to see if it's used.  It should be harmless to set even if we only use
 * center W for varyings.
 */
static void
vir_check_payload_w(struct v3d_compile *c)
{
        if (c->s->info.stage != MESA_SHADER_FRAGMENT)
                return;

        vir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_REG &&
                            inst->src[i].index == 0) {
                                c->uses_center_w = true;
                                return;
                        }
                }
        }
}
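
/* QFILE_REG index 0 is where nir_to_vir() placed payload W for fragment
 * shaders, so any source still referencing it after optimization means
 * center W is consumed by something besides the varying setup.
 */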

void
v3d_nir_to_vir(struct v3d_compile *c)
{
        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_vir(c);

        /* Emit the last THRSW before STVPM and TLB writes. */
        vir_emit_last_thrsw(c);

        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                emit_frag_end(c);
                break;
        case MESA_SHADER_VERTEX:
                emit_vert_end(c);
                break;
        case MESA_SHADER_COMPUTE:
                break;
        default:
                unreachable("bad stage");
        }

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        vir_optimize(c);

        vir_check_payload_w(c);

        /* XXX perf: On VC4, we do a VIR-level instruction scheduling here.
         * We used that on that platform to pipeline TMU writes and reduce the
         * number of thread switches, as well as try (mostly successfully) to
         * reduce maximum register pressure to allow more threads.  We should
         * do something of that sort for V3D -- either instruction scheduling
         * here, or delay the THRSW and LDTMUs from our texture instructions
         * until the results are needed.
         */

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        /* Attempt to allocate registers for the temporaries.  If we fail,
         * reduce thread count and try again.
         */
        int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
        struct qpu_reg *temp_registers;
        while (true) {
                bool spilled;
                temp_registers = v3d_register_allocate(c, &spilled);
                if (spilled)
                        continue;

                if (temp_registers)
                        break;

                if (c->threads == min_threads) {
                        fprintf(stderr, "Failed to register allocate at %d threads:\n",
                                c->threads);
                        vir_dump(c);
                        c->failed = true;
                        return;
                }

                c->threads /= 2;

                if (c->threads == 1)
                        vir_remove_thrsw(c);
        }
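
        /* Each QPU thread gets a slice of the physical register file, so
         * halving c->threads roughly doubles the registers available to the
         * allocator; once we are down to one thread the remaining THRSWs
         * serve no purpose and are deleted above.
         */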
        if (c->spill_size &&
            (V3D_DEBUG & (V3D_DEBUG_VIR |
                          v3d_debug_flag_for_shader_stage(c->s->info.stage)))) {
                fprintf(stderr, "%s prog %d/%d spilled VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        v3d_vir_to_qpu(c, temp_registers);