2 * Copyright © 2016 Broadcom
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "util/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "compiler/nir/nir.h"
31 #include "compiler/nir/nir_builder.h"
32 #include "common/v3d_device_info.h"
33 #include "v3d_compiler.h"
35 /* We don't do any address packing. */
36 #define __gen_user_data void
37 #define __gen_address_type uint32_t
38 #define __gen_address_offset(reloc) (*reloc)
39 #define __gen_emit_reloc(cl, reloc)
40 #include "cle/v3d_packet_v41_pack.h"
42 #define GENERAL_TMU_LOOKUP_PER_QUAD (0 << 7)
43 #define GENERAL_TMU_LOOKUP_PER_PIXEL (1 << 7)
44 #define GENERAL_TMU_LOOKUP_TYPE_8BIT_I (0 << 0)
45 #define GENERAL_TMU_LOOKUP_TYPE_16BIT_I (1 << 0)
46 #define GENERAL_TMU_LOOKUP_TYPE_VEC2 (2 << 0)
47 #define GENERAL_TMU_LOOKUP_TYPE_VEC3 (3 << 0)
48 #define GENERAL_TMU_LOOKUP_TYPE_VEC4 (4 << 0)
49 #define GENERAL_TMU_LOOKUP_TYPE_8BIT_UI (5 << 0)
50 #define GENERAL_TMU_LOOKUP_TYPE_16BIT_UI (6 << 0)
51 #define GENERAL_TMU_LOOKUP_TYPE_32BIT_UI (7 << 0)
53 #define V3D_TSY_SET_QUORUM 0
54 #define V3D_TSY_INC_WAITERS 1
55 #define V3D_TSY_DEC_WAITERS 2
56 #define V3D_TSY_INC_QUORUM 3
57 #define V3D_TSY_DEC_QUORUM 4
58 #define V3D_TSY_FREE_ALL 5
59 #define V3D_TSY_RELEASE 6
60 #define V3D_TSY_ACQUIRE 7
61 #define V3D_TSY_WAIT 8
62 #define V3D_TSY_WAIT_INC 9
63 #define V3D_TSY_WAIT_CHECK 10
64 #define V3D_TSY_WAIT_INC_CHECK 11
65 #define V3D_TSY_WAIT_CV 12
66 #define V3D_TSY_INC_SEMAPHORE 13
67 #define V3D_TSY_DEC_SEMAPHORE 14
68 #define V3D_TSY_SET_QUORUM_FREE_ALL 15
71 ntq_emit_cf_list(struct v3d_compile
*c
, struct exec_list
*list
);
74 resize_qreg_array(struct v3d_compile
*c
,
79 if (*size
>= decl_size
)
82 uint32_t old_size
= *size
;
83 *size
= MAX2(*size
* 2, decl_size
);
84 *regs
= reralloc(c
, *regs
, struct qreg
, *size
);
86 fprintf(stderr
, "Malloc failure\n");
90 for (uint32_t i
= old_size
; i
< *size
; i
++)
91 (*regs
)[i
] = c
->undef
;
95 vir_emit_thrsw(struct v3d_compile
*c
)
100 /* Always thread switch after each texture operation for now.
102 * We could do better by batching a bunch of texture fetches up and
103 * then doing one thread switch and collecting all their results
106 c
->last_thrsw
= vir_NOP(c
);
107 c
->last_thrsw
->qpu
.sig
.thrsw
= true;
108 c
->last_thrsw_at_top_level
= !c
->in_control_flow
;
110 /* We need to lock the scoreboard before any tlb acess happens. If this
111 * thread switch comes after we have emitted a tlb load, then it means
112 * that we can't lock on the last thread switch any more.
114 if (c
->emitted_tlb_load
)
115 c
->lock_scoreboard_on_first_thrsw
= true;
119 v3d_get_op_for_atomic_add(nir_intrinsic_instr
*instr
, unsigned src
)
121 if (nir_src_is_const(instr
->src
[src
])) {
122 int64_t add_val
= nir_src_as_int(instr
->src
[src
]);
124 return V3D_TMU_OP_WRITE_AND_READ_INC
;
125 else if (add_val
== -1)
126 return V3D_TMU_OP_WRITE_OR_READ_DEC
;
129 return V3D_TMU_OP_WRITE_ADD_READ_PREFETCH
;
133 v3d_general_tmu_op(nir_intrinsic_instr
*instr
)
135 switch (instr
->intrinsic
) {
136 case nir_intrinsic_load_ssbo
:
137 case nir_intrinsic_load_ubo
:
138 case nir_intrinsic_load_uniform
:
139 case nir_intrinsic_load_shared
:
140 case nir_intrinsic_load_scratch
:
141 case nir_intrinsic_store_ssbo
:
142 case nir_intrinsic_store_shared
:
143 case nir_intrinsic_store_scratch
:
144 return V3D_TMU_OP_REGULAR
;
145 case nir_intrinsic_ssbo_atomic_add
:
146 return v3d_get_op_for_atomic_add(instr
, 2);
147 case nir_intrinsic_shared_atomic_add
:
148 return v3d_get_op_for_atomic_add(instr
, 1);
149 case nir_intrinsic_ssbo_atomic_imin
:
150 case nir_intrinsic_shared_atomic_imin
:
151 return V3D_TMU_OP_WRITE_SMIN
;
152 case nir_intrinsic_ssbo_atomic_umin
:
153 case nir_intrinsic_shared_atomic_umin
:
154 return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR
;
155 case nir_intrinsic_ssbo_atomic_imax
:
156 case nir_intrinsic_shared_atomic_imax
:
157 return V3D_TMU_OP_WRITE_SMAX
;
158 case nir_intrinsic_ssbo_atomic_umax
:
159 case nir_intrinsic_shared_atomic_umax
:
160 return V3D_TMU_OP_WRITE_UMAX
;
161 case nir_intrinsic_ssbo_atomic_and
:
162 case nir_intrinsic_shared_atomic_and
:
163 return V3D_TMU_OP_WRITE_AND_READ_INC
;
164 case nir_intrinsic_ssbo_atomic_or
:
165 case nir_intrinsic_shared_atomic_or
:
166 return V3D_TMU_OP_WRITE_OR_READ_DEC
;
167 case nir_intrinsic_ssbo_atomic_xor
:
168 case nir_intrinsic_shared_atomic_xor
:
169 return V3D_TMU_OP_WRITE_XOR_READ_NOT
;
170 case nir_intrinsic_ssbo_atomic_exchange
:
171 case nir_intrinsic_shared_atomic_exchange
:
172 return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH
;
173 case nir_intrinsic_ssbo_atomic_comp_swap
:
174 case nir_intrinsic_shared_atomic_comp_swap
:
175 return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH
;
177 unreachable("unknown intrinsic op");
182 * Implements indirect uniform loads and SSBO accesses through the TMU general
183 * memory access interface.
186 ntq_emit_tmu_general(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
,
187 bool is_shared_or_scratch
)
189 uint32_t tmu_op
= v3d_general_tmu_op(instr
);
191 /* If we were able to replace atomic_add for an inc/dec, then we
192 * need/can to do things slightly different, like not loading the
193 * amount to add/sub, as that is implicit.
195 bool atomic_add_replaced
= ((instr
->intrinsic
== nir_intrinsic_ssbo_atomic_add
||
196 instr
->intrinsic
== nir_intrinsic_shared_atomic_add
) &&
197 (tmu_op
== V3D_TMU_OP_WRITE_AND_READ_INC
||
198 tmu_op
== V3D_TMU_OP_WRITE_OR_READ_DEC
));
200 bool is_store
= (instr
->intrinsic
== nir_intrinsic_store_ssbo
||
201 instr
->intrinsic
== nir_intrinsic_store_scratch
||
202 instr
->intrinsic
== nir_intrinsic_store_shared
);
204 bool is_load
= (instr
->intrinsic
== nir_intrinsic_load_uniform
||
205 instr
->intrinsic
== nir_intrinsic_load_ubo
||
206 instr
->intrinsic
== nir_intrinsic_load_ssbo
||
207 instr
->intrinsic
== nir_intrinsic_load_scratch
||
208 instr
->intrinsic
== nir_intrinsic_load_shared
);
210 bool has_index
= !is_shared_or_scratch
;
213 if (instr
->intrinsic
== nir_intrinsic_load_uniform
) {
215 } else if (instr
->intrinsic
== nir_intrinsic_load_ssbo
||
216 instr
->intrinsic
== nir_intrinsic_load_ubo
||
217 instr
->intrinsic
== nir_intrinsic_load_scratch
||
218 instr
->intrinsic
== nir_intrinsic_load_shared
||
219 atomic_add_replaced
) {
220 offset_src
= 0 + has_index
;
221 } else if (is_store
) {
222 offset_src
= 1 + has_index
;
224 offset_src
= 0 + has_index
;
227 bool dynamic_src
= !nir_src_is_const(instr
->src
[offset_src
]);
228 uint32_t const_offset
= 0;
230 const_offset
= nir_src_as_uint(instr
->src
[offset_src
]);
233 if (instr
->intrinsic
== nir_intrinsic_load_uniform
) {
234 const_offset
+= nir_intrinsic_base(instr
);
235 offset
= vir_uniform(c
, QUNIFORM_UBO_ADDR
,
236 v3d_unit_data_create(0, const_offset
));
238 } else if (instr
->intrinsic
== nir_intrinsic_load_ubo
) {
239 uint32_t index
= nir_src_as_uint(instr
->src
[0]) + 1;
240 /* Note that QUNIFORM_UBO_ADDR takes a UBO index shifted up by
241 * 1 (0 is gallium's constant buffer 0).
243 offset
= vir_uniform(c
, QUNIFORM_UBO_ADDR
,
244 v3d_unit_data_create(index
, const_offset
));
246 } else if (is_shared_or_scratch
) {
247 /* Shared and scratch variables have no buffer index, and all
248 * start from a common base that we set up at the start of
251 if (instr
->intrinsic
== nir_intrinsic_load_scratch
||
252 instr
->intrinsic
== nir_intrinsic_store_scratch
) {
253 offset
= c
->spill_base
;
255 offset
= c
->cs_shared_offset
;
256 const_offset
+= nir_intrinsic_base(instr
);
259 offset
= vir_uniform(c
, QUNIFORM_SSBO_OFFSET
,
260 nir_src_as_uint(instr
->src
[is_store
?
264 int tmu_writes
= 1; /* address */
266 for (int i
= 0; i
< instr
->num_components
; i
++) {
268 vir_reg(QFILE_MAGIC
, V3D_QPU_WADDR_TMUD
),
269 ntq_get_src(c
, instr
->src
[0], i
));
272 } else if (!is_load
&& !atomic_add_replaced
) {
274 vir_reg(QFILE_MAGIC
, V3D_QPU_WADDR_TMUD
),
275 ntq_get_src(c
, instr
->src
[1 + has_index
], 0));
277 if (tmu_op
== V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH
) {
279 vir_reg(QFILE_MAGIC
, V3D_QPU_WADDR_TMUD
),
280 ntq_get_src(c
, instr
->src
[2 + has_index
],
286 /* Make sure we won't exceed the 16-entry TMU fifo if each thread is
287 * storing at the same time.
289 while (tmu_writes
> 16 / c
->threads
)
292 /* The spec says that for atomics, the TYPE field is ignored, but that
293 * doesn't seem to be the case for CMPXCHG. Just use the number of
294 * tmud writes we did to decide the type (or choose "32bit" for atomic
295 * reads, which has been fine).
298 if (tmu_op
== V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH
)
301 num_components
= instr
->num_components
;
303 uint32_t config
= (0xffffff00 |
305 GENERAL_TMU_LOOKUP_PER_PIXEL
);
306 if (num_components
== 1) {
307 config
|= GENERAL_TMU_LOOKUP_TYPE_32BIT_UI
;
309 config
|= GENERAL_TMU_LOOKUP_TYPE_VEC2
+ num_components
- 2;
312 if (vir_in_nonuniform_control_flow(c
)) {
313 vir_set_pf(vir_MOV_dest(c
, vir_nop_reg(), c
->execute
),
319 tmua
= vir_reg(QFILE_MAGIC
, V3D_QPU_WADDR_TMUA
);
321 tmua
= vir_reg(QFILE_MAGIC
, V3D_QPU_WADDR_TMUAU
);
325 if (const_offset
!= 0) {
326 offset
= vir_ADD(c
, offset
,
327 vir_uniform_ui(c
, const_offset
));
329 tmu
= vir_ADD_dest(c
, tmua
, offset
,
330 ntq_get_src(c
, instr
->src
[offset_src
], 0));
332 if (const_offset
!= 0) {
333 tmu
= vir_ADD_dest(c
, tmua
, offset
,
334 vir_uniform_ui(c
, const_offset
));
336 tmu
= vir_MOV_dest(c
, tmua
, offset
);
341 tmu
->uniform
= vir_get_uniform_index(c
, QUNIFORM_CONSTANT
,
345 if (vir_in_nonuniform_control_flow(c
))
346 vir_set_cond(tmu
, V3D_QPU_COND_IFA
);
350 /* Read the result, or wait for the TMU op to complete. */
351 for (int i
= 0; i
< nir_intrinsic_dest_components(instr
); i
++)
352 ntq_store_dest(c
, &instr
->dest
, i
, vir_MOV(c
, vir_LDTMU(c
)));
354 if (nir_intrinsic_dest_components(instr
) == 0)
359 ntq_init_ssa_def(struct v3d_compile
*c
, nir_ssa_def
*def
)
361 struct qreg
*qregs
= ralloc_array(c
->def_ht
, struct qreg
,
362 def
->num_components
);
363 _mesa_hash_table_insert(c
->def_ht
, def
, qregs
);
368 * This function is responsible for getting VIR results into the associated
369 * storage for a NIR instruction.
371 * If it's a NIR SSA def, then we just set the associated hash table entry to
374 * If it's a NIR reg, then we need to update the existing qreg assigned to the
375 * NIR destination with the incoming value. To do that without introducing
376 * new MOVs, we require that the incoming qreg either be a uniform, or be
377 * SSA-defined by the previous VIR instruction in the block and rewritable by
378 * this function. That lets us sneak ahead and insert the SF flag beforehand
379 * (knowing that the previous instruction doesn't depend on flags) and rewrite
380 * its destination to be the NIR reg's destination
383 ntq_store_dest(struct v3d_compile
*c
, nir_dest
*dest
, int chan
,
386 struct qinst
*last_inst
= NULL
;
387 if (!list_empty(&c
->cur_block
->instructions
))
388 last_inst
= (struct qinst
*)c
->cur_block
->instructions
.prev
;
390 assert((result
.file
== QFILE_TEMP
&&
391 last_inst
&& last_inst
== c
->defs
[result
.index
]));
394 assert(chan
< dest
->ssa
.num_components
);
397 struct hash_entry
*entry
=
398 _mesa_hash_table_search(c
->def_ht
, &dest
->ssa
);
403 qregs
= ntq_init_ssa_def(c
, &dest
->ssa
);
405 qregs
[chan
] = result
;
407 nir_register
*reg
= dest
->reg
.reg
;
408 assert(dest
->reg
.base_offset
== 0);
409 assert(reg
->num_array_elems
== 0);
410 struct hash_entry
*entry
=
411 _mesa_hash_table_search(c
->def_ht
, reg
);
412 struct qreg
*qregs
= entry
->data
;
414 /* Insert a MOV if the source wasn't an SSA def in the
415 * previous instruction.
417 if ((vir_in_nonuniform_control_flow(c
) &&
418 c
->defs
[last_inst
->dst
.index
]->qpu
.sig
.ldunif
)) {
419 result
= vir_MOV(c
, result
);
420 last_inst
= c
->defs
[result
.index
];
423 /* We know they're both temps, so just rewrite index. */
424 c
->defs
[last_inst
->dst
.index
] = NULL
;
425 last_inst
->dst
.index
= qregs
[chan
].index
;
427 /* If we're in control flow, then make this update of the reg
428 * conditional on the execution mask.
430 if (vir_in_nonuniform_control_flow(c
)) {
431 last_inst
->dst
.index
= qregs
[chan
].index
;
433 /* Set the flags to the current exec mask.
435 c
->cursor
= vir_before_inst(last_inst
);
436 vir_set_pf(vir_MOV_dest(c
, vir_nop_reg(), c
->execute
),
438 c
->cursor
= vir_after_inst(last_inst
);
440 vir_set_cond(last_inst
, V3D_QPU_COND_IFA
);
446 ntq_get_src(struct v3d_compile
*c
, nir_src src
, int i
)
448 struct hash_entry
*entry
;
450 entry
= _mesa_hash_table_search(c
->def_ht
, src
.ssa
);
451 assert(i
< src
.ssa
->num_components
);
453 nir_register
*reg
= src
.reg
.reg
;
454 entry
= _mesa_hash_table_search(c
->def_ht
, reg
);
455 assert(reg
->num_array_elems
== 0);
456 assert(src
.reg
.base_offset
== 0);
457 assert(i
< reg
->num_components
);
460 struct qreg
*qregs
= entry
->data
;
465 ntq_get_alu_src(struct v3d_compile
*c
, nir_alu_instr
*instr
,
468 assert(util_is_power_of_two_or_zero(instr
->dest
.write_mask
));
469 unsigned chan
= ffs(instr
->dest
.write_mask
) - 1;
470 struct qreg r
= ntq_get_src(c
, instr
->src
[src
].src
,
471 instr
->src
[src
].swizzle
[chan
]);
473 assert(!instr
->src
[src
].abs
);
474 assert(!instr
->src
[src
].negate
);
480 ntq_minify(struct v3d_compile
*c
, struct qreg size
, struct qreg level
)
482 return vir_MAX(c
, vir_SHR(c
, size
, level
), vir_uniform_ui(c
, 1));
486 ntq_emit_txs(struct v3d_compile
*c
, nir_tex_instr
*instr
)
488 unsigned unit
= instr
->texture_index
;
489 int lod_index
= nir_tex_instr_src_index(instr
, nir_tex_src_lod
);
490 int dest_size
= nir_tex_instr_dest_size(instr
);
492 struct qreg lod
= c
->undef
;
494 lod
= ntq_get_src(c
, instr
->src
[lod_index
].src
, 0);
496 for (int i
= 0; i
< dest_size
; i
++) {
498 enum quniform_contents contents
;
500 if (instr
->is_array
&& i
== dest_size
- 1)
501 contents
= QUNIFORM_TEXTURE_ARRAY_SIZE
;
503 contents
= QUNIFORM_TEXTURE_WIDTH
+ i
;
505 struct qreg size
= vir_uniform(c
, contents
, unit
);
507 switch (instr
->sampler_dim
) {
508 case GLSL_SAMPLER_DIM_1D
:
509 case GLSL_SAMPLER_DIM_2D
:
510 case GLSL_SAMPLER_DIM_MS
:
511 case GLSL_SAMPLER_DIM_3D
:
512 case GLSL_SAMPLER_DIM_CUBE
:
513 /* Don't minify the array size. */
514 if (!(instr
->is_array
&& i
== dest_size
- 1)) {
515 size
= ntq_minify(c
, size
, lod
);
519 case GLSL_SAMPLER_DIM_RECT
:
520 /* There's no LOD field for rects */
524 unreachable("Bad sampler type");
527 ntq_store_dest(c
, &instr
->dest
, i
, size
);
532 ntq_emit_tex(struct v3d_compile
*c
, nir_tex_instr
*instr
)
534 unsigned unit
= instr
->texture_index
;
536 /* Since each texture sampling op requires uploading uniforms to
537 * reference the texture, there's no HW support for texture size and
538 * you just upload uniforms containing the size.
541 case nir_texop_query_levels
:
542 ntq_store_dest(c
, &instr
->dest
, 0,
543 vir_uniform(c
, QUNIFORM_TEXTURE_LEVELS
, unit
));
546 ntq_emit_txs(c
, instr
);
552 if (c
->devinfo
->ver
>= 40)
553 v3d40_vir_emit_tex(c
, instr
);
555 v3d33_vir_emit_tex(c
, instr
);
559 ntq_fsincos(struct v3d_compile
*c
, struct qreg src
, bool is_cos
)
561 struct qreg input
= vir_FMUL(c
, src
, vir_uniform_f(c
, 1.0f
/ M_PI
));
563 input
= vir_FADD(c
, input
, vir_uniform_f(c
, 0.5));
565 struct qreg periods
= vir_FROUND(c
, input
);
566 struct qreg sin_output
= vir_SIN(c
, vir_FSUB(c
, input
, periods
));
567 return vir_XOR(c
, sin_output
, vir_SHL(c
,
568 vir_FTOIN(c
, periods
),
569 vir_uniform_ui(c
, -1)));
573 ntq_fsign(struct v3d_compile
*c
, struct qreg src
)
575 struct qreg t
= vir_get_temp(c
);
577 vir_MOV_dest(c
, t
, vir_uniform_f(c
, 0.0));
578 vir_set_pf(vir_FMOV_dest(c
, vir_nop_reg(), src
), V3D_QPU_PF_PUSHZ
);
579 vir_MOV_cond(c
, V3D_QPU_COND_IFNA
, t
, vir_uniform_f(c
, 1.0));
580 vir_set_pf(vir_FMOV_dest(c
, vir_nop_reg(), src
), V3D_QPU_PF_PUSHN
);
581 vir_MOV_cond(c
, V3D_QPU_COND_IFA
, t
, vir_uniform_f(c
, -1.0));
582 return vir_MOV(c
, t
);
586 emit_fragcoord_input(struct v3d_compile
*c
, int attr
)
588 c
->inputs
[attr
* 4 + 0] = vir_FXCD(c
);
589 c
->inputs
[attr
* 4 + 1] = vir_FYCD(c
);
590 c
->inputs
[attr
* 4 + 2] = c
->payload_z
;
591 c
->inputs
[attr
* 4 + 3] = vir_RECIP(c
, c
->payload_w
);
595 emit_fragment_varying(struct v3d_compile
*c
, nir_variable
*var
,
596 uint8_t swizzle
, int array_index
)
598 struct qreg r3
= vir_reg(QFILE_MAGIC
, V3D_QPU_WADDR_R3
);
599 struct qreg r5
= vir_reg(QFILE_MAGIC
, V3D_QPU_WADDR_R5
);
602 if (c
->devinfo
->ver
>= 41) {
603 struct qinst
*ldvary
= vir_add_inst(V3D_QPU_A_NOP
, c
->undef
,
605 ldvary
->qpu
.sig
.ldvary
= true;
606 vary
= vir_emit_def(c
, ldvary
);
608 vir_NOP(c
)->qpu
.sig
.ldvary
= true;
612 /* For gl_PointCoord input or distance along a line, we'll be called
613 * with no nir_variable, and we don't count toward VPM size so we
614 * don't track an input slot.
617 return vir_FADD(c
, vir_FMUL(c
, vary
, c
->payload_w
), r5
);
620 int i
= c
->num_inputs
++;
622 v3d_slot_from_slot_and_component(var
->data
.location
+
623 array_index
, swizzle
);
625 switch (var
->data
.interpolation
) {
626 case INTERP_MODE_NONE
:
627 /* If a gl_FrontColor or gl_BackColor input has no interp
628 * qualifier, then if we're using glShadeModel(GL_FLAT) it
629 * needs to be flat shaded.
631 switch (var
->data
.location
+ array_index
) {
632 case VARYING_SLOT_COL0
:
633 case VARYING_SLOT_COL1
:
634 case VARYING_SLOT_BFC0
:
635 case VARYING_SLOT_BFC1
:
636 if (c
->fs_key
->shade_model_flat
) {
637 BITSET_SET(c
->flat_shade_flags
, i
);
638 vir_MOV_dest(c
, c
->undef
, vary
);
639 return vir_MOV(c
, r5
);
641 return vir_FADD(c
, vir_FMUL(c
, vary
,
648 case INTERP_MODE_SMOOTH
:
649 if (var
->data
.centroid
) {
650 BITSET_SET(c
->centroid_flags
, i
);
651 return vir_FADD(c
, vir_FMUL(c
, vary
,
652 c
->payload_w_centroid
), r5
);
654 return vir_FADD(c
, vir_FMUL(c
, vary
, c
->payload_w
), r5
);
656 case INTERP_MODE_NOPERSPECTIVE
:
657 BITSET_SET(c
->noperspective_flags
, i
);
658 return vir_FADD(c
, vir_MOV(c
, vary
), r5
);
659 case INTERP_MODE_FLAT
:
660 BITSET_SET(c
->flat_shade_flags
, i
);
661 vir_MOV_dest(c
, c
->undef
, vary
);
662 return vir_MOV(c
, r5
);
664 unreachable("Bad interp mode");
669 emit_fragment_input(struct v3d_compile
*c
, int attr
, nir_variable
*var
,
672 for (int i
= 0; i
< glsl_get_vector_elements(var
->type
); i
++) {
673 int chan
= var
->data
.location_frac
+ i
;
674 c
->inputs
[attr
* 4 + chan
] =
675 emit_fragment_varying(c
, var
, chan
, array_index
);
680 add_output(struct v3d_compile
*c
,
681 uint32_t decl_offset
,
685 uint32_t old_array_size
= c
->outputs_array_size
;
686 resize_qreg_array(c
, &c
->outputs
, &c
->outputs_array_size
,
689 if (old_array_size
!= c
->outputs_array_size
) {
690 c
->output_slots
= reralloc(c
,
692 struct v3d_varying_slot
,
693 c
->outputs_array_size
);
696 c
->output_slots
[decl_offset
] =
697 v3d_slot_from_slot_and_component(slot
, swizzle
);
701 * If compare_instr is a valid comparison instruction, emits the
702 * compare_instr's comparison and returns the sel_instr's return value based
703 * on the compare_instr's result.
706 ntq_emit_comparison(struct v3d_compile
*c
,
707 nir_alu_instr
*compare_instr
,
708 enum v3d_qpu_cond
*out_cond
)
710 struct qreg src0
= ntq_get_alu_src(c
, compare_instr
, 0);
712 if (nir_op_infos
[compare_instr
->op
].num_inputs
> 1)
713 src1
= ntq_get_alu_src(c
, compare_instr
, 1);
714 bool cond_invert
= false;
715 struct qreg nop
= vir_nop_reg();
717 switch (compare_instr
->op
) {
720 vir_set_pf(vir_FCMP_dest(c
, nop
, src0
, src1
), V3D_QPU_PF_PUSHZ
);
723 vir_set_pf(vir_XOR_dest(c
, nop
, src0
, src1
), V3D_QPU_PF_PUSHZ
);
728 vir_set_pf(vir_FCMP_dest(c
, nop
, src0
, src1
), V3D_QPU_PF_PUSHZ
);
732 vir_set_pf(vir_XOR_dest(c
, nop
, src0
, src1
), V3D_QPU_PF_PUSHZ
);
738 vir_set_pf(vir_FCMP_dest(c
, nop
, src1
, src0
), V3D_QPU_PF_PUSHC
);
741 vir_set_pf(vir_MIN_dest(c
, nop
, src1
, src0
), V3D_QPU_PF_PUSHC
);
745 vir_set_pf(vir_SUB_dest(c
, nop
, src0
, src1
), V3D_QPU_PF_PUSHC
);
751 vir_set_pf(vir_FCMP_dest(c
, nop
, src0
, src1
), V3D_QPU_PF_PUSHN
);
754 vir_set_pf(vir_MIN_dest(c
, nop
, src1
, src0
), V3D_QPU_PF_PUSHC
);
757 vir_set_pf(vir_SUB_dest(c
, nop
, src0
, src1
), V3D_QPU_PF_PUSHC
);
761 vir_set_pf(vir_MOV_dest(c
, nop
, src0
), V3D_QPU_PF_PUSHZ
);
766 vir_set_pf(vir_FMOV_dest(c
, nop
, src0
), V3D_QPU_PF_PUSHZ
);
774 *out_cond
= cond_invert
? V3D_QPU_COND_IFNA
: V3D_QPU_COND_IFA
;
779 /* Finds an ALU instruction that generates our src value that could
780 * (potentially) be greedily emitted in the consuming instruction.
782 static struct nir_alu_instr
*
783 ntq_get_alu_parent(nir_src src
)
785 if (!src
.is_ssa
|| src
.ssa
->parent_instr
->type
!= nir_instr_type_alu
)
787 nir_alu_instr
*instr
= nir_instr_as_alu(src
.ssa
->parent_instr
);
791 /* If the ALU instr's srcs are non-SSA, then we would have to avoid
792 * moving emission of the ALU instr down past another write of the
795 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++) {
796 if (!instr
->src
[i
].src
.is_ssa
)
803 /* Turns a NIR bool into a condition code to predicate on. */
804 static enum v3d_qpu_cond
805 ntq_emit_bool_to_cond(struct v3d_compile
*c
, nir_src src
)
807 nir_alu_instr
*compare
= ntq_get_alu_parent(src
);
811 enum v3d_qpu_cond cond
;
812 if (ntq_emit_comparison(c
, compare
, &cond
))
816 vir_set_pf(vir_MOV_dest(c
, vir_nop_reg(), ntq_get_src(c
, src
, 0)),
818 return V3D_QPU_COND_IFNA
;
822 ntq_emit_alu(struct v3d_compile
*c
, nir_alu_instr
*instr
)
824 /* This should always be lowered to ALU operations for V3D. */
825 assert(!instr
->dest
.saturate
);
827 /* Vectors are special in that they have non-scalarized writemasks,
828 * and just take the first swizzle channel for each argument in order
829 * into each writemask channel.
831 if (instr
->op
== nir_op_vec2
||
832 instr
->op
== nir_op_vec3
||
833 instr
->op
== nir_op_vec4
) {
835 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
836 srcs
[i
] = ntq_get_src(c
, instr
->src
[i
].src
,
837 instr
->src
[i
].swizzle
[0]);
838 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
839 ntq_store_dest(c
, &instr
->dest
.dest
, i
,
840 vir_MOV(c
, srcs
[i
]));
844 /* General case: We can just grab the one used channel per src. */
845 struct qreg src
[nir_op_infos
[instr
->op
].num_inputs
];
846 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++) {
847 src
[i
] = ntq_get_alu_src(c
, instr
, i
);
854 result
= vir_MOV(c
, src
[0]);
858 result
= vir_XOR(c
, src
[0], vir_uniform_ui(c
, 1 << 31));
861 result
= vir_NEG(c
, src
[0]);
865 result
= vir_FMUL(c
, src
[0], src
[1]);
868 result
= vir_FADD(c
, src
[0], src
[1]);
871 result
= vir_FSUB(c
, src
[0], src
[1]);
874 result
= vir_FMIN(c
, src
[0], src
[1]);
877 result
= vir_FMAX(c
, src
[0], src
[1]);
881 nir_alu_instr
*src0_alu
= ntq_get_alu_parent(instr
->src
[0].src
);
882 if (src0_alu
&& src0_alu
->op
== nir_op_fround_even
) {
883 result
= vir_FTOIN(c
, ntq_get_alu_src(c
, src0_alu
, 0));
885 result
= vir_FTOIZ(c
, src
[0]);
891 result
= vir_FTOUZ(c
, src
[0]);
894 result
= vir_ITOF(c
, src
[0]);
897 result
= vir_UTOF(c
, src
[0]);
900 result
= vir_AND(c
, src
[0], vir_uniform_f(c
, 1.0));
903 result
= vir_AND(c
, src
[0], vir_uniform_ui(c
, 1));
907 result
= vir_ADD(c
, src
[0], src
[1]);
910 result
= vir_SHR(c
, src
[0], src
[1]);
913 result
= vir_SUB(c
, src
[0], src
[1]);
916 result
= vir_ASR(c
, src
[0], src
[1]);
919 result
= vir_SHL(c
, src
[0], src
[1]);
922 result
= vir_MIN(c
, src
[0], src
[1]);
925 result
= vir_UMIN(c
, src
[0], src
[1]);
928 result
= vir_MAX(c
, src
[0], src
[1]);
931 result
= vir_UMAX(c
, src
[0], src
[1]);
934 result
= vir_AND(c
, src
[0], src
[1]);
937 result
= vir_OR(c
, src
[0], src
[1]);
940 result
= vir_XOR(c
, src
[0], src
[1]);
943 result
= vir_NOT(c
, src
[0]);
946 case nir_op_ufind_msb
:
947 result
= vir_SUB(c
, vir_uniform_ui(c
, 31), vir_CLZ(c
, src
[0]));
951 result
= vir_UMUL(c
, src
[0], src
[1]);
958 enum v3d_qpu_cond cond
;
959 ASSERTED
bool ok
= ntq_emit_comparison(c
, instr
, &cond
);
961 result
= vir_MOV(c
, vir_SEL(c
, cond
,
962 vir_uniform_f(c
, 1.0),
963 vir_uniform_f(c
, 0.0)));
979 enum v3d_qpu_cond cond
;
980 ASSERTED
bool ok
= ntq_emit_comparison(c
, instr
, &cond
);
982 result
= vir_MOV(c
, vir_SEL(c
, cond
,
983 vir_uniform_ui(c
, ~0),
984 vir_uniform_ui(c
, 0)));
991 ntq_emit_bool_to_cond(c
, instr
->src
[0].src
),
996 vir_set_pf(vir_MOV_dest(c
, vir_nop_reg(), src
[0]),
998 result
= vir_MOV(c
, vir_SEL(c
, V3D_QPU_COND_IFNA
,
1003 result
= vir_RECIP(c
, src
[0]);
1006 result
= vir_RSQRT(c
, src
[0]);
1009 result
= vir_EXP(c
, src
[0]);
1012 result
= vir_LOG(c
, src
[0]);
1016 result
= vir_FCEIL(c
, src
[0]);
1019 result
= vir_FFLOOR(c
, src
[0]);
1021 case nir_op_fround_even
:
1022 result
= vir_FROUND(c
, src
[0]);
1025 result
= vir_FTRUNC(c
, src
[0]);
1029 result
= ntq_fsincos(c
, src
[0], false);
1032 result
= ntq_fsincos(c
, src
[0], true);
1036 result
= ntq_fsign(c
, src
[0]);
1040 result
= vir_FMOV(c
, src
[0]);
1041 vir_set_unpack(c
->defs
[result
.index
], 0, V3D_QPU_UNPACK_ABS
);
1046 result
= vir_MAX(c
, src
[0], vir_NEG(c
, src
[0]));
1050 case nir_op_fddx_coarse
:
1051 case nir_op_fddx_fine
:
1052 result
= vir_FDX(c
, src
[0]);
1056 case nir_op_fddy_coarse
:
1057 case nir_op_fddy_fine
:
1058 result
= vir_FDY(c
, src
[0]);
1061 case nir_op_uadd_carry
:
1062 vir_set_pf(vir_ADD_dest(c
, vir_nop_reg(), src
[0], src
[1]),
1064 result
= vir_MOV(c
, vir_SEL(c
, V3D_QPU_COND_IFA
,
1065 vir_uniform_ui(c
, ~0),
1066 vir_uniform_ui(c
, 0)));
1069 case nir_op_pack_half_2x16_split
:
1070 result
= vir_VFPACK(c
, src
[0], src
[1]);
1073 case nir_op_unpack_half_2x16_split_x
:
1074 result
= vir_FMOV(c
, src
[0]);
1075 vir_set_unpack(c
->defs
[result
.index
], 0, V3D_QPU_UNPACK_L
);
1078 case nir_op_unpack_half_2x16_split_y
:
1079 result
= vir_FMOV(c
, src
[0]);
1080 vir_set_unpack(c
->defs
[result
.index
], 0, V3D_QPU_UNPACK_H
);
1084 fprintf(stderr
, "unknown NIR ALU inst: ");
1085 nir_print_instr(&instr
->instr
, stderr
);
1086 fprintf(stderr
, "\n");
1090 /* We have a scalar result, so the instruction should only have a
1091 * single channel written to.
1093 assert(util_is_power_of_two_or_zero(instr
->dest
.write_mask
));
1094 ntq_store_dest(c
, &instr
->dest
.dest
,
1095 ffs(instr
->dest
.write_mask
) - 1, result
);
1098 /* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
1099 * specifier. They come from a register that's preloaded with 0xffffffff
1100 * (0xff gets you normal vec4 f16 RT0 writes), and when one is neaded the low
1101 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
1103 #define TLB_TYPE_F16_COLOR (3 << 6)
1104 #define TLB_TYPE_I32_COLOR (1 << 6)
1105 #define TLB_TYPE_F32_COLOR (0 << 6)
1106 #define TLB_RENDER_TARGET_SHIFT 3 /* Reversed! 7 = RT 0, 0 = RT 7. */
1107 #define TLB_SAMPLE_MODE_PER_SAMPLE (0 << 2)
1108 #define TLB_SAMPLE_MODE_PER_PIXEL (1 << 2)
1109 #define TLB_F16_SWAP_HI_LO (1 << 1)
1110 #define TLB_VEC_SIZE_4_F16 (1 << 0)
1111 #define TLB_VEC_SIZE_2_F16 (0 << 0)
1112 #define TLB_VEC_SIZE_MINUS_1_SHIFT 0
1114 /* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
1117 #define TLB_TYPE_DEPTH ((2 << 6) | (0 << 4))
1118 #define TLB_DEPTH_TYPE_INVARIANT (0 << 2) /* Unmodified sideband input used */
1119 #define TLB_DEPTH_TYPE_PER_PIXEL (1 << 2) /* QPU result used */
1120 #define TLB_V42_DEPTH_TYPE_INVARIANT (0 << 3) /* Unmodified sideband input used */
1121 #define TLB_V42_DEPTH_TYPE_PER_PIXEL (1 << 3) /* QPU result used */
1123 /* Stencil is a single 32-bit write. */
1124 #define TLB_TYPE_STENCIL_ALPHA ((2 << 6) | (1 << 4))
1127 vir_emit_tlb_color_write(struct v3d_compile
*c
, unsigned rt
)
1129 if (!(c
->fs_key
->cbufs
& (1 << rt
)) || !c
->output_color_var
[rt
])
1132 struct qreg tlb_reg
= vir_magic_reg(V3D_QPU_WADDR_TLB
);
1133 struct qreg tlbu_reg
= vir_magic_reg(V3D_QPU_WADDR_TLBU
);
1135 nir_variable
*var
= c
->output_color_var
[rt
];
1136 int num_components
= glsl_get_vector_elements(var
->type
);
1137 uint32_t conf
= 0xffffff00;
1140 conf
|= c
->msaa_per_sample_output
? TLB_SAMPLE_MODE_PER_SAMPLE
:
1141 TLB_SAMPLE_MODE_PER_PIXEL
;
1142 conf
|= (7 - rt
) << TLB_RENDER_TARGET_SHIFT
;
1144 if (c
->fs_key
->swap_color_rb
& (1 << rt
))
1145 num_components
= MAX2(num_components
, 3);
1146 assert(num_components
!= 0);
1148 enum glsl_base_type type
= glsl_get_base_type(var
->type
);
1149 bool is_int_format
= type
== GLSL_TYPE_INT
|| type
== GLSL_TYPE_UINT
;
1150 bool is_32b_tlb_format
= is_int_format
||
1151 (c
->fs_key
->f32_color_rb
& (1 << rt
));
1153 if (is_int_format
) {
1154 /* The F32 vs I32 distinction was dropped in 4.2. */
1155 if (c
->devinfo
->ver
< 42)
1156 conf
|= TLB_TYPE_I32_COLOR
;
1158 conf
|= TLB_TYPE_F32_COLOR
;
1159 conf
|= ((num_components
- 1) << TLB_VEC_SIZE_MINUS_1_SHIFT
);
1161 if (c
->fs_key
->f32_color_rb
& (1 << rt
)) {
1162 conf
|= TLB_TYPE_F32_COLOR
;
1163 conf
|= ((num_components
- 1) <<
1164 TLB_VEC_SIZE_MINUS_1_SHIFT
);
1166 conf
|= TLB_TYPE_F16_COLOR
;
1167 conf
|= TLB_F16_SWAP_HI_LO
;
1168 if (num_components
>= 3)
1169 conf
|= TLB_VEC_SIZE_4_F16
;
1171 conf
|= TLB_VEC_SIZE_2_F16
;
1175 int num_samples
= c
->msaa_per_sample_output
? V3D_MAX_SAMPLES
: 1;
1176 for (int i
= 0; i
< num_samples
; i
++) {
1177 struct qreg
*color
= c
->msaa_per_sample_output
?
1178 &c
->sample_colors
[(rt
* V3D_MAX_SAMPLES
+ i
) * 4] :
1179 &c
->outputs
[var
->data
.driver_location
* 4];
1181 struct qreg r
= color
[0];
1182 struct qreg g
= color
[1];
1183 struct qreg b
= color
[2];
1184 struct qreg a
= color
[3];
1186 if (c
->fs_key
->swap_color_rb
& (1 << rt
)) {
1191 if (c
->fs_key
->sample_alpha_to_one
)
1192 a
= vir_uniform_f(c
, 1.0);
1194 if (is_32b_tlb_format
) {
1196 inst
= vir_MOV_dest(c
, tlbu_reg
, r
);
1198 vir_get_uniform_index(c
,
1202 inst
= vir_MOV_dest(c
, tlb_reg
, r
);
1205 if (num_components
>= 2)
1206 vir_MOV_dest(c
, tlb_reg
, g
);
1207 if (num_components
>= 3)
1208 vir_MOV_dest(c
, tlb_reg
, b
);
1209 if (num_components
>= 4)
1210 vir_MOV_dest(c
, tlb_reg
, a
);
1212 inst
= vir_VFPACK_dest(c
, tlb_reg
, r
, g
);
1213 if (conf
!= ~0 && i
== 0) {
1214 inst
->dst
= tlbu_reg
;
1216 vir_get_uniform_index(c
,
1221 if (num_components
>= 3)
1222 inst
= vir_VFPACK_dest(c
, tlb_reg
, b
, a
);
1228 emit_frag_end(struct v3d_compile
*c
)
1231 if (c->output_sample_mask_index != -1) {
1232 vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1236 bool has_any_tlb_color_write
= false;
1237 for (int rt
= 0; rt
< V3D_MAX_DRAW_BUFFERS
; rt
++) {
1238 if (c
->fs_key
->cbufs
& (1 << rt
) && c
->output_color_var
[rt
])
1239 has_any_tlb_color_write
= true;
1242 if (c
->fs_key
->sample_alpha_to_coverage
&& c
->output_color_var
[0]) {
1243 struct nir_variable
*var
= c
->output_color_var
[0];
1244 struct qreg
*color
= &c
->outputs
[var
->data
.driver_location
* 4];
1246 vir_SETMSF_dest(c
, vir_nop_reg(),
1249 vir_FTOC(c
, color
[3])));
1252 struct qreg tlbu_reg
= vir_magic_reg(V3D_QPU_WADDR_TLBU
);
1253 if (c
->output_position_index
!= -1) {
1254 struct qinst
*inst
= vir_MOV_dest(c
, tlbu_reg
,
1255 c
->outputs
[c
->output_position_index
]);
1256 uint8_t tlb_specifier
= TLB_TYPE_DEPTH
;
1258 if (c
->devinfo
->ver
>= 42) {
1259 tlb_specifier
|= (TLB_V42_DEPTH_TYPE_PER_PIXEL
|
1260 TLB_SAMPLE_MODE_PER_PIXEL
);
1262 tlb_specifier
|= TLB_DEPTH_TYPE_PER_PIXEL
;
1264 inst
->uniform
= vir_get_uniform_index(c
, QUNIFORM_CONSTANT
,
1268 } else if (c
->s
->info
.fs
.uses_discard
||
1269 !c
->s
->info
.fs
.early_fragment_tests
||
1270 c
->fs_key
->sample_alpha_to_coverage
||
1271 !has_any_tlb_color_write
) {
1272 /* Emit passthrough Z if it needed to be delayed until shader
1273 * end due to potential discards.
1275 * Since (single-threaded) fragment shaders always need a TLB
1276 * write, emit passthrouh Z if we didn't have any color
1277 * buffers and flag us as potentially discarding, so that we
1278 * can use Z as the TLB write.
1280 c
->s
->info
.fs
.uses_discard
= true;
1282 struct qinst
*inst
= vir_MOV_dest(c
, tlbu_reg
,
1284 uint8_t tlb_specifier
= TLB_TYPE_DEPTH
;
1286 if (c
->devinfo
->ver
>= 42) {
1287 /* The spec says the PER_PIXEL flag is ignored for
1288 * invariant writes, but the simulator demands it.
1290 tlb_specifier
|= (TLB_V42_DEPTH_TYPE_INVARIANT
|
1291 TLB_SAMPLE_MODE_PER_PIXEL
);
1293 tlb_specifier
|= TLB_DEPTH_TYPE_INVARIANT
;
1296 inst
->uniform
= vir_get_uniform_index(c
,
1303 /* XXX: Performance improvement: Merge Z write and color writes TLB
1306 for (int rt
= 0; rt
< V3D_MAX_DRAW_BUFFERS
; rt
++)
1307 vir_emit_tlb_color_write(c
, rt
);
1311 vir_VPM_WRITE(struct v3d_compile
*c
, struct qreg val
, uint32_t vpm_index
)
1313 if (c
->devinfo
->ver
>= 40) {
1314 vir_STVPMV(c
, vir_uniform_ui(c
, vpm_index
), val
);
1316 /* XXX: v3d33_vir_vpm_write_setup(c); */
1317 vir_MOV_dest(c
, vir_reg(QFILE_MAGIC
, V3D_QPU_WADDR_VPM
), val
);
1322 emit_vert_end(struct v3d_compile
*c
)
1324 /* GFXH-1684: VPM writes need to be complete by the end of the shader.
1326 if (c
->devinfo
->ver
>= 40 && c
->devinfo
->ver
<= 42)
1331 v3d_optimize_nir(struct nir_shader
*s
)
1334 unsigned lower_flrp
=
1335 (s
->options
->lower_flrp16
? 16 : 0) |
1336 (s
->options
->lower_flrp32
? 32 : 0) |
1337 (s
->options
->lower_flrp64
? 64 : 0);
1342 NIR_PASS_V(s
, nir_lower_vars_to_ssa
);
1343 NIR_PASS(progress
, s
, nir_lower_alu_to_scalar
, NULL
);
1344 NIR_PASS(progress
, s
, nir_lower_phis_to_scalar
);
1345 NIR_PASS(progress
, s
, nir_copy_prop
);
1346 NIR_PASS(progress
, s
, nir_opt_remove_phis
);
1347 NIR_PASS(progress
, s
, nir_opt_dce
);
1348 NIR_PASS(progress
, s
, nir_opt_dead_cf
);
1349 NIR_PASS(progress
, s
, nir_opt_cse
);
1350 NIR_PASS(progress
, s
, nir_opt_peephole_select
, 8, true, true);
1351 NIR_PASS(progress
, s
, nir_opt_algebraic
);
1352 NIR_PASS(progress
, s
, nir_opt_constant_folding
);
1354 if (lower_flrp
!= 0) {
1355 bool lower_flrp_progress
= false;
1357 NIR_PASS(lower_flrp_progress
, s
, nir_lower_flrp
,
1359 false /* always_precise */,
1360 s
->options
->lower_ffma
);
1361 if (lower_flrp_progress
) {
1362 NIR_PASS(progress
, s
, nir_opt_constant_folding
);
1366 /* Nothing should rematerialize any flrps, so we only
1367 * need to do this lowering once.
1372 NIR_PASS(progress
, s
, nir_opt_undef
);
1375 NIR_PASS(progress
, s
, nir_opt_move
, nir_move_load_ubo
);
1379 driver_location_compare(const void *in_a
, const void *in_b
)
1381 const nir_variable
*const *a
= in_a
;
1382 const nir_variable
*const *b
= in_b
;
1384 return (*a
)->data
.driver_location
- (*b
)->data
.driver_location
;
1388 ntq_emit_vpm_read(struct v3d_compile
*c
,
1389 uint32_t *num_components_queued
,
1390 uint32_t *remaining
,
1393 struct qreg vpm
= vir_reg(QFILE_VPM
, vpm_index
);
1395 if (c
->devinfo
->ver
>= 40 ) {
1396 return vir_LDVPMV_IN(c
,
1398 (*num_components_queued
)++));
1401 if (*num_components_queued
!= 0) {
1402 (*num_components_queued
)--;
1403 return vir_MOV(c
, vpm
);
1406 uint32_t num_components
= MIN2(*remaining
, 32);
1408 v3d33_vir_vpm_read_setup(c
, num_components
);
1410 *num_components_queued
= num_components
- 1;
1411 *remaining
-= num_components
;
1413 return vir_MOV(c
, vpm
);
1417 ntq_setup_vpm_inputs(struct v3d_compile
*c
)
1419 /* Figure out how many components of each vertex attribute the shader
1420 * uses. Each variable should have been split to individual
1421 * components and unused ones DCEed. The vertex fetcher will load
1422 * from the start of the attribute to the number of components we
1423 * declare we need in c->vattr_sizes[].
1425 nir_foreach_variable(var
, &c
->s
->inputs
) {
1426 /* No VS attribute array support. */
1427 assert(MAX2(glsl_get_length(var
->type
), 1) == 1);
1429 unsigned loc
= var
->data
.driver_location
;
1430 int start_component
= var
->data
.location_frac
;
1431 int num_components
= glsl_get_components(var
->type
);
1433 c
->vattr_sizes
[loc
] = MAX2(c
->vattr_sizes
[loc
],
1434 start_component
+ num_components
);
1437 unsigned num_components
= 0;
1438 uint32_t vpm_components_queued
= 0;
1439 bool uses_iid
= c
->s
->info
.system_values_read
&
1440 (1ull << SYSTEM_VALUE_INSTANCE_ID
);
1441 bool uses_vid
= c
->s
->info
.system_values_read
&
1442 (1ull << SYSTEM_VALUE_VERTEX_ID
);
1443 num_components
+= uses_iid
;
1444 num_components
+= uses_vid
;
1446 for (int i
= 0; i
< ARRAY_SIZE(c
->vattr_sizes
); i
++)
1447 num_components
+= c
->vattr_sizes
[i
];
1450 c
->iid
= ntq_emit_vpm_read(c
, &vpm_components_queued
,
1451 &num_components
, ~0);
1455 c
->vid
= ntq_emit_vpm_read(c
, &vpm_components_queued
,
1456 &num_components
, ~0);
1459 /* The actual loads will happen directly in nir_intrinsic_load_input
1460 * on newer versions.
1462 if (c
->devinfo
->ver
>= 40)
1465 for (int loc
= 0; loc
< ARRAY_SIZE(c
->vattr_sizes
); loc
++) {
1466 resize_qreg_array(c
, &c
->inputs
, &c
->inputs_array_size
,
1469 for (int i
= 0; i
< c
->vattr_sizes
[loc
]; i
++) {
1470 c
->inputs
[loc
* 4 + i
] =
1471 ntq_emit_vpm_read(c
,
1472 &vpm_components_queued
,
1479 if (c
->devinfo
->ver
>= 40) {
1480 assert(vpm_components_queued
== num_components
);
1482 assert(vpm_components_queued
== 0);
1483 assert(num_components
== 0);
1488 var_needs_point_coord(struct v3d_compile
*c
, nir_variable
*var
)
1490 return (var
->data
.location
== VARYING_SLOT_PNTC
||
1491 (var
->data
.location
>= VARYING_SLOT_VAR0
&&
1492 (c
->fs_key
->point_sprite_mask
&
1493 (1 << (var
->data
.location
- VARYING_SLOT_VAR0
)))));
1497 program_reads_point_coord(struct v3d_compile
*c
)
1499 nir_foreach_variable(var
, &c
->s
->inputs
) {
1500 if (var_needs_point_coord(c
, var
))
1508 ntq_setup_fs_inputs(struct v3d_compile
*c
)
1510 unsigned num_entries
= 0;
1511 unsigned num_components
= 0;
1512 nir_foreach_variable(var
, &c
->s
->inputs
) {
1514 num_components
+= glsl_get_components(var
->type
);
1517 nir_variable
*vars
[num_entries
];
1520 nir_foreach_variable(var
, &c
->s
->inputs
)
1523 /* Sort the variables so that we emit the input setup in
1524 * driver_location order. This is required for VPM reads, whose data
1525 * is fetched into the VPM in driver_location (TGSI register index)
1528 qsort(&vars
, num_entries
, sizeof(*vars
), driver_location_compare
);
1530 for (unsigned i
= 0; i
< num_entries
; i
++) {
1531 nir_variable
*var
= vars
[i
];
1532 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1533 unsigned loc
= var
->data
.driver_location
;
1535 resize_qreg_array(c
, &c
->inputs
, &c
->inputs_array_size
,
1536 (loc
+ array_len
) * 4);
1538 if (var
->data
.location
== VARYING_SLOT_POS
) {
1539 emit_fragcoord_input(c
, loc
);
1540 } else if (var_needs_point_coord(c
, var
)) {
1541 c
->inputs
[loc
* 4 + 0] = c
->point_x
;
1542 c
->inputs
[loc
* 4 + 1] = c
->point_y
;
1544 for (int j
= 0; j
< array_len
; j
++)
1545 emit_fragment_input(c
, loc
+ j
, var
, j
);
1551 ntq_setup_outputs(struct v3d_compile
*c
)
1553 if (c
->s
->info
.stage
!= MESA_SHADER_FRAGMENT
)
1556 nir_foreach_variable(var
, &c
->s
->outputs
) {
1557 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1558 unsigned loc
= var
->data
.driver_location
* 4;
1560 assert(array_len
== 1);
1563 for (int i
= 0; i
< 4 - var
->data
.location_frac
; i
++) {
1564 add_output(c
, loc
+ var
->data
.location_frac
+ i
,
1566 var
->data
.location_frac
+ i
);
1569 switch (var
->data
.location
) {
1570 case FRAG_RESULT_COLOR
:
1571 c
->output_color_var
[0] = var
;
1572 c
->output_color_var
[1] = var
;
1573 c
->output_color_var
[2] = var
;
1574 c
->output_color_var
[3] = var
;
1576 case FRAG_RESULT_DATA0
:
1577 case FRAG_RESULT_DATA1
:
1578 case FRAG_RESULT_DATA2
:
1579 case FRAG_RESULT_DATA3
:
1580 c
->output_color_var
[var
->data
.location
-
1581 FRAG_RESULT_DATA0
] = var
;
1583 case FRAG_RESULT_DEPTH
:
1584 c
->output_position_index
= loc
;
1586 case FRAG_RESULT_SAMPLE_MASK
:
1587 c
->output_sample_mask_index
= loc
;
1594 * Sets up the mapping from nir_register to struct qreg *.
1596 * Each nir_register gets a struct qreg per 32-bit component being stored.
1599 ntq_setup_registers(struct v3d_compile
*c
, struct exec_list
*list
)
1601 foreach_list_typed(nir_register
, nir_reg
, node
, list
) {
1602 unsigned array_len
= MAX2(nir_reg
->num_array_elems
, 1);
1603 struct qreg
*qregs
= ralloc_array(c
->def_ht
, struct qreg
,
1605 nir_reg
->num_components
);
1607 _mesa_hash_table_insert(c
->def_ht
, nir_reg
, qregs
);
1609 for (int i
= 0; i
< array_len
* nir_reg
->num_components
; i
++)
1610 qregs
[i
] = vir_get_temp(c
);
1615 ntq_emit_load_const(struct v3d_compile
*c
, nir_load_const_instr
*instr
)
1617 /* XXX perf: Experiment with using immediate loads to avoid having
1618 * these end up in the uniform stream. Watch out for breaking the
1619 * small immediates optimization in the process!
1621 struct qreg
*qregs
= ntq_init_ssa_def(c
, &instr
->def
);
1622 for (int i
= 0; i
< instr
->def
.num_components
; i
++)
1623 qregs
[i
] = vir_uniform_ui(c
, instr
->value
[i
].u32
);
1625 _mesa_hash_table_insert(c
->def_ht
, &instr
->def
, qregs
);
1629 ntq_emit_ssa_undef(struct v3d_compile
*c
, nir_ssa_undef_instr
*instr
)
1631 struct qreg
*qregs
= ntq_init_ssa_def(c
, &instr
->def
);
1633 /* VIR needs there to be *some* value, so pick 0 (same as for
1634 * ntq_setup_registers().
1636 for (int i
= 0; i
< instr
->def
.num_components
; i
++)
1637 qregs
[i
] = vir_uniform_ui(c
, 0);
1641 ntq_emit_image_size(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
1643 assert(instr
->intrinsic
== nir_intrinsic_image_deref_size
);
1644 nir_variable
*var
= nir_intrinsic_get_var(instr
, 0);
1645 unsigned image_index
= var
->data
.driver_location
;
1646 const struct glsl_type
*sampler_type
= glsl_without_array(var
->type
);
1647 bool is_array
= glsl_sampler_type_is_array(sampler_type
);
1649 ntq_store_dest(c
, &instr
->dest
, 0,
1650 vir_uniform(c
, QUNIFORM_IMAGE_WIDTH
, image_index
));
1651 if (instr
->num_components
> 1) {
1652 ntq_store_dest(c
, &instr
->dest
, 1,
1653 vir_uniform(c
, QUNIFORM_IMAGE_HEIGHT
,
1656 if (instr
->num_components
> 2) {
1657 ntq_store_dest(c
, &instr
->dest
, 2,
1660 QUNIFORM_IMAGE_ARRAY_SIZE
:
1661 QUNIFORM_IMAGE_DEPTH
,
1667 vir_emit_tlb_color_read(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
1669 assert(c
->s
->info
.stage
== MESA_SHADER_FRAGMENT
);
1671 int rt
= nir_src_as_uint(instr
->src
[0]);
1672 assert(rt
< V3D_MAX_DRAW_BUFFERS
);
1674 int sample_index
= nir_intrinsic_base(instr
) ;
1675 assert(sample_index
< V3D_MAX_SAMPLES
);
1677 int component
= nir_intrinsic_component(instr
);
1678 assert(component
< 4);
1680 /* We need to emit our TLB reads after we have acquired the scoreboard
1681 * lock, or the GPU will hang. Usually, we do our scoreboard locking on
1682 * the last thread switch to improve parallelism, however, that is only
1683 * guaranteed to happen before the tlb color writes.
1685 * To fix that, we make sure we always emit a thread switch before the
1686 * first tlb color read. If that happens to be the last thread switch
1687 * we emit, then everything is fine, but otherwsie, if any code after
1688 * this point needs to emit additional thread switches, then we will
1689 * switch the strategy to locking the scoreboard on the first thread
1690 * switch instead -- see vir_emit_thrsw().
1692 if (!c
->emitted_tlb_load
) {
1693 if (!c
->last_thrsw_at_top_level
) {
1694 assert(c
->devinfo
->ver
>= 41);
1698 c
->emitted_tlb_load
= true;
1701 struct qreg
*color_reads_for_sample
=
1702 &c
->color_reads
[(rt
* V3D_MAX_SAMPLES
+ sample_index
) * 4];
1704 if (color_reads_for_sample
[component
].file
== QFILE_NULL
) {
1705 enum pipe_format rt_format
= c
->fs_key
->color_fmt
[rt
].format
;
1706 int num_components
=
1707 util_format_get_nr_components(rt_format
);
1709 const bool swap_rb
= c
->fs_key
->swap_color_rb
& (1 << rt
);
1711 num_components
= MAX2(num_components
, 3);
1713 nir_variable
*var
= c
->output_color_var
[rt
];
1714 enum glsl_base_type type
= glsl_get_base_type(var
->type
);
1716 bool is_int_format
= type
== GLSL_TYPE_INT
||
1717 type
== GLSL_TYPE_UINT
;
1719 bool is_32b_tlb_format
= is_int_format
||
1720 (c
->fs_key
->f32_color_rb
& (1 << rt
));
1722 int num_samples
= c
->fs_key
->msaa
? V3D_MAX_SAMPLES
: 1;
1724 uint32_t conf
= 0xffffff00;
1725 conf
|= c
->fs_key
->msaa
? TLB_SAMPLE_MODE_PER_SAMPLE
:
1726 TLB_SAMPLE_MODE_PER_PIXEL
;
1727 conf
|= (7 - rt
) << TLB_RENDER_TARGET_SHIFT
;
1729 if (is_32b_tlb_format
) {
1730 /* The F32 vs I32 distinction was dropped in 4.2. */
1731 conf
|= (c
->devinfo
->ver
< 42 && is_int_format
) ?
1732 TLB_TYPE_I32_COLOR
: TLB_TYPE_F32_COLOR
;
1734 conf
|= ((num_components
- 1) <<
1735 TLB_VEC_SIZE_MINUS_1_SHIFT
);
1737 conf
|= TLB_TYPE_F16_COLOR
;
1738 conf
|= TLB_F16_SWAP_HI_LO
;
1740 if (num_components
>= 3)
1741 conf
|= TLB_VEC_SIZE_4_F16
;
1743 conf
|= TLB_VEC_SIZE_2_F16
;
1747 for (int i
= 0; i
< num_samples
; i
++) {
1748 struct qreg r
, g
, b
, a
;
1749 if (is_32b_tlb_format
) {
1750 r
= conf
!= 0xffffffff && i
== 0?
1751 vir_TLBU_COLOR_READ(c
, conf
) :
1752 vir_TLB_COLOR_READ(c
);
1753 if (num_components
>= 2)
1754 g
= vir_TLB_COLOR_READ(c
);
1755 if (num_components
>= 3)
1756 b
= vir_TLB_COLOR_READ(c
);
1757 if (num_components
>= 4)
1758 a
= vir_TLB_COLOR_READ(c
);
1760 struct qreg rg
= conf
!= 0xffffffff && i
== 0 ?
1761 vir_TLBU_COLOR_READ(c
, conf
) :
1762 vir_TLB_COLOR_READ(c
);
1763 r
= vir_FMOV(c
, rg
);
1764 vir_set_unpack(c
->defs
[r
.index
], 0,
1766 g
= vir_FMOV(c
, rg
);
1767 vir_set_unpack(c
->defs
[g
.index
], 0,
1770 if (num_components
> 2) {
1771 struct qreg ba
= vir_TLB_COLOR_READ(c
);
1772 b
= vir_FMOV(c
, ba
);
1773 vir_set_unpack(c
->defs
[b
.index
], 0,
1775 a
= vir_FMOV(c
, ba
);
1776 vir_set_unpack(c
->defs
[a
.index
], 0,
1781 struct qreg
*color_reads
=
1782 &c
->color_reads
[(rt
* V3D_MAX_SAMPLES
+ i
) * 4];
1784 color_reads
[0] = swap_rb
? b
: r
;
1785 if (num_components
>= 2)
1787 if (num_components
>= 3)
1788 color_reads
[2] = swap_rb
? r
: b
;
1789 if (num_components
>= 4)
1794 assert(color_reads_for_sample
[component
].file
!= QFILE_NULL
);
1795 ntq_store_dest(c
, &instr
->dest
, 0,
1796 vir_MOV(c
, color_reads_for_sample
[component
]));
1800 ntq_emit_load_uniform(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
1802 if (nir_src_is_const(instr
->src
[0])) {
1803 int offset
= (nir_intrinsic_base(instr
) +
1804 nir_src_as_uint(instr
->src
[0]));
1805 assert(offset
% 4 == 0);
1806 /* We need dwords */
1807 offset
= offset
/ 4;
1808 for (int i
= 0; i
< instr
->num_components
; i
++) {
1809 ntq_store_dest(c
, &instr
->dest
, i
,
1810 vir_uniform(c
, QUNIFORM_UNIFORM
,
1814 ntq_emit_tmu_general(c
, instr
, false);
1819 ntq_emit_load_input(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
1821 /* XXX: Use ldvpmv (uniform offset) or ldvpmd (non-uniform offset)
1822 * and enable PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR.
1825 nir_intrinsic_base(instr
) + nir_src_as_uint(instr
->src
[0]);
1827 if (c
->s
->info
.stage
!= MESA_SHADER_FRAGMENT
&& c
->devinfo
->ver
>= 40) {
1828 /* Emit the LDVPM directly now, rather than at the top
1829 * of the shader like we did for V3D 3.x (which needs
1830 * vpmsetup when not just taking the next offset).
1832 * Note that delaying like this may introduce stalls,
1833 * as LDVPMV takes a minimum of 1 instruction but may
1834 * be slower if the VPM unit is busy with another QPU.
1837 if (c
->s
->info
.system_values_read
&
1838 (1ull << SYSTEM_VALUE_INSTANCE_ID
)) {
1841 if (c
->s
->info
.system_values_read
&
1842 (1ull << SYSTEM_VALUE_VERTEX_ID
)) {
1845 for (int i
= 0; i
< offset
; i
++)
1846 index
+= c
->vattr_sizes
[i
];
1847 index
+= nir_intrinsic_component(instr
);
1848 for (int i
= 0; i
< instr
->num_components
; i
++) {
1849 struct qreg vpm_offset
= vir_uniform_ui(c
, index
++);
1850 ntq_store_dest(c
, &instr
->dest
, i
,
1851 vir_LDVPMV_IN(c
, vpm_offset
));
1854 for (int i
= 0; i
< instr
->num_components
; i
++) {
1855 int comp
= nir_intrinsic_component(instr
) + i
;
1856 ntq_store_dest(c
, &instr
->dest
, i
,
1857 vir_MOV(c
, c
->inputs
[offset
* 4 + comp
]));
1863 ntq_emit_per_sample_color_write(struct v3d_compile
*c
,
1864 nir_intrinsic_instr
*instr
)
1866 assert(instr
->intrinsic
== nir_intrinsic_store_tlb_sample_color_v3d
);
1868 unsigned rt
= nir_src_as_uint(instr
->src
[1]);
1869 assert(rt
< V3D_MAX_DRAW_BUFFERS
);
1871 unsigned sample_idx
= nir_intrinsic_base(instr
);
1872 assert(sample_idx
< V3D_MAX_SAMPLES
);
1874 unsigned offset
= (rt
* V3D_MAX_SAMPLES
+ sample_idx
) * 4;
1875 for (int i
= 0; i
< instr
->num_components
; i
++) {
1876 c
->sample_colors
[offset
+ i
] =
1877 vir_MOV(c
, ntq_get_src(c
, instr
->src
[0], i
));
1882 ntq_emit_color_write(struct v3d_compile
*c
,
1883 nir_intrinsic_instr
*instr
)
1885 unsigned offset
= (nir_intrinsic_base(instr
) +
1886 nir_src_as_uint(instr
->src
[1])) * 4 +
1887 nir_intrinsic_component(instr
);
1888 for (int i
= 0; i
< instr
->num_components
; i
++) {
1889 c
->outputs
[offset
+ i
] =
1890 vir_MOV(c
, ntq_get_src(c
, instr
->src
[0], i
));
1895 ntq_emit_intrinsic(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
1897 switch (instr
->intrinsic
) {
1898 case nir_intrinsic_load_uniform
:
1899 ntq_emit_load_uniform(c
, instr
);
1902 case nir_intrinsic_load_ubo
:
1903 ntq_emit_tmu_general(c
, instr
, false);
1906 case nir_intrinsic_ssbo_atomic_add
:
1907 case nir_intrinsic_ssbo_atomic_imin
:
1908 case nir_intrinsic_ssbo_atomic_umin
:
1909 case nir_intrinsic_ssbo_atomic_imax
:
1910 case nir_intrinsic_ssbo_atomic_umax
:
1911 case nir_intrinsic_ssbo_atomic_and
:
1912 case nir_intrinsic_ssbo_atomic_or
:
1913 case nir_intrinsic_ssbo_atomic_xor
:
1914 case nir_intrinsic_ssbo_atomic_exchange
:
1915 case nir_intrinsic_ssbo_atomic_comp_swap
:
1916 case nir_intrinsic_load_ssbo
:
1917 case nir_intrinsic_store_ssbo
:
1918 ntq_emit_tmu_general(c
, instr
, false);
1921 case nir_intrinsic_shared_atomic_add
:
1922 case nir_intrinsic_shared_atomic_imin
:
1923 case nir_intrinsic_shared_atomic_umin
:
1924 case nir_intrinsic_shared_atomic_imax
:
1925 case nir_intrinsic_shared_atomic_umax
:
1926 case nir_intrinsic_shared_atomic_and
:
1927 case nir_intrinsic_shared_atomic_or
:
1928 case nir_intrinsic_shared_atomic_xor
:
1929 case nir_intrinsic_shared_atomic_exchange
:
1930 case nir_intrinsic_shared_atomic_comp_swap
:
1931 case nir_intrinsic_load_shared
:
1932 case nir_intrinsic_store_shared
:
1933 case nir_intrinsic_load_scratch
:
1934 case nir_intrinsic_store_scratch
:
1935 ntq_emit_tmu_general(c
, instr
, true);
1938 case nir_intrinsic_image_deref_load
:
1939 case nir_intrinsic_image_deref_store
:
1940 case nir_intrinsic_image_deref_atomic_add
:
1941 case nir_intrinsic_image_deref_atomic_min
:
1942 case nir_intrinsic_image_deref_atomic_max
:
1943 case nir_intrinsic_image_deref_atomic_and
:
1944 case nir_intrinsic_image_deref_atomic_or
:
1945 case nir_intrinsic_image_deref_atomic_xor
:
1946 case nir_intrinsic_image_deref_atomic_exchange
:
1947 case nir_intrinsic_image_deref_atomic_comp_swap
:
1948 v3d40_vir_emit_image_load_store(c
, instr
);
1951 case nir_intrinsic_get_buffer_size
:
1952 ntq_store_dest(c
, &instr
->dest
, 0,
1953 vir_uniform(c
, QUNIFORM_GET_BUFFER_SIZE
,
1954 nir_src_as_uint(instr
->src
[0])));
1957 case nir_intrinsic_load_user_clip_plane
:
1958 for (int i
= 0; i
< instr
->num_components
; i
++) {
1959 ntq_store_dest(c
, &instr
->dest
, i
,
1960 vir_uniform(c
, QUNIFORM_USER_CLIP_PLANE
,
1961 nir_intrinsic_ucp_id(instr
) *
1966 case nir_intrinsic_load_viewport_x_scale
:
1967 ntq_store_dest(c
, &instr
->dest
, 0,
1968 vir_uniform(c
, QUNIFORM_VIEWPORT_X_SCALE
, 0));
1971 case nir_intrinsic_load_viewport_y_scale
:
1972 ntq_store_dest(c
, &instr
->dest
, 0,
1973 vir_uniform(c
, QUNIFORM_VIEWPORT_Y_SCALE
, 0));
1976 case nir_intrinsic_load_viewport_z_scale
:
1977 ntq_store_dest(c
, &instr
->dest
, 0,
1978 vir_uniform(c
, QUNIFORM_VIEWPORT_Z_SCALE
, 0));
1981 case nir_intrinsic_load_viewport_z_offset
:
1982 ntq_store_dest(c
, &instr
->dest
, 0,
1983 vir_uniform(c
, QUNIFORM_VIEWPORT_Z_OFFSET
, 0));
1986 case nir_intrinsic_load_alpha_ref_float
:
1987 ntq_store_dest(c
, &instr
->dest
, 0,
1988 vir_uniform(c
, QUNIFORM_ALPHA_REF
, 0));
1991 case nir_intrinsic_load_sample_mask_in
:
1992 ntq_store_dest(c
, &instr
->dest
, 0, vir_MSF(c
));
1995 case nir_intrinsic_load_helper_invocation
:
1996 vir_set_pf(vir_MSF_dest(c
, vir_nop_reg()), V3D_QPU_PF_PUSHZ
);
1997 ntq_store_dest(c
, &instr
->dest
, 0,
1998 vir_MOV(c
, vir_SEL(c
, V3D_QPU_COND_IFA
,
1999 vir_uniform_ui(c
, ~0),
2000 vir_uniform_ui(c
, 0))));
2003 case nir_intrinsic_load_front_face
:
2004 /* The register contains 0 (front) or 1 (back), and we need to
2005 * turn it into a NIR bool where true means front.
2007 ntq_store_dest(c
, &instr
->dest
, 0,
2009 vir_uniform_ui(c
, -1),
2013 case nir_intrinsic_load_instance_id
:
2014 ntq_store_dest(c
, &instr
->dest
, 0, vir_MOV(c
, c
->iid
));
2017 case nir_intrinsic_load_vertex_id
:
2018 ntq_store_dest(c
, &instr
->dest
, 0, vir_MOV(c
, c
->vid
));
2021 case nir_intrinsic_load_tlb_color_v3d
:
2022 vir_emit_tlb_color_read(c
, instr
);
2025 case nir_intrinsic_load_input
:
2026 ntq_emit_load_input(c
, instr
);
2029 case nir_intrinsic_store_tlb_sample_color_v3d
:
2030 ntq_emit_per_sample_color_write(c
, instr
);
2033 case nir_intrinsic_store_output
:
2034 /* XXX perf: Use stvpmv with uniform non-constant offsets and
2035 * stvpmd with non-uniform offsets and enable
2036 * PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR.
2038 if (c
->s
->info
.stage
== MESA_SHADER_FRAGMENT
) {
2039 ntq_emit_color_write(c
, instr
);
2041 assert(instr
->num_components
== 1);
2044 ntq_get_src(c
, instr
->src
[0], 0),
2045 nir_intrinsic_base(instr
));
2049 case nir_intrinsic_image_deref_size
:
2050 ntq_emit_image_size(c
, instr
);
2053 case nir_intrinsic_discard
:
2054 if (vir_in_nonuniform_control_flow(c
)) {
2055 vir_set_pf(vir_MOV_dest(c
, vir_nop_reg(), c
->execute
),
2057 vir_set_cond(vir_SETMSF_dest(c
, vir_nop_reg(),
2058 vir_uniform_ui(c
, 0)),
2061 vir_SETMSF_dest(c
, vir_nop_reg(),
2062 vir_uniform_ui(c
, 0));
2066 case nir_intrinsic_discard_if
: {
2067 enum v3d_qpu_cond cond
= ntq_emit_bool_to_cond(c
, instr
->src
[0]);
2069 if (vir_in_nonuniform_control_flow(c
)) {
2070 struct qinst
*exec_flag
= vir_MOV_dest(c
, vir_nop_reg(),
2072 if (cond
== V3D_QPU_COND_IFA
) {
2073 vir_set_uf(exec_flag
, V3D_QPU_UF_ANDZ
);
2075 vir_set_uf(exec_flag
, V3D_QPU_UF_NORNZ
);
2076 cond
= V3D_QPU_COND_IFA
;
2080 vir_set_cond(vir_SETMSF_dest(c
, vir_nop_reg(),
2081 vir_uniform_ui(c
, 0)), cond
);
2086 case nir_intrinsic_memory_barrier
:
2087 case nir_intrinsic_memory_barrier_atomic_counter
:
2088 case nir_intrinsic_memory_barrier_buffer
:
2089 case nir_intrinsic_memory_barrier_image
:
2090 case nir_intrinsic_memory_barrier_shared
:
2091 case nir_intrinsic_group_memory_barrier
:
2092 /* We don't do any instruction scheduling of these NIR
2093 * instructions between each other, so we just need to make
2094 * sure that the TMU operations before the barrier are flushed
2095 * before the ones after the barrier. That is currently
2096 * handled by having a THRSW in each of them and a LDTMU
2097 * series or a TMUWT after.
2101 case nir_intrinsic_barrier
:
2102 /* Emit a TSY op to get all invocations in the workgroup
2103 * (actually supergroup) to block until the last invocation
2104 * reaches the TSY op.
2106 if (c
->devinfo
->ver
>= 42) {
2107 vir_BARRIERID_dest(c
, vir_reg(QFILE_MAGIC
,
2108 V3D_QPU_WADDR_SYNCB
));
2110 struct qinst
*sync
=
2111 vir_BARRIERID_dest(c
,
2112 vir_reg(QFILE_MAGIC
,
2113 V3D_QPU_WADDR_SYNCU
));
2115 vir_get_uniform_index(c
, QUNIFORM_CONSTANT
,
2117 V3D_TSY_WAIT_INC_CHECK
);
2121 /* The blocking of a TSY op only happens at the next thread
2122 * switch. No texturing may be outstanding at the time of a
2123 * TSY blocking operation.
        case nir_intrinsic_load_num_work_groups:
                for (int i = 0; i < 3; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_NUM_WORK_GROUPS,
                                                   i));
                }
                break;
        case nir_intrinsic_load_local_invocation_index:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_SHR(c, c->cs_payload[1],
                                       vir_uniform_ui(c, 32 - c->local_invocation_index_bits)));
                break;
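
        /* Payload layout implied by the unpacking below: the workgroup ID is
         * packed as 16-bit fields, X in the low half of cs_payload[0], Y in
         * its high half, and Z in the low half of cs_payload[1], whose top
         * bits carry the local invocation index extracted above.
         */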
        case nir_intrinsic_load_work_group_id:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_AND(c, c->cs_payload[0],
                                       vir_uniform_ui(c, 0xffff)));
                ntq_store_dest(c, &instr->dest, 1,
                               vir_SHR(c, c->cs_payload[0],
                                       vir_uniform_ui(c, 16)));
                ntq_store_dest(c, &instr->dest, 2,
                               vir_AND(c, c->cs_payload[1],
                                       vir_uniform_ui(c, 0xffff)));
                break;
        case nir_intrinsic_load_subgroup_id:
                ntq_store_dest(c, &instr->dest, 0, vir_EIDX(c));
                break;
        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

/* Clears (activates) the execute flags for any channels whose jump target
 * matches this block.
 *
 * XXX perf: Could we be using flpush/flpop somehow for our execution channel
 * enabling/disabling?
 *
 * XXX perf: For uniform control flow, we should be able to skip c->execute
 * handling entirely.
 */
static void
ntq_activate_execute_for_block(struct v3d_compile *c)
{
        vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
                                c->execute, vir_uniform_ui(c, c->cur_block->index)),
                   V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
}

static void
ntq_emit_uniform_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        /* Set up the flags for the IF condition (taking the THEN branch). */
        enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);

        /* Jump to ELSE if the condition is false (it is uniform, so all
         * channels agree).
         */
        vir_BRANCH(c, cond == V3D_QPU_COND_IFA ?
                   V3D_QPU_BRANCH_COND_ALLNA :
                   V3D_QPU_BRANCH_COND_ALLA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* At the end of the THEN block, jump to ENDIF. */
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALWAYS);
                vir_link_blocks(c->cur_block, after_block);

                /* Emit the else block. */
                vir_set_emit_block(c, else_block);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
}
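
/* Roughly, the nonuniform lowering below works like this: channels that take
 * the ELSE branch retarget their c->execute slot at the ELSE block, we only
 * branch over the THEN block if no channel remains active for it, and each
 * emitted block re-activates its own channels via
 * ntq_activate_execute_for_block().
 */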
static void
ntq_emit_nonuniform_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        bool was_uniform_control_flow = false;
        if (!vir_in_nonuniform_control_flow(c)) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_uniform_control_flow = true;
        }

        /* Set up the flags for the IF condition (taking the THEN branch). */
        enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);

        /* Update the flags+cond to mean "Taking the ELSE branch (!cond) and
         * was previously active (execute Z)" for updating the exec flags.
         */
        if (was_uniform_control_flow) {
                cond = v3d_qpu_cond_invert(cond);
        } else {
                struct qinst *inst = vir_MOV_dest(c, vir_nop_reg(), c->execute);
                if (cond == V3D_QPU_COND_IFA) {
                        vir_set_uf(inst, V3D_QPU_UF_NORNZ);
                } else {
                        vir_set_uf(inst, V3D_QPU_UF_ANDZ);
                        cond = V3D_QPU_COND_IFA;
                }
        }

        vir_MOV_cond(c, cond,
                     c->execute,
                     vir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);
        vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block.  First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF.
                 */
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
                                        c->execute,
                                        vir_uniform_ui(c, after_block->index)),
                           V3D_QPU_PF_PUSHZ);
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
                vir_link_blocks(c->cur_block, after_block);
                vir_link_blocks(c->cur_block, else_block);

                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
        if (was_uniform_control_flow)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);
}

static void
ntq_emit_if(struct v3d_compile *c, nir_if *nif)
{
        bool was_in_control_flow = c->in_control_flow;
        c->in_control_flow = true;
        if (!vir_in_nonuniform_control_flow(c) &&
            nir_src_is_dynamically_uniform(nif->condition)) {
                ntq_emit_uniform_if(c, nif);
        } else {
                ntq_emit_nonuniform_if(c, nif);
        }
        c->in_control_flow = was_in_control_flow;
}
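
/* NIR break/continue don't branch anywhere directly here; they just retarget
 * the channel's c->execute slot at the loop's break or continue block, and
 * ntq_emit_loop() decides whether any channel still wants another iteration.
 */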
static void
ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
{
        switch (jump->type) {
        case nir_jump_break:
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_break_block->index));
                break;

        case nir_jump_continue:
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_cont_block->index));
                break;

        case nir_jump_return:
                unreachable("All returns should be lowered\n");
        }
}

static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_deref:
                /* ignored, will be walked by the intrinsic using it. */
                break;

        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                ntq_emit_jump(c, nir_instr_as_jump(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
ntq_emit_block(struct v3d_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}

static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
{
        bool was_in_control_flow = c->in_control_flow;
        c->in_control_flow = true;

        bool was_uniform_control_flow = false;
        if (!vir_in_nonuniform_control_flow(c)) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_uniform_control_flow = true;
        }

        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = vir_new_block(c);
        c->loop_break_block = vir_new_block(c);

        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* Re-enable any previous continues now, so our ANYA check below
         * won't block.
         *
         * XXX: Use the .ORZ flags update, instead.
         */
        vir_set_pf(vir_XOR_dest(c,
                                vir_nop_reg(),
                                c->execute,
                                vir_uniform_ui(c, c->loop_cont_block->index)),
                   V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));

        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);

        struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
        /* Pixels that were not dispatched or have been discarded should not
         * contribute to looping again.
         */
        branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_link_blocks(c->cur_block, c->loop_break_block);

        vir_set_emit_block(c, c->loop_break_block);
        if (was_uniform_control_flow)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;

        c->in_control_flow = was_in_control_flow;
}

static void
ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}

static void
ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}

static void
nir_to_vir(struct v3d_compile *c)
{
        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
                c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));

                /* V3D 4.x can disable implicit point coordinate varyings if
                 * they are not used.
                 */
                if (c->fs_key->is_points &&
                    (c->devinfo->ver < 40 || program_reads_point_coord(c))) {
                        c->point_x = emit_fragment_varying(c, NULL, 0, 0);
                        c->point_y = emit_fragment_varying(c, NULL, 0, 0);
                        c->uses_implicit_point_line_varyings = true;
                } else if (c->fs_key->is_lines && c->devinfo->ver < 40) {
                        c->line_x = emit_fragment_varying(c, NULL, 0, 0);
                        c->uses_implicit_point_line_varyings = true;
                }
                break;
        case MESA_SHADER_COMPUTE:
                /* Set up the TSO for barriers, assuming we do some. */
                if (c->devinfo->ver < 42) {
                        vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
                                                      V3D_QPU_WADDR_SYNC));
                }

                c->cs_payload[0] = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->cs_payload[1] = vir_MOV(c, vir_reg(QFILE_REG, 2));

                /* Set up the division between gl_LocalInvocationIndex and
                 * wg_in_mem in the payload reg.
                 */
                int wg_size = (c->s->info.cs.local_size[0] *
                               c->s->info.cs.local_size[1] *
                               c->s->info.cs.local_size[2]);
                c->local_invocation_index_bits =
                        ffs(util_next_power_of_two(MAX2(wg_size, 64))) - 1;
                assert(c->local_invocation_index_bits <= 8);
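
                /* For example, a 10x10x1 workgroup has wg_size == 100, which
                 * rounds up to 128, so 7 bits of the payload are reserved for
                 * the local invocation index (the minimum is 6 bits, since
                 * wg_size is clamped to at least 64 above).
                 */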

                if (c->s->info.cs.shared_size) {
                        struct qreg wg_in_mem = vir_SHR(c, c->cs_payload[1],
                                                        vir_uniform_ui(c, 16));
                        if (c->s->info.cs.local_size[0] != 1 ||
                            c->s->info.cs.local_size[1] != 1 ||
                            c->s->info.cs.local_size[2] != 1) {
                                int wg_bits = (16 -
                                               c->local_invocation_index_bits);
                                int wg_mask = (1 << wg_bits) - 1;
                                wg_in_mem = vir_AND(c, wg_in_mem,
                                                    vir_uniform_ui(c, wg_mask));
                        }
                        struct qreg shared_per_wg =
                                vir_uniform_ui(c, c->s->info.cs.shared_size);

                        c->cs_shared_offset =
                                vir_ADD(c,
                                        vir_uniform(c, QUNIFORM_SHARED_OFFSET, 0),
                                        vir_UMUL(c, wg_in_mem, shared_per_wg));
                }
                break;
        default:
                break;
        }

        if (c->s->scratch_size) {
                v3d_setup_spill_base(c);
                c->spill_size += V3D_CHANNELS * c->s->scratch_size;
        }

        if (c->s->info.stage == MESA_SHADER_FRAGMENT)
                ntq_setup_fs_inputs(c);
        else
                ntq_setup_vpm_inputs(c);

        ntq_setup_outputs(c);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}

const nir_shader_compiler_options v3d_nir_options = {
        .lower_all_io_to_temps = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_bitfield_insert_to_shifts = true,
        .lower_bitfield_extract_to_shifts = true,
        .lower_bitfield_reverse = true,
        .lower_bit_count = true,
        .lower_cs_local_id_from_index = true,
        .lower_ffract = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_unpack_snorm_4x8 = true,
        .lower_pack_half_2x16 = true,
        .lower_unpack_half_2x16 = true,
        .lower_find_lsb = true,
        .lower_flrp32 = true,
        .lower_fsqrt = true,
        .lower_ifind_msb = true,
        .lower_isign = true,
        .lower_ldexp = true,
        .lower_mul_high = true,
        .lower_wpos_pntc = true,
        .lower_rotate = true,
};

/**
 * When demoting a shader down to single-threaded, removes the THRSW
 * instructions (one will still be inserted at v3d_vir_to_qpu() for the
 * program end).
 */
static void
vir_remove_thrsw(struct v3d_compile *c)
{
        vir_for_each_block(block, c) {
                vir_for_each_inst_safe(inst, block) {
                        if (inst->qpu.sig.thrsw)
                                vir_remove_instruction(c, inst);
                }
        }

        c->last_thrsw = NULL;
}

static void
vir_emit_last_thrsw(struct v3d_compile *c)
{
        /* On V3D before 4.1, we need a TMU op to be outstanding when thread
         * switching, so disable threads if we didn't do any TMU ops (each of
         * which would have emitted a THRSW).
         */
        if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
                c->threads = 1;
                if (c->last_thrsw)
                        vir_remove_thrsw(c);
                return;
        }

        /* If we're threaded and the last THRSW was in conditional code, then
         * we need to emit another one so that we can flag it as the last
         * thrsw.
         */
        if (c->last_thrsw && !c->last_thrsw_at_top_level) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        /* If we're threaded, then we need to mark the last THRSW instruction
         * so we can emit a pair of them at QPU emit time.
         *
         * For V3D 4.x, we can spawn the non-fragment shaders already in the
         * post-last-THRSW state, so we can skip this.
         */
        if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        if (c->last_thrsw)
                c->last_thrsw->is_last_thrsw = true;
}

/* There's a flag in the shader for "center W is needed for reasons other than
 * non-centroid varyings", so we just walk the program after VIR optimization
 * to see if it's used.  It should be harmless to set even if we only use
 * center W for varyings.
 */
static void
vir_check_payload_w(struct v3d_compile *c)
{
        if (c->s->info.stage != MESA_SHADER_FRAGMENT)
                return;

        vir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_REG &&
                            inst->src[i].index == 0) {
                                c->uses_center_w = true;
                                return;
                        }
                }
        }
}

void
v3d_nir_to_vir(struct v3d_compile *c)
{
        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_vir(c);

        /* Emit the last THRSW before STVPM and TLB writes. */
        vir_emit_last_thrsw(c);

        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                emit_frag_end(c);
                break;
        case MESA_SHADER_VERTEX:
                emit_vert_end(c);
                break;
        case MESA_SHADER_COMPUTE:
                break;
        default:
                unreachable("bad stage");
        }

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        vir_optimize(c);

        vir_check_payload_w(c);

        /* XXX perf: On VC4, we do a VIR-level instruction scheduling here.
         * We used that on that platform to pipeline TMU writes and reduce the
         * number of thread switches, as well as try (mostly successfully) to
         * reduce maximum register pressure to allow more threads.  We should
         * do something of that sort for V3D -- either instruction scheduling
         * here, or delay the THRSW and LDTMUs from our texture instructions
         * until the results are needed.
         */

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        /* Attempt to allocate registers for the temporaries.  If we fail,
         * reduce thread count and try again.
         */
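        /* As a rough illustration (assuming the compiler's usual starting
         * point of four threads per QPU): if allocation fails at four
         * threads we retry at two, and only pre-4.1 hardware may drop all
         * the way down to a single thread, since min_threads below is 2 on
         * newer versions.
         */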
        int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
        struct qpu_reg *temp_registers;
        while (true) {
                bool spilled;
                temp_registers = v3d_register_allocate(c, &spilled);
                if (spilled)
                        continue;

                if (temp_registers)
                        break;

                if (c->threads == min_threads) {
                        fprintf(stderr, "Failed to register allocate at %d threads:\n",
                                c->threads);
                        vir_dump(c);
                        c->failed = true;
                        return;
                }

                c->threads /= 2;

                if (c->threads == 1)
                        vir_remove_thrsw(c);
        }

        if (c->spills &&
            (V3D_DEBUG & (V3D_DEBUG_VIR |
                          v3d_debug_flag_for_shader_stage(c->s->info.stage)))) {
                fprintf(stderr, "%s prog %d/%d spilled VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        v3d_vir_to_qpu(c, temp_registers);
}