/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/format/u_format.h"
#include "util/u_helpers.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"

#define GENERAL_TMU_LOOKUP_PER_QUAD      (0 << 7)
#define GENERAL_TMU_LOOKUP_PER_PIXEL     (1 << 7)
#define GENERAL_TMU_LOOKUP_TYPE_8BIT_I   (0 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_16BIT_I  (1 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_VEC2     (2 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_VEC3     (3 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_VEC4     (4 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_8BIT_UI  (5 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_16BIT_UI (6 << 0)
#define GENERAL_TMU_LOOKUP_TYPE_32BIT_UI (7 << 0)
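
/* Illustrative note (not part of the driver logic): ntq_emit_tmu_general()
 * below packs these fields into the "config" word that accompanies the TMU
 * address write, leaving the unused upper bytes as 0xff.  A regular per-pixel
 * four-component load, for example, is built roughly as
 *
 *   config = 0xffffff00 |
 *            (tmu_op << 3) |                  (V3D_TMU_OP_REGULAR here)
 *            GENERAL_TMU_LOOKUP_PER_PIXEL |
 *            (GENERAL_TMU_LOOKUP_TYPE_VEC2 + 4 - 2);
 *
 * A config of ~0 means "all defaults": the address is then written to TMUA
 * instead of TMUAU and no config uniform is emitted.
 */
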
#define V3D_TSY_SET_QUORUM          0
#define V3D_TSY_INC_WAITERS         1
#define V3D_TSY_DEC_WAITERS         2
#define V3D_TSY_INC_QUORUM          3
#define V3D_TSY_DEC_QUORUM          4
#define V3D_TSY_FREE_ALL            5
#define V3D_TSY_RELEASE             6
#define V3D_TSY_ACQUIRE             7
#define V3D_TSY_WAIT                8
#define V3D_TSY_WAIT_INC            9
#define V3D_TSY_WAIT_CHECK          10
#define V3D_TSY_WAIT_INC_CHECK      11
#define V3D_TSY_WAIT_CV             12
#define V3D_TSY_INC_SEMAPHORE       13
#define V3D_TSY_DEC_SEMAPHORE       14
#define V3D_TSY_SET_QUORUM_FREE_ALL 15

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
resize_qreg_array(struct v3d_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}

void
vir_emit_thrsw(struct v3d_compile *c)
{
        if (c->threads == 1)
                return;

        /* Always thread switch after each texture operation for now.
         *
         * We could do better by batching a bunch of texture fetches up and
         * then doing one thread switch and collecting all their results
         * afterward.
         */
        c->last_thrsw = vir_NOP(c);
        c->last_thrsw->qpu.sig.thrsw = true;
        c->last_thrsw_at_top_level = !c->in_control_flow;

        /* We need to lock the scoreboard before any tlb access happens. If this
         * thread switch comes after we have emitted a tlb load, then it means
         * that we can't lock on the last thread switch any more.
         */
        if (c->emitted_tlb_load)
                c->lock_scoreboard_on_first_thrsw = true;
}

static int
v3d_get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src)
{
        if (nir_src_is_const(instr->src[src])) {
                int64_t add_val = nir_src_as_int(instr->src[src]);
                if (add_val == 1)
                        return V3D_TMU_OP_WRITE_AND_READ_INC;
                else if (add_val == -1)
                        return V3D_TMU_OP_WRITE_OR_READ_DEC;
        }

        return V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
}

static uint32_t
v3d_general_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_ssbo:
        case nir_intrinsic_load_ubo:
        case nir_intrinsic_load_uniform:
        case nir_intrinsic_load_shared:
        case nir_intrinsic_load_scratch:
        case nir_intrinsic_store_ssbo:
        case nir_intrinsic_store_shared:
        case nir_intrinsic_store_scratch:
                return V3D_TMU_OP_REGULAR;
        case nir_intrinsic_ssbo_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 2);
        case nir_intrinsic_shared_atomic_add:
                return v3d_get_op_for_atomic_add(instr, 1);
        case nir_intrinsic_ssbo_atomic_imin:
        case nir_intrinsic_shared_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_ssbo_atomic_umin:
        case nir_intrinsic_shared_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_ssbo_atomic_imax:
        case nir_intrinsic_shared_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_ssbo_atomic_umax:
        case nir_intrinsic_shared_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_ssbo_atomic_and:
        case nir_intrinsic_shared_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_ssbo_atomic_or:
        case nir_intrinsic_shared_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_ssbo_atomic_xor:
        case nir_intrinsic_shared_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_ssbo_atomic_exchange:
        case nir_intrinsic_shared_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_ssbo_atomic_comp_swap:
        case nir_intrinsic_shared_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown intrinsic op");
        }
}

/**
 * Implements indirect uniform loads and SSBO accesses through the TMU general
 * memory access interface.
 */
static void
ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                     bool is_shared_or_scratch)
{
        uint32_t tmu_op = v3d_general_tmu_op(instr);

        /* If we were able to replace atomic_add for an inc/dec, then we
         * need/can do things slightly differently, like not loading the
         * amount to add/sub, as that is implicit.
         */
        bool atomic_add_replaced =
                ((instr->intrinsic == nir_intrinsic_ssbo_atomic_add ||
                  instr->intrinsic == nir_intrinsic_shared_atomic_add) &&
                 (tmu_op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                  tmu_op == V3D_TMU_OP_WRITE_OR_READ_DEC));

        bool is_store = (instr->intrinsic == nir_intrinsic_store_ssbo ||
                         instr->intrinsic == nir_intrinsic_store_scratch ||
                         instr->intrinsic == nir_intrinsic_store_shared);

        bool is_load = (instr->intrinsic == nir_intrinsic_load_uniform ||
                        instr->intrinsic == nir_intrinsic_load_ubo ||
                        instr->intrinsic == nir_intrinsic_load_ssbo ||
                        instr->intrinsic == nir_intrinsic_load_scratch ||
                        instr->intrinsic == nir_intrinsic_load_shared);

        if (!is_load)
                c->tmu_dirty_rcl = true;

        bool has_index = !is_shared_or_scratch;

        int offset_src;
        if (instr->intrinsic == nir_intrinsic_load_uniform) {
                offset_src = 0;
        } else if (instr->intrinsic == nir_intrinsic_load_ssbo ||
                   instr->intrinsic == nir_intrinsic_load_ubo ||
                   instr->intrinsic == nir_intrinsic_load_scratch ||
                   instr->intrinsic == nir_intrinsic_load_shared ||
                   atomic_add_replaced) {
                offset_src = 0 + has_index;
        } else if (is_store) {
                offset_src = 1 + has_index;
        } else {
                offset_src = 0 + has_index;
        }

        bool dynamic_src = !nir_src_is_const(instr->src[offset_src]);
        uint32_t const_offset = 0;
        if (!dynamic_src)
                const_offset = nir_src_as_uint(instr->src[offset_src]);

        struct qreg base_offset;
        if (instr->intrinsic == nir_intrinsic_load_uniform) {
                const_offset += nir_intrinsic_base(instr);
                base_offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
                                          v3d_unit_data_create(0, const_offset));
                const_offset = 0;
        } else if (instr->intrinsic == nir_intrinsic_load_ubo) {
                uint32_t index = nir_src_as_uint(instr->src[0]) + 1;
                /* Note that QUNIFORM_UBO_ADDR takes a UBO index shifted up by
                 * 1 (0 is gallium's constant buffer 0).
                 */
                base_offset =
                        vir_uniform(c, QUNIFORM_UBO_ADDR,
                                    v3d_unit_data_create(index, const_offset));
                const_offset = 0;
        } else if (is_shared_or_scratch) {
                /* Shared and scratch variables have no buffer index, and all
                 * start from a common base that we set up at the start of
                 * dispatch.
                 */
                if (instr->intrinsic == nir_intrinsic_load_scratch ||
                    instr->intrinsic == nir_intrinsic_store_scratch) {
                        base_offset = c->spill_base;
                } else {
                        base_offset = c->cs_shared_offset;
                        const_offset += nir_intrinsic_base(instr);
                }
        } else {
                base_offset = vir_uniform(c, QUNIFORM_SSBO_OFFSET,
                                          nir_src_as_uint(instr->src[is_store ?
                                                                      1 : 0]));
        }

        struct qreg tmud = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD);
        unsigned writemask = is_store ? nir_intrinsic_write_mask(instr) : 0;
        uint32_t base_const_offset = const_offset;
        int first_component = -1;
        int last_component = -1;
        do {
                int tmu_writes = 1; /* address */

                if (is_store) {
                        /* Find the first set of consecutive components that
                         * are enabled in the writemask and emit the TMUD
                         * instructions for them.
                         */
                        first_component = ffs(writemask) - 1;
                        last_component = first_component;
                        while (writemask & BITFIELD_BIT(last_component + 1))
                                last_component++;

                        assert(first_component >= 0 &&
                               first_component <= last_component &&
                               last_component < instr->num_components);

                        struct qreg tmud = vir_reg(QFILE_MAGIC,
                                                   V3D_QPU_WADDR_TMUD);
                        for (int i = first_component; i <= last_component; i++) {
                                struct qreg data =
                                        ntq_get_src(c, instr->src[0], i);
                                vir_MOV_dest(c, tmud, data);
                                tmu_writes++;
                        }

                        /* Update the offset for the TMU write based on the
                         * first component we are writing.
                         */
                        const_offset = base_const_offset + first_component * 4;

                        /* Clear these components from the writemask */
                        uint32_t written_mask =
                                BITFIELD_RANGE(first_component, tmu_writes - 1);
                        writemask &= ~written_mask;
                } else if (!is_load && !atomic_add_replaced) {
                        struct qreg data =
                                ntq_get_src(c, instr->src[1 + has_index], 0);
                        vir_MOV_dest(c, tmud, data);
                        tmu_writes++;
                        if (tmu_op == V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH) {
                                data = ntq_get_src(c, instr->src[2 + has_index],
                                                   0);
                                vir_MOV_dest(c, tmud, data);
                                tmu_writes++;
                        }
                }

                /* Make sure we won't exceed the 16-entry TMU fifo if each
                 * thread is storing at the same time.
                 */
                while (tmu_writes > 16 / c->threads)
                        c->threads /= 2;

                /* The spec says that for atomics, the TYPE field is ignored,
                 * but that doesn't seem to be the case for CMPXCHG.  Just use
                 * the number of tmud writes we did to decide the type (or
                 * choose "32bit" for atomic reads, which has been fine).
                 */
                uint32_t num_components;
                if (is_load || atomic_add_replaced) {
                        num_components = instr->num_components;
                } else {
                        assert(tmu_writes > 1);
                        num_components = tmu_writes - 1;
                }

                uint32_t perquad = is_load
                   ? GENERAL_TMU_LOOKUP_PER_QUAD
                   : GENERAL_TMU_LOOKUP_PER_PIXEL;
                uint32_t config = (0xffffff00 |
                                   tmu_op << 3 |
                                   perquad);
                if (num_components == 1) {
                        config |= GENERAL_TMU_LOOKUP_TYPE_32BIT_UI;
                } else {
                        config |= GENERAL_TMU_LOOKUP_TYPE_VEC2 +
                                  num_components - 2;
                }

                if (vir_in_nonuniform_control_flow(c)) {
                        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                                   V3D_QPU_PF_PUSHZ);
                }

                struct qreg tmua;
                if (config == ~0)
                        tmua = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA);
                else
                        tmua = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUAU);

                struct qinst *tmu;
                if (dynamic_src) {
                        struct qreg offset = base_offset;
                        if (const_offset != 0) {
                                offset = vir_ADD(c, offset,
                                                 vir_uniform_ui(c, const_offset));
                        }
                        struct qreg data =
                                ntq_get_src(c, instr->src[offset_src], 0);
                        tmu = vir_ADD_dest(c, tmua, offset, data);
                } else {
                        if (const_offset != 0) {
                                tmu = vir_ADD_dest(c, tmua, base_offset,
                                                   vir_uniform_ui(c, const_offset));
                        } else {
                                tmu = vir_MOV_dest(c, tmua, base_offset);
                        }
                }

                if (config != ~0) {
                        tmu->uniform =
                                vir_get_uniform_index(c, QUNIFORM_CONSTANT,
                                                      config);
                }

                if (vir_in_nonuniform_control_flow(c))
                        vir_set_cond(tmu, V3D_QPU_COND_IFA);

                vir_emit_thrsw(c);

                /* Read the result, or wait for the TMU op to complete. */
                for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_MOV(c, vir_LDTMU(c)));
                }

                if (nir_intrinsic_dest_components(instr) == 0)
                        vir_TMUWT(c);
        } while (is_store && writemask != 0);
}

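/* Worked example of the store loop above (illustrative only): a store with
 * writemask 0b1011 runs the do/while twice.  The first pass finds the
 * consecutive components 0..1, emits two TMUD data writes plus the address
 * write, and clears bits 0-1 from the writemask; the second pass handles
 * component 3 on its own, with const_offset advanced by 3 * 4 bytes so the
 * data lands at the right address.
 */
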
static struct qreg *
ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}

static bool
is_ld_signal(const struct v3d_qpu_sig *sig)
{
        return (sig->ldunif ||
                sig->ldunifa ||
                sig->ldunifrf ||
                sig->ldunifarf ||
                sig->ldtmu ||
                sig->ldvary ||
                sig->ldvpm ||
                sig->ldtlb ||
                sig->ldtlbu);
}

/**
 * This function is responsible for getting VIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value.  To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous VIR instruction in the block and rewritable by
 * this function.  That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination.
 */
void
ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        struct qinst *last_inst = NULL;
        if (!list_is_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        assert(result.file == QFILE_UNIF ||
               (result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* If the previous instruction can't be predicated for
                 * the store into the nir_register, then emit a MOV
                 * that can be.
                 */
                if (vir_in_nonuniform_control_flow(c) &&
                    is_ld_signal(&c->defs[last_inst->dst.index]->qpu.sig)) {
                        result = vir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (vir_in_nonuniform_control_flow(c)) {
                        last_inst->dst.index = qregs[chan].index;

                        /* Set the flags to the current exec mask.
                         */
                        c->cursor = vir_before_inst(last_inst);
                        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                                   V3D_QPU_PF_PUSHZ);
                        c->cursor = vir_after_inst(last_inst);

                        vir_set_cond(last_inst, V3D_QPU_COND_IFA);
                }
        }
}

struct qreg
ntq_get_src(struct v3d_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

static struct qreg
ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static struct qreg
ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
{
        return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
}
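
/* For example, minifying a 16-texel dimension by LOD 3 gives
 * MAX2(16 >> 3, 1) = 2, and LOD 5 clamps to 1, which is the behavior
 * ntq_emit_txs() below relies on for the non-array size components.
 */
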
static void
ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
        int dest_size = nir_tex_instr_dest_size(instr);

        struct qreg lod = c->undef;
        if (lod_index != -1)
                lod = ntq_get_src(c, instr->src[lod_index].src, 0);

        for (int i = 0; i < dest_size; i++) {
                enum quniform_contents contents;

                if (instr->is_array && i == dest_size - 1)
                        contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
                else
                        contents = QUNIFORM_TEXTURE_WIDTH + i;

                struct qreg size = vir_uniform(c, contents, unit);

                switch (instr->sampler_dim) {
                case GLSL_SAMPLER_DIM_1D:
                case GLSL_SAMPLER_DIM_2D:
                case GLSL_SAMPLER_DIM_MS:
                case GLSL_SAMPLER_DIM_3D:
                case GLSL_SAMPLER_DIM_CUBE:
                        /* Don't minify the array size. */
                        if (!(instr->is_array && i == dest_size - 1)) {
                                size = ntq_minify(c, size, lod);
                        }
                        break;

                case GLSL_SAMPLER_DIM_RECT:
                        /* There's no LOD field for rects */
                        break;

                default:
                        unreachable("Bad sampler type");
                }

                ntq_store_dest(c, &instr->dest, i, size);
        }
}

static void
ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        /* Since each texture sampling op requires uploading uniforms to
         * reference the texture, there's no HW support for texture size and
         * you just upload uniforms containing the size.
         */
        switch (instr->op) {
        case nir_texop_query_levels:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
                return;
        case nir_texop_txs:
                ntq_emit_txs(c, instr);
                return;
        default:
                break;
        }

        if (c->devinfo->ver >= 40)
                v3d40_vir_emit_tex(c, instr);
        else
                v3d33_vir_emit_tex(c, instr);
}

static struct qreg
ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
{
        struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
        if (is_cos)
                input = vir_FADD(c, input, vir_uniform_f(c, 0.5));

        struct qreg periods = vir_FROUND(c, input);
        struct qreg sin_output = vir_SIN(c, vir_FSUB(c, input, periods));
        return vir_XOR(c, sin_output, vir_SHL(c,
                                              vir_FTOIN(c, periods),
                                              vir_uniform_ui(c, -1)));
}
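
/* Range-reduction sketch for the above (illustrative): the SFU SIN input is
 * expressed in units of pi, so src is scaled by 1/pi (with an extra 0.5 added
 * for cosine), the nearest whole period is rounded off, and the result's sign
 * is flipped when the period count is odd by shifting that count into the
 * sign bit and XORing it in.  E.g. for sin(2.5 * pi): input = 2.5,
 * periods = 2 (even), the SFU evaluates sin(0.5 * pi) = 1.0 and the sign is
 * left alone.
 */
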
static struct qreg
ntq_fsign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
        vir_set_pf(vir_FMOV_dest(c, vir_nop_reg(), src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
        vir_set_pf(vir_FMOV_dest(c, vir_nop_reg(), src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
        return vir_MOV(c, t);
}

static void
emit_fragcoord_input(struct v3d_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = vir_FXCD(c);
        c->inputs[attr * 4 + 1] = vir_FYCD(c);
        c->inputs[attr * 4 + 2] = c->payload_z;
        c->inputs[attr * 4 + 3] = vir_RECIP(c, c->payload_w);
}

static struct qreg
emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
                      uint8_t swizzle, int array_index)
{
        struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
        struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);

        struct qreg vary;
        if (c->devinfo->ver >= 41) {
                struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                                    c->undef, c->undef);
                ldvary->qpu.sig.ldvary = true;
                vary = vir_emit_def(c, ldvary);
        } else {
                vir_NOP(c)->qpu.sig.ldvary = true;
                vary = r3;
        }

        /* For gl_PointCoord input or distance along a line, we'll be called
         * with no nir_variable, and we don't count toward VPM size so we
         * don't track an input slot.
         */
        if (!var) {
                return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
        }

        int i = c->num_inputs++;
        c->input_slots[i] =
                v3d_slot_from_slot_and_component(var->data.location +
                                                 array_index, swizzle);

        switch (var->data.interpolation) {
        case INTERP_MODE_NONE:
                /* If a gl_FrontColor or gl_BackColor input has no interp
                 * qualifier, then if we're using glShadeModel(GL_FLAT) it
                 * needs to be flat shaded.
                 */
                switch (var->data.location + array_index) {
                case VARYING_SLOT_COL0:
                case VARYING_SLOT_COL1:
                case VARYING_SLOT_BFC0:
                case VARYING_SLOT_BFC1:
                        if (c->fs_key->shade_model_flat) {
                                BITSET_SET(c->flat_shade_flags, i);
                                vir_MOV_dest(c, c->undef, vary);
                                return vir_MOV(c, r5);
                        } else {
                                return vir_FADD(c, vir_FMUL(c, vary,
                                                            c->payload_w), r5);
                        }
                default:
                        break;
                }
                /* FALLTHROUGH */
        case INTERP_MODE_SMOOTH:
                if (var->data.centroid) {
                        BITSET_SET(c->centroid_flags, i);
                        return vir_FADD(c, vir_FMUL(c, vary,
                                                    c->payload_w_centroid), r5);
                } else {
                        return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
                }
        case INTERP_MODE_NOPERSPECTIVE:
                BITSET_SET(c->noperspective_flags, i);
                return vir_FADD(c, vir_MOV(c, vary), r5);
        case INTERP_MODE_FLAT:
                BITSET_SET(c->flat_shade_flags, i);
                vir_MOV_dest(c, c->undef, vary);
                return vir_MOV(c, r5);
        default:
                unreachable("Bad interp mode");
        }
}

static void
emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var,
                    int array_index)
{
        for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                int chan = var->data.location_frac + i;
                c->inputs[attr * 4 + chan] =
                        emit_fragment_varying(c, var, chan, array_index);
        }
}

static void
emit_compact_fragment_input(struct v3d_compile *c, int attr, nir_variable *var,
                            int array_index)
{
        /* Compact variables are scalar arrays where each set of 4 elements
         * consumes a single location.
         */
        int loc_offset = array_index / 4;
        int chan = var->data.location_frac + array_index % 4;
        c->inputs[(attr + loc_offset) * 4 + chan] =
                emit_fragment_varying(c, var, chan, loc_offset);
}

static void
add_output(struct v3d_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct v3d_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset] =
                v3d_slot_from_slot_and_component(slot, swizzle);
}

/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct v3d_compile *c,
                    nir_alu_instr *compare_instr,
                    enum v3d_qpu_cond *out_cond)
{
        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1;
        if (nir_op_infos[compare_instr->op].num_inputs > 1)
                src1 = ntq_get_alu_src(c, compare_instr, 1);
        bool cond_invert = false;
        struct qreg nop = vir_nop_reg();

        switch (compare_instr->op) {
        case nir_op_feq32:
        case nir_op_seq:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                break;
        case nir_op_ieq32:
                vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                break;

        case nir_op_fneu32:
        case nir_op_sne:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;
        case nir_op_ine32:
                vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_fge32:
        case nir_op_sge:
                vir_set_pf(vir_FCMP_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ige32:
                vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;
        case nir_op_uge32:
                vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;

        case nir_op_slt:
        case nir_op_flt32:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHN);
                break;
        case nir_op_ilt32:
                vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ult32:
                vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
                break;

        case nir_op_i2b32:
                vir_set_pf(vir_MOV_dest(c, nop, src0), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_f2b32:
                vir_set_pf(vir_FMOV_dest(c, nop, src0), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        default:
                return false;
        }

        *out_cond = cond_invert ? V3D_QPU_COND_IFNA : V3D_QPU_COND_IFA;

        return true;
}

/* Finds an ALU instruction that generates our src value that could
 * (potentially) be greedily emitted in the consuming instruction.
 */
static struct nir_alu_instr *
ntq_get_alu_parent(nir_src src)
{
        if (!src.is_ssa || src.ssa->parent_instr->type != nir_instr_type_alu)
                return NULL;
        nir_alu_instr *instr = nir_instr_as_alu(src.ssa->parent_instr);
        if (!instr)
                return NULL;

        /* If the ALU instr's srcs are non-SSA, then we would have to avoid
         * moving emission of the ALU instr down past another write of the
         * src.
         */
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                if (!instr->src[i].src.is_ssa)
                        return NULL;
        }

        return instr;
}

/* Turns a NIR bool into a condition code to predicate on. */
static enum v3d_qpu_cond
ntq_emit_bool_to_cond(struct v3d_compile *c, nir_src src)
{
        nir_alu_instr *compare = ntq_get_alu_parent(src);
        if (!compare)
                goto out;

        enum v3d_qpu_cond cond;
        if (ntq_emit_comparison(c, compare, &cond))
                return cond;

out:
        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), ntq_get_src(c, src, 0)),
                   V3D_QPU_PF_PUSHZ);
        return V3D_QPU_COND_IFNA;
}
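
/* Example of the payoff (hypothetical NIR, for illustration):
 *
 *   vec1 32 ssa_2 = flt32 ssa_0, ssa_1
 *   vec1 32 ssa_3 = b32csel ssa_2, ssa_a, ssa_b
 *
 * ntq_emit_bool_to_cond() re-emits the flt32 as a flags-writing comparison
 * and hands back IFA/IFNA, so the select can predicate directly on the flags
 * instead of first materializing ssa_2 as a 0/~0 value in a register.
 */
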
899 ntq_emit_alu(struct v3d_compile
*c
, nir_alu_instr
*instr
)
901 /* This should always be lowered to ALU operations for V3D. */
902 assert(!instr
->dest
.saturate
);
904 /* Vectors are special in that they have non-scalarized writemasks,
905 * and just take the first swizzle channel for each argument in order
906 * into each writemask channel.
908 if (instr
->op
== nir_op_vec2
||
909 instr
->op
== nir_op_vec3
||
910 instr
->op
== nir_op_vec4
) {
912 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
913 srcs
[i
] = ntq_get_src(c
, instr
->src
[i
].src
,
914 instr
->src
[i
].swizzle
[0]);
915 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
916 ntq_store_dest(c
, &instr
->dest
.dest
, i
,
917 vir_MOV(c
, srcs
[i
]));
921 /* General case: We can just grab the one used channel per src. */
922 struct qreg src
[nir_op_infos
[instr
->op
].num_inputs
];
923 for (int i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++) {
924 src
[i
] = ntq_get_alu_src(c
, instr
, i
);
931 result
= vir_MOV(c
, src
[0]);
935 result
= vir_XOR(c
, src
[0], vir_uniform_ui(c
, 1 << 31));
938 result
= vir_NEG(c
, src
[0]);
942 result
= vir_FMUL(c
, src
[0], src
[1]);
945 result
= vir_FADD(c
, src
[0], src
[1]);
948 result
= vir_FSUB(c
, src
[0], src
[1]);
951 result
= vir_FMIN(c
, src
[0], src
[1]);
954 result
= vir_FMAX(c
, src
[0], src
[1]);
958 nir_alu_instr
*src0_alu
= ntq_get_alu_parent(instr
->src
[0].src
);
959 if (src0_alu
&& src0_alu
->op
== nir_op_fround_even
) {
960 result
= vir_FTOIN(c
, ntq_get_alu_src(c
, src0_alu
, 0));
962 result
= vir_FTOIZ(c
, src
[0]);
968 result
= vir_FTOUZ(c
, src
[0]);
971 result
= vir_ITOF(c
, src
[0]);
974 result
= vir_UTOF(c
, src
[0]);
977 result
= vir_AND(c
, src
[0], vir_uniform_f(c
, 1.0));
980 result
= vir_AND(c
, src
[0], vir_uniform_ui(c
, 1));
984 result
= vir_ADD(c
, src
[0], src
[1]);
987 result
= vir_SHR(c
, src
[0], src
[1]);
990 result
= vir_SUB(c
, src
[0], src
[1]);
993 result
= vir_ASR(c
, src
[0], src
[1]);
996 result
= vir_SHL(c
, src
[0], src
[1]);
999 result
= vir_MIN(c
, src
[0], src
[1]);
1002 result
= vir_UMIN(c
, src
[0], src
[1]);
1005 result
= vir_MAX(c
, src
[0], src
[1]);
1008 result
= vir_UMAX(c
, src
[0], src
[1]);
1011 result
= vir_AND(c
, src
[0], src
[1]);
1014 result
= vir_OR(c
, src
[0], src
[1]);
1017 result
= vir_XOR(c
, src
[0], src
[1]);
1020 result
= vir_NOT(c
, src
[0]);
1023 case nir_op_ufind_msb
:
1024 result
= vir_SUB(c
, vir_uniform_ui(c
, 31), vir_CLZ(c
, src
[0]));
1028 result
= vir_UMUL(c
, src
[0], src
[1]);
1035 enum v3d_qpu_cond cond
;
1036 ASSERTED
bool ok
= ntq_emit_comparison(c
, instr
, &cond
);
1038 result
= vir_MOV(c
, vir_SEL(c
, cond
,
1039 vir_uniform_f(c
, 1.0),
1040 vir_uniform_f(c
, 0.0)));
1055 case nir_op_ult32
: {
1056 enum v3d_qpu_cond cond
;
1057 ASSERTED
bool ok
= ntq_emit_comparison(c
, instr
, &cond
);
1059 result
= vir_MOV(c
, vir_SEL(c
, cond
,
1060 vir_uniform_ui(c
, ~0),
1061 vir_uniform_ui(c
, 0)));
1065 case nir_op_b32csel
:
1068 ntq_emit_bool_to_cond(c
, instr
->src
[0].src
),
1073 vir_set_pf(vir_MOV_dest(c
, vir_nop_reg(), src
[0]),
1075 result
= vir_MOV(c
, vir_SEL(c
, V3D_QPU_COND_IFNA
,
1080 result
= vir_RECIP(c
, src
[0]);
1083 result
= vir_RSQRT(c
, src
[0]);
1086 result
= vir_EXP(c
, src
[0]);
1089 result
= vir_LOG(c
, src
[0]);
1093 result
= vir_FCEIL(c
, src
[0]);
1096 result
= vir_FFLOOR(c
, src
[0]);
1098 case nir_op_fround_even
:
1099 result
= vir_FROUND(c
, src
[0]);
1102 result
= vir_FTRUNC(c
, src
[0]);
1106 result
= ntq_fsincos(c
, src
[0], false);
1109 result
= ntq_fsincos(c
, src
[0], true);
1113 result
= ntq_fsign(c
, src
[0]);
1117 result
= vir_FMOV(c
, src
[0]);
1118 vir_set_unpack(c
->defs
[result
.index
], 0, V3D_QPU_UNPACK_ABS
);
1123 result
= vir_MAX(c
, src
[0], vir_NEG(c
, src
[0]));
1127 case nir_op_fddx_coarse
:
1128 case nir_op_fddx_fine
:
1129 result
= vir_FDX(c
, src
[0]);
1133 case nir_op_fddy_coarse
:
1134 case nir_op_fddy_fine
:
1135 result
= vir_FDY(c
, src
[0]);
1138 case nir_op_uadd_carry
:
1139 vir_set_pf(vir_ADD_dest(c
, vir_nop_reg(), src
[0], src
[1]),
1141 result
= vir_MOV(c
, vir_SEL(c
, V3D_QPU_COND_IFA
,
1142 vir_uniform_ui(c
, ~0),
1143 vir_uniform_ui(c
, 0)));
1146 case nir_op_pack_half_2x16_split
:
1147 result
= vir_VFPACK(c
, src
[0], src
[1]);
1150 case nir_op_unpack_half_2x16_split_x
:
1151 result
= vir_FMOV(c
, src
[0]);
1152 vir_set_unpack(c
->defs
[result
.index
], 0, V3D_QPU_UNPACK_L
);
1155 case nir_op_unpack_half_2x16_split_y
:
1156 result
= vir_FMOV(c
, src
[0]);
1157 vir_set_unpack(c
->defs
[result
.index
], 0, V3D_QPU_UNPACK_H
);
1161 fprintf(stderr
, "unknown NIR ALU inst: ");
1162 nir_print_instr(&instr
->instr
, stderr
);
1163 fprintf(stderr
, "\n");
1167 /* We have a scalar result, so the instruction should only have a
1168 * single channel written to.
1170 assert(util_is_power_of_two_or_zero(instr
->dest
.write_mask
));
1171 ntq_store_dest(c
, &instr
->dest
.dest
,
1172 ffs(instr
->dest
.write_mask
) - 1, result
);
/* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
 * specifier.  They come from a register that's preloaded with 0xffffffff
 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
 */
#define TLB_TYPE_F16_COLOR           (3 << 6)
#define TLB_TYPE_I32_COLOR           (1 << 6)
#define TLB_TYPE_F32_COLOR           (0 << 6)
#define TLB_RENDER_TARGET_SHIFT      3 /* Reversed!  7 = RT 0, 0 = RT 7. */
#define TLB_SAMPLE_MODE_PER_SAMPLE   (0 << 2)
#define TLB_SAMPLE_MODE_PER_PIXEL    (1 << 2)
#define TLB_F16_SWAP_HI_LO           (1 << 1)
#define TLB_VEC_SIZE_4_F16           (1 << 0)
#define TLB_VEC_SIZE_2_F16           (0 << 0)
#define TLB_VEC_SIZE_MINUS_1_SHIFT   0

/* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
 * flag is set.
 */
#define TLB_TYPE_DEPTH               ((2 << 6) | (0 << 4))
#define TLB_DEPTH_TYPE_INVARIANT     (0 << 2) /* Unmodified sideband input used */
#define TLB_DEPTH_TYPE_PER_PIXEL     (1 << 2) /* QPU result used */
#define TLB_V42_DEPTH_TYPE_INVARIANT (0 << 3) /* Unmodified sideband input used */
#define TLB_V42_DEPTH_TYPE_PER_PIXEL (1 << 3) /* QPU result used */

/* Stencil is a single 32-bit write. */
#define TLB_TYPE_STENCIL_ALPHA       ((2 << 6) | (1 << 4))
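
/* Illustrative example built from the defines above (not extra hardware
 * state): a per-pixel 32-bit-float color write to render target 1 would use
 * the specifier
 *
 *   TLB_TYPE_F32_COLOR | ((7 - 1) << TLB_RENDER_TARGET_SHIFT) |
 *   TLB_SAMPLE_MODE_PER_PIXEL | ((4 - 1) << TLB_VEC_SIZE_MINUS_1_SHIFT)
 *
 * which vir_emit_tlb_color_write() below ORs into a 0xffffff00 word, so only
 * the first TLB write needs the TLBU variant that carries the config uniform.
 */
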
1204 vir_emit_tlb_color_write(struct v3d_compile
*c
, unsigned rt
)
1206 if (!(c
->fs_key
->cbufs
& (1 << rt
)) || !c
->output_color_var
[rt
])
1209 struct qreg tlb_reg
= vir_magic_reg(V3D_QPU_WADDR_TLB
);
1210 struct qreg tlbu_reg
= vir_magic_reg(V3D_QPU_WADDR_TLBU
);
1212 nir_variable
*var
= c
->output_color_var
[rt
];
1213 int num_components
= glsl_get_vector_elements(var
->type
);
1214 uint32_t conf
= 0xffffff00;
1217 conf
|= c
->msaa_per_sample_output
? TLB_SAMPLE_MODE_PER_SAMPLE
:
1218 TLB_SAMPLE_MODE_PER_PIXEL
;
1219 conf
|= (7 - rt
) << TLB_RENDER_TARGET_SHIFT
;
1221 if (c
->fs_key
->swap_color_rb
& (1 << rt
))
1222 num_components
= MAX2(num_components
, 3);
1223 assert(num_components
!= 0);
1225 enum glsl_base_type type
= glsl_get_base_type(var
->type
);
1226 bool is_int_format
= type
== GLSL_TYPE_INT
|| type
== GLSL_TYPE_UINT
;
1227 bool is_32b_tlb_format
= is_int_format
||
1228 (c
->fs_key
->f32_color_rb
& (1 << rt
));
1230 if (is_int_format
) {
1231 /* The F32 vs I32 distinction was dropped in 4.2. */
1232 if (c
->devinfo
->ver
< 42)
1233 conf
|= TLB_TYPE_I32_COLOR
;
1235 conf
|= TLB_TYPE_F32_COLOR
;
1236 conf
|= ((num_components
- 1) << TLB_VEC_SIZE_MINUS_1_SHIFT
);
1238 if (c
->fs_key
->f32_color_rb
& (1 << rt
)) {
1239 conf
|= TLB_TYPE_F32_COLOR
;
1240 conf
|= ((num_components
- 1) <<
1241 TLB_VEC_SIZE_MINUS_1_SHIFT
);
1243 conf
|= TLB_TYPE_F16_COLOR
;
1244 conf
|= TLB_F16_SWAP_HI_LO
;
1245 if (num_components
>= 3)
1246 conf
|= TLB_VEC_SIZE_4_F16
;
1248 conf
|= TLB_VEC_SIZE_2_F16
;
1252 int num_samples
= c
->msaa_per_sample_output
? V3D_MAX_SAMPLES
: 1;
1253 for (int i
= 0; i
< num_samples
; i
++) {
1254 struct qreg
*color
= c
->msaa_per_sample_output
?
1255 &c
->sample_colors
[(rt
* V3D_MAX_SAMPLES
+ i
) * 4] :
1256 &c
->outputs
[var
->data
.driver_location
* 4];
1258 struct qreg r
= color
[0];
1259 struct qreg g
= color
[1];
1260 struct qreg b
= color
[2];
1261 struct qreg a
= color
[3];
1263 if (c
->fs_key
->swap_color_rb
& (1 << rt
)) {
1268 if (c
->fs_key
->sample_alpha_to_one
)
1269 a
= vir_uniform_f(c
, 1.0);
1271 if (is_32b_tlb_format
) {
1273 inst
= vir_MOV_dest(c
, tlbu_reg
, r
);
1275 vir_get_uniform_index(c
,
1279 inst
= vir_MOV_dest(c
, tlb_reg
, r
);
1282 if (num_components
>= 2)
1283 vir_MOV_dest(c
, tlb_reg
, g
);
1284 if (num_components
>= 3)
1285 vir_MOV_dest(c
, tlb_reg
, b
);
1286 if (num_components
>= 4)
1287 vir_MOV_dest(c
, tlb_reg
, a
);
1289 inst
= vir_VFPACK_dest(c
, tlb_reg
, r
, g
);
1290 if (conf
!= ~0 && i
== 0) {
1291 inst
->dst
= tlbu_reg
;
1293 vir_get_uniform_index(c
,
1298 if (num_components
>= 3)
1299 inst
= vir_VFPACK_dest(c
, tlb_reg
, b
, a
);
1305 emit_frag_end(struct v3d_compile
*c
)
1308 if (c->output_sample_mask_index != -1) {
1309 vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1313 bool has_any_tlb_color_write
= false;
1314 for (int rt
= 0; rt
< V3D_MAX_DRAW_BUFFERS
; rt
++) {
1315 if (c
->fs_key
->cbufs
& (1 << rt
) && c
->output_color_var
[rt
])
1316 has_any_tlb_color_write
= true;
1319 if (c
->fs_key
->sample_alpha_to_coverage
&& c
->output_color_var
[0]) {
1320 struct nir_variable
*var
= c
->output_color_var
[0];
1321 struct qreg
*color
= &c
->outputs
[var
->data
.driver_location
* 4];
1323 vir_SETMSF_dest(c
, vir_nop_reg(),
1326 vir_FTOC(c
, color
[3])));
1329 struct qreg tlbu_reg
= vir_magic_reg(V3D_QPU_WADDR_TLBU
);
1330 if (c
->output_position_index
!= -1) {
1331 struct qinst
*inst
= vir_MOV_dest(c
, tlbu_reg
,
1332 c
->outputs
[c
->output_position_index
]);
1333 uint8_t tlb_specifier
= TLB_TYPE_DEPTH
;
1335 if (c
->devinfo
->ver
>= 42) {
1336 tlb_specifier
|= (TLB_V42_DEPTH_TYPE_PER_PIXEL
|
1337 TLB_SAMPLE_MODE_PER_PIXEL
);
1339 tlb_specifier
|= TLB_DEPTH_TYPE_PER_PIXEL
;
1341 inst
->uniform
= vir_get_uniform_index(c
, QUNIFORM_CONSTANT
,
1345 } else if (c
->s
->info
.fs
.uses_discard
||
1346 !c
->s
->info
.fs
.early_fragment_tests
||
1347 c
->fs_key
->sample_alpha_to_coverage
||
1348 !has_any_tlb_color_write
) {
1349 /* Emit passthrough Z if it needed to be delayed until shader
1350 * end due to potential discards.
1352 * Since (single-threaded) fragment shaders always need a TLB
1353 * write, emit passthrouh Z if we didn't have any color
1354 * buffers and flag us as potentially discarding, so that we
1355 * can use Z as the TLB write.
1357 c
->s
->info
.fs
.uses_discard
= true;
1359 struct qinst
*inst
= vir_MOV_dest(c
, tlbu_reg
,
1361 uint8_t tlb_specifier
= TLB_TYPE_DEPTH
;
1363 if (c
->devinfo
->ver
>= 42) {
1364 /* The spec says the PER_PIXEL flag is ignored for
1365 * invariant writes, but the simulator demands it.
1367 tlb_specifier
|= (TLB_V42_DEPTH_TYPE_INVARIANT
|
1368 TLB_SAMPLE_MODE_PER_PIXEL
);
1370 tlb_specifier
|= TLB_DEPTH_TYPE_INVARIANT
;
1373 inst
->uniform
= vir_get_uniform_index(c
,
1380 /* XXX: Performance improvement: Merge Z write and color writes TLB
1383 for (int rt
= 0; rt
< V3D_MAX_DRAW_BUFFERS
; rt
++)
1384 vir_emit_tlb_color_write(c
, rt
);
static void
vir_VPM_WRITE_indirect(struct v3d_compile *c,
                       struct qreg val,
                       struct qreg vpm_index)
{
        assert(c->devinfo->ver >= 40);
        vir_STVPMV(c, vpm_index, val);
}

static void
vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t vpm_index)
{
        if (c->devinfo->ver >= 40) {
                vir_VPM_WRITE_indirect(c, val, vir_uniform_ui(c, vpm_index));
        } else {
                /* XXX: v3d33_vir_vpm_write_setup(c); */
                vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
        }
}

static void
emit_vert_end(struct v3d_compile *c)
{
        /* GFXH-1684: VPM writes need to be complete by the end of the shader.
         */
        if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
                vir_VPMWT(c);
}

static void
emit_geom_end(struct v3d_compile *c)
{
        /* GFXH-1684: VPM writes need to be complete by the end of the shader.
         */
        if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
                vir_VPMWT(c);
}

void
v3d_optimize_nir(struct nir_shader *s)
{
        bool progress;
        unsigned lower_flrp =
                (s->options->lower_flrp16 ? 16 : 0) |
                (s->options->lower_flrp32 ? 32 : 0) |
                (s->options->lower_flrp64 ? 64 : 0);

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;

                        NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, s, nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);

        NIR_PASS(progress, s, nir_opt_move, nir_move_load_ubo);
}

static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        if ((*a)->data.driver_location == (*b)->data.driver_location)
                return (*a)->data.location_frac - (*b)->data.location_frac;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}

static struct qreg
ntq_emit_vpm_read(struct v3d_compile *c,
                  uint32_t *num_components_queued,
                  uint32_t *remaining,
                  uint32_t vpm_index)
{
        struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);

        if (c->devinfo->ver >= 40) {
                return vir_LDVPMV_IN(c,
                                     vir_uniform_ui(c,
                                                    (*num_components_queued)++));
        }

        if (*num_components_queued != 0) {
                (*num_components_queued)--;
                return vir_MOV(c, vpm);
        }

        uint32_t num_components = MIN2(*remaining, 32);

        v3d33_vir_vpm_read_setup(c, num_components);

        *num_components_queued = num_components - 1;
        *remaining -= num_components;

        return vir_MOV(c, vpm);
}

1514 ntq_setup_vs_inputs(struct v3d_compile
*c
)
1516 /* Figure out how many components of each vertex attribute the shader
1517 * uses. Each variable should have been split to individual
1518 * components and unused ones DCEed. The vertex fetcher will load
1519 * from the start of the attribute to the number of components we
1520 * declare we need in c->vattr_sizes[].
1522 nir_foreach_shader_in_variable(var
, c
->s
) {
1523 /* No VS attribute array support. */
1524 assert(MAX2(glsl_get_length(var
->type
), 1) == 1);
1526 unsigned loc
= var
->data
.driver_location
;
1527 int start_component
= var
->data
.location_frac
;
1528 int num_components
= glsl_get_components(var
->type
);
1530 c
->vattr_sizes
[loc
] = MAX2(c
->vattr_sizes
[loc
],
1531 start_component
+ num_components
);
1534 unsigned num_components
= 0;
1535 uint32_t vpm_components_queued
= 0;
1536 bool uses_iid
= c
->s
->info
.system_values_read
&
1537 (1ull << SYSTEM_VALUE_INSTANCE_ID
);
1538 bool uses_vid
= c
->s
->info
.system_values_read
&
1539 (1ull << SYSTEM_VALUE_VERTEX_ID
);
1540 num_components
+= uses_iid
;
1541 num_components
+= uses_vid
;
1543 for (int i
= 0; i
< ARRAY_SIZE(c
->vattr_sizes
); i
++)
1544 num_components
+= c
->vattr_sizes
[i
];
1547 c
->iid
= ntq_emit_vpm_read(c
, &vpm_components_queued
,
1548 &num_components
, ~0);
1552 c
->vid
= ntq_emit_vpm_read(c
, &vpm_components_queued
,
1553 &num_components
, ~0);
1556 /* The actual loads will happen directly in nir_intrinsic_load_input
1557 * on newer versions.
1559 if (c
->devinfo
->ver
>= 40)
1562 for (int loc
= 0; loc
< ARRAY_SIZE(c
->vattr_sizes
); loc
++) {
1563 resize_qreg_array(c
, &c
->inputs
, &c
->inputs_array_size
,
1566 for (int i
= 0; i
< c
->vattr_sizes
[loc
]; i
++) {
1567 c
->inputs
[loc
* 4 + i
] =
1568 ntq_emit_vpm_read(c
,
1569 &vpm_components_queued
,
1576 if (c
->devinfo
->ver
>= 40) {
1577 assert(vpm_components_queued
== num_components
);
1579 assert(vpm_components_queued
== 0);
1580 assert(num_components
== 0);
1585 program_reads_point_coord(struct v3d_compile
*c
)
1587 nir_foreach_shader_in_variable(var
, c
->s
) {
1588 if (util_varying_is_point_coord(var
->data
.location
,
1589 c
->fs_key
->point_sprite_mask
)) {
1598 get_sorted_input_variables(struct v3d_compile
*c
,
1599 unsigned *num_entries
,
1600 nir_variable
***vars
)
1603 nir_foreach_shader_in_variable(var
, c
->s
)
1606 *vars
= ralloc_array(c
, nir_variable
*, *num_entries
);
1609 nir_foreach_shader_in_variable(var
, c
->s
)
1612 /* Sort the variables so that we emit the input setup in
1613 * driver_location order. This is required for VPM reads, whose data
1614 * is fetched into the VPM in driver_location (TGSI register index)
1617 qsort(*vars
, *num_entries
, sizeof(**vars
), driver_location_compare
);
1621 ntq_setup_gs_inputs(struct v3d_compile
*c
)
1623 nir_variable
**vars
;
1624 unsigned num_entries
;
1625 get_sorted_input_variables(c
, &num_entries
, &vars
);
1627 for (unsigned i
= 0; i
< num_entries
; i
++) {
1628 nir_variable
*var
= vars
[i
];
1630 /* All GS inputs are arrays with as many entries as vertices
1631 * in the input primitive, but here we only care about the
1632 * per-vertex input type.
1634 const struct glsl_type
*type
= glsl_without_array(var
->type
);
1635 unsigned array_len
= MAX2(glsl_get_length(type
), 1);
1636 unsigned loc
= var
->data
.driver_location
;
1638 resize_qreg_array(c
, &c
->inputs
, &c
->inputs_array_size
,
1639 (loc
+ array_len
) * 4);
1641 for (unsigned j
= 0; j
< array_len
; j
++) {
1642 unsigned num_elements
= glsl_get_vector_elements(type
);
1643 for (unsigned k
= 0; k
< num_elements
; k
++) {
1644 unsigned chan
= var
->data
.location_frac
+ k
;
1645 unsigned input_idx
= c
->num_inputs
++;
1646 struct v3d_varying_slot slot
=
1647 v3d_slot_from_slot_and_component(var
->data
.location
+ j
, chan
);
1648 c
->input_slots
[input_idx
] = slot
;
1656 ntq_setup_fs_inputs(struct v3d_compile
*c
)
1658 nir_variable
**vars
;
1659 unsigned num_entries
;
1660 get_sorted_input_variables(c
, &num_entries
, &vars
);
1662 for (unsigned i
= 0; i
< num_entries
; i
++) {
1663 nir_variable
*var
= vars
[i
];
1664 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1665 unsigned loc
= var
->data
.driver_location
;
1667 resize_qreg_array(c
, &c
->inputs
, &c
->inputs_array_size
,
1668 (loc
+ array_len
) * 4);
1670 if (var
->data
.location
== VARYING_SLOT_POS
) {
1671 emit_fragcoord_input(c
, loc
);
1672 } else if (util_varying_is_point_coord(var
->data
.location
,
1673 c
->fs_key
->point_sprite_mask
)) {
1674 c
->inputs
[loc
* 4 + 0] = c
->point_x
;
1675 c
->inputs
[loc
* 4 + 1] = c
->point_y
;
1676 } else if (var
->data
.compact
) {
1677 for (int j
= 0; j
< array_len
; j
++)
1678 emit_compact_fragment_input(c
, loc
, var
, j
);
1680 for (int j
= 0; j
< array_len
; j
++)
1681 emit_fragment_input(c
, loc
+ j
, var
, j
);
1687 ntq_setup_outputs(struct v3d_compile
*c
)
1689 if (c
->s
->info
.stage
!= MESA_SHADER_FRAGMENT
)
1692 nir_foreach_shader_out_variable(var
, c
->s
) {
1693 unsigned array_len
= MAX2(glsl_get_length(var
->type
), 1);
1694 unsigned loc
= var
->data
.driver_location
* 4;
1696 assert(array_len
== 1);
1699 for (int i
= 0; i
< 4 - var
->data
.location_frac
; i
++) {
1700 add_output(c
, loc
+ var
->data
.location_frac
+ i
,
1702 var
->data
.location_frac
+ i
);
1705 switch (var
->data
.location
) {
1706 case FRAG_RESULT_COLOR
:
1707 c
->output_color_var
[0] = var
;
1708 c
->output_color_var
[1] = var
;
1709 c
->output_color_var
[2] = var
;
1710 c
->output_color_var
[3] = var
;
1712 case FRAG_RESULT_DATA0
:
1713 case FRAG_RESULT_DATA1
:
1714 case FRAG_RESULT_DATA2
:
1715 case FRAG_RESULT_DATA3
:
1716 c
->output_color_var
[var
->data
.location
-
1717 FRAG_RESULT_DATA0
] = var
;
1719 case FRAG_RESULT_DEPTH
:
1720 c
->output_position_index
= loc
;
1722 case FRAG_RESULT_SAMPLE_MASK
:
1723 c
->output_sample_mask_index
= loc
;
/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = vir_get_temp(c);
        }
}

static void
ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
{
        /* XXX perf: Experiment with using immediate loads to avoid having
         * these end up in the uniform stream.  Watch out for breaking the
         * small immediates optimization in the process!
         */
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, instr->value[i].u32);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}

static void
ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* VIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers().
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, 0);
}

1777 ntq_emit_image_size(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
1779 unsigned image_index
= nir_src_as_uint(instr
->src
[0]);
1780 bool is_array
= nir_intrinsic_image_array(instr
);
1782 assert(nir_src_as_uint(instr
->src
[1]) == 0);
1784 ntq_store_dest(c
, &instr
->dest
, 0,
1785 vir_uniform(c
, QUNIFORM_IMAGE_WIDTH
, image_index
));
1786 if (instr
->num_components
> 1) {
1787 ntq_store_dest(c
, &instr
->dest
, 1,
1789 instr
->num_components
== 2 && is_array
?
1790 QUNIFORM_IMAGE_ARRAY_SIZE
:
1791 QUNIFORM_IMAGE_HEIGHT
,
1794 if (instr
->num_components
> 2) {
1795 ntq_store_dest(c
, &instr
->dest
, 2,
1798 QUNIFORM_IMAGE_ARRAY_SIZE
:
1799 QUNIFORM_IMAGE_DEPTH
,
1805 vir_emit_tlb_color_read(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
1807 assert(c
->s
->info
.stage
== MESA_SHADER_FRAGMENT
);
1809 int rt
= nir_src_as_uint(instr
->src
[0]);
1810 assert(rt
< V3D_MAX_DRAW_BUFFERS
);
1812 int sample_index
= nir_intrinsic_base(instr
) ;
1813 assert(sample_index
< V3D_MAX_SAMPLES
);
1815 int component
= nir_intrinsic_component(instr
);
1816 assert(component
< 4);
1818 /* We need to emit our TLB reads after we have acquired the scoreboard
1819 * lock, or the GPU will hang. Usually, we do our scoreboard locking on
1820 * the last thread switch to improve parallelism, however, that is only
1821 * guaranteed to happen before the tlb color writes.
1823 * To fix that, we make sure we always emit a thread switch before the
1824 * first tlb color read. If that happens to be the last thread switch
1825 * we emit, then everything is fine, but otherwsie, if any code after
1826 * this point needs to emit additional thread switches, then we will
1827 * switch the strategy to locking the scoreboard on the first thread
1828 * switch instead -- see vir_emit_thrsw().
1830 if (!c
->emitted_tlb_load
) {
1831 if (!c
->last_thrsw_at_top_level
) {
1832 assert(c
->devinfo
->ver
>= 41);
1836 c
->emitted_tlb_load
= true;
1839 struct qreg
*color_reads_for_sample
=
1840 &c
->color_reads
[(rt
* V3D_MAX_SAMPLES
+ sample_index
) * 4];
1842 if (color_reads_for_sample
[component
].file
== QFILE_NULL
) {
1843 enum pipe_format rt_format
= c
->fs_key
->color_fmt
[rt
].format
;
1844 int num_components
=
1845 util_format_get_nr_components(rt_format
);
1847 const bool swap_rb
= c
->fs_key
->swap_color_rb
& (1 << rt
);
1849 num_components
= MAX2(num_components
, 3);
1851 nir_variable
*var
= c
->output_color_var
[rt
];
1852 enum glsl_base_type type
= glsl_get_base_type(var
->type
);
1854 bool is_int_format
= type
== GLSL_TYPE_INT
||
1855 type
== GLSL_TYPE_UINT
;
1857 bool is_32b_tlb_format
= is_int_format
||
1858 (c
->fs_key
->f32_color_rb
& (1 << rt
));
1860 int num_samples
= c
->fs_key
->msaa
? V3D_MAX_SAMPLES
: 1;
1862 uint32_t conf
= 0xffffff00;
1863 conf
|= c
->fs_key
->msaa
? TLB_SAMPLE_MODE_PER_SAMPLE
:
1864 TLB_SAMPLE_MODE_PER_PIXEL
;
1865 conf
|= (7 - rt
) << TLB_RENDER_TARGET_SHIFT
;
1867 if (is_32b_tlb_format
) {
1868 /* The F32 vs I32 distinction was dropped in 4.2. */
1869 conf
|= (c
->devinfo
->ver
< 42 && is_int_format
) ?
1870 TLB_TYPE_I32_COLOR
: TLB_TYPE_F32_COLOR
;
1872 conf
|= ((num_components
- 1) <<
1873 TLB_VEC_SIZE_MINUS_1_SHIFT
);
1875 conf
|= TLB_TYPE_F16_COLOR
;
1876 conf
|= TLB_F16_SWAP_HI_LO
;
1878 if (num_components
>= 3)
1879 conf
|= TLB_VEC_SIZE_4_F16
;
1881 conf
|= TLB_VEC_SIZE_2_F16
;
1885 for (int i
= 0; i
< num_samples
; i
++) {
1886 struct qreg r
, g
, b
, a
;
1887 if (is_32b_tlb_format
) {
1888 r
= conf
!= 0xffffffff && i
== 0?
1889 vir_TLBU_COLOR_READ(c
, conf
) :
1890 vir_TLB_COLOR_READ(c
);
1891 if (num_components
>= 2)
1892 g
= vir_TLB_COLOR_READ(c
);
1893 if (num_components
>= 3)
1894 b
= vir_TLB_COLOR_READ(c
);
1895 if (num_components
>= 4)
1896 a
= vir_TLB_COLOR_READ(c
);
1898 struct qreg rg
= conf
!= 0xffffffff && i
== 0 ?
1899 vir_TLBU_COLOR_READ(c
, conf
) :
1900 vir_TLB_COLOR_READ(c
);
1901 r
= vir_FMOV(c
, rg
);
1902 vir_set_unpack(c
->defs
[r
.index
], 0,
1904 g
= vir_FMOV(c
, rg
);
1905 vir_set_unpack(c
->defs
[g
.index
], 0,
1908 if (num_components
> 2) {
1909 struct qreg ba
= vir_TLB_COLOR_READ(c
);
1910 b
= vir_FMOV(c
, ba
);
1911 vir_set_unpack(c
->defs
[b
.index
], 0,
1913 a
= vir_FMOV(c
, ba
);
1914 vir_set_unpack(c
->defs
[a
.index
], 0,
1919 struct qreg
*color_reads
=
1920 &c
->color_reads
[(rt
* V3D_MAX_SAMPLES
+ i
) * 4];
1922 color_reads
[0] = swap_rb
? b
: r
;
1923 if (num_components
>= 2)
1925 if (num_components
>= 3)
1926 color_reads
[2] = swap_rb
? r
: b
;
1927 if (num_components
>= 4)
1932 assert(color_reads_for_sample
[component
].file
!= QFILE_NULL
);
1933 ntq_store_dest(c
, &instr
->dest
, 0,
1934 vir_MOV(c
, color_reads_for_sample
[component
]));
1938 ntq_emit_load_uniform(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
1940 if (nir_src_is_const(instr
->src
[0])) {
1941 int offset
= (nir_intrinsic_base(instr
) +
1942 nir_src_as_uint(instr
->src
[0]));
1943 assert(offset
% 4 == 0);
1944 /* We need dwords */
1945 offset
= offset
/ 4;
1946 for (int i
= 0; i
< instr
->num_components
; i
++) {
1947 ntq_store_dest(c
, &instr
->dest
, i
,
1948 vir_uniform(c
, QUNIFORM_UNIFORM
,
1952 ntq_emit_tmu_general(c
, instr
, false);
1957 ntq_emit_load_input(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
1959 /* XXX: Use ldvpmv (uniform offset) or ldvpmd (non-uniform offset)
1960 * and enable PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR.
1963 nir_intrinsic_base(instr
) + nir_src_as_uint(instr
->src
[0]);
1965 if (c
->s
->info
.stage
!= MESA_SHADER_FRAGMENT
&& c
->devinfo
->ver
>= 40) {
1966 /* Emit the LDVPM directly now, rather than at the top
1967 * of the shader like we did for V3D 3.x (which needs
1968 * vpmsetup when not just taking the next offset).
1970 * Note that delaying like this may introduce stalls,
1971 * as LDVPMV takes a minimum of 1 instruction but may
1972 * be slower if the VPM unit is busy with another QPU.
1975 if (c
->s
->info
.system_values_read
&
1976 (1ull << SYSTEM_VALUE_INSTANCE_ID
)) {
1979 if (c
->s
->info
.system_values_read
&
1980 (1ull << SYSTEM_VALUE_VERTEX_ID
)) {
1983 for (int i
= 0; i
< offset
; i
++)
1984 index
+= c
->vattr_sizes
[i
];
1985 index
+= nir_intrinsic_component(instr
);
1986 for (int i
= 0; i
< instr
->num_components
; i
++) {
1987 struct qreg vpm_offset
= vir_uniform_ui(c
, index
++);
1988 ntq_store_dest(c
, &instr
->dest
, i
,
1989 vir_LDVPMV_IN(c
, vpm_offset
));
1992 for (int i
= 0; i
< instr
->num_components
; i
++) {
1993 int comp
= nir_intrinsic_component(instr
) + i
;
1994 ntq_store_dest(c
, &instr
->dest
, i
,
1995 vir_MOV(c
, c
->inputs
[offset
* 4 + comp
]));
2001 ntq_emit_per_sample_color_write(struct v3d_compile
*c
,
2002 nir_intrinsic_instr
*instr
)
2004 assert(instr
->intrinsic
== nir_intrinsic_store_tlb_sample_color_v3d
);
2006 unsigned rt
= nir_src_as_uint(instr
->src
[1]);
2007 assert(rt
< V3D_MAX_DRAW_BUFFERS
);
2009 unsigned sample_idx
= nir_intrinsic_base(instr
);
2010 assert(sample_idx
< V3D_MAX_SAMPLES
);
2012 unsigned offset
= (rt
* V3D_MAX_SAMPLES
+ sample_idx
) * 4;
2013 for (int i
= 0; i
< instr
->num_components
; i
++) {
2014 c
->sample_colors
[offset
+ i
] =
2015 vir_MOV(c
, ntq_get_src(c
, instr
->src
[0], i
));
2020 ntq_emit_color_write(struct v3d_compile
*c
,
2021 nir_intrinsic_instr
*instr
)
2023 unsigned offset
= (nir_intrinsic_base(instr
) +
2024 nir_src_as_uint(instr
->src
[1])) * 4 +
2025 nir_intrinsic_component(instr
);
2026 for (int i
= 0; i
< instr
->num_components
; i
++) {
2027 c
->outputs
[offset
+ i
] =
2028 vir_MOV(c
, ntq_get_src(c
, instr
->src
[0], i
));
2033 emit_store_output_gs(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
2035 assert(instr
->num_components
== 1);
2037 struct qreg offset
= ntq_get_src(c
, instr
->src
[1], 0);
2039 uint32_t base_offset
= nir_intrinsic_base(instr
);
2042 offset
= vir_ADD(c
, vir_uniform_ui(c
, base_offset
), offset
);
2044 /* Usually, for VS or FS, we only emit outputs once at program end so
2045 * our VPM writes are never in non-uniform control flow, but this
2046 * is not true for GS, where we are emitting multiple vertices.
2048 if (vir_in_nonuniform_control_flow(c
)) {
2049 vir_set_pf(vir_MOV_dest(c
, vir_nop_reg(), c
->execute
),
2053 struct qreg val
= ntq_get_src(c
, instr
->src
[0], 0);
2055 /* The offset isn’t necessarily dynamically uniform for a geometry
2056 * shader. This can happen if the shader sometimes doesn’t emit one of
2057 * the vertices. In that case subsequent vertices will be written to
2058 * different offsets in the VPM and we need to use the scatter write
2059 * instruction to have a different offset for each lane.
2061 if (nir_src_is_dynamically_uniform(instr
->src
[1]))
2062 vir_VPM_WRITE_indirect(c
, val
, offset
);
2064 vir_STVPMD(c
, offset
, val
);
2066 if (vir_in_nonuniform_control_flow(c
)) {
2067 struct qinst
*last_inst
=
2068 (struct qinst
*)c
->cur_block
->instructions
.prev
;
2069 vir_set_cond(last_inst
, V3D_QPU_COND_IFA
);
2074 ntq_emit_store_output(struct v3d_compile
*c
, nir_intrinsic_instr
*instr
)
2076 /* XXX perf: Use stvpmv with uniform non-constant offsets and
2077 * stvpmd with non-uniform offsets and enable
2078 * PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR.
2080 if (c
->s
->info
.stage
== MESA_SHADER_FRAGMENT
) {
2081 ntq_emit_color_write(c
, instr
);
2082 } else if (c
->s
->info
.stage
== MESA_SHADER_GEOMETRY
) {
2083 emit_store_output_gs(c
, instr
);
2085 assert(c
->s
->info
.stage
== MESA_SHADER_VERTEX
);
2086 assert(instr
->num_components
== 1);
2089 ntq_get_src(c
, instr
->src
[0], 0),
2090 nir_intrinsic_base(instr
));
static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                ntq_emit_load_uniform(c, instr);
                break;

        case nir_intrinsic_load_ubo:
                ntq_emit_tmu_general(c, instr, false);
                break;

        case nir_intrinsic_ssbo_atomic_add:
        case nir_intrinsic_ssbo_atomic_imin:
        case nir_intrinsic_ssbo_atomic_umin:
        case nir_intrinsic_ssbo_atomic_imax:
        case nir_intrinsic_ssbo_atomic_umax:
        case nir_intrinsic_ssbo_atomic_and:
        case nir_intrinsic_ssbo_atomic_or:
        case nir_intrinsic_ssbo_atomic_xor:
        case nir_intrinsic_ssbo_atomic_exchange:
        case nir_intrinsic_ssbo_atomic_comp_swap:
        case nir_intrinsic_load_ssbo:
        case nir_intrinsic_store_ssbo:
                ntq_emit_tmu_general(c, instr, false);
                break;

        case nir_intrinsic_shared_atomic_add:
        case nir_intrinsic_shared_atomic_imin:
        case nir_intrinsic_shared_atomic_umin:
        case nir_intrinsic_shared_atomic_imax:
        case nir_intrinsic_shared_atomic_umax:
        case nir_intrinsic_shared_atomic_and:
        case nir_intrinsic_shared_atomic_or:
        case nir_intrinsic_shared_atomic_xor:
        case nir_intrinsic_shared_atomic_exchange:
        case nir_intrinsic_shared_atomic_comp_swap:
        case nir_intrinsic_load_shared:
        case nir_intrinsic_store_shared:
        case nir_intrinsic_load_scratch:
        case nir_intrinsic_store_scratch:
                ntq_emit_tmu_general(c, instr, true);
                break;

        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
        case nir_intrinsic_image_atomic_add:
        case nir_intrinsic_image_atomic_imin:
        case nir_intrinsic_image_atomic_umin:
        case nir_intrinsic_image_atomic_imax:
        case nir_intrinsic_image_atomic_umax:
        case nir_intrinsic_image_atomic_and:
        case nir_intrinsic_image_atomic_or:
        case nir_intrinsic_image_atomic_xor:
        case nir_intrinsic_image_atomic_exchange:
        case nir_intrinsic_image_atomic_comp_swap:
                v3d40_vir_emit_image_load_store(c, instr);
                break;

        case nir_intrinsic_get_buffer_size:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_GET_BUFFER_SIZE,
                                           nir_src_as_uint(instr->src[0])));
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                                   nir_intrinsic_ucp_id(instr) *
                                                   4 + i));
                }
                break;

        case nir_intrinsic_load_viewport_x_scale:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE, 0));
                break;

        case nir_intrinsic_load_viewport_y_scale:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_Y_SCALE, 0));
                break;

        case nir_intrinsic_load_viewport_z_scale:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0));
                break;

        case nir_intrinsic_load_viewport_z_offset:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0));
                break;

        case nir_intrinsic_load_alpha_ref_float:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
                break;

        case nir_intrinsic_load_line_coord:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->line_x));
                break;

        case nir_intrinsic_load_line_width:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_LINE_WIDTH, 0));
                break;

        case nir_intrinsic_load_aa_line_width:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_AA_LINE_WIDTH, 0));
                break;

        case nir_intrinsic_load_sample_mask_in:
                ntq_store_dest(c, &instr->dest, 0, vir_MSF(c));
                break;

        case nir_intrinsic_load_helper_invocation:
                vir_set_pf(vir_MSF_dest(c, vir_nop_reg()), V3D_QPU_PF_PUSHZ);
                ntq_store_dest(c, &instr->dest, 0,
                               vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
                                                  vir_uniform_ui(c, ~0),
                                                  vir_uniform_ui(c, 0))));
                break;

        case nir_intrinsic_load_front_face:
                /* The register contains 0 (front) or 1 (back), and we need to
                 * turn it into a NIR bool where true means front.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               vir_ADD(c,
                                       vir_uniform_ui(c, -1),
                                       vir_REVF(c)));
                break;

        case nir_intrinsic_load_instance_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
                break;

        case nir_intrinsic_load_vertex_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
                break;

        case nir_intrinsic_load_tlb_color_v3d:
                vir_emit_tlb_color_read(c, instr);
                break;

        case nir_intrinsic_load_input:
                ntq_emit_load_input(c, instr);
                break;

        case nir_intrinsic_store_tlb_sample_color_v3d:
                ntq_emit_per_sample_color_write(c, instr);
                break;

        case nir_intrinsic_store_output:
                ntq_emit_store_output(c, instr);
                break;

        case nir_intrinsic_image_size:
                ntq_emit_image_size(c, instr);
                break;

        case nir_intrinsic_discard:
                if (vir_in_nonuniform_control_flow(c)) {
                        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                                   V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_nop_reg(),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_SETMSF_dest(c, vir_nop_reg(),
                                        vir_uniform_ui(c, 0));
                }
                break;

        case nir_intrinsic_discard_if: {
                enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, instr->src[0]);

                if (vir_in_nonuniform_control_flow(c)) {
                        struct qinst *exec_flag = vir_MOV_dest(c, vir_nop_reg(),
                                                               c->execute);
                        if (cond == V3D_QPU_COND_IFA) {
                                vir_set_uf(exec_flag, V3D_QPU_UF_ANDZ);
                        } else {
                                vir_set_uf(exec_flag, V3D_QPU_UF_NORNZ);
                                cond = V3D_QPU_COND_IFA;
                        }
                }

                vir_set_cond(vir_SETMSF_dest(c, vir_nop_reg(),
                                             vir_uniform_ui(c, 0)), cond);
                break;
        }
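
        /* Note on the discard cases above: under non-uniform control flow the
         * discard condition is first combined with the "channel still active"
         * test (c->execute == 0) via the ANDZ/NORNZ flag updates, so only
         * channels actually running this block can have their MSF cleared.
         */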

        case nir_intrinsic_memory_barrier:
        case nir_intrinsic_memory_barrier_buffer:
        case nir_intrinsic_memory_barrier_image:
        case nir_intrinsic_memory_barrier_shared:
        case nir_intrinsic_memory_barrier_tcs_patch:
        case nir_intrinsic_group_memory_barrier:
                /* We don't do any instruction scheduling of these NIR
                 * instructions between each other, so we just need to make
                 * sure that the TMU operations before the barrier are flushed
                 * before the ones after the barrier. That is currently
                 * handled by having a THRSW in each of them and a LDTMU
                 * series or a TMUWT after.
                 */
                break;

        case nir_intrinsic_control_barrier:
                /* Emit a TSY op to get all invocations in the workgroup
                 * (actually supergroup) to block until the last invocation
                 * reaches the TSY op.
                 */
                if (c->devinfo->ver >= 42) {
                        vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
                                                      V3D_QPU_WADDR_SYNCB));
                } else {
                        struct qinst *sync =
                                vir_BARRIERID_dest(c,
                                                   vir_reg(QFILE_MAGIC,
                                                           V3D_QPU_WADDR_SYNCU));
                        sync->uniform =
                                vir_get_uniform_index(c, QUNIFORM_CONSTANT,
                                                      0xffffff00 |
                                                      V3D_TSY_WAIT_INC_CHECK);
                }

                /* The blocking of a TSY op only happens at the next thread
                 * switch. No texturing may be outstanding at the time of a
                 * TSY blocking operation.
                 */
                vir_emit_thrsw(c);
                break;

        case nir_intrinsic_load_num_work_groups:
                for (int i = 0; i < 3; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_NUM_WORK_GROUPS,
                                                   i));
                }
                break;

        case nir_intrinsic_load_local_invocation_index:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_SHR(c, c->cs_payload[1],
                                       vir_uniform_ui(c, 32 - c->local_invocation_index_bits)));
                break;

        case nir_intrinsic_load_work_group_id:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_AND(c, c->cs_payload[0],
                                       vir_uniform_ui(c, 0xffff)));
                ntq_store_dest(c, &instr->dest, 1,
                               vir_SHR(c, c->cs_payload[0],
                                       vir_uniform_ui(c, 16)));
                ntq_store_dest(c, &instr->dest, 2,
                               vir_AND(c, c->cs_payload[1],
                                       vir_uniform_ui(c, 0xffff)));
                break;

        case nir_intrinsic_load_subgroup_id:
                ntq_store_dest(c, &instr->dest, 0, vir_EIDX(c));
                break;
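
        /* The cs_payload unpacking above relies on the layout set up in
         * nir_to_vir(): cs_payload[0] is assumed to hold the workgroup X
         * index in its low 16 bits and Y in the high 16 bits, while
         * cs_payload[1] holds Z in its low bits plus gl_LocalInvocationIndex
         * in its top local_invocation_index_bits.
         */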

        case nir_intrinsic_load_per_vertex_input: {
                /* col: vertex index, row = varying index */
                struct qreg col = ntq_get_src(c, instr->src[0], 0);
                uint32_t row_idx = nir_intrinsic_base(instr) * 4 +
                                   nir_intrinsic_component(instr);
                for (int i = 0; i < instr->num_components; i++) {
                        struct qreg row = vir_uniform_ui(c, row_idx++);
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_LDVPMG_IN(c, row, col));
                }
                break;
        }

        case nir_intrinsic_emit_vertex:
        case nir_intrinsic_end_primitive:
                unreachable("Should have been lowered in v3d_nir_lower_io");
                break;

        case nir_intrinsic_load_primitive_id: {
                /* gl_PrimitiveIdIn is written by the GBG in the first word of
                 * VPM output header. According to docs, we should read this
                 * using ldvpm(v,d)_in (See Table 71).
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               vir_LDVPMV_IN(c, vir_uniform_ui(c, 0)));
                break;
        }

        case nir_intrinsic_load_invocation_id:
                ntq_store_dest(c, &instr->dest, 0, vir_IID(c));
                break;

        case nir_intrinsic_load_fb_layers_v3d:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_FB_LAYERS, 0));
                break;

        case nir_intrinsic_load_sample_id:
                ntq_store_dest(c, &instr->dest, 0, vir_SAMPID(c));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

/* Clears (activates) the execute flags for any channels whose jump target
 * matches this block.
 *
 * XXX perf: Could we be using flpush/flpop somehow for our execution channel
 * state management?
 *
 * XXX perf: For uniform control flow, we should be able to skip c->execute
 * handling entirely.
 */
static void
ntq_activate_execute_for_block(struct v3d_compile *c)
{
        vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
                                c->execute, vir_uniform_ui(c, c->cur_block->index)),
                   V3D_QPU_PF_PUSHZ);

        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
}

static void
ntq_emit_uniform_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        /* Set up the flags for the IF condition (taking the THEN branch). */
        enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);

        /* Jump to ELSE if the condition does not hold. */
        vir_BRANCH(c, cond == V3D_QPU_COND_IFA ?
                   V3D_QPU_BRANCH_COND_ALLNA :
                   V3D_QPU_BRANCH_COND_ALLA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* At the end of the THEN block, jump to ENDIF */
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALWAYS);
                vir_link_blocks(c->cur_block, after_block);

                /* Emit the else block. */
                vir_set_emit_block(c, else_block);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
}
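
/* ntq_emit_uniform_if() above can use real branches without touching
 * c->execute because its caller only selects it when the condition is
 * dynamically uniform, so every channel takes the same path. The
 * non-uniform variant below has to maintain c->execute instead.
 */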

static void
ntq_emit_nonuniform_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        bool was_uniform_control_flow = false;
        if (!vir_in_nonuniform_control_flow(c)) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_uniform_control_flow = true;
        }

        /* Set up the flags for the IF condition (taking the THEN branch). */
        enum v3d_qpu_cond cond = ntq_emit_bool_to_cond(c, if_stmt->condition);

        /* Update the flags+cond to mean "Taking the ELSE branch (!cond) and
         * was previously active (execute Z)" for updating the exec flags.
         */
        if (was_uniform_control_flow) {
                cond = v3d_qpu_cond_invert(cond);
        } else {
                struct qinst *inst = vir_MOV_dest(c, vir_nop_reg(), c->execute);
                if (cond == V3D_QPU_COND_IFA) {
                        vir_set_uf(inst, V3D_QPU_UF_NORNZ);
                } else {
                        vir_set_uf(inst, V3D_QPU_UF_ANDZ);
                        cond = V3D_QPU_COND_IFA;
                }
        }

        vir_MOV_cond(c, cond,
                     c->execute,
                     vir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);
        vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block. First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF.
                 */
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                vir_set_pf(vir_XOR_dest(c, vir_nop_reg(),
                                        c->execute,
                                        vir_uniform_ui(c, after_block->index)),
                           V3D_QPU_PF_PUSHZ);
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
                vir_link_blocks(c->cur_block, after_block);
                vir_link_blocks(c->cur_block, else_block);

                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
        if (was_uniform_control_flow)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);
}

static void
ntq_emit_if(struct v3d_compile *c, nir_if *nif)
{
        bool was_in_control_flow = c->in_control_flow;
        c->in_control_flow = true;
        if (!vir_in_nonuniform_control_flow(c) &&
            nir_src_is_dynamically_uniform(nif->condition)) {
                ntq_emit_uniform_if(c, nif);
        } else {
                ntq_emit_nonuniform_if(c, nif);
        }
        c->in_control_flow = was_in_control_flow;
}

static void
ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
{
        switch (jump->type) {
        case nir_jump_break:
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_break_block->index));
                break;

        case nir_jump_continue:
                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_cont_block->index));
                break;

        case nir_jump_return:
                unreachable("All returns should be lowered\n");
                break;

        case nir_jump_goto:
        case nir_jump_goto_if:
                unreachable("not supported\n");
                break;
        }
}

static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                ntq_emit_jump(c, nir_instr_as_jump(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
ntq_emit_block(struct v3d_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}

static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
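
/* Loop emission sketch: the continue block sits at the top of the loop and
 * the break block right after it. Channels that break or continue park
 * their execute value on the matching block index, and the backwards ANYA
 * branch at the bottom keeps iterating while any channel is still active in
 * the body.
 */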

static void
ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
{
        bool was_in_control_flow = c->in_control_flow;
        c->in_control_flow = true;

        bool was_uniform_control_flow = false;
        if (!vir_in_nonuniform_control_flow(c)) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_uniform_control_flow = true;
        }

        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = vir_new_block(c);
        c->loop_break_block = vir_new_block(c);

        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* Re-enable any previous continues now, so our ANYA check below
         * won't block the loop from continuing.
         *
         * XXX: Use the .ORZ flags update, instead.
         */
        vir_set_pf(vir_XOR_dest(c,
                                vir_nop_reg(),
                                c->execute,
                                vir_uniform_ui(c, c->loop_cont_block->index)),
                   V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));

        vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute), V3D_QPU_PF_PUSHZ);

        struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
        /* Pixels that were not dispatched or have been discarded should not
         * contribute to looping again.
         */
        branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_link_blocks(c->cur_block, c->loop_break_block);

        vir_set_emit_block(c, c->loop_break_block);
        if (was_uniform_control_flow)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;

        c->loops++;

        c->in_control_flow = was_in_control_flow;
}

static void
ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}

static void
ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}

static void
nir_to_vir(struct v3d_compile *c)
{
        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
                c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));

                /* V3D 4.x can disable implicit point coordinate varyings if
                 * they are not used.
                 */
                if (c->fs_key->is_points &&
                    (c->devinfo->ver < 40 || program_reads_point_coord(c))) {
                        c->point_x = emit_fragment_varying(c, NULL, 0, 0);
                        c->point_y = emit_fragment_varying(c, NULL, 0, 0);
                        c->uses_implicit_point_line_varyings = true;
                } else if (c->fs_key->is_lines &&
                           (c->devinfo->ver < 40 ||
                            (c->s->info.system_values_read &
                             BITFIELD64_BIT(SYSTEM_VALUE_LINE_COORD)))) {
                        c->line_x = emit_fragment_varying(c, NULL, 0, 0);
                        c->uses_implicit_point_line_varyings = true;
                }
                break;
        case MESA_SHADER_COMPUTE:
                /* Set up the TSO for barriers, assuming we do some. */
                if (c->devinfo->ver < 42) {
                        vir_BARRIERID_dest(c, vir_reg(QFILE_MAGIC,
                                                      V3D_QPU_WADDR_SYNC));
                }

                c->cs_payload[0] = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->cs_payload[1] = vir_MOV(c, vir_reg(QFILE_REG, 2));

                /* Set up the division between gl_LocalInvocationIndex and
                 * wg_in_mem in the payload reg.
                 */
                int wg_size = (c->s->info.cs.local_size[0] *
                               c->s->info.cs.local_size[1] *
                               c->s->info.cs.local_size[2]);
                c->local_invocation_index_bits =
                        ffs(util_next_power_of_two(MAX2(wg_size, 64))) - 1;
                assert(c->local_invocation_index_bits <= 8);

                if (c->s->info.cs.shared_size) {
                        struct qreg wg_in_mem = vir_SHR(c, c->cs_payload[1],
                                                        vir_uniform_ui(c, 16));
                        if (c->s->info.cs.local_size[0] != 1 ||
                            c->s->info.cs.local_size[1] != 1 ||
                            c->s->info.cs.local_size[2] != 1) {
                                int wg_bits = (16 -
                                               c->local_invocation_index_bits);
                                int wg_mask = (1 << wg_bits) - 1;
                                wg_in_mem = vir_AND(c, wg_in_mem,
                                                    vir_uniform_ui(c, wg_mask));
                        }
                        struct qreg shared_per_wg =
                                vir_uniform_ui(c, c->s->info.cs.shared_size);

                        c->cs_shared_offset =
                                vir_ADD(c,
                                        vir_uniform(c, QUNIFORM_SHARED_OFFSET, 0),
                                        vir_UMUL(c, wg_in_mem, shared_per_wg));
                }
                break;
        default:
                break;
        }

        if (c->s->scratch_size) {
                v3d_setup_spill_base(c);
                c->spill_size += V3D_CHANNELS * c->s->scratch_size;
        }

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                ntq_setup_vs_inputs(c);
                break;
        case MESA_SHADER_GEOMETRY:
                ntq_setup_gs_inputs(c);
                break;
        case MESA_SHADER_FRAGMENT:
                ntq_setup_fs_inputs(c);
                break;
        case MESA_SHADER_COMPUTE:
                break;
        default:
                unreachable("unsupported shader stage");
        }

        ntq_setup_outputs(c);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}
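
/* By this point NIR lowering is expected to have inlined every function into
 * "main", which is why nir_to_vir() above asserts that main is the only
 * function left and emits just that implementation.
 */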

const nir_shader_compiler_options v3d_nir_options = {
        .lower_all_io_to_temps = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_bitfield_insert_to_shifts = true,
        .lower_bitfield_extract_to_shifts = true,
        .lower_bitfield_reverse = true,
        .lower_bit_count = true,
        .lower_cs_local_id_from_index = true,
        .lower_ffract = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_unpack_snorm_4x8 = true,
        .lower_pack_half_2x16 = true,
        .lower_unpack_half_2x16 = true,
        .lower_find_lsb = true,
        .lower_flrp32 = true,
        .lower_fsqrt = true,
        .lower_ifind_msb = true,
        .lower_isign = true,
        .lower_ldexp = true,
        .lower_mul_high = true,
        .lower_wpos_pntc = true,
        .lower_rotate = true,
        .lower_to_scalar = true,
};

/**
 * When demoting a shader down to single-threaded, removes the THRSW
 * instructions (one will still be inserted at v3d_vir_to_qpu() for the
 * program end).
 */
static void
vir_remove_thrsw(struct v3d_compile *c)
{
        vir_for_each_block(block, c) {
                vir_for_each_inst_safe(inst, block) {
                        if (inst->qpu.sig.thrsw)
                                vir_remove_instruction(c, inst);
                }
        }

        c->last_thrsw = NULL;
}

static void
vir_emit_last_thrsw(struct v3d_compile *c)
{
        /* On V3D before 4.1, we need a TMU op to be outstanding when thread
         * switching, so disable threads if we didn't do any TMU ops (each of
         * which would have emitted a THRSW).
         */
        if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
                c->threads = 1;
                if (c->last_thrsw)
                        vir_remove_thrsw(c);
                return;
        }

        /* If we're threaded and the last THRSW was in conditional code, then
         * we need to emit another one so that we can flag it as the last
         * thrsw.
         */
        if (c->last_thrsw && !c->last_thrsw_at_top_level) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        /* If we're threaded, then we need to mark the last THRSW instruction
         * so we can emit a pair of them at QPU emit time.
         *
         * For V3D 4.x, we can spawn the non-fragment shaders already in the
         * post-last-THRSW state, so we can skip this.
         */
        if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        if (c->last_thrsw)
                c->last_thrsw->is_last_thrsw = true;
}

/* There's a flag in the shader for "center W is needed for reasons other than
 * non-centroid varyings", so we just walk the program after VIR optimization
 * to see if it's used. It should be harmless to set even if we only use
 * center W for varyings.
 */
static void
vir_check_payload_w(struct v3d_compile *c)
{
        if (c->s->info.stage != MESA_SHADER_FRAGMENT)
                return;

        vir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_REG &&
                            inst->src[i].index == 0) {
                                c->uses_center_w = true;
                                return;
                        }
                }
        }
}

void
v3d_nir_to_vir(struct v3d_compile *c)
{
        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_vir(c);

        /* Emit the last THRSW before STVPM and TLB writes. */
        vir_emit_last_thrsw(c);

        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                emit_frag_end(c);
                break;
        case MESA_SHADER_GEOMETRY:
                emit_geom_end(c);
                break;
        case MESA_SHADER_VERTEX:
                emit_vert_end(c);
                break;
        case MESA_SHADER_COMPUTE:
                break;
        default:
                unreachable("bad stage");
        }

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        vir_optimize(c);

        vir_check_payload_w(c);

        /* XXX perf: On VC4, we do a VIR-level instruction scheduling here.
         * We used that on that platform to pipeline TMU writes and reduce the
         * number of thread switches, as well as try (mostly successfully) to
         * reduce maximum register pressure to allow more threads. We should
         * do something of that sort for V3D -- either instruction scheduling
         * here, or delay the THRSW and LDTMUs from our texture instructions
         * until the results are needed.
         */

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        /* Attempt to allocate registers for the temporaries. If we fail,
         * reduce thread count and try again.
         */
        int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
        struct qpu_reg *temp_registers;
        while (true) {
                bool spilled;
                temp_registers = v3d_register_allocate(c, &spilled);
                if (spilled)
                        continue;

                if (temp_registers)
                        break;

                if (c->threads == min_threads) {
                        if (c->fallback_scheduler) {
                                fprintf(stderr,
                                        "Failed to register allocate at %d "
                                        "threads with any strategy.\n",
                                        c->threads);
                        }
                        c->compilation_result =
                                V3D_COMPILATION_FAILED_REGISTER_ALLOCATION;
                        return;
                }

                c->threads /= 2;

                if (c->threads == 1)
                        vir_remove_thrsw(c);
        }

        if (c->spills &&
            (V3D_DEBUG & (V3D_DEBUG_VIR |
                          v3d_debug_flag_for_shader_stage(c->s->info.stage)))) {
                fprintf(stderr, "%s prog %d/%d spilled VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        v3d_vir_to_qpu(c, temp_registers);
}