/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
resize_qreg_array(struct v3d_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}

static void
vir_emit_thrsw(struct v3d_compile *c)
{
        if (c->threads == 1)
                return;

        /* Always thread switch after each texture operation for now.
         *
         * We could do better by batching a bunch of texture fetches up and
         * then doing one thread switch and collecting all their results
         * afterward.
         */
        c->last_thrsw = vir_NOP(c);
        c->last_thrsw->qpu.sig.thrsw = true;
        c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
}

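/* SFU operations are issued by writing the operand to a magic SFU register;
 * the result becomes available in r4 shortly afterward.  That is why the
 * helper below writes the source to the requested waddr and then reads the
 * result back out of V3D_QPU_WADDR_R4.  (Descriptive note based on the code
 * below, not on HW documentation.)
 */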
static struct qreg
vir_SFU(struct v3d_compile *c, int waddr, struct qreg src)
{
        vir_FMOV_dest(c, vir_reg(QFILE_MAGIC, waddr), src);
        return vir_FMOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4));
}

static struct qreg
indirect_uniform_load(struct v3d_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = nir_intrinsic_base(intr);
        struct v3d_ubo_range *range = NULL;
        unsigned i;

        for (i = 0; i < c->num_ubo_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }

        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(i != c->num_ubo_ranges);
        if (!c->ubo_range_used[i]) {
                c->ubo_range_used[i] = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
        }

        offset -= range->src_offset;

        if (range->dst_offset + offset != 0) {
                indirect_offset = vir_ADD(c, indirect_offset,
                                          vir_uniform_ui(c, range->dst_offset +
                                                         offset));
        }

        /* Adjust for where we stored the TGSI register base. */
        vir_ADD_dest(c,
                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                     vir_uniform(c, QUNIFORM_UBO_ADDR, 0),
                     indirect_offset);

        vir_emit_thrsw(c);
        return vir_LDTMU(c);
}

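/* Note on the indirect path above: the driver packs only the UBO ranges a
 * shader actually uses, so the NIR byte offset is rebased from the range's
 * src_offset to its packed dst_offset, added to the QUNIFORM_UBO_ADDR base
 * written to the TMUA magic register, and the loaded value is collected with
 * LDTMU after the thread switch.
 */
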
static struct qreg *
ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}

/**
 * This function is responsible for getting VIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value.  To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous VIR instruction in the block and rewritable by
 * this function.  That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination
 */
void
ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        struct qinst *last_inst = NULL;
        if (!list_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        assert(result.file == QFILE_UNIF ||
               (result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* Insert a MOV if the source wasn't an SSA def in the
                 * previous instruction.
                 */
                if (result.file == QFILE_UNIF) {
                        result = vir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (c->execute.file != QFILE_NULL) {
                        last_inst->dst.index = qregs[chan].index;

                        /* Set the flags to the current exec mask.
                         */
                        c->cursor = vir_before_inst(last_inst);
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        c->cursor = vir_after_inst(last_inst);

                        vir_set_cond(last_inst, V3D_QPU_COND_IFA);
                        last_inst->cond_is_exec_mask = true;
                }
        }
}

struct qreg
ntq_get_src(struct v3d_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

static struct qreg
ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static struct qreg
ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
{
        return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
}

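/* ntq_minify() above computes max(size >> level, 1), the usual rule for
 * minifying a texture dimension by an integer LOD.
 */
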
static void
ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
        int dest_size = nir_tex_instr_dest_size(instr);

        struct qreg lod = c->undef;
        if (lod_index != -1)
                lod = ntq_get_src(c, instr->src[lod_index].src, 0);

        for (int i = 0; i < dest_size; i++) {
                assert(i < 3);
                enum quniform_contents contents;

                if (instr->is_array && i == dest_size - 1)
                        contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
                else
                        contents = QUNIFORM_TEXTURE_WIDTH + i;

                struct qreg size = vir_uniform(c, contents, unit);

                switch (instr->sampler_dim) {
                case GLSL_SAMPLER_DIM_1D:
                case GLSL_SAMPLER_DIM_2D:
                case GLSL_SAMPLER_DIM_3D:
                case GLSL_SAMPLER_DIM_CUBE:
                        /* Don't minify the array size. */
                        if (!(instr->is_array && i == dest_size - 1)) {
                                size = ntq_minify(c, size, lod);
                        }
                        break;

                case GLSL_SAMPLER_DIM_RECT:
                        /* There's no LOD field for rects */
                        break;

                default:
                        unreachable("Bad sampler type");
                }

                ntq_store_dest(c, &instr->dest, i, size);
        }
}

static void
ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        /* Since each texture sampling op requires uploading uniforms to
         * reference the texture, there's no HW support for texture size and
         * you just upload uniforms containing the size.
         */
        switch (instr->op) {
        case nir_texop_query_levels:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
                return;
        case nir_texop_txs:
                ntq_emit_txs(c, instr);
                return;
        default:
                break;
        }

        if (c->devinfo->ver >= 40)
                v3d40_vir_emit_tex(c, instr);
        else
                v3d33_vir_emit_tex(c, instr);
}

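/* Sine/cosine below are built on the SFU SIN unit, which is accurate only
 * near the center of a period: the argument is scaled by 1/pi (biased by
 * half a period for cosine), the period count is rounded off, SIN is
 * evaluated on the remainder, and the sign is flipped for odd periods by
 * XORing the period count's low bit into the result's sign bit.  The SHL by
 * -1 presumably relies on the QPU masking shift counts to 5 bits (so -1
 * shifts by 31); that reading is an inference from the code, not a spec
 * quote.
 */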
static struct qreg
ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
{
        struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
        if (is_cos)
                input = vir_FADD(c, input, vir_uniform_f(c, 0.5));

        struct qreg periods = vir_FROUND(c, input);
        struct qreg sin_output = vir_SFU(c, V3D_QPU_WADDR_SIN,
                                         vir_FSUB(c, input, periods));
        return vir_XOR(c, sin_output, vir_SHL(c,
                                              vir_FTOIN(c, periods),
                                              vir_uniform_ui(c, -1)));
}

static struct qreg
ntq_fsign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
        return vir_MOV(c, t);
}

static struct qreg
ntq_isign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_ui(c, 0));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_ui(c, 1));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_ui(c, -1));
        return vir_MOV(c, t);
}

static void
emit_fragcoord_input(struct v3d_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = vir_FXCD(c);
        c->inputs[attr * 4 + 1] = vir_FYCD(c);
        c->inputs[attr * 4 + 2] = c->payload_z;
        c->inputs[attr * 4 + 3] = vir_SFU(c, V3D_QPU_WADDR_RECIP,
                                          c->payload_w);
}

static struct qreg
emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
                      int swizzle)
{
        struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
        struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);

        struct qreg vary;
        if (c->devinfo->ver >= 41) {
                struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                                    c->undef, c->undef);
                ldvary->qpu.sig.ldvary = true;
                vary = vir_emit_def(c, ldvary);
        } else {
                vir_NOP(c)->qpu.sig.ldvary = true;
                vary = r3;
        }

        /* For gl_PointCoord input or distance along a line, we'll be called
         * with no nir_variable, and we don't count toward VPM size so we
         * don't track an input slot.
         */
        if (!var) {
                return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
        }

        int i = c->num_inputs++;
        c->input_slots[i] = v3d_slot_from_slot_and_component(var->data.location,
                                                             swizzle);

        switch (var->data.interpolation) {
        case INTERP_MODE_NONE:
                /* If a gl_FrontColor or gl_BackColor input has no interp
                 * qualifier, then if we're using glShadeModel(GL_FLAT) it
                 * needs to be flat shaded.
                 */
                switch (var->data.location) {
                case VARYING_SLOT_COL0:
                case VARYING_SLOT_COL1:
                case VARYING_SLOT_BFC0:
                case VARYING_SLOT_BFC1:
                        if (c->fs_key->shade_model_flat) {
                                BITSET_SET(c->flat_shade_flags, i);
                                vir_MOV_dest(c, c->undef, vary);
                                return vir_MOV(c, r5);
                        } else {
                                return vir_FADD(c, vir_FMUL(c, vary,
                                                            c->payload_w), r5);
                        }
                default:
                        break;
                }
                /* FALLTHROUGH */
        case INTERP_MODE_SMOOTH:
                if (var->data.centroid) {
                        BITSET_SET(c->centroid_flags, i);
                        return vir_FADD(c, vir_FMUL(c, vary,
                                                    c->payload_w_centroid), r5);
                } else {
                        return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
                }
        case INTERP_MODE_NOPERSPECTIVE:
                BITSET_SET(c->noperspective_flags, i);
                return vir_FADD(c, vir_MOV(c, vary), r5);
        case INTERP_MODE_FLAT:
                BITSET_SET(c->flat_shade_flags, i);
                vir_MOV_dest(c, c->undef, vary);
                return vir_MOV(c, r5);
        default:
                unreachable("Bad interp mode");
        }
}

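/* Interpolation recap for the function above: ldvary streams in the varying
 * plane equation terms, so a smooth varying is computed as vary * W + r5,
 * centroid varyings use the centroid W payload, noperspective ones skip the
 * W multiply, and flat shading just consumes vary and returns the r5
 * constant term.  (Summary of the code above.)
 */
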
static void
emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var)
{
        for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                int chan = var->data.location_frac + i;
                c->inputs[attr * 4 + chan] =
                        emit_fragment_varying(c, var, chan);
        }
}

static void
add_output(struct v3d_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct v3d_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset] =
                v3d_slot_from_slot_and_component(slot, swizzle);
}

static void
declare_uniform_range(struct v3d_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_ubo_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct v3d_ubo_range,
                                         c->ubo_ranges_array_size);
                c->ubo_range_used = reralloc(c, c->ubo_range_used,
                                             bool,
                                             c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_range_used[array_id] = false;
}

/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct v3d_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1;
        if (nir_op_infos[compare_instr->op].num_inputs > 1)
                src1 = ntq_get_alu_src(c, compare_instr, 1);
        bool cond_invert = false;

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_seq:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHZ);
                break;
        case nir_op_ieq:
                vir_PF(c, vir_XOR(c, src0, src1), V3D_QPU_PF_PUSHZ);
                break;

        case nir_op_fne:
        case nir_op_sne:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;
        case nir_op_ine:
                vir_PF(c, vir_XOR(c, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_fge:
        case nir_op_sge:
                vir_PF(c, vir_FCMP(c, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ige:
                vir_PF(c, vir_MIN(c, src1, src0), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;
        case nir_op_uge:
                vir_PF(c, vir_SUB(c, src0, src1), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;

        case nir_op_slt:
        case nir_op_flt:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHN);
                break;
        case nir_op_ilt:
                vir_PF(c, vir_MIN(c, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ult:
                vir_PF(c, vir_SUB(c, src0, src1), V3D_QPU_PF_PUSHC);
                break;

        default:
                return false;
        }

        enum v3d_qpu_cond cond = (cond_invert ?
                                  V3D_QPU_COND_IFNA :
                                  V3D_QPU_COND_IFA);

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = vir_SEL(c, cond,
                                vir_uniform_f(c, 1.0), vir_uniform_f(c, 0.0));
                break;

        case nir_op_bcsel:
                *dest = vir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = vir_SEL(c, cond,
                                vir_uniform_ui(c, ~0), vir_uniform_ui(c, 0));
                break;
        }

        /* Make the temporary for nir_store_dest(). */
        *dest = vir_MOV(c, *dest);

        return true;
}

/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg
ntq_emit_bcsel(struct v3d_compile *c, nir_alu_instr *instr,
               struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
        return vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA, src[1], src[2]));
}

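/* Example of the folding done above: given NIR along the lines of
 *
 *    ssa_2 = flt ssa_0, ssa_1
 *    ssa_3 = bcsel ssa_2, ssa_4, ssa_5
 *
 * the comparison is emitted directly into the condition flags and the bcsel
 * becomes a single SEL on those flags, instead of materializing a ~0/0
 * boolean and re-comparing it against zero.  (Illustrative snippet; the ssa
 * numbers are made up.)
 */
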
static void
ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
{
        /* This should always be lowered to ALU operations for V3D. */
        assert(!instr->dest.saturate);

        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        ntq_store_dest(c, &instr->dest.dest, i,
                                       vir_MOV(c, srcs[i]));
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        struct qreg result;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                result = vir_MOV(c, src[0]);
                break;

        case nir_op_fneg:
                result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
                break;
        case nir_op_ineg:
                result = vir_NEG(c, src[0]);
                break;

        case nir_op_fmul:
                result = vir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                result = vir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                result = vir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                result = vir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                result = vir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i32:
                result = vir_FTOIZ(c, src[0]);
                break;
        case nir_op_f2u32:
                result = vir_FTOUZ(c, src[0]);
                break;
        case nir_op_i2f32:
                result = vir_ITOF(c, src[0]);
                break;
        case nir_op_u2f32:
                result = vir_UTOF(c, src[0]);
                break;
        case nir_op_b2f:
                result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        case nir_op_iadd:
                result = vir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                result = vir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                result = vir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                result = vir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                result = vir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                result = vir_MIN(c, src[0], src[1]);
                break;
        case nir_op_umin:
                result = vir_UMIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                result = vir_MAX(c, src[0], src[1]);
                break;
        case nir_op_umax:
                result = vir_UMAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                result = vir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                result = vir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                result = vir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                result = vir_NOT(c, src[0]);
                break;

        case nir_op_ufind_msb:
                result = vir_SUB(c, vir_uniform_ui(c, 31), vir_CLZ(c, src[0]));
                break;

        case nir_op_imul:
                result = vir_UMUL(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
        case nir_op_ult:
                if (!ntq_emit_comparison(c, &result, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;

        case nir_op_bcsel:
                result = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            src[1], src[2]));
                break;

        case nir_op_frcp:
                result = vir_SFU(c, V3D_QPU_WADDR_RECIP, src[0]);
                break;
        case nir_op_frsq:
                result = vir_SFU(c, V3D_QPU_WADDR_RSQRT, src[0]);
                break;
        case nir_op_fexp2:
                result = vir_SFU(c, V3D_QPU_WADDR_EXP, src[0]);
                break;
        case nir_op_flog2:
                result = vir_SFU(c, V3D_QPU_WADDR_LOG, src[0]);
                break;

        case nir_op_fceil:
                result = vir_FCEIL(c, src[0]);
                break;
        case nir_op_ffloor:
                result = vir_FFLOOR(c, src[0]);
                break;
        case nir_op_fround_even:
                result = vir_FROUND(c, src[0]);
                break;
        case nir_op_ftrunc:
                result = vir_FTRUNC(c, src[0]);
                break;
        case nir_op_ffract:
                result = vir_FSUB(c, src[0], vir_FFLOOR(c, src[0]));
                break;

        case nir_op_fsin:
                result = ntq_fsincos(c, src[0], false);
                break;
        case nir_op_fcos:
                result = ntq_fsincos(c, src[0], true);
                break;

        case nir_op_fsign:
                result = ntq_fsign(c, src[0]);
                break;
        case nir_op_isign:
                result = ntq_isign(c, src[0]);
                break;

        case nir_op_fabs:
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
                break;

        case nir_op_iabs:
                result = vir_MAX(c, src[0],
                                 vir_SUB(c, vir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_fddx:
        case nir_op_fddx_coarse:
        case nir_op_fddx_fine:
                result = vir_FDX(c, src[0]);
                break;

        case nir_op_fddy:
        case nir_op_fddy_coarse:
        case nir_op_fddy_fine:
                result = vir_FDY(c, src[0]);
                break;

        case nir_op_uadd_carry:
                vir_PF(c, vir_ADD(c, src[0], src[1]), V3D_QPU_PF_PUSHC);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }

        /* We have a scalar result, so the instruction should only have a
         * single channel written to.
         */
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        ntq_store_dest(c, &instr->dest.dest,
                       ffs(instr->dest.write_mask) - 1, result);
}

/* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
 * specifier.  They come from a register that's preloaded with 0xffffffff
 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
 */
#define TLB_TYPE_F16_COLOR         (3 << 6)
#define TLB_TYPE_I32_COLOR         (1 << 6)
#define TLB_TYPE_F32_COLOR         (0 << 6)
#define TLB_RENDER_TARGET_SHIFT    3 /* Reversed!  7 = RT 0, 0 = RT 7. */
#define TLB_SAMPLE_MODE_PER_SAMPLE (0 << 2)
#define TLB_SAMPLE_MODE_PER_PIXEL  (1 << 2)
#define TLB_F16_SWAP_HI_LO         (1 << 1)
#define TLB_VEC_SIZE_4_F16         (1 << 0)
#define TLB_VEC_SIZE_2_F16         (0 << 0)
#define TLB_VEC_SIZE_MINUS_1_SHIFT 0

/* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
 * flag is set.
 */
#define TLB_TYPE_DEPTH             ((2 << 6) | (0 << 4))
#define TLB_DEPTH_TYPE_INVARIANT   (0 << 2) /* Unmodified sideband input used */
#define TLB_DEPTH_TYPE_PER_PIXEL   (1 << 2) /* QPU result used */

/* Stencil is a single 32-bit write. */
#define TLB_TYPE_STENCIL_ALPHA     ((2 << 6) | (1 << 4))

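/* Worked example, derived from the defines above: the preloaded 0xff byte
 * decodes as TLB_TYPE_F16_COLOR | render target field 7 (i.e. RT 0, since
 * the field is reversed) | TLB_SAMPLE_MODE_PER_PIXEL | TLB_F16_SWAP_HI_LO |
 * TLB_VEC_SIZE_4_F16 -- the "normal vec4 f16 RT0 write" mentioned above.
 */
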
static void
emit_frag_end(struct v3d_compile *c)
{
        /* XXX
        if (c->output_sample_mask_index != -1) {
                vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }
        */

        bool has_any_tlb_color_write = false;
        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (c->output_color_var[rt])
                        has_any_tlb_color_write = true;
        }

        if (c->fs_key->sample_alpha_to_coverage && c->output_color_var[0]) {
                struct nir_variable *var = c->output_color_var[0];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];

                vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                vir_AND(c,
                                        vir_MSF(c),
                                        vir_FTOC(c, color[3])));
        }

        if (c->output_position_index != -1) {
                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  c->outputs[c->output_position_index]);

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c,
                                       TLB_TYPE_DEPTH |
                                       TLB_DEPTH_TYPE_PER_PIXEL |
                                       0xffffff00);
        } else if (c->s->info.fs.uses_discard ||
                   c->fs_key->sample_alpha_to_coverage ||
                   !has_any_tlb_color_write) {
                /* Emit passthrough Z if it needed to be delayed until shader
                 * end due to potential discards.
                 *
                 * Since (single-threaded) fragment shaders always need a TLB
                 * write, emit passthrough Z if we didn't have any color
                 * buffers and flag us as potentially discarding, so that we
                 * can use Z as the TLB write.
                 */
                c->s->info.fs.uses_discard = true;

                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  vir_reg(QFILE_NULL, 0));

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c,
                                       TLB_TYPE_DEPTH |
                                       TLB_DEPTH_TYPE_INVARIANT |
                                       0xffffff00);
        }

        /* XXX: Performance improvement: Merge Z write and color writes TLB
         * uniform setup
         */

        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (!c->output_color_var[rt])
                        continue;

                nir_variable *var = c->output_color_var[rt];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];
                int num_components = glsl_get_vector_elements(var->type);
                uint32_t conf = 0xffffff00;
                struct qinst *inst;

                conf |= TLB_SAMPLE_MODE_PER_PIXEL;
                conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;

                if (c->fs_key->swap_color_rb & (1 << rt))
                        num_components = MAX2(num_components, 3);

                assert(num_components != 0);
                switch (glsl_get_base_type(var->type)) {
                case GLSL_TYPE_UINT:
                case GLSL_TYPE_INT:
                        /* The F32 vs I32 distinction was dropped in 4.2. */
                        if (c->devinfo->ver < 42)
                                conf |= TLB_TYPE_I32_COLOR;
                        else
                                conf |= TLB_TYPE_F32_COLOR;
                        conf |= ((num_components - 1) <<
                                 TLB_VEC_SIZE_MINUS_1_SHIFT);

                        inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), color[0]);
                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                vir_uniform_ui(c, conf);

                        for (int i = 1; i < num_components; i++) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLB, 0),
                                                    color[i]);
                        }
                        break;

                default: {
                        struct qreg r = color[0];
                        struct qreg g = color[1];
                        struct qreg b = color[2];
                        struct qreg a = color[3];

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                conf |= TLB_TYPE_F32_COLOR;
                                conf |= ((num_components - 1) <<
                                         TLB_VEC_SIZE_MINUS_1_SHIFT);
                        } else {
                                conf |= TLB_TYPE_F16_COLOR;
                                conf |= TLB_F16_SWAP_HI_LO;
                                if (num_components >= 3)
                                        conf |= TLB_VEC_SIZE_4_F16;
                                else
                                        conf |= TLB_VEC_SIZE_2_F16;
                        }

                        if (c->fs_key->swap_color_rb & (1 << rt)) {
                                r = color[2];
                                b = color[0];
                        }

                        if (c->fs_key->sample_alpha_to_one)
                                a = vir_uniform_f(c, 1.0);

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), r);
                                inst->src[vir_get_implicit_uniform_src(inst)] =
                                        vir_uniform_ui(c, conf);

                                if (num_components >= 2)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), g);
                                if (num_components >= 3)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), b);
                                if (num_components >= 4)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), a);
                        } else {
                                inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), r, g);
                                if (conf != ~0) {
                                        inst->dst.file = QFILE_TLBU;
                                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                                vir_uniform_ui(c, conf);
                                }

                                if (num_components >= 3)
                                        inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), b, a);
                        }
                        break;
                }
                }
        }
}

static void
vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t *vpm_index)
{
        if (c->devinfo->ver >= 40) {
                vir_STVPMV(c, vir_uniform_ui(c, *vpm_index), val);
                *vpm_index = *vpm_index + 1;
        } else {
                vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
        }

        c->num_vpm_writes++;
}

static void
emit_scaled_viewport_write(struct v3d_compile *c, struct qreg rcp_w,
                           uint32_t *vpm_index)
{
        for (int i = 0; i < 2; i++) {
                struct qreg coord = c->outputs[c->output_position_index + i];
                coord = vir_FMUL(c, coord,
                                 vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i,
                                             0));
                coord = vir_FMUL(c, coord, rcp_w);
                vir_VPM_WRITE(c, vir_FTOIN(c, coord), vpm_index);
        }
}

static void
emit_zs_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
{
        struct qreg zscale = vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        struct qreg z = c->outputs[c->output_position_index + 2];
        z = vir_FMUL(c, z, zscale);
        z = vir_FMUL(c, z, rcp_w);
        z = vir_FADD(c, z, zoffset);
        vir_VPM_WRITE(c, z, vpm_index);
}

static void
emit_rcp_wc_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
{
        vir_VPM_WRITE(c, rcp_w, vpm_index);
}

static void
emit_point_size_write(struct v3d_compile *c, uint32_t *vpm_index)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = vir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = vir_FMAX(c, point_size, vir_uniform_f(c, .125));

        vir_VPM_WRITE(c, point_size, vpm_index);
}

static void
emit_vpm_write_setup(struct v3d_compile *c)
{
        if (c->devinfo->ver >= 40)
                return;

        v3d33_vir_vpm_write_setup(c);
}

/**
 * Sets up c->outputs[c->output_position_index] for the vertex shader
 * epilogue, if an output vertex position wasn't specified in the user's
 * shader.  This may be the case for transform feedback with rasterizer
 * discard enabled.
 */
static void
setup_default_position(struct v3d_compile *c)
{
        if (c->output_position_index != -1)
                return;

        c->output_position_index = c->outputs_array_size;
        for (int i = 0; i < 4; i++) {
                add_output(c,
                           c->output_position_index + i,
                           VARYING_SLOT_POS, i);
        }
}

static void
emit_vert_end(struct v3d_compile *c)
{
        setup_default_position(c);

        uint32_t vpm_index = 0;
        struct qreg rcp_w = vir_SFU(c, V3D_QPU_WADDR_RECIP,
                                    c->outputs[c->output_position_index + 3]);

        emit_vpm_write_setup(c);

        if (c->vs_key->is_coord) {
                for (int i = 0; i < 4; i++)
                        vir_VPM_WRITE(c, c->outputs[c->output_position_index + i],
                                      &vpm_index);
                emit_scaled_viewport_write(c, rcp_w, &vpm_index);
                if (c->vs_key->per_vertex_point_size) {
                        emit_point_size_write(c, &vpm_index);
                        /* emit_rcp_wc_write(c, rcp_w); */
                }
                /* XXX: Z-only rendering */
                if (0)
                        emit_zs_write(c, rcp_w, &vpm_index);
        } else {
                emit_scaled_viewport_write(c, rcp_w, &vpm_index);
                emit_zs_write(c, rcp_w, &vpm_index);
                emit_rcp_wc_write(c, rcp_w, &vpm_index);
                if (c->vs_key->per_vertex_point_size)
                        emit_point_size_write(c, &vpm_index);
        }

        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                struct v3d_varying_slot input = c->vs_key->fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct v3d_varying_slot output = c->output_slots[j];

                        if (!memcmp(&input, &output, sizeof(input))) {
                                vir_VPM_WRITE(c, c->outputs[j],
                                              &vpm_index);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        vir_VPM_WRITE(c, vir_uniform_f(c, 0.0),
                                      &vpm_index);
        }

        /* GFXH-1684: VPM writes need to be complete by the end of the shader.
         */
        if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
                vir_VPMWT(c);
}

void
v3d_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);

        NIR_PASS(progress, s, nir_opt_move_load_ubo);
}

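/* The pass loop above reruns until no pass reports progress, since passes
 * like copy propagation, CSE, and algebraic optimization each expose more
 * work for the others.  nir_opt_move_load_ubo then runs once at the end so
 * UBO loads sit near their uses instead of staying hoisted to the top of
 * the program.
 */
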
static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}

static struct qreg
ntq_emit_vpm_read(struct v3d_compile *c,
                  uint32_t *num_components_queued,
                  uint32_t *remaining,
                  uint32_t vpm_index)
{
        struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);

        if (c->devinfo->ver >= 40) {
                return vir_LDVPMV_IN(c,
                                     vir_uniform_ui(c,
                                                    (*num_components_queued)++));
        }

        if (*num_components_queued != 0) {
                (*num_components_queued)--;
                c->num_inputs++;
                return vir_MOV(c, vpm);
        }

        uint32_t num_components = MIN2(*remaining, 32);

        v3d33_vir_vpm_read_setup(c, num_components);

        *num_components_queued = num_components - 1;
        *remaining -= num_components;
        c->num_inputs++;

        return vir_MOV(c, vpm);
}

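/* Note on the 3.3 path above: VPM reads have to be programmed in batches (up
 * to 32 components per v3d33_vir_vpm_read_setup() call), so
 * *num_components_queued tracks how many reads remain in the current batch
 * and *remaining tracks how many are left to program overall.  V3D 4.x
 * instead loads any element directly with LDVPMV.
 */
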
static void
ntq_setup_inputs(struct v3d_compile *c)
{
        unsigned num_entries = 0;
        unsigned num_components = 0;
        nir_foreach_variable(var, &c->s->inputs) {
                num_entries++;
                num_components += glsl_get_components(var->type);
        }

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        uint32_t vpm_components_queued = 0;
        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                bool uses_iid = c->s->info.system_values_read &
                        (1ull << SYSTEM_VALUE_INSTANCE_ID);
                bool uses_vid = c->s->info.system_values_read &
                        (1ull << SYSTEM_VALUE_VERTEX_ID);

                num_components += uses_iid;
                num_components += uses_vid;

                if (uses_iid) {
                        c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                                   &num_components, ~0);
                }

                if (uses_vid) {
                        c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                                   &num_components, ~0);
                }
        }

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_PNTC ||
                                   (var->data.location >= VARYING_SLOT_VAR0 &&
                                    (c->fs_key->point_sprite_mask &
                                     (1 << (var->data.location -
                                            VARYING_SLOT_VAR0))))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var);
                        }
                } else {
                        int var_components = glsl_get_components(var->type);

                        for (int i = 0; i < var_components; i++) {
                                c->inputs[loc * 4 + i] =
                                        ntq_emit_vpm_read(c,
                                                          &vpm_components_queued,
                                                          &num_components,
                                                          loc * 4 + i);
                        }
                        c->vattr_sizes[loc] = var_components;
                }
        }

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                if (c->devinfo->ver >= 40) {
                        assert(vpm_components_queued == num_components);
                } else {
                        assert(vpm_components_queued == 0);
                        assert(num_components == 0);
                }
        }
}

static void
ntq_setup_outputs(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4; i++) {
                        add_output(c, loc + var->data.location_frac + i,
                                   var->data.location,
                                   var->data.location_frac + i);
                }

                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                                c->output_color_var[0] = var;
                                c->output_color_var[1] = var;
                                c->output_color_var[2] = var;
                                c->output_color_var[3] = var;
                                break;
                        case FRAG_RESULT_DATA0:
                        case FRAG_RESULT_DATA1:
                        case FRAG_RESULT_DATA2:
                        case FRAG_RESULT_DATA3:
                                c->output_color_var[var->data.location -
                                                    FRAG_RESULT_DATA0] = var;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}

static void
ntq_setup_uniforms(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                uint32_t vec4_count = glsl_count_attribute_slots(var->type,
                                                                 false);
                unsigned vec4_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * vec4_size,
                                      vec4_count * vec4_size);
        }
}

/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = vir_get_temp(c);
        }
}

static void
ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}

static void
ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* VIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers()).
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, 0);
}

static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        nir_const_value *const_offset;
        unsigned offset;

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                if (const_offset) {
                        offset = nir_intrinsic_base(instr) + const_offset->u32[0];
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        ntq_store_dest(c, &instr->dest, 0,
                                       vir_uniform(c, QUNIFORM_UNIFORM,
                                                   offset));
                } else {
                        ntq_store_dest(c, &instr->dest, 0,
                                       indirect_uniform_load(c, instr));
                }
                break;

        case nir_intrinsic_load_ubo:
                for (int i = 0; i < instr->num_components; i++) {
                        int ubo = nir_src_as_const_value(instr->src[0])->u32[0];

                        /* Adjust for where we stored the TGSI register base. */
                        vir_ADD_dest(c,
                                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                                     vir_uniform(c, QUNIFORM_UBO_ADDR, 1 + ubo),
                                     vir_ADD(c,
                                             ntq_get_src(c, instr->src[1], 0),
                                             vir_uniform_ui(c, i * 4)));

                        vir_emit_thrsw(c);

                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
                }
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                                   nir_intrinsic_ucp_id(instr) *
                                                   4 + i));
                }
                break;

        case nir_intrinsic_load_alpha_ref_float:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
                break;

        case nir_intrinsic_load_sample_mask_in:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
                break;

        case nir_intrinsic_load_front_face:
                /* The register contains 0 (front) or 1 (back), and we need to
                 * turn it into a NIR bool where true means front.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               vir_ADD(c,
                                       vir_uniform_ui(c, -1),
                                       vir_REVF(c)));
                break;

        case nir_intrinsic_load_instance_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
                break;

        case nir_intrinsic_load_vertex_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
                break;

        case nir_intrinsic_load_input:
                const_offset = nir_src_as_const_value(instr->src[0]);
                assert(const_offset && "v3d doesn't support indirect inputs");
                for (int i = 0; i < instr->num_components; i++) {
                        offset = nir_intrinsic_base(instr) + const_offset->u32[0];
                        int comp = nir_intrinsic_component(instr) + i;
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_MOV(c, c->inputs[offset * 4 + comp]));
                }
                break;

        case nir_intrinsic_store_output:
                const_offset = nir_src_as_const_value(instr->src[1]);
                assert(const_offset && "v3d doesn't support indirect outputs");
                offset = ((nir_intrinsic_base(instr) +
                           const_offset->u32[0]) * 4 +
                          nir_intrinsic_component(instr));

                for (int i = 0; i < instr->num_components; i++) {
                        c->outputs[offset + i] =
                                vir_MOV(c, ntq_get_src(c, instr->src[0], i));
                }
                c->num_outputs = MAX2(c->num_outputs,
                                      offset + instr->num_components);
                break;

        case nir_intrinsic_discard:
                if (c->execute.file != QFILE_NULL) {
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                        vir_uniform_ui(c, 0));
                }
                break;

        case nir_intrinsic_discard_if: {
                /* true (~0) if we're discarding */
                struct qreg cond = ntq_get_src(c, instr->src[0], 0);

                if (c->execute.file != QFILE_NULL) {
                        /* execute == 0 means the channel is active.  Invert
                         * the condition so that we can use zero as "executing
                         * and discarding."
                         */
                        vir_PF(c, vir_OR(c, c->execute, vir_NOT(c, cond)),
                               V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_PF(c, cond, V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFNA);
                }
                break;
        }

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

/* Clears (activates) the execute flags for any channels whose jump target
 * matches this block.
 */
static void
ntq_activate_execute_for_block(struct v3d_compile *c)
{
        vir_PF(c, vir_XOR(c, c->execute, vir_uniform_ui(c, c->cur_block->index)),
               V3D_QPU_PF_PUSHZ);

        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
}

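/* Control flow below is handled with the "execute" mask rather than real
 * per-channel branching: execute == 0 means a channel is active, and any
 * other value is the index of the block that channel is waiting to resume
 * at.  Uniform branches are only taken as a fast path when the flags show
 * that every channel agrees.  (Summary of the scheme used by ntq_emit_if()
 * and ntq_emit_loop() below.)
 */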
static void
ntq_emit_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_top_level = true;
        }

        /* Set A for executing (execute == 0) and jumping (if->condition ==
         * 0) channels, and then update execute flags for those to point to
         * the ELSE block.
         */
        vir_PF(c, vir_OR(c,
                         c->execute,
                         ntq_get_src(c, if_stmt->condition, 0)),
               V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA,
                     c->execute,
                     vir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
        vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block.  First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF
                 */
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                vir_PF(c, vir_XOR(c, c->execute,
                                  vir_uniform_ui(c, after_block->index)),
                       V3D_QPU_PF_PUSHZ);
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
                vir_link_blocks(c->cur_block, after_block);
                vir_link_blocks(c->cur_block, else_block);

                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
        if (was_top_level)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);
}

static void
ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
{
        switch (jump->type) {
        case nir_jump_break:
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_break_block->index));
                break;

        case nir_jump_continue:
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_cont_block->index));
                break;

        case nir_jump_return:
                unreachable("All returns should be lowered\n");
        }
}

static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                ntq_emit_jump(c, nir_instr_as_jump(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                exit(1);
        }
}

static void
ntq_emit_block(struct v3d_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}

static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
{
        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_top_level = true;
        }

        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = vir_new_block(c);
        c->loop_break_block = vir_new_block(c);

        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* Re-enable any previous continues now, so our ANYA check below
         * works.
         *
         * XXX: Use the .ORZ flags update, instead.
         */
        vir_PF(c, vir_XOR(c,
                          c->execute,
                          vir_uniform_ui(c, c->loop_cont_block->index)),
               V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));

        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);

        struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
        /* Pixels that were not dispatched or have been discarded should not
         * contribute to looping again.
         */
        branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_link_blocks(c->cur_block, c->loop_break_block);

        vir_set_emit_block(c, c->loop_break_block);
        if (was_top_level)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;
}

static void
ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}

static void
ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}

static void
nir_to_vir(struct v3d_compile *c)
{
        if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
                c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));

                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, NULL, 0);
                        c->point_y = emit_fragment_varying(c, NULL, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, NULL, 0);
                }
        }

        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}

const nir_shader_compiler_options v3d_nir_options = {
        .lower_all_io_to_temps = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_bitfield_insert_to_shifts = true,
        .lower_bitfield_extract_to_shifts = true,
        .lower_bitfield_reverse = true,
        .lower_bit_count = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_unpack_snorm_4x8 = true,
        .lower_find_lsb = true,
        .lower_flrp32 = true,
        .lower_fsqrt = true,
        .lower_ifind_msb = true,
        .lower_ldexp = true,
        .lower_mul_high = true,
        .native_integers = true,
};

static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_function(function, nir) {
                if (!function->impl)
                        continue;
                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr(instr, block)
                                count++;
                }
        }
        return count;
}

/**
 * When demoting a shader down to single-threaded, removes the THRSW
 * instructions (one will still be inserted at v3d_vir_to_qpu() for the
 * program end).
 */
static void
vir_remove_thrsw(struct v3d_compile *c)
{
        vir_for_each_block(block, c) {
                vir_for_each_inst_safe(inst, block) {
                        if (inst->qpu.sig.thrsw)
                                vir_remove_instruction(c, inst);
                }
        }

        c->last_thrsw = NULL;
}

static void
vir_emit_last_thrsw(struct v3d_compile *c)
{
        /* On V3D before 4.1, we need a TMU op to be outstanding when thread
         * switching, so disable threads if we didn't do any TMU ops (each of
         * which would have emitted a THRSW).
         */
        if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
                c->threads = 1;
                if (c->last_thrsw)
                        vir_remove_thrsw(c);
                return;
        }

        /* If we're threaded and the last THRSW was in conditional code, then
         * we need to emit another one so that we can flag it as the last
         * thrsw.
         */
        if (c->last_thrsw && !c->last_thrsw_at_top_level) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        /* If we're threaded, then we need to mark the last THRSW instruction
         * so we can emit a pair of them at QPU emit time.
         *
         * For V3D 4.x, we can spawn the non-fragment shaders already in the
         * post-last-THRSW state, so we can skip this.
         */
        if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        if (c->last_thrsw)
                c->last_thrsw->is_last_thrsw = true;
}

/* There's a flag in the shader for "center W is needed for reasons other than
 * non-centroid varyings", so we just walk the program after VIR optimization
 * to see if it's used.  It should be harmless to set even if we only use
 * center W for varyings.
 */
static void
vir_check_payload_w(struct v3d_compile *c)
{
        if (c->s->info.stage != MESA_SHADER_FRAGMENT)
                return;

        vir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_REG &&
                            inst->src[i].index == 0) {
                                c->uses_center_w = true;
                                return;
                        }
                }
        }
}

void
v3d_nir_to_vir(struct v3d_compile *c)
{
        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_vir(c);

        /* Emit the last THRSW before STVPM and TLB writes. */
        vir_emit_last_thrsw(c);

        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                emit_frag_end(c);
                break;
        case MESA_SHADER_VERTEX:
                emit_vert_end(c);
                break;
        default:
                unreachable("bad stage");
        }

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        vir_optimize(c);
        vir_lower_uniforms(c);

        vir_check_payload_w(c);

        /* XXX: vir_schedule_instructions(c); */

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        /* Attempt to allocate registers for the temporaries.  If we fail,
         * reduce thread count and try again.
         */
        int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
        struct qpu_reg *temp_registers;
        while (true) {
                bool spilled;
                temp_registers = v3d_register_allocate(c, &spilled);
                if (spilled)
                        continue;

                if (temp_registers)
                        break;

                if (c->threads == min_threads) {
                        fprintf(stderr, "Failed to register allocate at %d threads:\n",
                                c->threads);
                        vir_dump(c);
                        c->failed = true;
                        return;
                }

                c->threads /= 2;

                if (c->threads == 1)
                        vir_remove_thrsw(c);
        }

        v3d_vir_to_qpu(c, temp_registers);
}