/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"
static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
static void
resize_qreg_array(struct v3d_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
void
vir_emit_thrsw(struct v3d_compile *c)
{
        if (c->threads == 1)
                return;

        /* Always thread switch after each texture operation for now.
         *
         * We could do better by batching a bunch of texture fetches up and
         * then doing one thread switch and collecting all their results
         * afterward.
         */
        c->last_thrsw = vir_NOP(c);
        c->last_thrsw->qpu.sig.thrsw = true;
        c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
}
static struct qreg
vir_SFU(struct v3d_compile *c, int waddr, struct qreg src)
{
        vir_FMOV_dest(c, vir_reg(QFILE_MAGIC, waddr), src);
        return vir_FMOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4));
}
static struct qreg
indirect_uniform_load(struct v3d_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = nir_intrinsic_base(intr);
        struct v3d_ubo_range *range = NULL;
        unsigned i;

        for (i = 0; i < c->num_ubo_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }
        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(i != c->num_ubo_ranges);
        if (!c->ubo_range_used[i]) {
                c->ubo_range_used[i] = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
        }

        offset -= range->src_offset;

        if (range->dst_offset + offset != 0) {
                indirect_offset = vir_ADD(c, indirect_offset,
                                          vir_uniform_ui(c, range->dst_offset +
                                                         offset));
        }

        /* Adjust for where we stored the TGSI register base. */
        vir_ADD_dest(c,
                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                     vir_uniform(c, QUNIFORM_UBO_ADDR, 0),
                     indirect_offset);

        vir_emit_thrsw(c);
        return vir_LDTMU(c);
}
static struct qreg *
ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}
/**
 * This function is responsible for getting VIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value.  To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous VIR instruction in the block and rewritable by
 * this function.  That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination.
 */
void
ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        struct qinst *last_inst = NULL;
        if (!list_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        assert(result.file == QFILE_UNIF ||
               (result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* Insert a MOV if the source wasn't an SSA def in the
                 * previous instruction.
                 */
                if (result.file == QFILE_UNIF) {
                        result = vir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (c->execute.file != QFILE_NULL) {
                        last_inst->dst.index = qregs[chan].index;

                        /* Set the flags to the current exec mask.
                         */
                        c->cursor = vir_before_inst(last_inst);
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        c->cursor = vir_after_inst(last_inst);

                        vir_set_cond(last_inst, V3D_QPU_COND_IFA);
                        last_inst->cond_is_exec_mask = true;
                }
        }
}
struct qreg
ntq_get_src(struct v3d_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}
static struct qreg
ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}
static inline struct qreg
vir_SAT(struct v3d_compile *c, struct qreg val)
{
        return vir_FMAX(c,
                        vir_FMIN(c, val, vir_uniform_f(c, 1.0)),
                        vir_uniform_f(c, 0.0));
}
static struct qreg
ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
{
        return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
}
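
/* Quick arithmetic check of the line above: a 256-wide level 0 at LOD 3
 * gives 256 >> 3 = 32, and the MAX keeps small mips from underflowing to 0
 * (a 4-wide texture at LOD 4 still reports a size of 1).
 */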
static void
ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
        int dest_size = nir_tex_instr_dest_size(instr);

        struct qreg lod = c->undef;
        if (lod_index != -1)
                lod = ntq_get_src(c, instr->src[lod_index].src, 0);

        for (int i = 0; i < dest_size; i++) {
                enum quniform_contents contents;

                if (instr->is_array && i == dest_size - 1)
                        contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
                else
                        contents = QUNIFORM_TEXTURE_WIDTH + i;

                struct qreg size = vir_uniform(c, contents, unit);

                switch (instr->sampler_dim) {
                case GLSL_SAMPLER_DIM_1D:
                case GLSL_SAMPLER_DIM_2D:
                case GLSL_SAMPLER_DIM_3D:
                case GLSL_SAMPLER_DIM_CUBE:
                        /* Don't minify the array size. */
                        if (!(instr->is_array && i == dest_size - 1)) {
                                size = ntq_minify(c, size, lod);
                        }
                        break;

                case GLSL_SAMPLER_DIM_RECT:
                        /* There's no LOD field for rects */
                        break;

                default:
                        unreachable("Bad sampler type");
                }

                ntq_store_dest(c, &instr->dest, i, size);
        }
}
static void
ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        /* Since each texture sampling op requires uploading uniforms to
         * reference the texture, there's no HW support for texture size and
         * you just upload uniforms containing the size.
         */
        switch (instr->op) {
        case nir_texop_query_levels:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
                return;
        case nir_texop_txs:
                ntq_emit_txs(c, instr);
                return;
        default:
                break;
        }

        if (c->devinfo->ver >= 40)
                v3d40_vir_emit_tex(c, instr);
        else
                v3d33_vir_emit_tex(c, instr);
}
static struct qreg
ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
{
        struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
        if (is_cos)
                input = vir_FADD(c, input, vir_uniform_f(c, 0.5));

        struct qreg periods = vir_FROUND(c, input);
        struct qreg sin_output = vir_SFU(c, V3D_QPU_WADDR_SIN,
                                         vir_FSUB(c, input, periods));
        return vir_XOR(c, sin_output, vir_SHL(c,
                                              vir_FTOIN(c, periods),
                                              vir_uniform_ui(c, -1)));
}
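
/* How we read the sign trick above: SIN is fed an argument reduced to about
 * half a period around 0, and the XOR flips the result's sign bit for odd
 * period counts.  The SHL by -1 appears to rely on the QPU masking shift
 * counts to the low five bits, so it shifts the period count's low bit up
 * by 31 into the float sign-bit position.
 */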
static struct qreg
ntq_fsign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
        return vir_MOV(c, t);
}
static struct qreg
ntq_isign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_ui(c, 0));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_ui(c, 1));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_ui(c, -1));
        return vir_MOV(c, t);
}
static void
emit_fragcoord_input(struct v3d_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = vir_FXCD(c);
        c->inputs[attr * 4 + 1] = vir_FYCD(c);
        c->inputs[attr * 4 + 2] = c->payload_z;
        c->inputs[attr * 4 + 3] = vir_SFU(c, V3D_QPU_WADDR_RECIP,
                                          c->payload_w);
}
static struct qreg
emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
                      uint8_t swizzle)
{
        struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
        struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);

        struct qreg vary;
        if (c->devinfo->ver >= 41) {
                struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                                    c->undef, c->undef);
                ldvary->qpu.sig.ldvary = true;
                vary = vir_emit_def(c, ldvary);
        } else {
                vir_NOP(c)->qpu.sig.ldvary = true;
                vary = r3;
        }

        /* For gl_PointCoord input or distance along a line, we'll be called
         * with no nir_variable, and we don't count toward VPM size so we
         * don't track an input slot.
         */
        if (!var) {
                return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
        }

        int i = c->num_inputs++;
        c->input_slots[i] = v3d_slot_from_slot_and_component(var->data.location,
                                                             swizzle);

        switch (var->data.interpolation) {
        case INTERP_MODE_NONE:
                /* If a gl_FrontColor or gl_BackColor input has no interp
                 * qualifier, then if we're using glShadeModel(GL_FLAT) it
                 * needs to be flat shaded.
                 */
                switch (var->data.location) {
                case VARYING_SLOT_COL0:
                case VARYING_SLOT_COL1:
                case VARYING_SLOT_BFC0:
                case VARYING_SLOT_BFC1:
                        if (c->fs_key->shade_model_flat) {
                                BITSET_SET(c->flat_shade_flags, i);
                                vir_MOV_dest(c, c->undef, vary);
                                return vir_MOV(c, r5);
                        } else {
                                return vir_FADD(c, vir_FMUL(c, vary,
                                                            c->payload_w), r5);
                        }
                default:
                        break;
                }
                /* FALLTHROUGH */
        case INTERP_MODE_SMOOTH:
                if (var->data.centroid) {
                        BITSET_SET(c->centroid_flags, i);
                        return vir_FADD(c, vir_FMUL(c, vary,
                                                    c->payload_w_centroid), r5);
                } else {
                        return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
                }
        case INTERP_MODE_NOPERSPECTIVE:
                /* C appears after the mov from the varying.
                   XXX: improve ldvary setup.
                 */
                return vir_FADD(c, vir_MOV(c, vary), r5);
        case INTERP_MODE_FLAT:
                BITSET_SET(c->flat_shade_flags, i);
                vir_MOV_dest(c, c->undef, vary);
                return vir_MOV(c, r5);
        default:
                unreachable("Bad interp mode");
        }
}
static void
emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var)
{
        for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                int chan = var->data.location_frac + i;
                c->inputs[attr * 4 + chan] =
                        emit_fragment_varying(c, var, chan);
        }
}
static void
add_output(struct v3d_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct v3d_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset] =
                v3d_slot_from_slot_and_component(slot, swizzle);
}
static void
declare_uniform_range(struct v3d_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_ubo_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct v3d_ubo_range,
                                         c->ubo_ranges_array_size);
                c->ubo_range_used = reralloc(c, c->ubo_range_used,
                                             bool,
                                             c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_range_used[array_id] = false;
}
/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct v3d_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1;
        if (nir_op_infos[compare_instr->op].num_inputs > 1)
                src1 = ntq_get_alu_src(c, compare_instr, 1);
        bool cond_invert = false;

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_seq:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHZ);
                break;
        case nir_op_ieq:
                vir_PF(c, vir_XOR(c, src0, src1), V3D_QPU_PF_PUSHZ);
                break;

        case nir_op_fne:
        case nir_op_sne:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;
        case nir_op_ine:
                vir_PF(c, vir_XOR(c, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_fge:
        case nir_op_sge:
                vir_PF(c, vir_FCMP(c, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ige:
                vir_PF(c, vir_MIN(c, src1, src0), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;
        case nir_op_uge:
                vir_PF(c, vir_SUB(c, src0, src1), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;

        case nir_op_slt:
        case nir_op_flt:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHN);
                break;
        case nir_op_ilt:
                vir_PF(c, vir_MIN(c, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ult:
                vir_PF(c, vir_SUB(c, src0, src1), V3D_QPU_PF_PUSHC);
                break;

        default:
                return false;
        }

        enum v3d_qpu_cond cond = (cond_invert ?
                                  V3D_QPU_COND_IFNA :
                                  V3D_QPU_COND_IFA);

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = vir_SEL(c, cond,
                                vir_uniform_f(c, 1.0), vir_uniform_f(c, 0.0));
                break;

        case nir_op_bcsel:
                *dest = vir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = vir_SEL(c, cond,
                                vir_uniform_ui(c, ~0), vir_uniform_ui(c, 0));
                break;
        }

        /* Make the temporary for nir_store_dest(). */
        *dest = vir_MOV(c, *dest);

        return true;
}
/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg
ntq_emit_bcsel(struct v3d_compile *c, nir_alu_instr *instr,
               struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
        return vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA, src[1], src[2]));
}
static void
ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
{
        /* This should always be lowered to ALU operations for V3D. */
        assert(!instr->dest.saturate);

        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        ntq_store_dest(c, &instr->dest.dest, i,
                                       vir_MOV(c, srcs[i]));
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        struct qreg result;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                result = vir_MOV(c, src[0]);
                break;

        case nir_op_fneg:
                result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
                break;
        case nir_op_ineg:
                result = vir_NEG(c, src[0]);
                break;

        case nir_op_fmul:
                result = vir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                result = vir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                result = vir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                result = vir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                result = vir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i32:
                result = vir_FTOIZ(c, src[0]);
                break;
        case nir_op_f2u32:
                result = vir_FTOUZ(c, src[0]);
                break;
        case nir_op_i2f32:
                result = vir_ITOF(c, src[0]);
                break;
        case nir_op_u2f32:
                result = vir_UTOF(c, src[0]);
                break;
        case nir_op_b2f:
                result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        case nir_op_iadd:
                result = vir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                result = vir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                result = vir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                result = vir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                result = vir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                result = vir_MIN(c, src[0], src[1]);
                break;
        case nir_op_umin:
                result = vir_UMIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                result = vir_MAX(c, src[0], src[1]);
                break;
        case nir_op_umax:
                result = vir_UMAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                result = vir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                result = vir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                result = vir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                result = vir_NOT(c, src[0]);
                break;

        case nir_op_ufind_msb:
                result = vir_SUB(c, vir_uniform_ui(c, 31), vir_CLZ(c, src[0]));
                break;

        case nir_op_imul:
                result = vir_UMUL(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
        case nir_op_ult:
                if (!ntq_emit_comparison(c, &result, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;

        case nir_op_bcsel:
                result = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            src[1], src[2]));
                break;

        case nir_op_frcp:
                result = vir_SFU(c, V3D_QPU_WADDR_RECIP, src[0]);
                break;
        case nir_op_frsq:
                result = vir_SFU(c, V3D_QPU_WADDR_RSQRT, src[0]);
                break;
        case nir_op_fexp2:
                result = vir_SFU(c, V3D_QPU_WADDR_EXP, src[0]);
                break;
        case nir_op_flog2:
                result = vir_SFU(c, V3D_QPU_WADDR_LOG, src[0]);
                break;

        case nir_op_fceil:
                result = vir_FCEIL(c, src[0]);
                break;
        case nir_op_ffloor:
                result = vir_FFLOOR(c, src[0]);
                break;
        case nir_op_fround_even:
                result = vir_FROUND(c, src[0]);
                break;
        case nir_op_ftrunc:
                result = vir_FTRUNC(c, src[0]);
                break;
        case nir_op_ffract:
                result = vir_FSUB(c, src[0], vir_FFLOOR(c, src[0]));
                break;

        case nir_op_fsin:
                result = ntq_fsincos(c, src[0], false);
                break;
        case nir_op_fcos:
                result = ntq_fsincos(c, src[0], true);
                break;

        case nir_op_fsign:
                result = ntq_fsign(c, src[0]);
                break;
        case nir_op_isign:
                result = ntq_isign(c, src[0]);
                break;

        case nir_op_fabs: {
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
                break;
        }

        case nir_op_iabs:
                result = vir_MAX(c, src[0],
                                 vir_SUB(c, vir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_fddx:
        case nir_op_fddx_coarse:
        case nir_op_fddx_fine:
                result = vir_FDX(c, src[0]);
                break;

        case nir_op_fddy:
        case nir_op_fddy_coarse:
        case nir_op_fddy_fine:
                result = vir_FDY(c, src[0]);
                break;

        case nir_op_uadd_carry:
                vir_PF(c, vir_ADD(c, src[0], src[1]), V3D_QPU_PF_PUSHC);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }

        /* We have a scalar result, so the instruction should only have a
         * single channel written to.
         */
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        ntq_store_dest(c, &instr->dest.dest,
                       ffs(instr->dest.write_mask) - 1, result);
}
/* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
 * specifier.  They come from a register that's preloaded with 0xffffffff
 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
 */
#define TLB_TYPE_F16_COLOR         (3 << 6)
#define TLB_TYPE_I32_COLOR         (1 << 6)
#define TLB_TYPE_F32_COLOR         (0 << 6)
#define TLB_RENDER_TARGET_SHIFT    3 /* Reversed!  7 = RT 0, 0 = RT 7. */
#define TLB_SAMPLE_MODE_PER_SAMPLE (0 << 2)
#define TLB_SAMPLE_MODE_PER_PIXEL  (1 << 2)
#define TLB_F16_SWAP_HI_LO         (1 << 1)
#define TLB_VEC_SIZE_4_F16         (1 << 0)
#define TLB_VEC_SIZE_2_F16         (0 << 0)
#define TLB_VEC_SIZE_MINUS_1_SHIFT 0

/* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
 * flag is set.
 */
#define TLB_TYPE_DEPTH             ((2 << 6) | (0 << 4))
#define TLB_DEPTH_TYPE_INVARIANT   (0 << 2) /* Unmodified sideband input used */
#define TLB_DEPTH_TYPE_PER_PIXEL   (1 << 2) /* QPU result used */

/* Stencil is a single 32-bit write. */
#define TLB_TYPE_STENCIL_ALPHA     ((2 << 6) | (1 << 4))
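
/* Worked example of the encoding (our arithmetic, using the fields above):
 * a vec4 f16 write to RT0 is TLB_TYPE_F16_COLOR (0xc0) |
 * TLB_SAMPLE_MODE_PER_PIXEL (0x04) | ((7 - 0) << TLB_RENDER_TARGET_SHIFT)
 * (0x38) | TLB_F16_SWAP_HI_LO (0x02) | TLB_VEC_SIZE_4_F16 (0x01) = 0xff,
 * which is why the all-ones reset value of the specifier register already
 * encodes that common case.
 */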
static void
emit_frag_end(struct v3d_compile *c)
{
        /* XXX
        if (c->output_sample_mask_index != -1) {
                vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }
        */

        bool has_any_tlb_color_write = false;
        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (c->output_color_var[rt])
                        has_any_tlb_color_write = true;
        }

        if (c->fs_key->sample_alpha_to_coverage && c->output_color_var[0]) {
                struct nir_variable *var = c->output_color_var[0];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];

                vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                vir_AND(c,
                                        vir_MSF(c),
                                        vir_FTOC(c, color[3])));
        }

        if (c->output_position_index != -1) {
                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  c->outputs[c->output_position_index]);

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c,
                                       TLB_TYPE_DEPTH |
                                       TLB_DEPTH_TYPE_PER_PIXEL |
                                       0xffffff00);
        } else if (c->s->info.fs.uses_discard ||
                   c->fs_key->sample_alpha_to_coverage ||
                   !has_any_tlb_color_write) {
                /* Emit passthrough Z if it needed to be delayed until shader
                 * end due to potential discards.
                 *
                 * Since (single-threaded) fragment shaders always need a TLB
                 * write, emit passthrough Z if we didn't have any color
                 * buffers and flag us as potentially discarding, so that we
                 * can use Z as the TLB write.
                 */
                c->s->info.fs.uses_discard = true;

                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  vir_reg(QFILE_NULL, 0));

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c,
                                       TLB_TYPE_DEPTH |
                                       TLB_DEPTH_TYPE_INVARIANT |
                                       0xffffff00);
        }

        /* XXX: Performance improvement: Merge Z write and color writes TLB
         * uniform setup
         */

        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (!c->output_color_var[rt])
                        continue;

                nir_variable *var = c->output_color_var[rt];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];
                int num_components = glsl_get_vector_elements(var->type);
                uint32_t conf = 0xffffff00;
                struct qinst *inst;

                conf |= TLB_SAMPLE_MODE_PER_PIXEL;
                conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;

                if (c->fs_key->swap_color_rb & (1 << rt))
                        num_components = MAX2(num_components, 3);

                assert(num_components != 0);
                switch (glsl_get_base_type(var->type)) {
                case GLSL_TYPE_UINT:
                case GLSL_TYPE_INT:
                        /* The F32 vs I32 distinction was dropped in 4.2. */
                        if (c->devinfo->ver < 42)
                                conf |= TLB_TYPE_I32_COLOR;
                        else
                                conf |= TLB_TYPE_F32_COLOR;
                        conf |= ((num_components - 1) <<
                                 TLB_VEC_SIZE_MINUS_1_SHIFT);

                        inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), color[0]);
                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                vir_uniform_ui(c, conf);

                        for (int i = 1; i < num_components; i++) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLB, 0),
                                                    color[i]);
                        }
                        break;

                default: {
                        struct qreg r = color[0];
                        struct qreg g = color[1];
                        struct qreg b = color[2];
                        struct qreg a = color[3];

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                conf |= TLB_TYPE_F32_COLOR;
                                conf |= ((num_components - 1) <<
                                         TLB_VEC_SIZE_MINUS_1_SHIFT);
                        } else {
                                conf |= TLB_TYPE_F16_COLOR;
                                conf |= TLB_F16_SWAP_HI_LO;
                                if (num_components >= 3)
                                        conf |= TLB_VEC_SIZE_4_F16;
                                else
                                        conf |= TLB_VEC_SIZE_2_F16;
                        }

                        if (c->fs_key->swap_color_rb & (1 << rt)) {
                                r = color[2];
                                b = color[0];
                        }

                        if (c->fs_key->sample_alpha_to_one)
                                a = vir_uniform_f(c, 1.0);

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), r);
                                inst->src[vir_get_implicit_uniform_src(inst)] =
                                        vir_uniform_ui(c, conf);

                                if (num_components >= 2)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), g);
                                if (num_components >= 3)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), b);
                                if (num_components >= 4)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), a);
                        } else {
                                inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), r, g);
                                if (conf != ~0) {
                                        inst->dst.file = QFILE_TLBU;
                                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                                vir_uniform_ui(c, conf);
                                }

                                if (num_components >= 3)
                                        inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), b, a);
                        }
                        break;
                }
                }
        }
}
static void
vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t *vpm_index)
{
        if (c->devinfo->ver >= 40) {
                vir_STVPMV(c, vir_uniform_ui(c, *vpm_index), val);
                *vpm_index = *vpm_index + 1;
        } else {
                vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
        }

        c->num_vpm_writes++;
}
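
/* Design note (as we read the two paths above): V3D 4.x's STVPMV takes an
 * explicit VPM row index, so the caller-visible *vpm_index is advanced in
 * software, while the 3.3 magic-register write walks the VPM according to
 * the previously emitted write setup, so the index is left untouched there.
 */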
static void
emit_scaled_viewport_write(struct v3d_compile *c, struct qreg rcp_w,
                           uint32_t *vpm_index)
{
        for (int i = 0; i < 2; i++) {
                struct qreg coord = c->outputs[c->output_position_index + i];
                coord = vir_FMUL(c, coord,
                                 vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i,
                                             0));
                coord = vir_FMUL(c, coord, rcp_w);
                vir_VPM_WRITE(c, vir_FTOIN(c, coord), vpm_index);
        }
}
static void
emit_zs_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
{
        struct qreg zscale = vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        struct qreg z = c->outputs[c->output_position_index + 2];
        z = vir_FMUL(c, z, zscale);
        z = vir_FMUL(c, z, rcp_w);
        z = vir_FADD(c, z, zoffset);
        vir_VPM_WRITE(c, z, vpm_index);
}
static void
emit_rcp_wc_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
{
        vir_VPM_WRITE(c, rcp_w, vpm_index);
}
static void
emit_point_size_write(struct v3d_compile *c, uint32_t *vpm_index)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = vir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = vir_FMAX(c, point_size, vir_uniform_f(c, .125));

        vir_VPM_WRITE(c, point_size, vpm_index);
}
static void
emit_vpm_write_setup(struct v3d_compile *c)
{
        if (c->devinfo->ver >= 40)
                return;

        v3d33_vir_vpm_write_setup(c);
}
/**
 * Sets up c->outputs[c->output_position_index] for the vertex shader
 * epilogue, if an output vertex position wasn't specified in the user's
 * shader.  This may be the case for transform feedback with rasterizer
 * discard enabled.
 */
static void
setup_default_position(struct v3d_compile *c)
{
        if (c->output_position_index != -1)
                return;

        c->output_position_index = c->outputs_array_size;
        for (int i = 0; i < 4; i++) {
                add_output(c,
                           c->output_position_index + i,
                           VARYING_SLOT_POS, i);
        }
}
static void
emit_vert_end(struct v3d_compile *c)
{
        setup_default_position(c);

        uint32_t vpm_index = 0;
        struct qreg rcp_w = vir_SFU(c, V3D_QPU_WADDR_RECIP,
                                    c->outputs[c->output_position_index + 3]);

        emit_vpm_write_setup(c);

        if (c->vs_key->is_coord) {
                for (int i = 0; i < 4; i++)
                        vir_VPM_WRITE(c, c->outputs[c->output_position_index + i],
                                      &vpm_index);
                emit_scaled_viewport_write(c, rcp_w, &vpm_index);
                if (c->vs_key->per_vertex_point_size) {
                        emit_point_size_write(c, &vpm_index);
                        /* emit_rcp_wc_write(c, rcp_w); */
                }
                /* XXX: Z-only rendering */
                if (0)
                        emit_zs_write(c, rcp_w, &vpm_index);
        } else {
                emit_scaled_viewport_write(c, rcp_w, &vpm_index);
                emit_zs_write(c, rcp_w, &vpm_index);
                emit_rcp_wc_write(c, rcp_w, &vpm_index);
                if (c->vs_key->per_vertex_point_size)
                        emit_point_size_write(c, &vpm_index);
        }

        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                struct v3d_varying_slot input = c->vs_key->fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct v3d_varying_slot output = c->output_slots[j];

                        if (!memcmp(&input, &output, sizeof(input))) {
                                vir_VPM_WRITE(c, c->outputs[j],
                                              &vpm_index);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        vir_VPM_WRITE(c, vir_uniform_f(c, 0.0),
                                      &vpm_index);
        }

        /* GFXH-1684: VPM writes need to be complete by the end of the shader.
         */
        if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
                vir_VPMWT(c);
}
void
v3d_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);

        NIR_PASS(progress, s, nir_opt_move_load_ubo);
}
static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}
static struct qreg
ntq_emit_vpm_read(struct v3d_compile *c,
                  uint32_t *num_components_queued,
                  uint32_t *remaining,
                  uint32_t vpm_index)
{
        struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);

        if (c->devinfo->ver >= 40 ) {
                return vir_LDVPMV_IN(c,
                                     vir_uniform_ui(c,
                                                    (*num_components_queued)++));
        }

        if (*num_components_queued != 0) {
                (*num_components_queued)--;
                c->num_inputs++;
                return vir_MOV(c, vpm);
        }

        uint32_t num_components = MIN2(*remaining, 32);

        v3d33_vir_vpm_read_setup(c, num_components);

        *num_components_queued = num_components - 1;
        *remaining -= num_components;
        c->num_inputs++;

        return vir_MOV(c, vpm);
}
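
/* Our reading of the 3.3 path above: a read setup covers at most 32
 * components, each MOV from the VPM register file then consumes one queued
 * component, and *num_components_queued tracks how many the current setup
 * still has outstanding before a new setup must be programmed.
 */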
static void
ntq_setup_inputs(struct v3d_compile *c)
{
        unsigned num_entries = 0;
        unsigned num_components = 0;
        nir_foreach_variable(var, &c->s->inputs) {
                num_entries++;
                num_components += glsl_get_components(var->type);
        }

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        uint32_t vpm_components_queued = 0;
        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                bool uses_iid = c->s->info.system_values_read &
                        (1ull << SYSTEM_VALUE_INSTANCE_ID);
                bool uses_vid = c->s->info.system_values_read &
                        (1ull << SYSTEM_VALUE_VERTEX_ID);

                num_components += uses_iid;
                num_components += uses_vid;

                if (uses_iid) {
                        c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                                   &num_components, ~0);
                }

                if (uses_vid) {
                        c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                                   &num_components, ~0);
                }
        }

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_PNTC ||
                                   (var->data.location >= VARYING_SLOT_VAR0 &&
                                    (c->fs_key->point_sprite_mask &
                                     (1 << (var->data.location -
                                            VARYING_SLOT_VAR0))))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var);
                        }
                } else {
                        int var_components = glsl_get_components(var->type);

                        for (int i = 0; i < var_components; i++) {
                                c->inputs[loc * 4 + i] =
                                        ntq_emit_vpm_read(c,
                                                          &vpm_components_queued,
                                                          &num_components,
                                                          loc * 4 + i);
                        }
                        c->vattr_sizes[loc] = var_components;
                }
        }

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                if (c->devinfo->ver >= 40) {
                        assert(vpm_components_queued == num_components);
                } else {
                        assert(vpm_components_queued == 0);
                        assert(num_components == 0);
                }
        }
}
static void
ntq_setup_outputs(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4; i++) {
                        add_output(c, loc + var->data.location_frac + i,
                                   var->data.location,
                                   var->data.location_frac + i);
                }

                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                                c->output_color_var[0] = var;
                                c->output_color_var[1] = var;
                                c->output_color_var[2] = var;
                                c->output_color_var[3] = var;
                                break;
                        case FRAG_RESULT_DATA0:
                        case FRAG_RESULT_DATA1:
                        case FRAG_RESULT_DATA2:
                        case FRAG_RESULT_DATA3:
                                c->output_color_var[var->data.location -
                                                    FRAG_RESULT_DATA0] = var;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}
static void
ntq_setup_uniforms(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                uint32_t vec4_count = glsl_count_attribute_slots(var->type,
                                                                 false);
                unsigned vec4_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * vec4_size,
                                      vec4_count * vec4_size);
        }
}
/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = vir_get_temp(c);
        }
}
static void
ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
static void
ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* VIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers().
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, 0);
}
static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        nir_const_value *const_offset;
        unsigned offset;

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                if (const_offset) {
                        offset = nir_intrinsic_base(instr) + const_offset->u32[0];
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        ntq_store_dest(c, &instr->dest, 0,
                                       vir_uniform(c, QUNIFORM_UNIFORM,
                                                   offset));
                } else {
                        ntq_store_dest(c, &instr->dest, 0,
                                       indirect_uniform_load(c, instr));
                }
                break;

        case nir_intrinsic_load_ubo:
                for (int i = 0; i < instr->num_components; i++) {
                        int ubo = nir_src_as_const_value(instr->src[0])->u32[0];

                        /* Adjust for where we stored the TGSI register base. */
                        vir_ADD_dest(c,
                                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                                     vir_uniform(c, QUNIFORM_UBO_ADDR, 1 + ubo),
                                     vir_ADD(c,
                                             ntq_get_src(c, instr->src[1], 0),
                                             vir_uniform_ui(c, i * 4)));

                        vir_emit_thrsw(c);

                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
                }
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                                   nir_intrinsic_ucp_id(instr) *
                                                   4 + i));
                }
                break;

        case nir_intrinsic_load_alpha_ref_float:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
                break;

        case nir_intrinsic_load_sample_mask_in:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
                break;

        case nir_intrinsic_load_front_face:
                /* The register contains 0 (front) or 1 (back), and we need to
                 * turn it into a NIR bool where true means front.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               vir_ADD(c,
                                       vir_uniform_ui(c, -1),
                                       vir_REVF(c)));
                break;

        case nir_intrinsic_load_instance_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
                break;

        case nir_intrinsic_load_vertex_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
                break;

        case nir_intrinsic_load_input:
                const_offset = nir_src_as_const_value(instr->src[0]);
                assert(const_offset && "v3d doesn't support indirect inputs");
                for (int i = 0; i < instr->num_components; i++) {
                        offset = nir_intrinsic_base(instr) + const_offset->u32[0];
                        int comp = nir_intrinsic_component(instr) + i;
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_MOV(c, c->inputs[offset * 4 + comp]));
                }
                break;

        case nir_intrinsic_store_output:
                const_offset = nir_src_as_const_value(instr->src[1]);
                assert(const_offset && "v3d doesn't support indirect outputs");
                offset = ((nir_intrinsic_base(instr) +
                           const_offset->u32[0]) * 4 +
                          nir_intrinsic_component(instr));

                for (int i = 0; i < instr->num_components; i++) {
                        c->outputs[offset + i] =
                                vir_MOV(c, ntq_get_src(c, instr->src[0], i));
                }
                c->num_outputs = MAX2(c->num_outputs,
                                      offset + instr->num_components);
                break;

        case nir_intrinsic_discard:
                if (c->execute.file != QFILE_NULL) {
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                        vir_uniform_ui(c, 0));
                }
                break;

        case nir_intrinsic_discard_if: {
                /* true (~0) if we're discarding */
                struct qreg cond = ntq_get_src(c, instr->src[0], 0);

                if (c->execute.file != QFILE_NULL) {
                        /* execute == 0 means the channel is active.  Invert
                         * the condition so that we can use zero as "executing
                         * and discarding."
                         */
                        vir_PF(c, vir_OR(c, c->execute, vir_NOT(c, cond)),
                               V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_PF(c, cond, V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFNA);
                }

                break;
        }

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}
/* Clears (activates) the execute flags for any channels whose jump target
 * matches this block.
 */
static void
ntq_activate_execute_for_block(struct v3d_compile *c)
{
        vir_PF(c, vir_XOR(c, c->execute, vir_uniform_ui(c, c->cur_block->index)),
               V3D_QPU_PF_PUSHZ);

        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
}
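
/* Example of the execute-mask convention used throughout the control-flow
 * code: execute == 0 means the channel is running, and a non-zero value is
 * the index of the block where the channel wants to resume.  A channel that
 * took "break" parks at loop_break_block->index until the XOR/PUSHZ above
 * matches it at the top of that block and zeroes it back to active.
 */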
static void
ntq_emit_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_top_level = true;
        }

        /* Set A for executing (execute == 0) and jumping (if->condition ==
         * 0) channels, and then update execute flags for those to point to
         * the ELSE block.
         */
        vir_PF(c, vir_OR(c,
                         c->execute,
                         ntq_get_src(c, if_stmt->condition, 0)),
               V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA,
                     c->execute,
                     vir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
        vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block.  First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF
                 */
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                vir_PF(c, vir_XOR(c, c->execute,
                                  vir_uniform_ui(c, after_block->index)),
                       V3D_QPU_PF_PUSHZ);
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
                vir_link_blocks(c->cur_block, after_block);
                vir_link_blocks(c->cur_block, else_block);

                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
        if (was_top_level)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);
}
static void
ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
{
        switch (jump->type) {
        case nir_jump_break:
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_break_block->index));
                break;

        case nir_jump_continue:
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_cont_block->index));
                break;

        case nir_jump_return:
                unreachable("All returns should be lowered\n");
        }
}
static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                ntq_emit_jump(c, nir_instr_as_jump(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
ntq_emit_block(struct v3d_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}
static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
static void
ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
{
        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_top_level = true;
        }

        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = vir_new_block(c);
        c->loop_break_block = vir_new_block(c);

        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* Re-enable any previous continues now, so our ANYA check below
         * won't block.
         *
         * XXX: Use the .ORZ flags update, instead.
         */
        vir_PF(c, vir_XOR(c,
                          c->execute,
                          vir_uniform_ui(c, c->loop_cont_block->index)),
               V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));

        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);

        struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
        /* Pixels that were not dispatched or have been discarded should not
         * contribute to looping again.
         */
        branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_link_blocks(c->cur_block, c->loop_break_block);

        vir_set_emit_block(c, c->loop_break_block);
        if (was_top_level)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;
}
static void
ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}
static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}
static void
ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}
static void
nir_to_vir(struct v3d_compile *c)
{
        if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
                c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));

                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, NULL, 0);
                        c->point_y = emit_fragment_varying(c, NULL, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, NULL, 0);
                }
        }

        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}
const nir_shader_compiler_options v3d_nir_options = {
        .lower_all_io_to_temps = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,

        .lower_bitfield_insert_to_shifts = true,
        .lower_bitfield_extract_to_shifts = true,
        .lower_bitfield_reverse = true,
        .lower_bit_count = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_unpack_snorm_4x8 = true,

        .lower_find_lsb = true,

        .lower_flrp32 = true,

        .lower_fsqrt = true,
        .lower_ifind_msb = true,
        .lower_ldexp = true,
        .lower_mul_high = true,
        .native_integers = true,
};
static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_function(function, nir) {
                if (!function->impl)
                        continue;
                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr(instr, block)
                                count++;
                }
        }

        return count;
}
/**
 * When demoting a shader down to single-threaded, removes the THRSW
 * instructions (one will still be inserted at v3d_vir_to_qpu() for the
 * program end).
 */
static void
vir_remove_thrsw(struct v3d_compile *c)
{
        vir_for_each_block(block, c) {
                vir_for_each_inst_safe(inst, block) {
                        if (inst->qpu.sig.thrsw)
                                vir_remove_instruction(c, inst);
                }
        }

        c->last_thrsw = NULL;
}
static void
vir_emit_last_thrsw(struct v3d_compile *c)
{
        /* On V3D before 4.1, we need a TMU op to be outstanding when thread
         * switching, so disable threads if we didn't do any TMU ops (each of
         * which would have emitted a THRSW).
         */
        if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
                c->threads = 1;
                if (c->last_thrsw)
                        vir_remove_thrsw(c);
                return;
        }

        /* If we're threaded and the last THRSW was in conditional code, then
         * we need to emit another one so that we can flag it as the last
         * thrsw.
         */
        if (c->last_thrsw && !c->last_thrsw_at_top_level) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        /* If we're threaded, then we need to mark the last THRSW instruction
         * so we can emit a pair of them at QPU emit time.
         *
         * For V3D 4.x, we can spawn the non-fragment shaders already in the
         * post-last-THRSW state, so we can skip this.
         */
        if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        if (c->last_thrsw)
                c->last_thrsw->is_last_thrsw = true;
}
/* There's a flag in the shader for "center W is needed for reasons other than
 * non-centroid varyings", so we just walk the program after VIR optimization
 * to see if it's used.  It should be harmless to set even if we only use
 * center W for varyings.
 */
static void
vir_check_payload_w(struct v3d_compile *c)
{
        if (c->s->info.stage != MESA_SHADER_FRAGMENT)
                return;

        vir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_REG &&
                            inst->src[i].index == 0) {
                                c->uses_center_w = true;
                                return;
                        }
                }
        }
}
void
v3d_nir_to_vir(struct v3d_compile *c)
{
        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_vir(c);

        /* Emit the last THRSW before STVPM and TLB writes. */
        vir_emit_last_thrsw(c);

        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                emit_frag_end(c);
                break;
        case MESA_SHADER_VERTEX:
                emit_vert_end(c);
                break;
        default:
                unreachable("bad stage");
        }

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        vir_optimize(c);
        vir_lower_uniforms(c);

        vir_check_payload_w(c);

        /* XXX: vir_schedule_instructions(c); */

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        /* Attempt to allocate registers for the temporaries.  If we fail,
         * reduce thread count and try again.
         */
        int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
        struct qpu_reg *temp_registers;
        while (true) {
                bool spilled;
                temp_registers = v3d_register_allocate(c, &spilled);

                if (spilled)
                        continue;

                if (temp_registers)
                        break;

                if (c->threads == min_threads) {
                        fprintf(stderr, "Failed to register allocate at %d threads:\n",
                                c->threads);
                        vir_dump(c);
                        c->failed = true;
                        return;
                }

                c->threads /= 2;

                if (c->threads == 1)
                        vir_remove_thrsw(c);
        }

        v3d_vir_to_qpu(c, temp_registers);
}