/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"
static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
static void
resize_qreg_array(struct v3d_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
static void
vir_emit_thrsw(struct v3d_compile *c)
{
        if (c->threads == 1)
                return;

        /* Always thread switch after each texture operation for now.
         *
         * We could do better by batching a bunch of texture fetches up and
         * then doing one thread switch and collecting all their results
         * afterward.
         */
        c->last_thrsw = vir_NOP(c);
        c->last_thrsw->qpu.sig.thrsw = true;
        c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
}
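/* Loads a uniform at a non-constant offset: the driver-declared UBO range
 * containing the offset is remapped into the uniform stream, the remapped
 * base is folded into the indirect offset, and the value is then fetched
 * through the TMU (TMUA write, thread switch, LDTMU).
 */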
static struct qreg
indirect_uniform_load(struct v3d_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = nir_intrinsic_base(intr);
        struct v3d_ubo_range *range = NULL;
        unsigned i;

        for (i = 0; i < c->num_ubo_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }

        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(i != c->num_ubo_ranges);
        if (!c->ubo_range_used[i]) {
                c->ubo_range_used[i] = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
        }

        offset -= range->src_offset;

        if (range->dst_offset + offset != 0) {
                indirect_offset = vir_ADD(c, indirect_offset,
                                          vir_uniform_ui(c, range->dst_offset +
                                                         offset));
        }

        /* Adjust for where we stored the TGSI register base. */
        vir_ADD_dest(c,
                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                     vir_uniform(c, QUNIFORM_UBO_ADDR, 0),
                     indirect_offset);

        vir_emit_thrsw(c);
        return vir_LDTMU(c);
}
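/* Allocates one qreg per component for a NIR SSA def and registers the array
 * in the def_ht hash table, so later ntq_get_src() lookups can find it.
 */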
static struct qreg *
ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}
/**
 * This function is responsible for getting VIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value.  To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous VIR instruction in the block and rewritable by
 * this function.  That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination.
 */
static void
ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        struct qinst *last_inst = NULL;
        if (!list_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        assert(result.file == QFILE_UNIF ||
               (result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* Insert a MOV if the source wasn't an SSA def in the
                 * previous instruction.
                 */
                if (result.file == QFILE_UNIF) {
                        result = vir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (c->execute.file != QFILE_NULL) {
                        last_inst->dst.index = qregs[chan].index;

                        /* Set the flags to the current exec mask.
                         */
                        c->cursor = vir_before_inst(last_inst);
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        c->cursor = vir_after_inst(last_inst);

                        vir_set_cond(last_inst, V3D_QPU_COND_IFA);
                        last_inst->cond_is_exec_mask = true;
                }
        }
}
static struct qreg
ntq_get_src(struct v3d_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}
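/* Returns the VIR temp for one scalar source of a scalarized NIR ALU
 * instruction, applying the source's swizzle for the single channel the
 * writemask selects.
 */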
static struct qreg
ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}
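/* Computes MAX(size >> level, 1): the dimension of the requested mip level,
 * clamped so a fully-minified texture still reports a size of 1.
 */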
static struct qreg
ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
{
        return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
}
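/* Emits a texture-size (txs) query: each component is read from a
 * QUNIFORM_TEXTURE_WIDTH + i uniform and minified by the LOD argument,
 * except for the array-size component, which is never minified.
 */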
static void
ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
        int dest_size = nir_tex_instr_dest_size(instr);

        struct qreg lod = c->undef;
        if (lod_index != -1)
                lod = ntq_get_src(c, instr->src[lod_index].src, 0);

        for (int i = 0; i < dest_size; i++) {
                enum quniform_contents contents;

                if (instr->is_array && i == dest_size - 1)
                        contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
                else
                        contents = QUNIFORM_TEXTURE_WIDTH + i;

                struct qreg size = vir_uniform(c, contents, unit);

                switch (instr->sampler_dim) {
                case GLSL_SAMPLER_DIM_1D:
                case GLSL_SAMPLER_DIM_2D:
                case GLSL_SAMPLER_DIM_MS:
                case GLSL_SAMPLER_DIM_3D:
                case GLSL_SAMPLER_DIM_CUBE:
                        /* Don't minify the array size. */
                        if (!(instr->is_array && i == dest_size - 1)) {
                                size = ntq_minify(c, size, lod);
                        }
                        break;

                case GLSL_SAMPLER_DIM_RECT:
                        /* There's no LOD field for rects */
                        break;

                default:
                        unreachable("Bad sampler type");
                }

                ntq_store_dest(c, &instr->dest, i, size);
        }
}
static void
ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        /* Since each texture sampling op requires uploading uniforms to
         * reference the texture, there's no HW support for texture size and
         * you just upload uniforms containing the size.
         */
        switch (instr->op) {
        case nir_texop_query_levels:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
                return;
        case nir_texop_txs:
                ntq_emit_txs(c, instr);
                return;
        default:
                break;
        }

        if (c->devinfo->ver >= 40)
                v3d40_vir_emit_tex(c, instr);
        else
                v3d33_vir_emit_tex(c, instr);
}
static struct qreg
ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
{
        struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
        if (is_cos)
                input = vir_FADD(c, input, vir_uniform_f(c, 0.5));

        struct qreg periods = vir_FROUND(c, input);
        struct qreg sin_output = vir_SIN(c, vir_FSUB(c, input, periods));
        return vir_XOR(c, sin_output, vir_SHL(c,
                                              vir_FTOIN(c, periods),
                                              vir_uniform_ui(c, -1)));
}
static struct qreg
ntq_fsign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
        return vir_MOV(c, t);
}
static struct qreg
ntq_isign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_ui(c, 0));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_ui(c, 1));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_ui(c, -1));
        return vir_MOV(c, t);
}
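/* Fills in the four gl_FragCoord channels: X/Y from the fragment coordinate
 * ALU ops, Z straight from the payload, and 1/W computed from the payload W.
 */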
static void
emit_fragcoord_input(struct v3d_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = vir_FXCD(c);
        c->inputs[attr * 4 + 1] = vir_FYCD(c);
        c->inputs[attr * 4 + 2] = c->payload_z;
        c->inputs[attr * 4 + 3] = vir_RECIP(c, c->payload_w);
}
static struct qreg
emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
                      uint8_t swizzle)
{
        struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
        struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);

        struct qreg vary;
        if (c->devinfo->ver >= 41) {
                struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                                    c->undef, c->undef);
                ldvary->qpu.sig.ldvary = true;
                vary = vir_emit_def(c, ldvary);
        } else {
                vir_NOP(c)->qpu.sig.ldvary = true;
                vary = r3;
        }

        /* For gl_PointCoord input or distance along a line, we'll be called
         * with no nir_variable, and we don't count toward VPM size so we
         * don't track an input slot.
         */
        if (!var)
                return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);

        int i = c->num_inputs++;
        c->input_slots[i] = v3d_slot_from_slot_and_component(var->data.location,
                                                             swizzle);

        switch (var->data.interpolation) {
        case INTERP_MODE_NONE:
                /* If a gl_FrontColor or gl_BackColor input has no interp
                 * qualifier, then if we're using glShadeModel(GL_FLAT) it
                 * needs to be flat shaded.
                 */
                switch (var->data.location) {
                case VARYING_SLOT_COL0:
                case VARYING_SLOT_COL1:
                case VARYING_SLOT_BFC0:
                case VARYING_SLOT_BFC1:
                        if (c->fs_key->shade_model_flat) {
                                BITSET_SET(c->flat_shade_flags, i);
                                vir_MOV_dest(c, c->undef, vary);
                                return vir_MOV(c, r5);
                        } else {
                                return vir_FADD(c, vir_FMUL(c, vary,
                                                            c->payload_w), r5);
                        }
                default:
                        break;
                }
                /* FALLTHROUGH */
        case INTERP_MODE_SMOOTH:
                if (var->data.centroid) {
                        BITSET_SET(c->centroid_flags, i);
                        return vir_FADD(c, vir_FMUL(c, vary,
                                                    c->payload_w_centroid), r5);
                } else {
                        return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
                }
        case INTERP_MODE_NOPERSPECTIVE:
                BITSET_SET(c->noperspective_flags, i);
                return vir_FADD(c, vir_MOV(c, vary), r5);
        case INTERP_MODE_FLAT:
                BITSET_SET(c->flat_shade_flags, i);
                vir_MOV_dest(c, c->undef, vary);
                return vir_MOV(c, r5);
        default:
                unreachable("Bad interp mode");
        }
}
static void
emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var)
{
        for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                int chan = var->data.location_frac + i;
                c->inputs[attr * 4 + chan] =
                        emit_fragment_varying(c, var, chan);
        }
}
static void
add_output(struct v3d_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct v3d_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset] =
                v3d_slot_from_slot_and_component(slot, swizzle);
}
static void
declare_uniform_range(struct v3d_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_ubo_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct v3d_ubo_range,
                                         c->ubo_ranges_array_size);
                c->ubo_range_used = reralloc(c, c->ubo_range_used,
                                             bool,
                                             c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_range_used[array_id] = false;
}
/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct v3d_compile *c,
                    nir_alu_instr *compare_instr,
                    enum v3d_qpu_cond *out_cond)
{
        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1;
        if (nir_op_infos[compare_instr->op].num_inputs > 1)
                src1 = ntq_get_alu_src(c, compare_instr, 1);
        bool cond_invert = false;
        struct qreg nop = vir_reg(QFILE_NULL, 0);

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_seq:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                break;
        case nir_op_ieq:
                vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                break;

        case nir_op_fne:
        case nir_op_sne:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;
        case nir_op_ine:
                vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_fge:
        case nir_op_sge:
                vir_set_pf(vir_FCMP_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ige:
                vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;
        case nir_op_uge:
                vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;

        case nir_op_slt:
        case nir_op_flt:
                vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHN);
                break;
        case nir_op_ilt:
                vir_set_pf(vir_MIN_dest(c, nop, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ult:
                vir_set_pf(vir_SUB_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHC);
                break;

        default:
                return false;
        }

        *out_cond = cond_invert ? V3D_QPU_COND_IFNA : V3D_QPU_COND_IFA;

        return true;
}
/* Finds an ALU instruction that generates our src value that could
 * (potentially) be greedily emitted in the consuming instruction.
 */
static struct nir_alu_instr *
ntq_get_alu_parent(nir_src src)
{
        if (!src.is_ssa || src.ssa->parent_instr->type != nir_instr_type_alu)
                return NULL;

        nir_alu_instr *instr = nir_instr_as_alu(src.ssa->parent_instr);
        if (!instr)
                return NULL;

        /* If the ALU instr's srcs are non-SSA, then we would have to avoid
         * moving emission of the ALU instr down past another write of the
         * src.
         */
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                if (!instr->src[i].src.is_ssa)
                        return NULL;
        }

        return instr;
}
/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg ntq_emit_bcsel(struct v3d_compile *c, nir_alu_instr *instr,
                                  struct qreg *src)
{
        nir_alu_instr *compare = ntq_get_alu_parent(instr->src[0].src);
        if (!compare)
                goto out;

        enum v3d_qpu_cond cond;
        if (ntq_emit_comparison(c, compare, &cond))
                return vir_MOV(c, vir_SEL(c, cond, src[1], src[2]));

out:
        vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
        return vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA, src[1], src[2]));
}
static void
ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
{
        /* This should always be lowered to ALU operations for V3D. */
        assert(!instr->dest.saturate);

        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        ntq_store_dest(c, &instr->dest.dest, i,
                                       vir_MOV(c, srcs[i]));
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        struct qreg result;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                result = vir_MOV(c, src[0]);
                break;

        case nir_op_fneg:
                result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
                break;
        case nir_op_ineg:
                result = vir_NEG(c, src[0]);
                break;

        case nir_op_fmul:
                result = vir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                result = vir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                result = vir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                result = vir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                result = vir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i32:
                result = vir_FTOIZ(c, src[0]);
                break;
        case nir_op_f2u32:
                result = vir_FTOUZ(c, src[0]);
                break;
        case nir_op_i2f32:
                result = vir_ITOF(c, src[0]);
                break;
        case nir_op_u2f32:
                result = vir_UTOF(c, src[0]);
                break;
        case nir_op_b2f:
                result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        case nir_op_iadd:
                result = vir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                result = vir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                result = vir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                result = vir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                result = vir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                result = vir_MIN(c, src[0], src[1]);
                break;
        case nir_op_umin:
                result = vir_UMIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                result = vir_MAX(c, src[0], src[1]);
                break;
        case nir_op_umax:
                result = vir_UMAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                result = vir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                result = vir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                result = vir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                result = vir_NOT(c, src[0]);
                break;

        case nir_op_ufind_msb:
                result = vir_SUB(c, vir_uniform_ui(c, 31), vir_CLZ(c, src[0]));
                break;

        case nir_op_imul:
                result = vir_UMUL(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt: {
                enum v3d_qpu_cond cond;
                MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
                assert(ok);
                result = vir_MOV(c, vir_SEL(c, cond,
                                            vir_uniform_f(c, 1.0),
                                            vir_uniform_f(c, 0.0)));
                break;
        }

        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
        case nir_op_ult: {
                enum v3d_qpu_cond cond;
                MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
                assert(ok);
                result = vir_MOV(c, vir_SEL(c, cond,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;
        }

        case nir_op_bcsel:
                result = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            src[1], src[2]));
                break;

        case nir_op_frcp:
                result = vir_RECIP(c, src[0]);
                break;
        case nir_op_frsq:
                result = vir_RSQRT(c, src[0]);
                break;
        case nir_op_fexp2:
                result = vir_EXP(c, src[0]);
                break;
        case nir_op_flog2:
                result = vir_LOG(c, src[0]);
                break;

        case nir_op_fceil:
                result = vir_FCEIL(c, src[0]);
                break;
        case nir_op_ffloor:
                result = vir_FFLOOR(c, src[0]);
                break;
        case nir_op_fround_even:
                result = vir_FROUND(c, src[0]);
                break;
        case nir_op_ftrunc:
                result = vir_FTRUNC(c, src[0]);
                break;
        case nir_op_ffract:
                result = vir_FSUB(c, src[0], vir_FFLOOR(c, src[0]));
                break;

        case nir_op_fsin:
                result = ntq_fsincos(c, src[0], false);
                break;
        case nir_op_fcos:
                result = ntq_fsincos(c, src[0], true);
                break;

        case nir_op_fsign:
                result = ntq_fsign(c, src[0]);
                break;
        case nir_op_isign:
                result = ntq_isign(c, src[0]);
                break;

        case nir_op_fabs: {
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
                break;
        }

        case nir_op_iabs:
                result = vir_MAX(c, src[0],
                                 vir_SUB(c, vir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_fddx:
        case nir_op_fddx_coarse:
        case nir_op_fddx_fine:
                result = vir_FDX(c, src[0]);
                break;

        case nir_op_fddy:
        case nir_op_fddy_coarse:
        case nir_op_fddy_fine:
                result = vir_FDY(c, src[0]);
                break;

        case nir_op_uadd_carry:
                vir_PF(c, vir_ADD(c, src[0], src[1]), V3D_QPU_PF_PUSHC);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        case nir_op_pack_half_2x16_split:
                result = vir_VFPACK(c, src[0], src[1]);
                break;

        case nir_op_unpack_half_2x16_split_x:
                /* XXX perf: It would be good to be able to merge this unpack
                 * with whatever uses our result.
                 */
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_L);
                break;

        case nir_op_unpack_half_2x16_split_y:
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_H);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }

        /* We have a scalar result, so the instruction should only have a
         * single channel written to.
         */
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        ntq_store_dest(c, &instr->dest.dest,
                       ffs(instr->dest.write_mask) - 1, result);
}
/* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
 * specifier.  They come from a register that's preloaded with 0xffffffff
 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
 */
#define TLB_TYPE_F16_COLOR         (3 << 6)
#define TLB_TYPE_I32_COLOR         (1 << 6)
#define TLB_TYPE_F32_COLOR         (0 << 6)
#define TLB_RENDER_TARGET_SHIFT    3 /* Reversed!  7 = RT 0, 0 = RT 7. */
#define TLB_SAMPLE_MODE_PER_SAMPLE (0 << 2)
#define TLB_SAMPLE_MODE_PER_PIXEL  (1 << 2)
#define TLB_F16_SWAP_HI_LO         (1 << 1)
#define TLB_VEC_SIZE_4_F16         (1 << 0)
#define TLB_VEC_SIZE_2_F16         (0 << 0)
#define TLB_VEC_SIZE_MINUS_1_SHIFT 0

/* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
 * state is set.
 */
#define TLB_TYPE_DEPTH               ((2 << 6) | (0 << 4))
#define TLB_DEPTH_TYPE_INVARIANT     (0 << 2) /* Unmodified sideband input used */
#define TLB_DEPTH_TYPE_PER_PIXEL     (1 << 2) /* QPU result used */
#define TLB_V42_DEPTH_TYPE_INVARIANT (0 << 3) /* Unmodified sideband input used */
#define TLB_V42_DEPTH_TYPE_PER_PIXEL (1 << 3) /* QPU result used */

/* Stencil is a single 32-bit write. */
#define TLB_TYPE_STENCIL_ALPHA       ((2 << 6) | (1 << 4))
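/* For example, a per-pixel vec4 f16 write to render target 0 would shift in
 * the specifier byte
 *
 *     TLB_TYPE_F16_COLOR | TLB_F16_SWAP_HI_LO | TLB_VEC_SIZE_4_F16 |
 *     TLB_SAMPLE_MODE_PER_PIXEL | ((7 - 0) << TLB_RENDER_TARGET_SHIFT)
 *
 * which is how the conf values in emit_frag_end() below are composed.
 */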
static void
emit_frag_end(struct v3d_compile *c)
{
        /* XXX
        if (c->output_sample_mask_index != -1) {
                vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }
        */

        bool has_any_tlb_color_write = false;
        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (c->output_color_var[rt])
                        has_any_tlb_color_write = true;
        }

        if (c->fs_key->sample_alpha_to_coverage && c->output_color_var[0]) {
                struct nir_variable *var = c->output_color_var[0];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];

                vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                vir_AND(c,
                                        vir_MSF(c),
                                        vir_FTOC(c, color[3])));
        }

        if (c->output_position_index != -1) {
                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  c->outputs[c->output_position_index]);
                uint8_t tlb_specifier = TLB_TYPE_DEPTH;

                if (c->devinfo->ver >= 42) {
                        tlb_specifier |= (TLB_V42_DEPTH_TYPE_PER_PIXEL |
                                          TLB_SAMPLE_MODE_PER_PIXEL);
                } else {
                        tlb_specifier |= TLB_DEPTH_TYPE_PER_PIXEL;
                }

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c, tlb_specifier | 0xffffff00);
        } else if (c->s->info.fs.uses_discard ||
                   c->fs_key->sample_alpha_to_coverage ||
                   !has_any_tlb_color_write) {
                /* Emit passthrough Z if it needed to be delayed until shader
                 * end due to potential discards.
                 *
                 * Since (single-threaded) fragment shaders always need a TLB
                 * write, emit passthrough Z if we didn't have any color
                 * buffers and flag us as potentially discarding, so that we
                 * can use Z as the TLB write.
                 */
                c->s->info.fs.uses_discard = true;

                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  vir_reg(QFILE_NULL, 0));
                uint8_t tlb_specifier = TLB_TYPE_DEPTH;

                if (c->devinfo->ver >= 42) {
                        /* The spec says the PER_PIXEL flag is ignored for
                         * invariant writes, but the simulator demands it.
                         */
                        tlb_specifier |= (TLB_V42_DEPTH_TYPE_INVARIANT |
                                          TLB_SAMPLE_MODE_PER_PIXEL);
                } else {
                        tlb_specifier |= TLB_DEPTH_TYPE_INVARIANT;
                }

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c, tlb_specifier | 0xffffff00);
        }

        /* XXX: Performance improvement: Merge Z write and color writes TLB
         * uniform setup
         */

        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (!c->output_color_var[rt])
                        continue;

                nir_variable *var = c->output_color_var[rt];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];
                int num_components = glsl_get_vector_elements(var->type);
                uint32_t conf = 0xffffff00;
                struct qinst *inst;

                conf |= TLB_SAMPLE_MODE_PER_PIXEL;
                conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;

                if (c->fs_key->swap_color_rb & (1 << rt))
                        num_components = MAX2(num_components, 3);

                assert(num_components != 0);
                switch (glsl_get_base_type(var->type)) {
                case GLSL_TYPE_UINT:
                case GLSL_TYPE_INT:
                        /* The F32 vs I32 distinction was dropped in 4.2. */
                        if (c->devinfo->ver < 42)
                                conf |= TLB_TYPE_I32_COLOR;
                        else
                                conf |= TLB_TYPE_F32_COLOR;
                        conf |= ((num_components - 1) <<
                                 TLB_VEC_SIZE_MINUS_1_SHIFT);

                        inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), color[0]);
                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                vir_uniform_ui(c, conf);

                        for (int i = 1; i < num_components; i++) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLB, 0),
                                                    color[i]);
                        }
                        break;

                default: {
                        struct qreg r = color[0];
                        struct qreg g = color[1];
                        struct qreg b = color[2];
                        struct qreg a = color[3];

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                conf |= TLB_TYPE_F32_COLOR;
                                conf |= ((num_components - 1) <<
                                         TLB_VEC_SIZE_MINUS_1_SHIFT);
                        } else {
                                conf |= TLB_TYPE_F16_COLOR;
                                conf |= TLB_F16_SWAP_HI_LO;
                                if (num_components >= 3)
                                        conf |= TLB_VEC_SIZE_4_F16;
                                else
                                        conf |= TLB_VEC_SIZE_2_F16;
                        }

                        if (c->fs_key->swap_color_rb & (1 << rt)) {
                                r = color[2];
                                b = color[0];
                        }

                        if (c->fs_key->sample_alpha_to_one)
                                a = vir_uniform_f(c, 1.0);

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), r);
                                inst->src[vir_get_implicit_uniform_src(inst)] =
                                        vir_uniform_ui(c, conf);

                                if (num_components >= 2)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), g);
                                if (num_components >= 3)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), b);
                                if (num_components >= 4)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), a);
                        } else {
                                inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), r, g);
                                if (conf != ~0) {
                                        inst->dst.file = QFILE_TLBU;
                                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                                vir_uniform_ui(c, conf);
                                }

                                if (num_components >= 3)
                                        inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), b, a);
                        }
                        break;
                }
                }
        }
}
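/* Writes a value to the next VPM output slot.  On V3D 4.x this is an STVPMV
 * at an explicit, incrementing index; on V3D 3.3 the writes go to the magic
 * VPM register in order, following the setup from emit_vpm_write_setup().
 */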
static void
vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t *vpm_index)
{
        if (c->devinfo->ver >= 40) {
                vir_STVPMV(c, vir_uniform_ui(c, *vpm_index), val);
                *vpm_index = *vpm_index + 1;
        } else {
                vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
        }

        c->num_vpm_writes++;
}
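/* Emits the screen-space X and Y coordinates: each clip-space coordinate is
 * scaled by the QUNIFORM_VIEWPORT_X/Y_SCALE uniform and by 1/W, then
 * converted to an integer with FTOIN before the VPM write.
 */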
static void
emit_scaled_viewport_write(struct v3d_compile *c, struct qreg rcp_w,
                           uint32_t *vpm_index)
{
        for (int i = 0; i < 2; i++) {
                struct qreg coord = c->outputs[c->output_position_index + i];
                coord = vir_FMUL(c, coord,
                                 vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i,
                                             0));
                coord = vir_FMUL(c, coord, rcp_w);
                vir_VPM_WRITE(c, vir_FTOIN(c, coord), vpm_index);
        }
}
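/* Emits window-space depth: Z is scaled by the viewport Z scale, multiplied
 * by 1/W, and offset by the viewport Z offset before the VPM write.
 */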
static void
emit_zs_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
{
        struct qreg zscale = vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        struct qreg z = c->outputs[c->output_position_index + 2];
        z = vir_FMUL(c, z, zscale);
        z = vir_FMUL(c, z, rcp_w);
        z = vir_FADD(c, z, zoffset);
        vir_VPM_WRITE(c, z, vpm_index);
}
static void
emit_rcp_wc_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
{
        vir_VPM_WRITE(c, rcp_w, vpm_index);
}
static void
emit_point_size_write(struct v3d_compile *c, uint32_t *vpm_index)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = vir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = vir_FMAX(c, point_size, vir_uniform_f(c, .125));

        vir_VPM_WRITE(c, point_size, vpm_index);
}
static void
emit_vpm_write_setup(struct v3d_compile *c)
{
        if (c->devinfo->ver >= 40)
                return;

        v3d33_vir_vpm_write_setup(c);
}
/**
 * Sets up c->outputs[c->output_position_index] for the vertex shader
 * epilogue, if an output vertex position wasn't specified in the user's
 * shader.  This may be the case for transform feedback with rasterizer
 * discard enabled.
 */
static void
setup_default_position(struct v3d_compile *c)
{
        if (c->output_position_index != -1)
                return;

        c->output_position_index = c->outputs_array_size;
        for (int i = 0; i < 4; i++) {
                add_output(c,
                           c->output_position_index + i,
                           VARYING_SLOT_POS, i);
        }
}
static void
emit_vert_end(struct v3d_compile *c)
{
        setup_default_position(c);

        uint32_t vpm_index = 0;
        struct qreg rcp_w = vir_RECIP(c,
                                      c->outputs[c->output_position_index + 3]);

        emit_vpm_write_setup(c);

        if (c->vs_key->is_coord) {
                for (int i = 0; i < 4; i++)
                        vir_VPM_WRITE(c, c->outputs[c->output_position_index + i],
                                      &vpm_index);
                emit_scaled_viewport_write(c, rcp_w, &vpm_index);
                if (c->vs_key->per_vertex_point_size) {
                        emit_point_size_write(c, &vpm_index);
                        /* emit_rcp_wc_write(c, rcp_w); */
                }
                /* XXX: Z-only rendering */
                if (0)
                        emit_zs_write(c, rcp_w, &vpm_index);
        } else {
                emit_scaled_viewport_write(c, rcp_w, &vpm_index);
                emit_zs_write(c, rcp_w, &vpm_index);
                emit_rcp_wc_write(c, rcp_w, &vpm_index);
                if (c->vs_key->per_vertex_point_size)
                        emit_point_size_write(c, &vpm_index);
        }

        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                struct v3d_varying_slot input = c->vs_key->fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct v3d_varying_slot output = c->output_slots[j];

                        if (!memcmp(&input, &output, sizeof(input))) {
                                vir_VPM_WRITE(c, c->outputs[j],
                                              &vpm_index);
                                break;
                        }
                }

                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        vir_VPM_WRITE(c, vir_uniform_f(c, 0.0),
                                      &vpm_index);
        }

        /* GFXH-1684: VPM writes need to be complete by the end of the shader.
         */
        if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
                vir_VPMWT(c);
}
void
v3d_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);

        NIR_PASS(progress, s, nir_opt_move_load_ubo);
}
static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}
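/* Reads the next input component from the VPM.  On V3D 4.x each read is an
 * LDVPMV at an explicit index; on V3D 3.3 reads are batched, with one read
 * setup covering up to 32 components that are then consumed in order.
 */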
static struct qreg
ntq_emit_vpm_read(struct v3d_compile *c,
                  uint32_t *num_components_queued,
                  uint32_t *remaining,
                  uint32_t vpm_index)
{
        struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);

        if (c->devinfo->ver >= 40 ) {
                return vir_LDVPMV_IN(c,
                                     vir_uniform_ui(c,
                                                    (*num_components_queued)++));
        }

        if (*num_components_queued != 0) {
                (*num_components_queued)--;
                c->num_inputs++;
                return vir_MOV(c, vpm);
        }

        uint32_t num_components = MIN2(*remaining, 32);

        v3d33_vir_vpm_read_setup(c, num_components);

        *num_components_queued = num_components - 1;
        *remaining -= num_components;
        c->num_inputs++;

        return vir_MOV(c, vpm);
}
static void
ntq_setup_vpm_inputs(struct v3d_compile *c)
{
        /* Figure out how many components of each vertex attribute the shader
         * uses.  Each variable should have been split to individual
         * components and unused ones DCEed.  The vertex fetcher will load
         * from the start of the attribute to the number of components we
         * declare we need in c->vattr_sizes[].
         */
        nir_foreach_variable(var, &c->s->inputs) {
                /* No VS attribute array support. */
                assert(MAX2(glsl_get_length(var->type), 1) == 1);

                unsigned loc = var->data.driver_location;
                int start_component = var->data.location_frac;
                int num_components = glsl_get_components(var->type);

                c->vattr_sizes[loc] = MAX2(c->vattr_sizes[loc],
                                           start_component + num_components);
        }

        unsigned num_components = 0;
        uint32_t vpm_components_queued = 0;
        bool uses_iid = c->s->info.system_values_read &
                (1ull << SYSTEM_VALUE_INSTANCE_ID);
        bool uses_vid = c->s->info.system_values_read &
                (1ull << SYSTEM_VALUE_VERTEX_ID);
        num_components += uses_iid;
        num_components += uses_vid;

        for (int i = 0; i < ARRAY_SIZE(c->vattr_sizes); i++)
                num_components += c->vattr_sizes[i];

        if (uses_iid) {
                c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                           &num_components, ~0);
        }

        if (uses_vid) {
                c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                           &num_components, ~0);
        }

        for (int loc = 0; loc < ARRAY_SIZE(c->vattr_sizes); loc++) {
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                for (int i = 0; i < c->vattr_sizes[loc]; i++) {
                        c->inputs[loc * 4 + i] =
                                ntq_emit_vpm_read(c,
                                                  &vpm_components_queued,
                                                  &num_components,
                                                  loc * 4 + i);
                }
        }

        if (c->devinfo->ver >= 40) {
                assert(vpm_components_queued == num_components);
        } else {
                assert(vpm_components_queued == 0);
                assert(num_components == 0);
        }
}
static void
ntq_setup_fs_inputs(struct v3d_compile *c)
{
        unsigned num_entries = 0;
        unsigned num_components = 0;
        nir_foreach_variable(var, &c->s->inputs) {
                num_entries++;
                num_components += glsl_get_components(var->type);
        }

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (var->data.location == VARYING_SLOT_POS) {
                        emit_fragcoord_input(c, loc);
                } else if (var->data.location == VARYING_SLOT_PNTC ||
                           (var->data.location >= VARYING_SLOT_VAR0 &&
                            (c->fs_key->point_sprite_mask &
                             (1 << (var->data.location -
                                    VARYING_SLOT_VAR0))))) {
                        c->inputs[loc * 4 + 0] = c->point_x;
                        c->inputs[loc * 4 + 1] = c->point_y;
                } else {
                        emit_fragment_input(c, loc, var);
                }
        }
}
static void
ntq_setup_outputs(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4 - var->data.location_frac; i++) {
                        add_output(c, loc + var->data.location_frac + i,
                                   var->data.location,
                                   var->data.location_frac + i);
                }

                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                                c->output_color_var[0] = var;
                                c->output_color_var[1] = var;
                                c->output_color_var[2] = var;
                                c->output_color_var[3] = var;
                                break;
                        case FRAG_RESULT_DATA0:
                        case FRAG_RESULT_DATA1:
                        case FRAG_RESULT_DATA2:
                        case FRAG_RESULT_DATA3:
                                c->output_color_var[var->data.location -
                                                    FRAG_RESULT_DATA0] = var;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}
static void
ntq_setup_uniforms(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                uint32_t vec4_count = glsl_count_attribute_slots(var->type,
                                                                 false);
                unsigned vec4_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * vec4_size,
                                      vec4_count * vec4_size);
        }
}
/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = vir_get_temp(c);
        }
}
static void
ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
{
        /* XXX perf: Experiment with using immediate loads to avoid having
         * these end up in the uniform stream.  Watch out for breaking the
         * small immediates optimization in the process!
         */
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
static void
ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* VIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers().
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, 0);
}
static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        unsigned offset;

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                if (nir_src_is_const(instr->src[0])) {
                        offset = (nir_intrinsic_base(instr) +
                                  nir_src_as_uint(instr->src[0]));
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        ntq_store_dest(c, &instr->dest, 0,
                                       vir_uniform(c, QUNIFORM_UNIFORM,
                                                   offset));
                } else {
                        ntq_store_dest(c, &instr->dest, 0,
                                       indirect_uniform_load(c, instr));
                }
                break;

        case nir_intrinsic_load_ubo:
                for (int i = 0; i < instr->num_components; i++) {
                        int ubo = nir_src_as_uint(instr->src[0]);

                        /* XXX perf: On V3D 4.x with uniform offsets, we
                         * should probably try setting UBOs up in the A
                         * register file and doing a sequence of loads that
                         * way.
                         */
                        /* Adjust for where we stored the TGSI register base. */
                        vir_ADD_dest(c,
                                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                                     vir_uniform(c, QUNIFORM_UBO_ADDR, 1 + ubo),
                                     vir_ADD(c,
                                             ntq_get_src(c, instr->src[1], 0),
                                             vir_uniform_ui(c, i * 4)));

                        vir_emit_thrsw(c);

                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
                }
                break;
        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                                   nir_intrinsic_ucp_id(instr) *
                                                   4 + i));
                }
                break;

        case nir_intrinsic_load_alpha_ref_float:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
                break;

        case nir_intrinsic_load_sample_mask_in:
                ntq_store_dest(c, &instr->dest, 0, vir_MSF(c));
                break;

        case nir_intrinsic_load_helper_invocation:
                vir_PF(c, vir_MSF(c), V3D_QPU_PF_PUSHZ);
                ntq_store_dest(c, &instr->dest, 0,
                               vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
                                                  vir_uniform_ui(c, ~0),
                                                  vir_uniform_ui(c, 0))));
                break;

        case nir_intrinsic_load_front_face:
                /* The register contains 0 (front) or 1 (back), and we need to
                 * turn it into a NIR bool where true means front.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               vir_ADD(c,
                                       vir_uniform_ui(c, -1),
                                       vir_REVF(c)));
                break;

        case nir_intrinsic_load_instance_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
                break;

        case nir_intrinsic_load_vertex_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
                break;

        case nir_intrinsic_load_input:
                for (int i = 0; i < instr->num_components; i++) {
                        offset = (nir_intrinsic_base(instr) +
                                  nir_src_as_uint(instr->src[0]));
                        int comp = nir_intrinsic_component(instr) + i;
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_MOV(c, c->inputs[offset * 4 + comp]));
                }
                break;

        case nir_intrinsic_store_output:
                offset = ((nir_intrinsic_base(instr) +
                           nir_src_as_uint(instr->src[1])) * 4 +
                          nir_intrinsic_component(instr));

                for (int i = 0; i < instr->num_components; i++) {
                        c->outputs[offset + i] =
                                vir_MOV(c, ntq_get_src(c, instr->src[0], i));
                }
                c->num_outputs = MAX2(c->num_outputs,
                                      offset + instr->num_components);
                break;

        case nir_intrinsic_discard:
                if (c->execute.file != QFILE_NULL) {
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                        vir_uniform_ui(c, 0));
                }
                break;

        case nir_intrinsic_discard_if: {
                /* true (~0) if we're discarding */
                struct qreg cond = ntq_get_src(c, instr->src[0], 0);

                if (c->execute.file != QFILE_NULL) {
                        /* execute == 0 means the channel is active.  Invert
                         * the condition so that we can use zero as "executing
                         * and discarding."
                         */
                        vir_PF(c, vir_OR(c, c->execute, vir_NOT(c, cond)),
                               V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_PF(c, cond, V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFNA);
                }

                break;
        }

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}
/* Clears (activates) the execute flags for any channels whose jump target
 * matches this block.
 *
 * XXX perf: Could we be using flpush/flpop somehow for our execution channel
 * enabling?
 *
 * XXX perf: For uniform control flow, we should be able to skip c->execute
 * handling entirely.
 */
static void
ntq_activate_execute_for_block(struct v3d_compile *c)
{
        vir_set_pf(vir_XOR_dest(c, vir_reg(QFILE_NULL, 0),
                                c->execute, vir_uniform_ui(c, c->cur_block->index)),
                   V3D_QPU_PF_PUSHZ);

        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
}
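/* Emits an if statement using the execution-mask scheme: c->execute holds 0
 * for channels that are running and otherwise the index of the block the
 * channel is waiting to start at.  Channels failing the condition get their
 * execute value pointed at the ELSE (or ENDIF) block, and whole-warp branches
 * are emitted when no channel is left active for a side.
 */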
static void
ntq_emit_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_top_level = true;
        }

        /* Set up the flags for the IF condition (taking the THEN branch). */
        nir_alu_instr *if_condition_alu = ntq_get_alu_parent(if_stmt->condition);
        enum v3d_qpu_cond cond;
        if (!if_condition_alu ||
            !ntq_emit_comparison(c, if_condition_alu, &cond)) {
                vir_PF(c, ntq_get_src(c, if_stmt->condition, 0),
                       V3D_QPU_PF_PUSHZ);
                cond = V3D_QPU_COND_IFNA;
        }

        /* Update the flags+cond to mean "Taking the ELSE branch (!cond) and
         * was previously active (execute Z) for updating the exec flags.
         */
        if (was_top_level) {
                cond = v3d_qpu_cond_invert(cond);
        } else {
                struct qinst *inst = vir_MOV_dest(c, vir_reg(QFILE_NULL, 0),
                                                  c->execute);
                if (cond == V3D_QPU_COND_IFA) {
                        vir_set_uf(inst, V3D_QPU_UF_NORNZ);
                } else {
                        vir_set_uf(inst, V3D_QPU_UF_ANDZ);
                        cond = V3D_QPU_COND_IFA;
                }
        }

        vir_MOV_cond(c, cond,
                     c->execute,
                     vir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
        vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block.  First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF
                 */
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                vir_PF(c, vir_XOR(c, c->execute,
                                  vir_uniform_ui(c, after_block->index)),
                       V3D_QPU_PF_PUSHZ);
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
                vir_link_blocks(c->cur_block, after_block);
                vir_link_blocks(c->cur_block, else_block);

                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
        if (was_top_level)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);
}
static void
ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
{
        switch (jump->type) {
        case nir_jump_break:
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_break_block->index));
                break;

        case nir_jump_continue:
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_cont_block->index));
                break;

        case nir_jump_return:
                unreachable("All returns should be lowered\n");
        }
}
static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                ntq_emit_jump(c, nir_instr_as_jump(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
ntq_emit_block(struct v3d_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}
static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);
static void
ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
{
        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_top_level = true;
        }

        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = vir_new_block(c);
        c->loop_break_block = vir_new_block(c);

        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* Re-enable any previous continues now, so our ANYA check below
         * works.
         *
         * XXX: Use the .ORZ flags update, instead.
         */
        vir_PF(c, vir_XOR(c,
                          c->execute,
                          vir_uniform_ui(c, c->loop_cont_block->index)),
               V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));

        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);

        struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
        /* Pixels that were not dispatched or have been discarded should not
         * contribute to looping again.
         */
        branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_link_blocks(c->cur_block, c->loop_break_block);

        vir_set_emit_block(c, c->loop_break_block);
        if (was_top_level)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;

        c->loops++;
}
static void
ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}
static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}
static void
ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}
static void
nir_to_vir(struct v3d_compile *c)
{
        if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
                c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));

                /* XXX perf: We could set the "disable implicit point/line
                 * varyings" field in the shader record and not emit these, if
                 * they're not going to be used.
                 */
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, NULL, 0);
                        c->point_y = emit_fragment_varying(c, NULL, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, NULL, 0);
                }
        }

        if (c->s->info.stage == MESA_SHADER_FRAGMENT)
                ntq_setup_fs_inputs(c);
        else
                ntq_setup_vpm_inputs(c);

        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}
const nir_shader_compiler_options v3d_nir_options = {
        .lower_all_io_to_temps = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_bitfield_insert_to_shifts = true,
        .lower_bitfield_extract_to_shifts = true,
        .lower_bitfield_reverse = true,
        .lower_bit_count = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_unpack_snorm_4x8 = true,
        .lower_pack_half_2x16 = true,
        .lower_unpack_half_2x16 = true,
        .lower_find_lsb = true,
        .lower_flrp32 = true,
        .lower_fsqrt = true,
        .lower_ifind_msb = true,
        .lower_ldexp = true,
        .lower_mul_high = true,
        .lower_wpos_pntc = true,
        .native_integers = true,
};
/**
 * When demoting a shader down to single-threaded, removes the THRSW
 * instructions (one will still be inserted at v3d_vir_to_qpu() for the
 * program end).
 */
static void
vir_remove_thrsw(struct v3d_compile *c)
{
        vir_for_each_block(block, c) {
                vir_for_each_inst_safe(inst, block) {
                        if (inst->qpu.sig.thrsw)
                                vir_remove_instruction(c, inst);
                }
        }

        c->last_thrsw = NULL;
}
static void
vir_emit_last_thrsw(struct v3d_compile *c)
{
        /* On V3D before 4.1, we need a TMU op to be outstanding when thread
         * switching, so disable threads if we didn't do any TMU ops (each of
         * which would have emitted a THRSW).
         */
        if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
                c->threads = 1;
                if (c->last_thrsw)
                        vir_remove_thrsw(c);
                return;
        }

        /* If we're threaded and the last THRSW was in conditional code, then
         * we need to emit another one so that we can flag it as the last
         * thrsw.
         */
        if (c->last_thrsw && !c->last_thrsw_at_top_level) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        /* If we're threaded, then we need to mark the last THRSW instruction
         * so we can emit a pair of them at QPU emit time.
         *
         * For V3D 4.x, we can spawn the non-fragment shaders already in the
         * post-last-THRSW state, so we can skip this.
         */
        if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        if (c->last_thrsw)
                c->last_thrsw->is_last_thrsw = true;
}
/* There's a flag in the shader for "center W is needed for reasons other than
 * non-centroid varyings", so we just walk the program after VIR optimization
 * to see if it's used.  It should be harmless to set even if we only use
 * center W for varyings.
 */
static void
vir_check_payload_w(struct v3d_compile *c)
{
        if (c->s->info.stage != MESA_SHADER_FRAGMENT)
                return;

        vir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_REG &&
                            inst->src[i].index == 0) {
                                c->uses_center_w = true;
                                return;
                        }
                }
        }
}
*c
)
2125 if (V3D_DEBUG
& (V3D_DEBUG_NIR
|
2126 v3d_debug_flag_for_shader_stage(c
->s
->info
.stage
))) {
2127 fprintf(stderr
, "%s prog %d/%d NIR:\n",
2128 vir_get_stage_name(c
),
2129 c
->program_id
, c
->variant_id
);
2130 nir_print_shader(c
->s
, stderr
);
2135 /* Emit the last THRSW before STVPM and TLB writes. */
2136 vir_emit_last_thrsw(c
);
2138 switch (c
->s
->info
.stage
) {
2139 case MESA_SHADER_FRAGMENT
:
2142 case MESA_SHADER_VERTEX
:
2146 unreachable("bad stage");
2149 if (V3D_DEBUG
& (V3D_DEBUG_VIR
|
2150 v3d_debug_flag_for_shader_stage(c
->s
->info
.stage
))) {
2151 fprintf(stderr
, "%s prog %d/%d pre-opt VIR:\n",
2152 vir_get_stage_name(c
),
2153 c
->program_id
, c
->variant_id
);
2155 fprintf(stderr
, "\n");
2159 vir_lower_uniforms(c
);
2161 vir_check_payload_w(c
);
2163 /* XXX perf: On VC4, we do a VIR-level instruction scheduling here.
2164 * We used that on that platform to pipeline TMU writes and reduce the
2165 * number of thread switches, as well as try (mostly successfully) to
2166 * reduce maximum register pressure to allow more threads. We should
2167 * do something of that sort for V3D -- either instruction scheduling
2168 * here, or delay the the THRSW and LDTMUs from our texture
2169 * instructions until the results are needed.
2172 if (V3D_DEBUG
& (V3D_DEBUG_VIR
|
2173 v3d_debug_flag_for_shader_stage(c
->s
->info
.stage
))) {
2174 fprintf(stderr
, "%s prog %d/%d VIR:\n",
2175 vir_get_stage_name(c
),
2176 c
->program_id
, c
->variant_id
);
2178 fprintf(stderr
, "\n");
2181 /* Attempt to allocate registers for the temporaries. If we fail,
2182 * reduce thread count and try again.
2184 int min_threads
= (c
->devinfo
->ver
>= 41) ? 2 : 1;
2185 struct qpu_reg
*temp_registers
;
2188 temp_registers
= v3d_register_allocate(c
, &spilled
);
2195 if (c
->threads
== min_threads
) {
2196 fprintf(stderr
, "Failed to register allocate at %d threads:\n",
2205 if (c
->threads
== 1)
2206 vir_remove_thrsw(c
);
2209 v3d_vir_to_qpu(c
, temp_registers
);