/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
resize_qreg_array(struct v3d_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}

static void
vir_emit_thrsw(struct v3d_compile *c)
{
        if (c->threads == 1)
                return;

        /* Always thread switch after each texture operation for now.
         *
         * We could do better by batching a bunch of texture fetches up and
         * then doing one thread switch and collecting all their results
         * afterward.
         */
        c->last_thrsw = vir_NOP(c);
        c->last_thrsw->qpu.sig.thrsw = true;
        c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
}

static struct qreg
indirect_uniform_load(struct v3d_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = nir_intrinsic_base(intr);
        struct v3d_ubo_range *range = NULL;
        unsigned i;

        for (i = 0; i < c->num_ubo_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }
        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(i != c->num_ubo_ranges);
        if (!c->ubo_range_used[i]) {
                c->ubo_range_used[i] = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
        }

        offset -= range->src_offset;

        if (range->dst_offset + offset != 0) {
                indirect_offset = vir_ADD(c, indirect_offset,
                                          vir_uniform_ui(c, range->dst_offset +
                                                         offset));
        }

        /* Adjust for where we stored the TGSI register base. */
        vir_ADD_dest(c,
                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                     vir_uniform(c, QUNIFORM_UBO_ADDR, 0),
                     indirect_offset);

        vir_emit_thrsw(c);
        return vir_LDTMU(c);
}

static struct qreg *
ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}

/**
 * This function is responsible for getting VIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value.  To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous VIR instruction in the block and rewritable by
 * this function.  That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination.
 */
void
ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        struct qinst *last_inst = NULL;
        if (!list_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        assert(result.file == QFILE_UNIF ||
               (result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* Insert a MOV if the source wasn't an SSA def in the
                 * previous instruction.
                 */
                if (result.file == QFILE_UNIF) {
                        result = vir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (c->execute.file != QFILE_NULL) {
                        last_inst->dst.index = qregs[chan].index;

                        /* Set the flags to the current exec mask.
                         */
                        c->cursor = vir_before_inst(last_inst);
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        c->cursor = vir_after_inst(last_inst);

                        vir_set_cond(last_inst, V3D_QPU_COND_IFA);
                        last_inst->cond_is_exec_mask = true;
                }
        }
}

struct qreg
ntq_get_src(struct v3d_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

static struct qreg
ntq_get_alu_src(struct v3d_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static struct qreg
ntq_minify(struct v3d_compile *c, struct qreg size, struct qreg level)
{
        return vir_MAX(c, vir_SHR(c, size, level), vir_uniform_ui(c, 1));
}

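/* For example, a 16-texel dimension minified to LOD 2 works out to
 * MAX2(16 >> 2, 1) == 4, and the clamp keeps high LODs from reporting a
 * zero size.
 */
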
static void
ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int lod_index = nir_tex_instr_src_index(instr, nir_tex_src_lod);
        int dest_size = nir_tex_instr_dest_size(instr);

        struct qreg lod = c->undef;
        if (lod_index != -1)
                lod = ntq_get_src(c, instr->src[lod_index].src, 0);

        for (int i = 0; i < dest_size; i++) {
                enum quniform_contents contents;

                if (instr->is_array && i == dest_size - 1)
                        contents = QUNIFORM_TEXTURE_ARRAY_SIZE;
                else
                        contents = QUNIFORM_TEXTURE_WIDTH + i;

                struct qreg size = vir_uniform(c, contents, unit);

                switch (instr->sampler_dim) {
                case GLSL_SAMPLER_DIM_1D:
                case GLSL_SAMPLER_DIM_2D:
                case GLSL_SAMPLER_DIM_3D:
                case GLSL_SAMPLER_DIM_CUBE:
                        /* Don't minify the array size. */
                        if (!(instr->is_array && i == dest_size - 1)) {
                                size = ntq_minify(c, size, lod);
                        }
                        break;

                case GLSL_SAMPLER_DIM_RECT:
                        /* There's no LOD field for rects */
                        break;

                default:
                        unreachable("Bad sampler type");
                }

                ntq_store_dest(c, &instr->dest, i, size);
        }
}

static void
ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        /* Since each texture sampling op requires uploading uniforms to
         * reference the texture, there's no HW support for texture size and
         * you just upload uniforms containing the size.
         */
        switch (instr->op) {
        case nir_texop_query_levels:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
                return;
        case nir_texop_txs:
                ntq_emit_txs(c, instr);
                return;
        default:
                break;
        }

        if (c->devinfo->ver >= 40)
                v3d40_vir_emit_tex(c, instr);
        else
                v3d33_vir_emit_tex(c, instr);
}

static struct qreg
ntq_fsincos(struct v3d_compile *c, struct qreg src, bool is_cos)
{
        struct qreg input = vir_FMUL(c, src, vir_uniform_f(c, 1.0f / M_PI));
        if (is_cos)
                input = vir_FADD(c, input, vir_uniform_f(c, 0.5));

        struct qreg periods = vir_FROUND(c, input);
        struct qreg sin_output = vir_SIN(c, vir_FSUB(c, input, periods));
        return vir_XOR(c, sin_output, vir_SHL(c,
                                              vir_FTOIN(c, periods),
                                              vir_uniform_ui(c, -1)));
}

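/* A sketch of the math above: the SFU SIN op evaluates sin(pi * x) near
 * x in [-0.5, 0.5], so the source is scaled by 1/pi and reduced by
 * "periods" half-turns (with an extra 0.5 turn folding cos into sin).
 * Since sin(pi * (x - n)) == -sin(pi * x) for odd n, the sign is fixed up
 * by XORing the low bit of "periods" into the float sign bit; the SHL by
 * -1 is effectively a shift by 31 here, since QPU shift counts wrap
 * modulo 32.
 */
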
static struct qreg
ntq_fsign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_f(c, 0.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_f(c, 1.0));
        vir_PF(c, vir_FMOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_f(c, -1.0));
        return vir_MOV(c, t);
}

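/* ntq_fsign() above and ntq_isign() below share one flags pattern: start
 * from 0, PUSHZ the source so the A flag is set for zero inputs,
 * conditionally overwrite with 1 where A is *not* set (nonzero inputs),
 * then PUSHN and overwrite with -1 where the negative flag pushed A.  The
 * final vir_MOV() leaves an SSA temp that ntq_store_dest() is allowed to
 * rewrite.
 */
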
static struct qreg
ntq_isign(struct v3d_compile *c, struct qreg src)
{
        struct qreg t = vir_get_temp(c);

        vir_MOV_dest(c, t, vir_uniform_ui(c, 0));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFNA, t, vir_uniform_ui(c, 1));
        vir_PF(c, vir_MOV(c, src), V3D_QPU_PF_PUSHN);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, t, vir_uniform_ui(c, -1));
        return vir_MOV(c, t);
}

static void
emit_fragcoord_input(struct v3d_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = vir_FXCD(c);
        c->inputs[attr * 4 + 1] = vir_FYCD(c);
        c->inputs[attr * 4 + 2] = c->payload_z;
        c->inputs[attr * 4 + 3] = vir_RECIP(c, c->payload_w);
}

static struct qreg
emit_fragment_varying(struct v3d_compile *c, nir_variable *var,
                      uint8_t swizzle)
{
        struct qreg r3 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R3);
        struct qreg r5 = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R5);

        struct qreg vary;
        if (c->devinfo->ver >= 41) {
                struct qinst *ldvary = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                                    c->undef, c->undef);
                ldvary->qpu.sig.ldvary = true;
                vary = vir_emit_def(c, ldvary);
        } else {
                vir_NOP(c)->qpu.sig.ldvary = true;
                vary = r3;
        }

        /* For gl_PointCoord input or distance along a line, we'll be called
         * with no nir_variable, and we don't count toward VPM size so we
         * don't track an input slot.
         */
        if (!var)
                return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);

        int i = c->num_inputs++;
        c->input_slots[i] = v3d_slot_from_slot_and_component(var->data.location,
                                                             swizzle);

        switch (var->data.interpolation) {
        case INTERP_MODE_NONE:
                /* If a gl_FrontColor or gl_BackColor input has no interp
                 * qualifier, then if we're using glShadeModel(GL_FLAT) it
                 * needs to be flat shaded.
                 */
                switch (var->data.location) {
                case VARYING_SLOT_COL0:
                case VARYING_SLOT_COL1:
                case VARYING_SLOT_BFC0:
                case VARYING_SLOT_BFC1:
                        if (c->fs_key->shade_model_flat) {
                                BITSET_SET(c->flat_shade_flags, i);
                                vir_MOV_dest(c, c->undef, vary);
                                return vir_MOV(c, r5);
                        } else {
                                return vir_FADD(c, vir_FMUL(c, vary,
                                                            c->payload_w), r5);
                        }
                default:
                        break;
                }
                /* FALLTHROUGH */
        case INTERP_MODE_SMOOTH:
                if (var->data.centroid) {
                        BITSET_SET(c->centroid_flags, i);
                        return vir_FADD(c, vir_FMUL(c, vary,
                                                    c->payload_w_centroid), r5);
                } else {
                        return vir_FADD(c, vir_FMUL(c, vary, c->payload_w), r5);
                }
        case INTERP_MODE_NOPERSPECTIVE:
                BITSET_SET(c->noperspective_flags, i);
                return vir_FADD(c, vir_MOV(c, vary), r5);
        case INTERP_MODE_FLAT:
                BITSET_SET(c->flat_shade_flags, i);
                vir_MOV_dest(c, c->undef, vary);
                return vir_MOV(c, r5);
        default:
                unreachable("Bad interp mode");
        }
}

static void
emit_fragment_input(struct v3d_compile *c, int attr, nir_variable *var)
{
        for (int i = 0; i < glsl_get_vector_elements(var->type); i++) {
                int chan = var->data.location_frac + i;
                c->inputs[attr * 4 + chan] =
                        emit_fragment_varying(c, var, chan);
        }
}

static void
add_output(struct v3d_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct v3d_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset] =
                v3d_slot_from_slot_and_component(slot, swizzle);
}

static void
declare_uniform_range(struct v3d_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_ubo_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct v3d_ubo_range,
                                         c->ubo_ranges_array_size);
                c->ubo_range_used = reralloc(c, c->ubo_range_used,
                                             bool,
                                             c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_range_used[array_id] = false;
}

/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct v3d_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1;
        if (nir_op_infos[compare_instr->op].num_inputs > 1)
                src1 = ntq_get_alu_src(c, compare_instr, 1);
        bool cond_invert = false;

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_seq:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHZ);
                break;
        case nir_op_ieq:
                vir_PF(c, vir_XOR(c, src0, src1), V3D_QPU_PF_PUSHZ);
                break;

        case nir_op_fne:
        case nir_op_sne:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;
        case nir_op_ine:
                vir_PF(c, vir_XOR(c, src0, src1), V3D_QPU_PF_PUSHZ);
                cond_invert = true;
                break;

        case nir_op_fge:
        case nir_op_sge:
                vir_PF(c, vir_FCMP(c, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ige:
                vir_PF(c, vir_MIN(c, src1, src0), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;
        case nir_op_uge:
                vir_PF(c, vir_SUB(c, src0, src1), V3D_QPU_PF_PUSHC);
                cond_invert = true;
                break;

        case nir_op_flt:
        case nir_op_slt:
                vir_PF(c, vir_FCMP(c, src0, src1), V3D_QPU_PF_PUSHN);
                break;
        case nir_op_ilt:
                vir_PF(c, vir_MIN(c, src1, src0), V3D_QPU_PF_PUSHC);
                break;
        case nir_op_ult:
                vir_PF(c, vir_SUB(c, src0, src1), V3D_QPU_PF_PUSHC);
                break;

        default:
                return false;
        }

        enum v3d_qpu_cond cond = (cond_invert ?
                                  V3D_QPU_COND_IFNA :
                                  V3D_QPU_COND_IFA);

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = vir_SEL(c, cond,
                                vir_uniform_f(c, 1.0), vir_uniform_f(c, 0.0));
                break;

        case nir_op_bcsel:
                *dest = vir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = vir_SEL(c, cond,
                                vir_uniform_ui(c, ~0), vir_uniform_ui(c, 0));
                break;
        }

        /* Make the temporary for nir_store_dest(). */
        *dest = vir_MOV(c, *dest);

        return true;
}

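/* Note the lowering pattern above: each NIR comparison maps onto whichever
 * ALU op leaves the answer in a condition flag (Z from FCMP/XOR, N from
 * FCMP, carry from MIN/SUB), and cond_invert flips the sense by selecting
 * IFNA instead of IFA rather than spending an instruction inverting the
 * flag itself.
 */
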
/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg ntq_emit_bcsel(struct v3d_compile *c, nir_alu_instr *instr,
                                  struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        if (!compare)
                goto out;

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
        return vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA, src[1], src[2]));
}

static void
ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
{
        /* This should always be lowered to ALU operations for V3D. */
        assert(!instr->dest.saturate);

        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        ntq_store_dest(c, &instr->dest.dest, i,
                                       vir_MOV(c, srcs[i]));
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        struct qreg result;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                result = vir_MOV(c, src[0]);
                break;

        case nir_op_fneg:
                result = vir_XOR(c, src[0], vir_uniform_ui(c, 1 << 31));
                break;
        case nir_op_ineg:
                result = vir_NEG(c, src[0]);
                break;

        case nir_op_fmul:
                result = vir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                result = vir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                result = vir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                result = vir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                result = vir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i32:
                result = vir_FTOIZ(c, src[0]);
                break;
        case nir_op_f2u32:
                result = vir_FTOUZ(c, src[0]);
                break;
        case nir_op_i2f32:
                result = vir_ITOF(c, src[0]);
                break;
        case nir_op_u2f32:
                result = vir_UTOF(c, src[0]);
                break;
        case nir_op_b2f:
                result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        case nir_op_iadd:
                result = vir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                result = vir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                result = vir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                result = vir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                result = vir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                result = vir_MIN(c, src[0], src[1]);
                break;
        case nir_op_umin:
                result = vir_UMIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                result = vir_MAX(c, src[0], src[1]);
                break;
        case nir_op_umax:
                result = vir_UMAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                result = vir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                result = vir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                result = vir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                result = vir_NOT(c, src[0]);
                break;

        case nir_op_ufind_msb:
                result = vir_SUB(c, vir_uniform_ui(c, 31), vir_CLZ(c, src[0]));
                break;

        case nir_op_imul:
                result = vir_UMUL(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
        case nir_op_ult:
                if (!ntq_emit_comparison(c, &result, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;

        case nir_op_bcsel:
                result = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
                                            src[1], src[2]));
                break;

        case nir_op_frcp:
                result = vir_RECIP(c, src[0]);
                break;
        case nir_op_frsq:
                result = vir_RSQRT(c, src[0]);
                break;
        case nir_op_fexp2:
                result = vir_EXP(c, src[0]);
                break;
        case nir_op_flog2:
                result = vir_LOG(c, src[0]);
                break;

        case nir_op_fceil:
                result = vir_FCEIL(c, src[0]);
                break;
        case nir_op_ffloor:
                result = vir_FFLOOR(c, src[0]);
                break;
        case nir_op_fround_even:
                result = vir_FROUND(c, src[0]);
                break;
        case nir_op_ftrunc:
                result = vir_FTRUNC(c, src[0]);
                break;
        case nir_op_ffract:
                result = vir_FSUB(c, src[0], vir_FFLOOR(c, src[0]));
                break;

        case nir_op_fsin:
                result = ntq_fsincos(c, src[0], false);
                break;
        case nir_op_fcos:
                result = ntq_fsincos(c, src[0], true);
                break;

        case nir_op_fsign:
                result = ntq_fsign(c, src[0]);
                break;
        case nir_op_isign:
                result = ntq_isign(c, src[0]);
                break;

        case nir_op_fabs:
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_ABS);
                break;

        case nir_op_iabs:
                result = vir_MAX(c, src[0],
                                 vir_SUB(c, vir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_fddx:
        case nir_op_fddx_coarse:
        case nir_op_fddx_fine:
                result = vir_FDX(c, src[0]);
                break;

        case nir_op_fddy:
        case nir_op_fddy_coarse:
        case nir_op_fddy_fine:
                result = vir_FDY(c, src[0]);
                break;

        case nir_op_uadd_carry:
                vir_PF(c, vir_ADD(c, src[0], src[1]), V3D_QPU_PF_PUSHC);
                result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFA,
                                            vir_uniform_ui(c, ~0),
                                            vir_uniform_ui(c, 0)));
                break;

        case nir_op_pack_half_2x16_split:
                result = vir_VFPACK(c, src[0], src[1]);
                break;

        case nir_op_unpack_half_2x16_split_x:
                /* XXX perf: It would be good to be able to merge this unpack
                 * with whatever uses our result.
                 */
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_L);
                break;

        case nir_op_unpack_half_2x16_split_y:
                result = vir_FMOV(c, src[0]);
                vir_set_unpack(c->defs[result.index], 0, V3D_QPU_UNPACK_H);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }

        /* We have a scalar result, so the instruction should only have a
         * single channel written to.
         */
        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
        ntq_store_dest(c, &instr->dest.dest,
                       ffs(instr->dest.write_mask) - 1, result);
}

/* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
 * specifier.  They come from a register that's preloaded with 0xffffffff
 * (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
 * 8 bits are shifted off the bottom and 0xff shifted in from the top.
 */
#define TLB_TYPE_F16_COLOR           (3 << 6)
#define TLB_TYPE_I32_COLOR           (1 << 6)
#define TLB_TYPE_F32_COLOR           (0 << 6)
#define TLB_RENDER_TARGET_SHIFT      3 /* Reversed!  7 = RT 0, 0 = RT 7. */
#define TLB_SAMPLE_MODE_PER_SAMPLE   (0 << 2)
#define TLB_SAMPLE_MODE_PER_PIXEL    (1 << 2)
#define TLB_F16_SWAP_HI_LO           (1 << 1)
#define TLB_VEC_SIZE_4_F16           (1 << 0)
#define TLB_VEC_SIZE_2_F16           (0 << 0)
#define TLB_VEC_SIZE_MINUS_1_SHIFT   0

/* Triggers Z/Stencil testing, used when the shader state's "FS modifies Z"
 * flag is set.
 */
#define TLB_TYPE_DEPTH               ((2 << 6) | (0 << 4))
#define TLB_DEPTH_TYPE_INVARIANT     (0 << 2) /* Unmodified sideband input used */
#define TLB_DEPTH_TYPE_PER_PIXEL     (1 << 2) /* QPU result used */
#define TLB_V42_DEPTH_TYPE_INVARIANT (0 << 3) /* Unmodified sideband input used */
#define TLB_V42_DEPTH_TYPE_PER_PIXEL (1 << 3) /* QPU result used */

/* Stencil is a single 32-bit write. */
#define TLB_TYPE_STENCIL_ALPHA       ((2 << 6) | (1 << 4))

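/* Worked example from the bits above: a plain f16 vec4 write to RT0 is
 * TLB_TYPE_F16_COLOR | ((7 - 0) << TLB_RENDER_TARGET_SHIFT) |
 * TLB_SAMPLE_MODE_PER_PIXEL | TLB_F16_SWAP_HI_LO | TLB_VEC_SIZE_4_F16
 * == 0xc0 | 0x38 | 0x4 | 0x2 | 0x1 == 0xff, which is why the 0xffffffff
 * preload gives that behavior for free.
 */
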
static void
emit_frag_end(struct v3d_compile *c)
{
        /* XXX
        if (c->output_sample_mask_index != -1) {
                vir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }
        */

        bool has_any_tlb_color_write = false;
        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (c->output_color_var[rt])
                        has_any_tlb_color_write = true;
        }

        if (c->fs_key->sample_alpha_to_coverage && c->output_color_var[0]) {
                struct nir_variable *var = c->output_color_var[0];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];

                vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                vir_AND(c,
                                        vir_MSF(c),
                                        vir_FTOC(c, color[3])));
        }

        if (c->output_position_index != -1) {
                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  c->outputs[c->output_position_index]);
                uint8_t tlb_specifier = TLB_TYPE_DEPTH;

                if (c->devinfo->ver >= 42) {
                        tlb_specifier |= (TLB_V42_DEPTH_TYPE_PER_PIXEL |
                                          TLB_SAMPLE_MODE_PER_PIXEL);
                } else
                        tlb_specifier |= TLB_DEPTH_TYPE_PER_PIXEL;

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c, tlb_specifier | 0xffffff00);
        } else if (c->s->info.fs.uses_discard ||
                   c->fs_key->sample_alpha_to_coverage ||
                   !has_any_tlb_color_write) {
                /* Emit passthrough Z if it needed to be delayed until shader
                 * end due to potential discards.
                 *
                 * Since (single-threaded) fragment shaders always need a TLB
                 * write, emit passthrough Z if we didn't have any color
                 * buffers and flag us as potentially discarding, so that we
                 * can use Z as the TLB write.
                 */
                c->s->info.fs.uses_discard = true;

                struct qinst *inst = vir_MOV_dest(c,
                                                  vir_reg(QFILE_TLBU, 0),
                                                  vir_reg(QFILE_NULL, 0));
                uint8_t tlb_specifier = TLB_TYPE_DEPTH;

                if (c->devinfo->ver >= 42) {
                        /* The spec says the PER_PIXEL flag is ignored for
                         * invariant writes, but the simulator demands it.
                         */
                        tlb_specifier |= (TLB_V42_DEPTH_TYPE_INVARIANT |
                                          TLB_SAMPLE_MODE_PER_PIXEL);
                } else {
                        tlb_specifier |= TLB_DEPTH_TYPE_INVARIANT;
                }

                inst->src[vir_get_implicit_uniform_src(inst)] =
                        vir_uniform_ui(c, tlb_specifier | 0xffffff00);
        }

        /* XXX: Performance improvement: Merge Z write and color writes TLB
         * uniform setup
         */

        for (int rt = 0; rt < c->fs_key->nr_cbufs; rt++) {
                if (!c->output_color_var[rt])
                        continue;

                nir_variable *var = c->output_color_var[rt];
                struct qreg *color = &c->outputs[var->data.driver_location * 4];
                int num_components = glsl_get_vector_elements(var->type);
                uint32_t conf = 0xffffff00;
                struct qinst *inst;

                conf |= TLB_SAMPLE_MODE_PER_PIXEL;
                conf |= (7 - rt) << TLB_RENDER_TARGET_SHIFT;

                if (c->fs_key->swap_color_rb & (1 << rt))
                        num_components = MAX2(num_components, 3);

                assert(num_components != 0);
                switch (glsl_get_base_type(var->type)) {
                case GLSL_TYPE_UINT:
                case GLSL_TYPE_INT:
                        /* The F32 vs I32 distinction was dropped in 4.2. */
                        if (c->devinfo->ver < 42)
                                conf |= TLB_TYPE_I32_COLOR;
                        else
                                conf |= TLB_TYPE_F32_COLOR;
                        conf |= ((num_components - 1) <<
                                 TLB_VEC_SIZE_MINUS_1_SHIFT);

                        inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), color[0]);
                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                vir_uniform_ui(c, conf);

                        for (int i = 1; i < num_components; i++) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLB, 0),
                                                    color[i]);
                        }
                        break;

                default: {
                        struct qreg r = color[0];
                        struct qreg g = color[1];
                        struct qreg b = color[2];
                        struct qreg a = color[3];

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                conf |= TLB_TYPE_F32_COLOR;
                                conf |= ((num_components - 1) <<
                                         TLB_VEC_SIZE_MINUS_1_SHIFT);
                        } else {
                                conf |= TLB_TYPE_F16_COLOR;
                                conf |= TLB_F16_SWAP_HI_LO;
                                if (num_components >= 3)
                                        conf |= TLB_VEC_SIZE_4_F16;
                                else
                                        conf |= TLB_VEC_SIZE_2_F16;
                        }

                        if (c->fs_key->swap_color_rb & (1 << rt)) {
                                r = color[2];
                                b = color[0];
                        }

                        if (c->fs_key->sample_alpha_to_one)
                                a = vir_uniform_f(c, 1.0);

                        if (c->fs_key->f32_color_rb & (1 << rt)) {
                                inst = vir_MOV_dest(c, vir_reg(QFILE_TLBU, 0), r);
                                inst->src[vir_get_implicit_uniform_src(inst)] =
                                        vir_uniform_ui(c, conf);

                                if (num_components >= 2)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), g);
                                if (num_components >= 3)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), b);
                                if (num_components >= 4)
                                        vir_MOV_dest(c, vir_reg(QFILE_TLB, 0), a);
                        } else {
                                inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), r, g);
                                if (conf != ~0) {
                                        inst->dst.file = QFILE_TLBU;
                                        inst->src[vir_get_implicit_uniform_src(inst)] =
                                                vir_uniform_ui(c, conf);
                                }

                                if (num_components >= 3)
                                        inst = vir_VFPACK_dest(c, vir_reg(QFILE_TLB, 0), b, a);
                        }
                        break;
                }
                }
        }
}

static void
vir_VPM_WRITE(struct v3d_compile *c, struct qreg val, uint32_t *vpm_index)
{
        if (c->devinfo->ver >= 40) {
                vir_STVPMV(c, vir_uniform_ui(c, *vpm_index), val);
                *vpm_index = *vpm_index + 1;
        } else {
                vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_VPM), val);
        }

        c->num_vpm_writes++;
}

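/* A rough model of the two paths above: on V3D 4.x each value is stored
 * with an explicit STVPMV at the row named by *vpm_index, so callers
 * thread the index through successive writes, while on 3.3 writes go to
 * the magic VPM address configured once by emit_vpm_write_setup() and the
 * index only feeds the num_vpm_writes bookkeeping.
 */
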
static void
emit_scaled_viewport_write(struct v3d_compile *c, struct qreg rcp_w,
                           uint32_t *vpm_index)
{
        for (int i = 0; i < 2; i++) {
                struct qreg coord = c->outputs[c->output_position_index + i];
                coord = vir_FMUL(c, coord,
                                 vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i,
                                             0));
                coord = vir_FMUL(c, coord, rcp_w);
                vir_VPM_WRITE(c, vir_FTOIN(c, coord), vpm_index);
        }
}

static void
emit_zs_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
{
        struct qreg zscale = vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        struct qreg z = c->outputs[c->output_position_index + 2];
        z = vir_FMUL(c, z, zscale);
        z = vir_FMUL(c, z, rcp_w);
        z = vir_FADD(c, z, zoffset);
        vir_VPM_WRITE(c, z, vpm_index);
}

static void
emit_rcp_wc_write(struct v3d_compile *c, struct qreg rcp_w, uint32_t *vpm_index)
{
        vir_VPM_WRITE(c, rcp_w, vpm_index);
}

static void
emit_point_size_write(struct v3d_compile *c, uint32_t *vpm_index)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = vir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = vir_FMAX(c, point_size, vir_uniform_f(c, .125));

        vir_VPM_WRITE(c, point_size, vpm_index);
}

static void
emit_vpm_write_setup(struct v3d_compile *c)
{
        if (c->devinfo->ver >= 40)
                return;

        v3d33_vir_vpm_write_setup(c);
}

/**
 * Sets up c->outputs[c->output_position_index] for the vertex shader
 * epilogue, if an output vertex position wasn't specified in the user's
 * shader.  This may be the case for transform feedback with rasterizer
 * discard enabled.
 */
static void
setup_default_position(struct v3d_compile *c)
{
        if (c->output_position_index != -1)
                return;

        c->output_position_index = c->outputs_array_size;
        for (int i = 0; i < 4; i++) {
                add_output(c,
                           c->output_position_index + i,
                           VARYING_SLOT_POS, i);
        }
}

static void
emit_vert_end(struct v3d_compile *c)
{
        setup_default_position(c);

        uint32_t vpm_index = 0;
        struct qreg rcp_w = vir_RECIP(c,
                                      c->outputs[c->output_position_index + 3]);

        emit_vpm_write_setup(c);

        if (c->vs_key->is_coord) {
                for (int i = 0; i < 4; i++)
                        vir_VPM_WRITE(c, c->outputs[c->output_position_index + i],
                                      &vpm_index);
                emit_scaled_viewport_write(c, rcp_w, &vpm_index);
                if (c->vs_key->per_vertex_point_size) {
                        emit_point_size_write(c, &vpm_index);
                        /* emit_rcp_wc_write(c, rcp_w); */
                }
                /* XXX: Z-only rendering */
                if (0)
                        emit_zs_write(c, rcp_w, &vpm_index);
        } else {
                emit_scaled_viewport_write(c, rcp_w, &vpm_index);
                emit_zs_write(c, rcp_w, &vpm_index);
                emit_rcp_wc_write(c, rcp_w, &vpm_index);
                if (c->vs_key->per_vertex_point_size)
                        emit_point_size_write(c, &vpm_index);
        }

        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                struct v3d_varying_slot input = c->vs_key->fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct v3d_varying_slot output = c->output_slots[j];

                        if (!memcmp(&input, &output, sizeof(input))) {
                                vir_VPM_WRITE(c, c->outputs[j],
                                              &vpm_index);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        vir_VPM_WRITE(c, vir_uniform_f(c, 0.0),
                                      &vpm_index);
        }

        /* GFXH-1684: VPM writes need to be complete by the end of the shader.
         */
        if (c->devinfo->ver >= 40 && c->devinfo->ver <= 42)
                vir_VPMWT(c);
}

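/* To summarize the epilogue above: coordinate shaders emit the raw
 * clip-space XYZW rows first, regular vertex shaders emit scaled viewport
 * X/Y, Zs, and 1/Wc, and both are followed by one VPM row per FS input in
 * fs_inputs[] order, padded with 0.0 wherever the VS never declared a
 * matching output.
 */
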
void
v3d_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);

        NIR_PASS(progress, s, nir_opt_move_load_ubo);
}

static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}

static struct qreg
ntq_emit_vpm_read(struct v3d_compile *c,
                  uint32_t *num_components_queued,
                  uint32_t *remaining,
                  uint32_t vpm_index)
{
        struct qreg vpm = vir_reg(QFILE_VPM, vpm_index);

        if (c->devinfo->ver >= 40) {
                return vir_LDVPMV_IN(c,
                                     vir_uniform_ui(c,
                                                    (*num_components_queued)++));
        }

        if (*num_components_queued != 0) {
                (*num_components_queued)--;
                c->num_inputs++;
                return vir_MOV(c, vpm);
        }

        uint32_t num_components = MIN2(*remaining, 32);

        v3d33_vir_vpm_read_setup(c, num_components);

        *num_components_queued = num_components - 1;
        *remaining -= num_components;
        c->num_inputs++;

        return vir_MOV(c, vpm);
}

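/* On 3.3 the reads above are batched: one v3d33_vir_vpm_read_setup() call
 * covers up to 32 sequential components, *num_components_queued counts how
 * many of those can still be MOVed out before a new setup is needed, and
 * *remaining tracks how many components the shader has left to request.
 * On 4.x none of that applies, since LDVPMV addresses each element
 * directly.
 */
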
static void
ntq_setup_vpm_inputs(struct v3d_compile *c)
{
        /* Figure out how many components of each vertex attribute the shader
         * uses.  Each variable should have been split to individual
         * components and unused ones DCEed.  The vertex fetcher will load
         * from the start of the attribute to the number of components we
         * declare we need in c->vattr_sizes[].
         */
        nir_foreach_variable(var, &c->s->inputs) {
                /* No VS attribute array support. */
                assert(MAX2(glsl_get_length(var->type), 1) == 1);

                unsigned loc = var->data.driver_location;
                int start_component = var->data.location_frac;
                int num_components = glsl_get_components(var->type);

                c->vattr_sizes[loc] = MAX2(c->vattr_sizes[loc],
                                           start_component + num_components);
        }

        unsigned num_components = 0;
        uint32_t vpm_components_queued = 0;
        bool uses_iid = c->s->info.system_values_read &
                (1ull << SYSTEM_VALUE_INSTANCE_ID);
        bool uses_vid = c->s->info.system_values_read &
                (1ull << SYSTEM_VALUE_VERTEX_ID);
        num_components += uses_iid;
        num_components += uses_vid;

        for (int i = 0; i < ARRAY_SIZE(c->vattr_sizes); i++)
                num_components += c->vattr_sizes[i];

        if (uses_iid) {
                c->iid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                           &num_components, ~0);
        }

        if (uses_vid) {
                c->vid = ntq_emit_vpm_read(c, &vpm_components_queued,
                                           &num_components, ~0);
        }

        for (int loc = 0; loc < ARRAY_SIZE(c->vattr_sizes); loc++) {
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                for (int i = 0; i < c->vattr_sizes[loc]; i++) {
                        c->inputs[loc * 4 + i] =
                                ntq_emit_vpm_read(c,
                                                  &vpm_components_queued,
                                                  &num_components,
                                                  loc * 4 + i);
                }
        }

        if (c->devinfo->ver >= 40) {
                assert(vpm_components_queued == num_components);
        } else {
                assert(vpm_components_queued == 0);
                assert(num_components == 0);
        }
}

static void
ntq_setup_fs_inputs(struct v3d_compile *c)
{
        unsigned num_entries = 0;
        unsigned num_components = 0;
        nir_foreach_variable(var, &c->s->inputs) {
                num_entries++;
                num_components += glsl_get_components(var->type);
        }

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (var->data.location == VARYING_SLOT_POS) {
                        emit_fragcoord_input(c, loc);
                } else if (var->data.location == VARYING_SLOT_PNTC ||
                           (var->data.location >= VARYING_SLOT_VAR0 &&
                            (c->fs_key->point_sprite_mask &
                             (1 << (var->data.location -
                                    VARYING_SLOT_VAR0))))) {
                        c->inputs[loc * 4 + 0] = c->point_x;
                        c->inputs[loc * 4 + 1] = c->point_y;
                } else {
                        emit_fragment_input(c, loc, var);
                }
        }
}

static void
ntq_setup_outputs(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4 - var->data.location_frac; i++) {
                        add_output(c, loc + var->data.location_frac + i,
                                   var->data.location,
                                   var->data.location_frac + i);
                }

                if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                                c->output_color_var[0] = var;
                                c->output_color_var[1] = var;
                                c->output_color_var[2] = var;
                                c->output_color_var[3] = var;
                                break;
                        case FRAG_RESULT_DATA0:
                        case FRAG_RESULT_DATA1:
                        case FRAG_RESULT_DATA2:
                        case FRAG_RESULT_DATA3:
                                c->output_color_var[var->data.location -
                                                    FRAG_RESULT_DATA0] = var;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}

static void
ntq_setup_uniforms(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                uint32_t vec4_count = glsl_count_attribute_slots(var->type,
                                                                 false);
                unsigned vec4_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * vec4_size,
                                      vec4_count * vec4_size);
        }
}

/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = vir_get_temp(c);
        }
}

static void
ntq_emit_load_const(struct v3d_compile *c, nir_load_const_instr *instr)
{
        /* XXX perf: Experiment with using immediate loads to avoid having
         * these end up in the uniform stream.  Watch out for breaking the
         * small immediates optimization in the process!
         */
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, instr->value.u32[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}

static void
ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* VIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers().
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = vir_uniform_ui(c, 0);
}

static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        unsigned offset;

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                if (nir_src_is_const(instr->src[0])) {
                        offset = (nir_intrinsic_base(instr) +
                                  nir_src_as_uint(instr->src[0]));
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        ntq_store_dest(c, &instr->dest, 0,
                                       vir_uniform(c, QUNIFORM_UNIFORM,
                                                   offset));
                } else {
                        ntq_store_dest(c, &instr->dest, 0,
                                       indirect_uniform_load(c, instr));
                }
                break;

        case nir_intrinsic_load_ubo:
                for (int i = 0; i < instr->num_components; i++) {
                        int ubo = nir_src_as_uint(instr->src[0]);

                        /* XXX perf: On V3D 4.x with uniform offsets, we
                         * should probably try setting UBOs up in the A
                         * register file and doing a sequence of loads that
                         * way.
                         */
                        /* Adjust for where we stored the TGSI register base. */
                        vir_ADD_dest(c,
                                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA),
                                     vir_uniform(c, QUNIFORM_UBO_ADDR, 1 + ubo),
                                     vir_ADD(c,
                                             ntq_get_src(c, instr->src[1], 0),
                                             vir_uniform_ui(c, i * 4)));

                        vir_emit_thrsw(c);

                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
                }
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                                   nir_intrinsic_ucp_id(instr) *
                                                   4 + i));
                }
                break;

        case nir_intrinsic_load_alpha_ref_float:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_ALPHA_REF, 0));
                break;

        case nir_intrinsic_load_sample_mask_in:
                ntq_store_dest(c, &instr->dest, 0, vir_MSF(c));
                break;

        case nir_intrinsic_load_front_face:
                /* The register contains 0 (front) or 1 (back), and we need to
                 * turn it into a NIR bool where true means front.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               vir_ADD(c,
                                       vir_uniform_ui(c, -1),
                                       vir_REVF(c)));
                break;

        case nir_intrinsic_load_instance_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->iid));
                break;

        case nir_intrinsic_load_vertex_id:
                ntq_store_dest(c, &instr->dest, 0, vir_MOV(c, c->vid));
                break;

        case nir_intrinsic_load_input:
                for (int i = 0; i < instr->num_components; i++) {
                        offset = (nir_intrinsic_base(instr) +
                                  nir_src_as_uint(instr->src[0]));
                        int comp = nir_intrinsic_component(instr) + i;
                        ntq_store_dest(c, &instr->dest, i,
                                       vir_MOV(c, c->inputs[offset * 4 + comp]));
                }
                break;

        case nir_intrinsic_store_output:
                offset = ((nir_intrinsic_base(instr) +
                           nir_src_as_uint(instr->src[1])) * 4 +
                          nir_intrinsic_component(instr));

                for (int i = 0; i < instr->num_components; i++) {
                        c->outputs[offset + i] =
                                vir_MOV(c, ntq_get_src(c, instr->src[0], i));
                }
                c->num_outputs = MAX2(c->num_outputs,
                                      offset + instr->num_components);
                break;

        case nir_intrinsic_discard:
                if (c->execute.file != QFILE_NULL) {
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                        vir_uniform_ui(c, 0));
                }
                break;

        case nir_intrinsic_discard_if: {
                /* true (~0) if we're discarding */
                struct qreg cond = ntq_get_src(c, instr->src[0], 0);

                if (c->execute.file != QFILE_NULL) {
                        /* execute == 0 means the channel is active.  Invert
                         * the condition so that we can use zero as "executing
                         * and discarding."
                         */
                        vir_PF(c, vir_OR(c, c->execute, vir_NOT(c, cond)),
                               V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFA);
                } else {
                        vir_PF(c, cond, V3D_QPU_PF_PUSHZ);
                        vir_set_cond(vir_SETMSF_dest(c, vir_reg(QFILE_NULL, 0),
                                                     vir_uniform_ui(c, 0)),
                                     V3D_QPU_COND_IFNA);
                }

                break;
        }

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

/* Clears (activates) the execute flags for any channels whose jump target
 * matches this block.
 *
 * XXX perf: Could we be using flpush/flpop somehow for our execution channel
 * enabling?
 *
 * XXX perf: For uniform control flow, we should be able to skip c->execute
 * handling entirely.
 */
static void
ntq_activate_execute_for_block(struct v3d_compile *c)
{
        vir_PF(c, vir_XOR(c, c->execute, vir_uniform_ui(c, c->cur_block->index)),
               V3D_QPU_PF_PUSHZ);

        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));
}

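/* The execute-mask convention used by all of the control-flow emission
 * below: a channel's c->execute value is 0 while it is running and
 * otherwise holds the index of the block it is waiting to resume at, so
 * XORing with cur_block->index pushes Z exactly for the channels that
 * should wake up here.
 */
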
static void
ntq_emit_if(struct v3d_compile *c, nir_if *if_stmt)
{
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = vir_new_block(c);
        struct qblock *after_block = vir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = vir_new_block(c);

        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_top_level = true;
        }

        /* Set A for executing (execute == 0) and jumping (if->condition ==
         * 0) channels, and then update execute flags for those to point to
         * the ELSE block.
         *
         * XXX perf: we could reuse ntq_emit_comparison() to generate our if
         * condition, and the .uf field to ignore non-executing channels, to
         * reduce the overhead of if statements.
         */
        vir_PF(c, vir_OR(c,
                         c->execute,
                         ntq_get_src(c, if_stmt->condition, 0)),
               V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA,
                     c->execute,
                     vir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
        vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLNA);
        vir_link_blocks(c->cur_block, else_block);
        vir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        vir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block.  First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF
                 */
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                vir_PF(c, vir_XOR(c, c->execute,
                                  vir_uniform_ui(c, after_block->index)),
                       V3D_QPU_PF_PUSHZ);
                vir_BRANCH(c, V3D_QPU_BRANCH_COND_ALLA);
                vir_link_blocks(c->cur_block, after_block);
                vir_link_blocks(c->cur_block, else_block);

                vir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        vir_link_blocks(c->cur_block, after_block);

        vir_set_emit_block(c, after_block);
        if (was_top_level)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);
}

static void
ntq_emit_jump(struct v3d_compile *c, nir_jump_instr *jump)
{
        switch (jump->type) {
        case nir_jump_break:
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_break_block->index));
                break;

        case nir_jump_continue:
                vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);
                vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute,
                             vir_uniform_ui(c, c->loop_cont_block->index));
                break;

        case nir_jump_return:
                unreachable("All returns should be lowered\n");
        }
}

static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                ntq_emit_jump(c, nir_instr_as_jump(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
ntq_emit_block(struct v3d_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}

static void ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list);

static void
ntq_emit_loop(struct v3d_compile *c, nir_loop *loop)
{
        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = vir_MOV(c, vir_uniform_ui(c, 0));
                was_top_level = true;
        }

        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = vir_new_block(c);
        c->loop_break_block = vir_new_block(c);

        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* Re-enable any previous continues now, so our ANYA check below
         * won't block the jumps back up to the top.
         *
         * XXX: Use the .ORZ flags update, instead.
         */
        vir_PF(c, vir_XOR(c,
                          c->execute,
                          vir_uniform_ui(c, c->loop_cont_block->index)),
               V3D_QPU_PF_PUSHZ);
        vir_MOV_cond(c, V3D_QPU_COND_IFA, c->execute, vir_uniform_ui(c, 0));

        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);

        struct qinst *branch = vir_BRANCH(c, V3D_QPU_BRANCH_COND_ANYA);
        /* Pixels that were not dispatched or have been discarded should not
         * contribute to looping again.
         */
        branch->qpu.branch.msfign = V3D_QPU_MSFIGN_P;
        vir_link_blocks(c->cur_block, c->loop_cont_block);
        vir_link_blocks(c->cur_block, c->loop_break_block);

        vir_set_emit_block(c, c->loop_break_block);
        if (was_top_level)
                c->execute = c->undef;
        else
                ntq_activate_execute_for_block(c);

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;

        c->loops++;
}

static void
ntq_emit_function(struct v3d_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}

static void
ntq_emit_cf_list(struct v3d_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}

static void
ntq_emit_impl(struct v3d_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}

static void
nir_to_vir(struct v3d_compile *c)
{
        if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
                c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
                c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
                c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));

                /* XXX perf: We could set the "disable implicit point/line
                 * varyings" field in the shader record and not emit these, if
                 * they're not going to be used.
                 */
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, NULL, 0);
                        c->point_y = emit_fragment_varying(c, NULL, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, NULL, 0);
                }
        }

        if (c->s->info.stage == MESA_SHADER_FRAGMENT)
                ntq_setup_fs_inputs(c);
        else
                ntq_setup_vpm_inputs(c);

        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}

const nir_shader_compiler_options v3d_nir_options = {
        .lower_all_io_to_temps = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_bitfield_insert_to_shifts = true,
        .lower_bitfield_extract_to_shifts = true,
        .lower_bitfield_reverse = true,
        .lower_bit_count = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_unpack_snorm_4x8 = true,
        .lower_pack_half_2x16 = true,
        .lower_unpack_half_2x16 = true,
        .lower_find_lsb = true,
        .lower_flrp32 = true,
        .lower_fsqrt = true,
        .lower_ifind_msb = true,
        .lower_ldexp = true,
        .lower_mul_high = true,
        .lower_wpos_pntc = true,
        .native_integers = true,
};

static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_function(function, nir) {
                if (!function->impl)
                        continue;
                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr(instr, block)
                                count++;
                }
        }
        return count;
}

/**
 * When demoting a shader down to single-threaded, removes the THRSW
 * instructions (one will still be inserted at v3d_vir_to_qpu() for the
 * program end).
 */
static void
vir_remove_thrsw(struct v3d_compile *c)
{
        vir_for_each_block(block, c) {
                vir_for_each_inst_safe(inst, block) {
                        if (inst->qpu.sig.thrsw)
                                vir_remove_instruction(c, inst);
                }
        }

        c->last_thrsw = NULL;
}

static void
vir_emit_last_thrsw(struct v3d_compile *c)
{
        /* On V3D before 4.1, we need a TMU op to be outstanding when thread
         * switching, so disable threads if we didn't do any TMU ops (each of
         * which would have emitted a THRSW).
         */
        if (!c->last_thrsw_at_top_level && c->devinfo->ver < 41) {
                c->threads = 1;
                if (c->last_thrsw)
                        vir_remove_thrsw(c);
                return;
        }

        /* If we're threaded and the last THRSW was in conditional code, then
         * we need to emit another one so that we can flag it as the last
         * thrsw.
         */
        if (c->last_thrsw && !c->last_thrsw_at_top_level) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        /* If we're threaded, then we need to mark the last THRSW instruction
         * so we can emit a pair of them at QPU emit time.
         *
         * For V3D 4.x, we can spawn the non-fragment shaders already in the
         * post-last-THRSW state, so we can skip this.
         */
        if (!c->last_thrsw && c->s->info.stage == MESA_SHADER_FRAGMENT) {
                assert(c->devinfo->ver >= 41);
                vir_emit_thrsw(c);
        }

        if (c->last_thrsw)
                c->last_thrsw->is_last_thrsw = true;
}

/* There's a flag in the shader for "center W is needed for reasons other than
 * non-centroid varyings", so we just walk the program after VIR optimization
 * to see if it's used.  It should be harmless to set even if we only use
 * center W for varyings.
 */
static void
vir_check_payload_w(struct v3d_compile *c)
{
        if (c->s->info.stage != MESA_SHADER_FRAGMENT)
                return;

        vir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_REG &&
                            inst->src[i].index == 0) {
                                c->uses_center_w = true;
                                return;
                        }
                }
        }
}

void
v3d_nir_to_vir(struct v3d_compile *c)
{
        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_vir(c);

        /* Emit the last THRSW before STVPM and TLB writes. */
        vir_emit_last_thrsw(c);

        switch (c->s->info.stage) {
        case MESA_SHADER_FRAGMENT:
                emit_frag_end(c);
                break;
        case MESA_SHADER_VERTEX:
                emit_vert_end(c);
                break;
        default:
                unreachable("bad stage");
        }

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        vir_optimize(c);
        vir_lower_uniforms(c);

        vir_check_payload_w(c);

        /* XXX perf: On VC4, we do a VIR-level instruction scheduling here.
         * We used that on that platform to pipeline TMU writes and reduce the
         * number of thread switches, as well as try (mostly successfully) to
         * reduce maximum register pressure to allow more threads.  We should
         * do something of that sort for V3D -- either instruction scheduling
         * here, or delay the THRSW and LDTMUs from our texture instructions
         * until the results are needed.
         */

        if (V3D_DEBUG & (V3D_DEBUG_VIR |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                fprintf(stderr, "%s prog %d/%d VIR:\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id);
                vir_dump(c);
                fprintf(stderr, "\n");
        }

        /* Attempt to allocate registers for the temporaries.  If we fail,
         * reduce thread count and try again.
         */
        int min_threads = (c->devinfo->ver >= 41) ? 2 : 1;
        struct qpu_reg *temp_registers;
        while (true) {
                bool spilled;
                temp_registers = v3d_register_allocate(c, &spilled);
                if (spilled)
                        continue;

                if (temp_registers)
                        break;

                if (c->threads == min_threads) {
                        fprintf(stderr, "Failed to register allocate at %d threads:\n",
                                c->threads);
                        vir_dump(c);
                        c->failed = true;
                        return;
                }

                c->threads /= 2;

                if (c->threads == 1)
                        vir_remove_thrsw(c);
        }

        v3d_vir_to_qpu(c, temp_registers);
}