/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "ir3/ir3_nir.h"
#include "main/menums.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"
#include "util/debug.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "vk_format.h"
/* Emit IB that preloads the descriptors that the shader uses */

static void
emit_load_state(struct tu_cs *cs, unsigned opcode, enum a6xx_state_type st,
                enum a6xx_state_block sb, unsigned base, unsigned offset,
                unsigned count)
{
   /* Note: just emit one packet, even if count overflows NUM_UNIT. It's not
    * clear if emitting more packets will even help anything. Presumably the
    * descriptor cache is relatively small, and these packets stop doing
    * anything when there are too many descriptors.
    */
   tu_cs_emit_pkt7(cs, opcode, 3);
   tu_cs_emit(cs,
              CP_LOAD_STATE6_0_STATE_TYPE(st) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_BINDLESS) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(MIN2(count, 1024-1)));
   tu_cs_emit_qw(cs, offset | (base << 28));
}
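/* Count the worst-case number of dwords needed for the descriptor-preload IB.
 * Each emit_load_state() packet above is 4 dwords (pkt7 header plus 3 payload
 * dwords), which is what load_state_size accounts for below.
 */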
static unsigned
tu6_load_state_size(struct tu_pipeline_layout *layout, bool compute)
{
   const unsigned load_state_size = 4;
   unsigned size = 0;
   for (unsigned i = 0; i < layout->num_sets; i++) {
      struct tu_descriptor_set_layout *set_layout = layout->set[i].layout;
      for (unsigned j = 0; j < set_layout->binding_count; j++) {
         struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
         unsigned count = 0;
         /* Note: some users, like amber for example, pass in
          * VK_SHADER_STAGE_ALL which includes a bunch of extra bits, so
          * filter these out by using VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
          */
         VkShaderStageFlags stages = compute ?
            binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
            binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
         unsigned stage_count = util_bitcount(stages);
         switch (binding->type) {
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            /* IBO-backed resources only need one packet for all graphics stages */
            if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT)
               count += 1;
            if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
               count += 1;
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            /* Textures and UBOs need a packet for each stage */
            count = stage_count;
            break;
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            /* Because of how we pack combined images and samplers, we
             * currently can't use one packet for the whole array.
             */
            count = stage_count * binding->array_size * 2;
            break;
         default:
            unreachable("bad descriptor type");
         }
         size += count * load_state_size;
      }
   }
   return size;
}
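/* Emit the actual preload packets. The switch below must stay in sync with
 * the size calculation in tu6_load_state_size() above.
 */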
static void
tu6_emit_load_state(struct tu_pipeline *pipeline, bool compute)
{
   unsigned size = tu6_load_state_size(pipeline->layout, compute);
   if (size == 0)
      return;

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&pipeline->cs, size, &cs);

   struct tu_pipeline_layout *layout = pipeline->layout;
   for (unsigned i = 0; i < layout->num_sets; i++) {
      /* From 13.2.7. Descriptor Set Binding:
       *
       *    A compatible descriptor set must be bound for all set numbers that
       *    any shaders in a pipeline access, at the time that a draw or
       *    dispatch command is recorded to execute using that pipeline.
       *    However, if none of the shaders in a pipeline statically use any
       *    bindings with a particular set number, then no descriptor set need
       *    be bound for that set number, even if the pipeline layout includes
       *    a non-trivial descriptor set layout for that set number.
       *
       * This means that descriptor sets unused by the pipeline may have a
       * garbage or 0 BINDLESS_BASE register, which will cause context faults
       * when prefetching descriptors from these sets. Skip prefetching for
       * descriptors from them to avoid this. This is also an optimization,
       * since these prefetches would be useless.
       */
      if (!(pipeline->active_desc_sets & (1u << i)))
         continue;

      struct tu_descriptor_set_layout *set_layout = layout->set[i].layout;
      for (unsigned j = 0; j < set_layout->binding_count; j++) {
         struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
         unsigned base = i;
         unsigned offset = binding->offset / 4;
         /* Note: some users, like amber for example, pass in
          * VK_SHADER_STAGE_ALL which includes a bunch of extra bits, so
          * filter these out by using VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
          */
         VkShaderStageFlags stages = compute ?
            binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
            binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
         unsigned count = binding->array_size;
         if (count == 0 || stages == 0)
            continue;
         switch (binding->type) {
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
            base = MAX_SETS;
            offset = (layout->set[i].dynamic_offset_start +
                      binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
            /* fallthrough */
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            /* IBO-backed resources only need one packet for all graphics stages */
            if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT) {
               emit_load_state(&cs, CP_LOAD_STATE6, ST6_SHADER, SB6_IBO,
                               base, offset, count);
            }
            if (stages & VK_SHADER_STAGE_COMPUTE_BIT) {
               emit_load_state(&cs, CP_LOAD_STATE6_FRAG, ST6_IBO, SB6_CS_SHADER,
                               base, offset, count);
            }
            break;
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            /* nothing - input attachment doesn't use bindless */
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
            tu_foreach_stage(stage, stages) {
               emit_load_state(&cs, tu6_stage2opcode(stage),
                               binding->type == VK_DESCRIPTOR_TYPE_SAMPLER ?
                               ST6_SHADER : ST6_CONSTANTS,
                               tu6_stage2texsb(stage), base, offset, count);
            }
            break;
         }
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            base = MAX_SETS;
            offset = (layout->set[i].dynamic_offset_start +
                      binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
            /* fallthrough */
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
            tu_foreach_stage(stage, stages) {
               emit_load_state(&cs, tu6_stage2opcode(stage), ST6_UBO,
                               tu6_stage2shadersb(stage), base, offset, count);
            }
            break;
         }
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
            tu_foreach_stage(stage, stages) {
               /* TODO: We could emit less CP_LOAD_STATE6 if we used
                * struct-of-arrays instead of array-of-structs.
                */
               for (unsigned i = 0; i < count; i++) {
                  unsigned tex_offset = offset + 2 * i * A6XX_TEX_CONST_DWORDS;
                  unsigned sam_offset = offset + (2 * i + 1) * A6XX_TEX_CONST_DWORDS;
                  emit_load_state(&cs, tu6_stage2opcode(stage),
                                  ST6_CONSTANTS, tu6_stage2texsb(stage),
                                  base, tex_offset, 1);
                  emit_load_state(&cs, tu6_stage2opcode(stage),
                                  ST6_SHADER, tu6_stage2texsb(stage),
                                  base, sam_offset, 1);
               }
            }
            break;
         }
         default:
            unreachable("bad descriptor type");
         }
      }
   }

   pipeline->load_state.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &cs);
}
struct tu_pipeline_builder
{
   struct tu_device *device;
   struct tu_pipeline_cache *cache;
   struct tu_pipeline_layout *layout;
   const VkAllocationCallbacks *alloc;
   const VkGraphicsPipelineCreateInfo *create_info;

   struct tu_shader *shaders[MESA_SHADER_STAGES];
   struct ir3_shader_variant *variants[MESA_SHADER_STAGES];
   struct ir3_shader_variant *binning_variant;
   uint32_t shader_offsets[MESA_SHADER_STAGES];
   uint32_t binning_vs_offset;
   uint32_t shader_total_size;

   bool rasterizer_discard;
   /* these states are affected by rasterizer_discard */
   VkSampleCountFlagBits samples;
   bool use_color_attachments;
   bool use_dual_src_blend;
   uint32_t color_attachment_count;
   VkFormat color_attachment_formats[MAX_RTS];
   VkFormat depth_attachment_format;
   uint32_t render_components;
};
static bool
tu_logic_op_reads_dst(VkLogicOp op)
{
   switch (op) {
   case VK_LOGIC_OP_CLEAR:
   case VK_LOGIC_OP_COPY:
   case VK_LOGIC_OP_COPY_INVERTED:
   case VK_LOGIC_OP_SET:
      return false;
   default:
      return true;
   }
}
static VkBlendFactor
tu_blend_factor_no_dst_alpha(VkBlendFactor factor)
{
   /* treat dst alpha as 1.0 and avoid reading it */
   switch (factor) {
   case VK_BLEND_FACTOR_DST_ALPHA:
      return VK_BLEND_FACTOR_ONE;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return VK_BLEND_FACTOR_ZERO;
   default:
      return factor;
   }
}
static bool tu_blend_factor_is_dual_src(VkBlendFactor factor)
{
   switch (factor) {
   case VK_BLEND_FACTOR_SRC1_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
   case VK_BLEND_FACTOR_SRC1_ALPHA:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
      return true;
   default:
      return false;
   }
}
static bool
tu_blend_state_is_dual_src(const VkPipelineColorBlendStateCreateInfo *info)
{
   if (!info)
      return false;

   for (unsigned i = 0; i < info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *blend = &info->pAttachments[i];
      if (tu_blend_factor_is_dual_src(blend->srcColorBlendFactor) ||
          tu_blend_factor_is_dual_src(blend->dstColorBlendFactor) ||
          tu_blend_factor_is_dual_src(blend->srcAlphaBlendFactor) ||
          tu_blend_factor_is_dual_src(blend->dstAlphaBlendFactor))
         return true;
   }

   return false;
}
static void
tu6_emit_xs_config(struct tu_cs *cs,
                   gl_shader_stage stage, /* xs->type, but xs may be NULL */
                   const struct ir3_shader_variant *xs,
                   uint64_t binary_iova)
{
   static const struct xs_config {
      uint16_t reg_sp_xs_ctrl;
      uint16_t reg_sp_xs_config;
      uint16_t reg_hlsq_xs_ctrl;
      uint16_t reg_sp_vs_obj_start;
   } xs_config[] = {
      [MESA_SHADER_VERTEX] = {
         REG_A6XX_SP_VS_CTRL_REG0,
         REG_A6XX_SP_VS_CONFIG,
         REG_A6XX_HLSQ_VS_CNTL,
         REG_A6XX_SP_VS_OBJ_START_LO,
      },
      [MESA_SHADER_TESS_CTRL] = {
         REG_A6XX_SP_HS_CTRL_REG0,
         REG_A6XX_SP_HS_CONFIG,
         REG_A6XX_HLSQ_HS_CNTL,
         REG_A6XX_SP_HS_OBJ_START_LO,
      },
      [MESA_SHADER_TESS_EVAL] = {
         REG_A6XX_SP_DS_CTRL_REG0,
         REG_A6XX_SP_DS_CONFIG,
         REG_A6XX_HLSQ_DS_CNTL,
         REG_A6XX_SP_DS_OBJ_START_LO,
      },
      [MESA_SHADER_GEOMETRY] = {
         REG_A6XX_SP_GS_CTRL_REG0,
         REG_A6XX_SP_GS_CONFIG,
         REG_A6XX_HLSQ_GS_CNTL,
         REG_A6XX_SP_GS_OBJ_START_LO,
      },
      [MESA_SHADER_FRAGMENT] = {
         REG_A6XX_SP_FS_CTRL_REG0,
         REG_A6XX_SP_FS_CONFIG,
         REG_A6XX_HLSQ_FS_CNTL,
         REG_A6XX_SP_FS_OBJ_START_LO,
      },
      [MESA_SHADER_COMPUTE] = {
         REG_A6XX_SP_CS_CTRL_REG0,
         REG_A6XX_SP_CS_CONFIG,
         REG_A6XX_HLSQ_CS_CNTL,
         REG_A6XX_SP_CS_OBJ_START_LO,
      },
   };
   const struct xs_config *cfg = &xs_config[stage];

   if (!xs) {
      /* shader stage disabled */
      tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_config, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, cfg->reg_hlsq_xs_ctrl, 1);
      tu_cs_emit(cs, 0);
      return;
   }

   bool is_fs = xs->type == MESA_SHADER_FRAGMENT;
   enum a3xx_threadsize threadsize = FOUR_QUADS;

   /* TODO:
    * the "threadsize" field may have nothing to do with threadsize,
    * use a value that matches the blob until it is figured out
    */
   if (xs->type == MESA_SHADER_GEOMETRY)
      threadsize = TWO_QUADS;

   tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_ctrl, 1);
   tu_cs_emit(cs,
              A6XX_SP_VS_CTRL_REG0_THREADSIZE(threadsize) |
              A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(xs->info.max_reg + 1) |
              A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(xs->info.max_half_reg + 1) |
              COND(xs->mergedregs, A6XX_SP_VS_CTRL_REG0_MERGEDREGS) |
              A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(xs->branchstack) |
              COND(xs->need_pixlod, A6XX_SP_VS_CTRL_REG0_PIXLODENABLE) |
              COND(xs->need_fine_derivatives, A6XX_SP_VS_CTRL_REG0_DIFF_FINE) |
              /* only fragment shader sets VARYING bit */
              COND(xs->total_in && is_fs, A6XX_SP_FS_CTRL_REG0_VARYING) |
              /* unknown bit, seems unnecessary */
              COND(is_fs, 0x1000000));

   tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_config, 2);
   tu_cs_emit(cs, A6XX_SP_VS_CONFIG_ENABLED |
                  COND(xs->bindless_tex, A6XX_SP_VS_CONFIG_BINDLESS_TEX) |
                  COND(xs->bindless_samp, A6XX_SP_VS_CONFIG_BINDLESS_SAMP) |
                  COND(xs->bindless_ibo, A6XX_SP_VS_CONFIG_BINDLESS_IBO) |
                  COND(xs->bindless_ubo, A6XX_SP_VS_CONFIG_BINDLESS_UBO) |
                  A6XX_SP_VS_CONFIG_NTEX(xs->num_samp) |
                  A6XX_SP_VS_CONFIG_NSAMP(xs->num_samp));
   tu_cs_emit(cs, xs->instrlen);

   tu_cs_emit_pkt4(cs, cfg->reg_hlsq_xs_ctrl, 1);
   tu_cs_emit(cs, A6XX_HLSQ_VS_CNTL_CONSTLEN(align(xs->constlen, 4)) |
                  A6XX_HLSQ_VS_CNTL_ENABLED);

   /* emit program binary
    * binary_iova should be aligned to 1 instrlen unit (128 bytes)
    */

   assert((binary_iova & 0x7f) == 0);

   tu_cs_emit_pkt4(cs, cfg->reg_sp_vs_obj_start, 2);
   tu_cs_emit_qw(cs, binary_iova);

   tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 3);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                  CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                  CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                  CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
                  CP_LOAD_STATE6_0_NUM_UNIT(xs->instrlen));
   tu_cs_emit_qw(cs, binary_iova);

   /* emit immediates */

   const struct ir3_const_state *const_state = ir3_const_state(xs);
   uint32_t base = const_state->offsets.immediate;
   int size = const_state->immediates_count;

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, xs->constlen) - base;

   if (size <= 0)
      return;

   tu_cs_emit_pkt7(cs, tu6_stage2opcode(stage), 3 + size * 4);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(stage)) |
              CP_LOAD_STATE6_0_NUM_UNIT(size));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (unsigned i = 0; i < size; i++) {
      tu_cs_emit(cs, const_state->immediates[i].val[0]);
      tu_cs_emit(cs, const_state->immediates[i].val[1]);
      tu_cs_emit(cs, const_state->immediates[i].val[2]);
      tu_cs_emit(cs, const_state->immediates[i].val[3]);
   }
}
static void
tu6_emit_cs_config(struct tu_cs *cs, const struct tu_shader *shader,
                   const struct ir3_shader_variant *v,
                   uint32_t binary_iova)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
   tu_cs_emit(cs, 0xff);

   tu6_emit_xs_config(cs, MESA_SHADER_COMPUTE, v, binary_iova);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   tu_cs_emit(cs, 0x41);

   uint32_t local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   uint32_t work_group_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL_0, 2);
   tu_cs_emit(cs,
              A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
              A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
              A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
              A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
   tu_cs_emit(cs, 0x2fc); /* HLSQ_CS_UNKNOWN_B998 */
}
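/* Note: regid(63, 0) (0xfc) is used throughout this file as the
 * "invalid/unused register" marker.
 */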
static void
tu6_emit_vs_system_values(struct tu_cs *cs,
                          const struct ir3_shader_variant *vs,
                          const struct ir3_shader_variant *gs,
                          bool primid_passthru)
{
   const uint32_t vertexid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
   const uint32_t instanceid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);
   const uint32_t primitiveid_regid = gs ?
      ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID) :
      regid(63, 0);
   const uint32_t gsheader_regid = gs ?
      ir3_find_sysval_regid(gs, SYSTEM_VALUE_GS_HEADER_IR3) :
      regid(63, 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_1, 6);
   tu_cs_emit(cs, A6XX_VFD_CONTROL_1_REGID4VTX(vertexid_regid) |
                  A6XX_VFD_CONTROL_1_REGID4INST(instanceid_regid) |
                  A6XX_VFD_CONTROL_1_REGID4PRIMID(primitiveid_regid) |
                  0xfc000000);
   tu_cs_emit(cs, 0x0000fcfc); /* VFD_CONTROL_2 */
   tu_cs_emit(cs, 0xfcfcfcfc); /* VFD_CONTROL_3 */
   tu_cs_emit(cs, 0x000000fc); /* VFD_CONTROL_4 */
   tu_cs_emit(cs, A6XX_VFD_CONTROL_5_REGID_GSHEADER(gsheader_regid) |
                  0xfc00); /* VFD_CONTROL_5 */
   tu_cs_emit(cs, COND(primid_passthru, A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU)); /* VFD_CONTROL_6 */
}
/* Add any missing varyings needed for stream-out. Otherwise varyings not
 * used by fragment shader will be stripped out.
 */
static void
tu6_link_streamout(struct ir3_shader_linkage *l,
                   const struct ir3_shader_variant *v)
{
   const struct ir3_stream_output_info *info = &v->shader->stream_output;

   /*
    * First, any stream-out varyings not already in linkage map (ie. also
    * consumed by frag shader) need to be added:
    */
   for (unsigned i = 0; i < info->num_outputs; i++) {
      const struct ir3_stream_output *out = &info->output[i];
      unsigned compmask =
         (1 << (out->num_components + out->start_component)) - 1;
      unsigned k = out->register_index;
      unsigned idx, nextloc = 0;

      /* psize/pos need to be the last entries in linkage map, and will
       * get added by link_stream_out, so skip over them:
       */
      if (v->outputs[k].slot == VARYING_SLOT_PSIZ ||
          v->outputs[k].slot == VARYING_SLOT_POS)
         continue;

      for (idx = 0; idx < l->cnt; idx++) {
         if (l->var[idx].regid == v->outputs[k].regid)
            break;
         nextloc = MAX2(nextloc, l->var[idx].loc + 4);
      }

      /* add if not already in linkage map: */
      if (idx == l->cnt)
         ir3_link_add(l, v->outputs[k].regid, compmask, nextloc);

      /* expand component-mask if needed, ie streaming out all components
       * but frag shader doesn't consume all components:
       */
      if (compmask & ~l->var[idx].compmask) {
         l->var[idx].compmask |= compmask;
         l->max_loc = MAX2(l->max_loc, l->var[idx].loc +
                           util_last_bit(l->var[idx].compmask));
      }
   }
}
static void
tu6_setup_streamout(const struct ir3_shader_variant *v,
                    struct ir3_shader_linkage *l, struct tu_streamout_state *tf)
{
   const struct ir3_stream_output_info *info = &v->shader->stream_output;

   memset(tf, 0, sizeof(*tf));

   tf->prog_count = align(l->max_loc, 2) / 2;

   debug_assert(tf->prog_count < ARRAY_SIZE(tf->prog));

   /* set stride info to the streamout state */
   for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++)
      tf->stride[i] = info->stride[i];

   for (unsigned i = 0; i < info->num_outputs; i++) {
      const struct ir3_stream_output *out = &info->output[i];
      unsigned k = out->register_index;
      unsigned idx;

      /* Skip it, if there's an unused reg in the middle of outputs. */
      if (v->outputs[k].regid == INVALID_REG)
         continue;

      tf->ncomp[out->output_buffer] += out->num_components;

      /* linkage map sorted by order frag shader wants things, so
       * a bit less ideal here..
       */
      for (idx = 0; idx < l->cnt; idx++)
         if (l->var[idx].regid == v->outputs[k].regid)
            break;

      debug_assert(idx < l->cnt);

      for (unsigned j = 0; j < out->num_components; j++) {
         unsigned c   = j + out->start_component;
         unsigned loc = l->var[idx].loc + c;
         unsigned off = j + out->dst_offset; /* in dwords */

         if (loc & 1) {
            tf->prog[loc/2] |= A6XX_VPC_SO_PROG_B_EN |
                               A6XX_VPC_SO_PROG_B_BUF(out->output_buffer) |
                               A6XX_VPC_SO_PROG_B_OFF(off * 4);
         } else {
            tf->prog[loc/2] |= A6XX_VPC_SO_PROG_A_EN |
                               A6XX_VPC_SO_PROG_A_BUF(out->output_buffer) |
                               A6XX_VPC_SO_PROG_A_OFF(off * 4);
         }
      }
   }

   tf->vpc_so_buf_cntl = A6XX_VPC_SO_BUF_CNTL_ENABLE |
                         COND(tf->ncomp[0] > 0, A6XX_VPC_SO_BUF_CNTL_BUF0) |
                         COND(tf->ncomp[1] > 0, A6XX_VPC_SO_BUF_CNTL_BUF1) |
                         COND(tf->ncomp[2] > 0, A6XX_VPC_SO_BUF_CNTL_BUF2) |
                         COND(tf->ncomp[3] > 0, A6XX_VPC_SO_BUF_CNTL_BUF3);
}
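/* Note: size here is in dwords, while CP_LOAD_STATE6's NUM_UNIT counts vec4
 * constants, hence the size / 4 below.
 */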
static void
tu6_emit_const(struct tu_cs *cs, uint32_t opcode, uint32_t base,
               enum a6xx_state_block block, uint32_t offset,
               uint32_t size, uint32_t *dwords) {
   assert(size % 4 == 0);

   tu_cs_emit_pkt7(cs, opcode, 3 + size);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(block) |
              CP_LOAD_STATE6_0_NUM_UNIT(size / 4));

   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
   dwords = (uint32_t *)&((uint8_t *)dwords)[offset];

   tu_cs_emit_array(cs, dwords, size);
}
static void
tu6_emit_link_map(struct tu_cs *cs,
                  const struct ir3_shader_variant *producer,
                  const struct ir3_shader_variant *consumer) {
   const struct ir3_const_state *const_state = ir3_const_state(consumer);
   uint32_t base = const_state->offsets.primitive_map;
   uint32_t patch_locs[MAX_VARYING] = { }, num_loc;
   num_loc = ir3_link_geometry_stages(producer, consumer, patch_locs);
   int size = DIV_ROUND_UP(num_loc, 4);

   size = (MIN2(size + base, consumer->constlen) - base) * 4;
   if (size <= 0)
      return;

   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, base, SB6_GS_SHADER, 0, size,
                  patch_locs);
}
static uint16_t
gl_primitive_to_tess(uint16_t primitive) {
   switch (primitive) {
   case GL_POINTS:
      return TESS_POINTS;
   case GL_LINE_STRIP:
      return TESS_LINES;
   case GL_TRIANGLE_STRIP:
      return TESS_CW_TRIS;
   default:
      unreachable("");
   }
}
static void
tu6_emit_vpc(struct tu_cs *cs,
             const struct ir3_shader_variant *vs,
             const struct ir3_shader_variant *gs,
             const struct ir3_shader_variant *fs,
             struct tu_streamout_state *tf)
{
   const struct ir3_shader_variant *last_shader = gs ?: vs;
   struct ir3_shader_linkage linkage = { .primid_loc = 0xff };
   if (fs)
      ir3_link_shaders(&linkage, last_shader, fs, true);

   if (last_shader->shader->stream_output.num_outputs)
      tu6_link_streamout(&linkage, last_shader);

   /* We do this after linking shaders in order to know whether PrimID
    * passthrough needs to be enabled.
    */
   bool primid_passthru = linkage.primid_loc != 0xff;
   tu6_emit_vs_system_values(cs, vs, gs, primid_passthru);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VAR_DISABLE(0), 4);
   tu_cs_emit(cs, ~linkage.varmask[0]);
   tu_cs_emit(cs, ~linkage.varmask[1]);
   tu_cs_emit(cs, ~linkage.varmask[2]);
   tu_cs_emit(cs, ~linkage.varmask[3]);

   /* a6xx finds position/pointsize at the end */
   const uint32_t position_regid =
      ir3_find_output_regid(last_shader, VARYING_SLOT_POS);
   const uint32_t pointsize_regid =
      ir3_find_output_regid(last_shader, VARYING_SLOT_PSIZ);
   const uint32_t layer_regid = gs ?
      ir3_find_output_regid(gs, VARYING_SLOT_LAYER) : regid(63, 0);

   uint32_t pointsize_loc = 0xff, position_loc = 0xff, layer_loc = 0xff;
   if (layer_regid != regid(63, 0)) {
      layer_loc = linkage.max_loc;
      ir3_link_add(&linkage, layer_regid, 0x1, linkage.max_loc);
   }
   if (position_regid != regid(63, 0)) {
      position_loc = linkage.max_loc;
      ir3_link_add(&linkage, position_regid, 0xf, linkage.max_loc);
   }
   if (pointsize_regid != regid(63, 0)) {
      pointsize_loc = linkage.max_loc;
      ir3_link_add(&linkage, pointsize_regid, 0x1, linkage.max_loc);
   }

   if (last_shader->shader->stream_output.num_outputs)
      tu6_setup_streamout(last_shader, &linkage, tf);

   /* map outputs of the last shader to VPC */
   assert(linkage.cnt <= 32);
   const uint32_t sp_out_count = DIV_ROUND_UP(linkage.cnt, 2);
   const uint32_t sp_vpc_dst_count = DIV_ROUND_UP(linkage.cnt, 4);
   uint32_t sp_out[16];
   uint32_t sp_vpc_dst[8];
   for (uint32_t i = 0; i < linkage.cnt; i++) {
      ((uint16_t *) sp_out)[i] =
         A6XX_SP_VS_OUT_REG_A_REGID(linkage.var[i].regid) |
         A6XX_SP_VS_OUT_REG_A_COMPMASK(linkage.var[i].compmask);
      ((uint8_t *) sp_vpc_dst)[i] =
         A6XX_SP_VS_VPC_DST_REG_OUTLOC0(linkage.var[i].loc);
   }

   if (gs)
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_OUT_REG(0), sp_out_count);
   else
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_OUT_REG(0), sp_out_count);
   tu_cs_emit_array(cs, sp_out, sp_out_count);

   if (gs)
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_VPC_DST_REG(0), sp_vpc_dst_count);
   else
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_VPC_DST_REG(0), sp_vpc_dst_count);
   tu_cs_emit_array(cs, sp_vpc_dst, sp_vpc_dst_count);

   tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMID_CNTL, 1);
   tu_cs_emit(cs, COND(primid_passthru, A6XX_PC_PRIMID_CNTL_PRIMID_PASSTHRU));

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);
   tu_cs_emit(cs, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs ? fs->total_in : 0) |
                  COND(fs && fs->total_in, A6XX_VPC_CNTL_0_VARYING) |
                  A6XX_VPC_CNTL_0_PRIMIDLOC(linkage.primid_loc) |
                  A6XX_VPC_CNTL_0_UNKLOC(0xff));

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK, 1);
   tu_cs_emit(cs, A6XX_VPC_PACK_POSITIONLOC(position_loc) |
                  A6XX_VPC_PACK_PSIZELOC(pointsize_loc) |
                  A6XX_VPC_PACK_STRIDE_IN_VPC(linkage.max_loc));

   if (gs) {
      uint32_t vertices_out, invocations, output, vec4_size;
      /* this detects the tu_clear_blit path, which doesn't set ->nir */
      if (gs->shader->nir) {
         tu6_emit_link_map(cs, vs, gs);
         vertices_out = gs->shader->nir->info.gs.vertices_out - 1;
         output = gl_primitive_to_tess(gs->shader->nir->info.gs.output_primitive);
         invocations = gs->shader->nir->info.gs.invocations - 1;
         /* Size of per-primitive allocation in ldlw memory in vec4s. */
         vec4_size = gs->shader->nir->info.gs.vertices_in *
                     DIV_ROUND_UP(vs->output_size, 4);
      } else {
         vertices_out = 3;
         output = TESS_CW_TRIS;
         invocations = 0;
         vec4_size = 0;
      }

      uint32_t primitive_regid =
         ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID);
      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK_GS, 1);
      tu_cs_emit(cs, A6XX_VPC_PACK_GS_POSITIONLOC(position_loc) |
                     A6XX_VPC_PACK_GS_PSIZELOC(pointsize_loc) |
                     A6XX_VPC_PACK_GS_STRIDE_IN_VPC(linkage.max_loc));

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9105, 1);
      tu_cs_emit(cs, A6XX_VPC_UNKNOWN_9105_LAYERLOC(layer_loc) | 0xff00);

      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_809C, 1);
      tu_cs_emit(cs, CONDREG(layer_regid,
                             A6XX_GRAS_UNKNOWN_809C_GS_WRITES_LAYER));

      uint32_t flags_regid = ir3_find_output_regid(gs,
            VARYING_SLOT_GS_VERTEX_FLAGS_IR3);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_PRIMITIVE_CNTL_GS, 1);
      tu_cs_emit(cs, A6XX_SP_PRIMITIVE_CNTL_GS_GSOUT(linkage.cnt) |
                     A6XX_SP_PRIMITIVE_CNTL_GS_FLAGS_REGID(flags_regid));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_2, 1);
      tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_2_STRIDE_IN_VPC(linkage.max_loc) |
                     CONDREG(pointsize_regid, A6XX_PC_PRIMITIVE_CNTL_2_PSIZE) |
                     CONDREG(layer_regid, A6XX_PC_PRIMITIVE_CNTL_2_LAYER) |
                     CONDREG(primitive_regid, A6XX_PC_PRIMITIVE_CNTL_2_PRIMITIVE_ID));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_5, 1);
      tu_cs_emit(cs,
            A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(vertices_out) |
            A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(output) |
            A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(invocations));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_3, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8003, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9100, 1);
      tu_cs_emit(cs, 0xff);

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9102, 1);
      tu_cs_emit(cs, 0xffff00);

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 1);
      tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(vec4_size));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_UNKNOWN_9B07, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_PRIM_SIZE, 1);
      tu_cs_emit(cs, vs->output_size);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_PRIMITIVE_CNTL, 1);
   tu_cs_emit(cs, A6XX_SP_PRIMITIVE_CNTL_VSOUT(linkage.cnt));

   tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_1, 1);
   tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(linkage.max_loc) |
                  (last_shader->writes_psize ? A6XX_PC_PRIMITIVE_CNTL_1_PSIZE : 0));
}
static int
tu6_vpc_varying_mode(const struct ir3_shader_variant *fs,
                     uint32_t index,
                     uint8_t *interp_mode,
                     uint8_t *ps_repl_mode)
{
   enum
   {
      INTERP_SMOOTH = 0,
      INTERP_FLAT = 1,
      INTERP_ZERO = 2,
      INTERP_ONE = 3,
   };
   enum
   {
      PS_REPL_NONE = 0,
      PS_REPL_S = 1,
      PS_REPL_T = 2,
      PS_REPL_ONE_MINUS_T = 3,
   };

   const uint32_t compmask = fs->inputs[index].compmask;

   /* NOTE: varyings are packed, so if compmask is 0xb then first, second, and
    * fourth component occupy three consecutive varying slots
    */
   int shift = 0;
   *interp_mode = 0;
   *ps_repl_mode = 0;
   if (fs->inputs[index].slot == VARYING_SLOT_PNTC) {
      if (compmask & 0x1) {
         *ps_repl_mode |= PS_REPL_S << shift;
         shift += 2;
      }
      if (compmask & 0x2) {
         *ps_repl_mode |= PS_REPL_T << shift;
         shift += 2;
      }
      if (compmask & 0x4) {
         *interp_mode |= INTERP_ZERO << shift;
         shift += 2;
      }
      if (compmask & 0x8) {
         *interp_mode |= INTERP_ONE << 6;
         shift += 2;
      }
   } else if ((fs->inputs[index].interpolate == INTERP_MODE_FLAT) ||
              fs->inputs[index].rasterflat) {
      for (int i = 0; i < 4; i++) {
         if (compmask & (1 << i)) {
            *interp_mode |= INTERP_FLAT << shift;
            shift += 2;
         }
      }
   }

   return shift;
}
static void
tu6_emit_vpc_varying_modes(struct tu_cs *cs,
                           const struct ir3_shader_variant *fs)
{
   uint32_t interp_modes[8] = { 0 };
   uint32_t ps_repl_modes[8] = { 0 };

   for (int i = -1;
        (i = ir3_next_varying(fs, i)) < (int) fs->inputs_count;) {

      /* get the mode for input i */
      uint8_t interp_mode;
      uint8_t ps_repl_mode;
      const int bits =
         tu6_vpc_varying_mode(fs, i, &interp_mode, &ps_repl_mode);

      /* OR the mode into the array */
      const uint32_t inloc = fs->inputs[i].inloc * 2;
      uint32_t n = inloc / 32;
      uint32_t shift = inloc % 32;
      interp_modes[n] |= interp_mode << shift;
      ps_repl_modes[n] |= ps_repl_mode << shift;
      if (shift + bits > 32) {
         n++;
         shift = 32 - shift;

         interp_modes[n] |= interp_mode >> shift;
         ps_repl_modes[n] |= ps_repl_mode >> shift;
      }
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
   tu_cs_emit_array(cs, interp_modes, 8);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
   tu_cs_emit_array(cs, ps_repl_modes, 8);
}
static void
tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs)
{
   uint32_t face_regid, coord_regid, zwcoord_regid, samp_id_regid;
   uint32_t ij_pix_regid, ij_samp_regid, ij_cent_regid, ij_size_regid;
   uint32_t smask_in_regid;

   bool sample_shading = fs->per_samp | fs->key.sample_shading;
   bool enable_varyings = fs->total_in > 0;

   samp_id_regid   = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);
   smask_in_regid  = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);
   face_regid      = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);
   coord_regid     = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);
   zwcoord_regid   = VALIDREG(coord_regid) ? coord_regid + 2 : regid(63, 0);
   ij_pix_regid    = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL);
   ij_samp_regid   = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE);
   ij_cent_regid   = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID);
   ij_size_regid   = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE);

   if (fs->num_sampler_prefetch > 0) {
      assert(VALIDREG(ij_pix_regid));
      /* also, it seems like ij_pix is *required* to be r0.x */
      assert(ij_pix_regid == regid(0, 0));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_PREFETCH_CNTL, 1 + fs->num_sampler_prefetch);
   tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CNTL_COUNT(fs->num_sampler_prefetch) |
                  A6XX_SP_FS_PREFETCH_CNTL_UNK4(regid(63, 0)) |
                  0x7000); // XXX
   for (int i = 0; i < fs->num_sampler_prefetch; i++) {
      const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
      tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CMD_SRC(prefetch->src) |
                     A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(prefetch->samp_id) |
                     A6XX_SP_FS_PREFETCH_CMD_TEX_ID(prefetch->tex_id) |
                     A6XX_SP_FS_PREFETCH_CMD_DST(prefetch->dst) |
                     A6XX_SP_FS_PREFETCH_CMD_WRMASK(prefetch->wrmask) |
                     COND(prefetch->half_precision, A6XX_SP_FS_PREFETCH_CMD_HALF) |
                     A6XX_SP_FS_PREFETCH_CMD_CMD(prefetch->cmd));
   }

   if (fs->num_sampler_prefetch > 0) {
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(0), fs->num_sampler_prefetch);
      for (int i = 0; i < fs->num_sampler_prefetch; i++) {
         const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
         tu_cs_emit(cs,
                    A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(prefetch->samp_bindless_id) |
                    A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(prefetch->tex_bindless_id));
      }
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
   tu_cs_emit(cs, 0x7);
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(samp_id_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(smask_in_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SIZE(ij_size_regid));
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_PIXEL(ij_pix_regid) |
                  A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_CENTROID(ij_cent_regid) |
                  0xfc00fc00);
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(coord_regid) |
                  A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(zwcoord_regid) |
                  A6XX_HLSQ_CONTROL_4_REG_BARY_IJ_PIXEL_PERSAMP(ij_samp_regid) |
                  0x0000fc00);
   tu_cs_emit(cs, 0xfc);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UNKNOWN_B980, 1);
   tu_cs_emit(cs, enable_varyings ? 3 : 1);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CNTL, 1);
   tu_cs_emit(cs,
         CONDREG(ij_pix_regid, A6XX_GRAS_CNTL_VARYING) |
         CONDREG(ij_cent_regid, A6XX_GRAS_CNTL_CENTROID) |
         CONDREG(ij_samp_regid, A6XX_GRAS_CNTL_PERSAMP_VARYING) |
         COND(VALIDREG(ij_size_regid) && !sample_shading, A6XX_GRAS_CNTL_SIZE) |
         COND(VALIDREG(ij_size_regid) && sample_shading, A6XX_GRAS_CNTL_SIZE_PERSAMP) |
         COND(fs->fragcoord_compmask != 0, A6XX_GRAS_CNTL_SIZE |
                           A6XX_GRAS_CNTL_COORD_MASK(fs->fragcoord_compmask)) |
         COND(fs->frag_face, A6XX_GRAS_CNTL_SIZE));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_RENDER_CONTROL0, 2);
   tu_cs_emit(cs,
         CONDREG(ij_pix_regid, A6XX_RB_RENDER_CONTROL0_VARYING) |
         CONDREG(ij_cent_regid, A6XX_RB_RENDER_CONTROL0_CENTROID) |
         CONDREG(ij_samp_regid, A6XX_RB_RENDER_CONTROL0_PERSAMP_VARYING) |
         COND(enable_varyings, A6XX_RB_RENDER_CONTROL0_UNK10) |
         COND(VALIDREG(ij_size_regid) && !sample_shading, A6XX_RB_RENDER_CONTROL0_SIZE) |
         COND(VALIDREG(ij_size_regid) && sample_shading, A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP) |
         COND(fs->fragcoord_compmask != 0, A6XX_RB_RENDER_CONTROL0_SIZE |
                           A6XX_RB_RENDER_CONTROL0_COORD_MASK(fs->fragcoord_compmask)) |
         COND(fs->frag_face, A6XX_RB_RENDER_CONTROL0_SIZE));
   tu_cs_emit(cs,
         /* these two bits (UNK4/UNK5) relate to fragcoord
          * without them, fragcoord is the same for all samples
          */
         COND(sample_shading, A6XX_RB_RENDER_CONTROL1_UNK4) |
         COND(sample_shading, A6XX_RB_RENDER_CONTROL1_UNK5) |
         CONDREG(smask_in_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEMASK) |
         CONDREG(samp_id_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEID) |
         CONDREG(ij_size_regid, A6XX_RB_RENDER_CONTROL1_SIZE) |
         COND(fs->frag_face, A6XX_RB_RENDER_CONTROL1_FACENESS));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CNTL, 1);
   tu_cs_emit(cs, COND(sample_shading, A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE));

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8101, 1);
   tu_cs_emit(cs, COND(sample_shading, 0x6)); // XXX

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CNTL, 1);
   tu_cs_emit(cs, COND(sample_shading, A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE));
}
static void
tu6_emit_fs_outputs(struct tu_cs *cs,
                    const struct ir3_shader_variant *fs,
                    uint32_t mrt_count, bool dual_src_blend,
                    uint32_t render_components)
{
   uint32_t smask_regid, posz_regid;

   posz_regid  = ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);
   smask_regid = ir3_find_output_regid(fs, FRAG_RESULT_SAMPLE_MASK);

   uint32_t fragdata_regid[8];
   if (fs->color0_mrt) {
      fragdata_regid[0] = ir3_find_output_regid(fs, FRAG_RESULT_COLOR);
      for (uint32_t i = 1; i < ARRAY_SIZE(fragdata_regid); i++)
         fragdata_regid[i] = fragdata_regid[0];
   } else {
      for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++)
         fragdata_regid[i] = ir3_find_output_regid(fs, FRAG_RESULT_DATA0 + i);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(posz_regid) |
                  A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(smask_regid) |
                  COND(dual_src_blend, A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE) |
                  0xfc000000);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);
   for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++) {
      // TODO we could have a mix of half and full precision outputs,
      // we really need to figure out half-precision from IR3_REG_HALF
      tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(fragdata_regid[i]) |
                     (false ? A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION : 0));
   }

   tu_cs_emit_regs(cs,
                   A6XX_SP_FS_RENDER_COMPONENTS(.dword = render_components));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);
   tu_cs_emit(cs, COND(fs->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |
                  COND(fs->writes_smask, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK) |
                  COND(dual_src_blend, A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE));
   tu_cs_emit(cs, A6XX_RB_FS_OUTPUT_CNTL1_MRT(mrt_count));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RENDER_COMPONENTS(.dword = render_components));

   enum a6xx_ztest_mode zmode;

   if (fs->no_earlyz || fs->has_kill || fs->writes_pos) {
      zmode = A6XX_LATE_Z;
   } else {
      zmode = A6XX_EARLY_Z;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL, 1);
   tu_cs_emit(cs, A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(zmode));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_PLANE_CNTL, 1);
   tu_cs_emit(cs, A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE(zmode));
}
static void
tu6_emit_geometry_consts(struct tu_cs *cs,
                         const struct ir3_shader_variant *vs,
                         const struct ir3_shader_variant *gs) {
   unsigned num_vertices = gs->shader->nir->info.gs.vertices_in;

   uint32_t params[4] = {
      vs->output_size * num_vertices * 4, /* primitive stride */
      vs->output_size * 4,                /* vertex stride */
      0,
      0,
   };
   uint32_t vs_base = ir3_const_state(vs)->offsets.primitive_param;
   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, vs_base, SB6_VS_SHADER, 0,
                  ARRAY_SIZE(params), params);

   uint32_t gs_base = ir3_const_state(gs)->offsets.primitive_param;
   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, gs_base, SB6_GS_SHADER, 0,
                  ARRAY_SIZE(params), params);
}
static void
tu6_emit_program(struct tu_cs *cs,
                 struct tu_pipeline_builder *builder,
                 const struct tu_bo *binary_bo,
                 bool binning_pass,
                 struct tu_streamout_state *tf)
{
   const struct ir3_shader_variant *vs = builder->variants[MESA_SHADER_VERTEX];
   const struct ir3_shader_variant *bs = builder->binning_variant;
   const struct ir3_shader_variant *gs = builder->variants[MESA_SHADER_GEOMETRY];
   const struct ir3_shader_variant *fs = builder->variants[MESA_SHADER_FRAGMENT];
   gl_shader_stage stage = MESA_SHADER_VERTEX;

   STATIC_ASSERT(MESA_SHADER_VERTEX == 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
   tu_cs_emit(cs, 0xff); /* XXX */

   /* Don't use the binning pass variant when GS is present because we don't
    * support compiling correct binning pass variants with GS.
    */
   if (binning_pass && !gs) {
      vs = bs;
      tu6_emit_xs_config(cs, stage, bs,
                         binary_bo->iova + builder->binning_vs_offset);
      stage++;
   }

   for (; stage < ARRAY_SIZE(builder->shaders); stage++) {
      const struct ir3_shader_variant *xs = builder->variants[stage];

      if (stage == MESA_SHADER_FRAGMENT && binning_pass)
         fs = xs = NULL;

      tu6_emit_xs_config(cs, stage, xs,
                         binary_bo->iova + builder->shader_offsets[stage]);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
   tu_cs_emit(cs, 0);

   tu6_emit_vpc(cs, vs, gs, fs, tf);
   tu6_emit_vpc_varying_modes(cs, fs);

   if (fs) {
      tu6_emit_fs_inputs(cs, fs);
      tu6_emit_fs_outputs(cs, fs, builder->color_attachment_count,
                          builder->use_dual_src_blend,
                          builder->render_components);
   } else {
      /* TODO: check if these can be skipped if fs is disabled */
      struct ir3_shader_variant dummy_variant = {};
      tu6_emit_fs_inputs(cs, &dummy_variant);
      tu6_emit_fs_outputs(cs, &dummy_variant, builder->color_attachment_count,
                          builder->use_dual_src_blend,
                          builder->render_components);
   }

   if (gs)
      tu6_emit_geometry_consts(cs, vs, gs);
}
static void
tu6_emit_vertex_input(struct tu_cs *cs,
                      const struct ir3_shader_variant *vs,
                      const VkPipelineVertexInputStateCreateInfo *info,
                      uint32_t *bindings_used)
{
   uint32_t vfd_decode_idx = 0;
   uint32_t binding_instanced = 0; /* bitmask of instanced bindings */

   for (uint32_t i = 0; i < info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *binding =
         &info->pVertexBindingDescriptions[i];

      tu_cs_emit_regs(cs,
                      A6XX_VFD_FETCH_STRIDE(binding->binding, binding->stride));

      if (binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
         binding_instanced |= 1 << binding->binding;

      *bindings_used |= 1 << binding->binding;
   }

   /* TODO: emit all VFD_DECODE/VFD_DEST_CNTL in same (two) pkt4 */

   for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *attr =
         &info->pVertexAttributeDescriptions[i];
      uint32_t input_idx;

      for (input_idx = 0; input_idx < vs->inputs_count; input_idx++) {
         if ((vs->inputs[input_idx].slot - VERT_ATTRIB_GENERIC0) == attr->location)
            break;
      }

      /* attribute not used, skip it */
      if (input_idx == vs->inputs_count)
         continue;

      const struct tu_native_format format = tu6_format_vtx(attr->format);
      tu_cs_emit_regs(cs,
                      A6XX_VFD_DECODE_INSTR(vfd_decode_idx,
                        .idx = attr->binding,
                        .offset = attr->offset,
                        .instanced = binding_instanced & (1 << attr->binding),
                        .format = format.fmt,
                        .swap = format.swap,
                        .unk30 = 1,
                        ._float = !vk_format_is_int(attr->format)),
                      A6XX_VFD_DECODE_STEP_RATE(vfd_decode_idx, 1));

      tu_cs_emit_regs(cs,
                      A6XX_VFD_DEST_CNTL_INSTR(vfd_decode_idx,
                        .writemask = vs->inputs[input_idx].compmask,
                        .regid = vs->inputs[input_idx].regid));

      vfd_decode_idx++;
   }

   tu_cs_emit_regs(cs,
                   A6XX_VFD_CONTROL_0(
                     .fetch_cnt = vfd_decode_idx, /* decode_cnt for binning pass ? */
                     .decode_cnt = vfd_decode_idx));
}
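/* Guardband clip adjust: the full value of 511 covers extents up to 256
 * pixels; past that the adjustment shrinks by 65 for each doubling of the
 * extent (empirical formula).
 */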
static uint32_t
tu6_guardband_adj(uint32_t v)
{
   if (v > 256)
      return (uint32_t)(511.0 - 65.0 * (log2(v) - 8.0));
   else
      return 511;
}
void
tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport)
{
   float offsets[3];
   float scales[3];
   scales[0] = viewport->width / 2.0f;
   scales[1] = viewport->height / 2.0f;
   scales[2] = viewport->maxDepth - viewport->minDepth;
   offsets[0] = viewport->x + scales[0];
   offsets[1] = viewport->y + scales[1];
   offsets[2] = viewport->minDepth;

   VkOffset2D min;
   VkOffset2D max;
   min.x = (int32_t) viewport->x;
   max.x = (int32_t) ceilf(viewport->x + viewport->width);
   if (viewport->height >= 0.0f) {
      min.y = (int32_t) viewport->y;
      max.y = (int32_t) ceilf(viewport->y + viewport->height);
   } else {
      min.y = (int32_t)(viewport->y + viewport->height);
      max.y = (int32_t) ceilf(viewport->y);
   }
   /* the spec allows viewport->height to be 0.0f */
   if (min.y == max.y)
      max.y++;
   assert(min.x >= 0 && min.x < max.x);
   assert(min.y >= 0 && min.y < max.y);

   VkExtent2D guardband_adj;
   guardband_adj.width = tu6_guardband_adj(max.x - min.x);
   guardband_adj.height = tu6_guardband_adj(max.y - min.y);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_VPORT_XOFFSET_0, 6);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XOFFSET_0(offsets[0]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XSCALE_0(scales[0]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YOFFSET_0(offsets[1]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YSCALE_0(scales[1]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZOFFSET_0(offsets[2]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZSCALE_0(scales[2]).value);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0, 2);
   tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(min.x) |
                  A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(min.y));
   tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(max.x - 1) |
                  A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(max.y - 1));

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ, 1);
   tu_cs_emit(cs,
              A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(guardband_adj.width) |
              A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(guardband_adj.height));

   float z_clamp_min = MIN2(viewport->minDepth, viewport->maxDepth);
   float z_clamp_max = MAX2(viewport->minDepth, viewport->maxDepth);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_CL_Z_CLAMP_MIN(z_clamp_min),
                   A6XX_GRAS_CL_Z_CLAMP_MAX(z_clamp_max));

   tu_cs_emit_regs(cs,
                   A6XX_RB_Z_CLAMP_MIN(z_clamp_min),
                   A6XX_RB_Z_CLAMP_MAX(z_clamp_max));
}
void
tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor)
{
   const VkOffset2D min = scissor->offset;
   const VkOffset2D max = {
      scissor->offset.x + scissor->extent.width,
      scissor->offset.y + scissor->extent.height,
   };

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0, 2);
   tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(min.x) |
                  A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(min.y));
   tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(max.x - 1) |
                  A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(max.y - 1));
}
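/* Passing samp_loc == NULL disables programmable locations and restores the
 * default (standard) sample positions.
 */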
void
tu6_emit_sample_locations(struct tu_cs *cs, const VkSampleLocationsInfoEXT *samp_loc)
{
   if (!samp_loc) {
      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 1);
      tu_cs_emit(cs, 0);
      return;
   }

   assert(samp_loc->sampleLocationsPerPixel == samp_loc->sampleLocationsCount);
   assert(samp_loc->sampleLocationGridSize.width == 1);
   assert(samp_loc->sampleLocationGridSize.height == 1);

   uint32_t sample_config =
      A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE;
   uint32_t sample_locations = 0;
   for (uint32_t i = 0; i < samp_loc->sampleLocationsCount; i++) {
      sample_locations |=
         (A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(samp_loc->pSampleLocations[i].x) |
          A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(samp_loc->pSampleLocations[i].y)) << i*8;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 2);
   tu_cs_emit(cs, sample_config);
   tu_cs_emit(cs, sample_locations);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 2);
   tu_cs_emit(cs, sample_config);
   tu_cs_emit(cs, sample_locations);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 2);
   tu_cs_emit(cs, sample_config);
   tu_cs_emit(cs, sample_locations);
}
static uint32_t
tu6_gras_su_cntl(const VkPipelineRasterizationStateCreateInfo *rast_info,
                 VkSampleCountFlagBits samples)
{
   uint32_t gras_su_cntl = 0;

   if (rast_info->cullMode & VK_CULL_MODE_FRONT_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_FRONT;
   if (rast_info->cullMode & VK_CULL_MODE_BACK_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_BACK;

   if (rast_info->frontFace == VK_FRONT_FACE_CLOCKWISE)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_FRONT_CW;

   /* don't set A6XX_GRAS_SU_CNTL_LINEHALFWIDTH */

   if (rast_info->depthBiasEnable)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_POLY_OFFSET;

   if (samples > VK_SAMPLE_COUNT_1_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_MSAA_ENABLE;

   return gras_su_cntl;
}
void
tu6_emit_depth_bias(struct tu_cs *cs,
                    float constant_factor,
                    float clamp,
                    float slope_factor)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE, 3);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_SCALE(slope_factor).value);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET(constant_factor).value);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(clamp).value);
}
static void
tu6_emit_depth_control(struct tu_cs *cs,
                       const VkPipelineDepthStencilStateCreateInfo *ds_info,
                       const VkPipelineRasterizationStateCreateInfo *rast_info)
{
   assert(!ds_info->depthBoundsTestEnable);

   uint32_t rb_depth_cntl = 0;
   if (ds_info->depthTestEnable) {
      rb_depth_cntl |=
         A6XX_RB_DEPTH_CNTL_Z_ENABLE |
         A6XX_RB_DEPTH_CNTL_ZFUNC(tu6_compare_func(ds_info->depthCompareOp)) |
         A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE;

      if (rast_info->depthClampEnable)
         rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE;

      if (ds_info->depthWriteEnable)
         rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_CNTL, 1);
   tu_cs_emit(cs, rb_depth_cntl);
}
static void
tu6_emit_stencil_control(struct tu_cs *cs,
                         const VkPipelineDepthStencilStateCreateInfo *ds_info)
{
   uint32_t rb_stencil_control = 0;
   if (ds_info->stencilTestEnable) {
      const VkStencilOpState *front = &ds_info->front;
      const VkStencilOpState *back = &ds_info->back;
      rb_stencil_control |=
         A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
         A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
         A6XX_RB_STENCIL_CONTROL_STENCIL_READ |
         A6XX_RB_STENCIL_CONTROL_FUNC(tu6_compare_func(front->compareOp)) |
         A6XX_RB_STENCIL_CONTROL_FAIL(tu6_stencil_op(front->failOp)) |
         A6XX_RB_STENCIL_CONTROL_ZPASS(tu6_stencil_op(front->passOp)) |
         A6XX_RB_STENCIL_CONTROL_ZFAIL(tu6_stencil_op(front->depthFailOp)) |
         A6XX_RB_STENCIL_CONTROL_FUNC_BF(tu6_compare_func(back->compareOp)) |
         A6XX_RB_STENCIL_CONTROL_FAIL_BF(tu6_stencil_op(back->failOp)) |
         A6XX_RB_STENCIL_CONTROL_ZPASS_BF(tu6_stencil_op(back->passOp)) |
         A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(tu6_stencil_op(back->depthFailOp));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_CONTROL, 1);
   tu_cs_emit(cs, rb_stencil_control);
}
static uint32_t
tu6_rb_mrt_blend_control(const VkPipelineColorBlendAttachmentState *att,
                         bool has_alpha)
{
   const enum a3xx_rb_blend_opcode color_op = tu6_blend_op(att->colorBlendOp);
   const enum adreno_rb_blend_factor src_color_factor = tu6_blend_factor(
      has_alpha ? att->srcColorBlendFactor
                : tu_blend_factor_no_dst_alpha(att->srcColorBlendFactor));
   const enum adreno_rb_blend_factor dst_color_factor = tu6_blend_factor(
      has_alpha ? att->dstColorBlendFactor
                : tu_blend_factor_no_dst_alpha(att->dstColorBlendFactor));
   const enum a3xx_rb_blend_opcode alpha_op = tu6_blend_op(att->alphaBlendOp);
   const enum adreno_rb_blend_factor src_alpha_factor =
      tu6_blend_factor(att->srcAlphaBlendFactor);
   const enum adreno_rb_blend_factor dst_alpha_factor =
      tu6_blend_factor(att->dstAlphaBlendFactor);

   return A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(src_color_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(color_op) |
          A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(dst_color_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(src_alpha_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(alpha_op) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(dst_alpha_factor);
}
static uint32_t
tu6_rb_mrt_control(const VkPipelineColorBlendAttachmentState *att,
                   uint32_t rb_mrt_control_rop,
                   bool is_int,
                   bool has_alpha)
{
   uint32_t rb_mrt_control =
      A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(att->colorWriteMask);

   /* ignore blending and logic op for integer attachments */
   if (is_int) {
      rb_mrt_control |= A6XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
      return rb_mrt_control;
   }

   rb_mrt_control |= rb_mrt_control_rop;

   if (att->blendEnable) {
      rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND;

      if (!has_alpha)
         rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND2;
   }

   return rb_mrt_control;
}
static void
tu6_emit_rb_mrt_controls(struct tu_cs *cs,
                         const VkPipelineColorBlendStateCreateInfo *blend_info,
                         const VkFormat attachment_formats[MAX_RTS],
                         uint32_t *blend_enable_mask)
{
   *blend_enable_mask = 0;

   bool rop_reads_dst = false;
   uint32_t rb_mrt_control_rop = 0;
   if (blend_info->logicOpEnable) {
      rop_reads_dst = tu_logic_op_reads_dst(blend_info->logicOp);
      rb_mrt_control_rop =
         A6XX_RB_MRT_CONTROL_ROP_ENABLE |
         A6XX_RB_MRT_CONTROL_ROP_CODE(tu6_rop(blend_info->logicOp));
   }

   for (uint32_t i = 0; i < blend_info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *att =
         &blend_info->pAttachments[i];
      const VkFormat format = attachment_formats[i];

      uint32_t rb_mrt_control = 0;
      uint32_t rb_mrt_blend_control = 0;
      if (format != VK_FORMAT_UNDEFINED) {
         const bool is_int = vk_format_is_int(format);
         const bool has_alpha = vk_format_has_alpha(format);

         rb_mrt_control =
            tu6_rb_mrt_control(att, rb_mrt_control_rop, is_int, has_alpha);
         rb_mrt_blend_control = tu6_rb_mrt_blend_control(att, has_alpha);

         if (att->blendEnable || rop_reads_dst)
            *blend_enable_mask |= 1 << i;
      }

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_CONTROL(i), 2);
      tu_cs_emit(cs, rb_mrt_control);
      tu_cs_emit(cs, rb_mrt_blend_control);
   }
}
static void
tu6_emit_blend_control(struct tu_cs *cs,
                       uint32_t blend_enable_mask,
                       bool dual_src_blend,
                       const VkPipelineMultisampleStateCreateInfo *msaa_info)
{
   const uint32_t sample_mask =
      msaa_info->pSampleMask ? (*msaa_info->pSampleMask & 0xffff)
                             : ((1 << msaa_info->rasterizationSamples) - 1);

   tu_cs_emit_regs(cs,
                   A6XX_SP_BLEND_CNTL(.enabled = blend_enable_mask,
                                      .dual_color_in_enable = dual_src_blend,
                                      .alpha_to_coverage = msaa_info->alphaToCoverageEnable,
                                      .unk8 = true));

   /* set A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND only when enabled? */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BLEND_CNTL(.enable_blend = blend_enable_mask,
                                      .independent_blend = true,
                                      .sample_mask = sample_mask,
                                      .dual_color_in_enable = dual_src_blend,
                                      .alpha_to_coverage = msaa_info->alphaToCoverageEnable,
                                      .alpha_to_one = msaa_info->alphaToOneEnable));
}
static VkResult
tu_pipeline_create(struct tu_device *dev,
                   struct tu_pipeline_layout *layout,
                   bool compute,
                   const VkAllocationCallbacks *pAllocator,
                   struct tu_pipeline **out_pipeline)
{
   struct tu_pipeline *pipeline =
      vk_zalloc2(&dev->alloc, pAllocator, sizeof(*pipeline), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pipeline)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   tu_cs_init(&pipeline->cs, dev, TU_CS_MODE_SUB_STREAM, 2048);

   /* Reserve the space now such that tu_cs_begin_sub_stream never fails. Note
    * that LOAD_STATE can potentially take up a large amount of space so we
    * calculate its size explicitly.
    */
   unsigned load_state_size = tu6_load_state_size(layout, compute);
   VkResult result = tu_cs_reserve_space(&pipeline->cs, 2048 + load_state_size);
   if (result != VK_SUCCESS) {
      vk_free2(&dev->alloc, pAllocator, pipeline);
      return result;
   }

   *out_pipeline = pipeline;

   return VK_SUCCESS;
}
static void
tu_pipeline_shader_key_init(struct ir3_shader_key *key,
                            const VkGraphicsPipelineCreateInfo *pipeline_info)
{
   for (uint32_t i = 0; i < pipeline_info->stageCount; i++) {
      if (pipeline_info->pStages[i].stage == VK_SHADER_STAGE_GEOMETRY_BIT) {
         key->has_gs = true;
         break;
      }
   }

   if (pipeline_info->pRasterizationState->rasterizerDiscardEnable)
      return;

   const VkPipelineMultisampleStateCreateInfo *msaa_info = pipeline_info->pMultisampleState;
   const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =
      vk_find_struct_const(msaa_info->pNext, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
   if (msaa_info->rasterizationSamples > 1 ||
       /* also set msaa key when sample location is not the default
        * since this affects varying interpolation */
       (sample_locations && sample_locations->sampleLocationsEnable)) {
      key->msaa = true;
   }

   /* note: not actually used by ir3, just checked in tu6_emit_fs_inputs */
   if (msaa_info->sampleShadingEnable)
      key->sample_shading = true;

   /* TODO: Populate the remaining fields of ir3_shader_key. */
}
static VkResult
tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder)
{
   const VkPipelineShaderStageCreateInfo *stage_infos[MESA_SHADER_STAGES] = {
      NULL
   };
   for (uint32_t i = 0; i < builder->create_info->stageCount; i++) {
      gl_shader_stage stage =
         vk_to_mesa_shader_stage(builder->create_info->pStages[i].stage);
      stage_infos[stage] = &builder->create_info->pStages[i];
   }

   struct ir3_shader_key key = {};
   tu_pipeline_shader_key_init(&key, builder->create_info);

   for (gl_shader_stage stage = MESA_SHADER_VERTEX;
        stage < MESA_SHADER_STAGES; stage++) {
      const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
      if (!stage_info && stage != MESA_SHADER_FRAGMENT)
         continue;

      struct tu_shader *shader =
         tu_shader_create(builder->device, stage, stage_info, builder->layout,
                          builder->alloc);
      if (!shader)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      builder->shaders[stage] = shader;
   }

   for (gl_shader_stage stage = MESA_SHADER_STAGES - 1;
        stage > MESA_SHADER_NONE; stage--) {
      if (!builder->shaders[stage])
         continue;

      bool created;
      builder->variants[stage] =
         ir3_shader_get_variant(builder->shaders[stage]->ir3_shader,
                                &key, false, &created);
      if (!builder->variants[stage])
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      builder->shader_offsets[stage] = builder->shader_total_size;
      builder->shader_total_size +=
         sizeof(uint32_t) * builder->variants[stage]->info.sizedwords;
   }

   const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
   struct ir3_shader_variant *variant;

   if (vs->ir3_shader->stream_output.num_outputs) {
      variant = builder->variants[MESA_SHADER_VERTEX];
   } else {
      bool created;
      variant = ir3_shader_get_variant(vs->ir3_shader, &key,
                                       true, &created);
      if (!variant)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   builder->binning_vs_offset = builder->shader_total_size;
   builder->shader_total_size +=
      sizeof(uint32_t) * variant->info.sizedwords;
   builder->binning_variant = variant;

   return VK_SUCCESS;
}
static VkResult
tu_pipeline_builder_upload_shaders(struct tu_pipeline_builder *builder,
                                   struct tu_pipeline *pipeline)
{
   struct tu_bo *bo = &pipeline->program.binary_bo;

   VkResult result =
      tu_bo_init_new(builder->device, bo, builder->shader_total_size);
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(builder->device, bo);
   if (result != VK_SUCCESS)
      return result;

   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct ir3_shader_variant *variant = builder->variants[i];
      if (!variant)
         continue;

      memcpy(bo->map + builder->shader_offsets[i], variant->bin,
             sizeof(uint32_t) * variant->info.sizedwords);
   }

   if (builder->binning_variant) {
      const struct ir3_shader_variant *variant = builder->binning_variant;
      memcpy(bo->map + builder->binning_vs_offset, variant->bin,
             sizeof(uint32_t) * variant->info.sizedwords);
   }

   return VK_SUCCESS;
}
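/* Record which states the app marked dynamic; tu_pipeline_static_state
 * consults this mask so those states are emitted by the command buffer at
 * draw time instead of being baked into the pipeline.
 */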
static void
tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
                                  struct tu_pipeline *pipeline)
{
   const VkPipelineDynamicStateCreateInfo *dynamic_info =
      builder->create_info->pDynamicState;

   if (!dynamic_info)
      return;

   for (uint32_t i = 0; i < dynamic_info->dynamicStateCount; i++) {
      VkDynamicState state = dynamic_info->pDynamicStates[i];
      switch (state) {
      case VK_DYNAMIC_STATE_VIEWPORT ... VK_DYNAMIC_STATE_STENCIL_REFERENCE:
         pipeline->dynamic_state_mask |= BIT(state);
         break;
      case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
         pipeline->dynamic_state_mask |= BIT(TU_DYNAMIC_STATE_SAMPLE_LOCATIONS);
         break;
      default:
         assert(!"unsupported dynamic state");
         break;
      }
   }
}
static void
tu_pipeline_set_linkage(struct tu_program_descriptor_linkage *link,
                        struct tu_shader *shader,
                        struct ir3_shader_variant *v)
{
   link->const_state = *ir3_const_state(v);
   link->constlen = v->constlen;
   link->push_consts = shader->push_consts;
}
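/* Emit the shader program state twice, once for the rendering pass and once
 * for the binning pass, each into its own IB, and record the active stages
 * and the descriptor sets the shaders actually reference.
 */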
static void
tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   struct tu_cs prog_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo,
                    false, &pipeline->streamout);
   pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo,
                    true, &pipeline->streamout);
   pipeline->program.binning_state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   VkShaderStageFlags stages = 0;
   for (unsigned i = 0; i < builder->create_info->stageCount; i++) {
      stages |= builder->create_info->pStages[i].stage;
   }
   pipeline->active_stages = stages;

   uint32_t desc_sets = 0;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!builder->shaders[i])
         continue;

      tu_pipeline_set_linkage(&pipeline->program.link[i],
                              builder->shaders[i],
                              builder->variants[i]);
      desc_sets |= builder->shaders[i]->active_desc_sets;
   }
   pipeline->active_desc_sets = desc_sets;
}
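/* Vertex input state is emitted separately for the draw and binning
 * variants (the two variants need not consume the same attributes);
 * both emissions accumulate into the same vi.bindings_used mask.
 */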
static void
tu_pipeline_builder_parse_vertex_input(struct tu_pipeline_builder *builder,
                                       struct tu_pipeline *pipeline)
{
   const VkPipelineVertexInputStateCreateInfo *vi_info =
      builder->create_info->pVertexInputState;
   const struct ir3_shader_variant *vs = builder->variants[MESA_SHADER_VERTEX];
   const struct ir3_shader_variant *bs = builder->binning_variant;

   struct tu_cs vi_cs;
   tu_cs_begin_sub_stream(&pipeline->cs,
                          MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
   tu6_emit_vertex_input(&vi_cs, vs, vi_info,
                         &pipeline->vi.bindings_used);
   pipeline->vi.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);

   if (bs) {
      tu_cs_begin_sub_stream(&pipeline->cs,
                             MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
      tu6_emit_vertex_input(
         &vi_cs, bs, vi_info, &pipeline->vi.bindings_used);
      pipeline->vi.binning_state_ib =
         tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
   }
}
static void
tu_pipeline_builder_parse_input_assembly(struct tu_pipeline_builder *builder,
                                         struct tu_pipeline *pipeline)
{
   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      builder->create_info->pInputAssemblyState;

   pipeline->ia.primtype = tu6_primtype(ia_info->topology);
   pipeline->ia.primitive_restart = ia_info->primitiveRestartEnable;
}
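/* Helper for emitting static state: returns false if the state is dynamic
 * (the command buffer emits it at draw time). Otherwise it allocates `size`
 * dwords from pipeline->cs, points the caller's external CS at that memory,
 * and records the IB location in pipeline->dynamic_state[id].
 */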
static bool
tu_pipeline_static_state(struct tu_pipeline *pipeline, struct tu_cs *cs,
                         uint32_t id, uint32_t size)
{
   struct ts_cs_memory memory;

   if (pipeline->dynamic_state_mask & BIT(id))
      return false;

   /* TODO: share this logic with tu_cmd_dynamic_state */
   tu_cs_alloc(&pipeline->cs, size, 1, &memory);
   tu_cs_init_external(cs, memory.map, memory.map + size);
   tu_cs_begin(cs);
   tu_cs_reserve_space(cs, size);

   assert(id < ARRAY_SIZE(pipeline->dynamic_state));
   pipeline->dynamic_state[id].iova = memory.iova;
   pipeline->dynamic_state[id].size = size;
   return true;
}
static void
tu_pipeline_builder_parse_viewport(struct tu_pipeline_builder *builder,
                                   struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pViewportState is a pointer to an instance of the
    *    VkPipelineViewportStateCreateInfo structure, and is ignored if the
    *    pipeline has rasterization disabled.
    *
    * We leave the relevant registers stale in that case.
    */
   if (builder->rasterizer_discard)
      return;

   const VkPipelineViewportStateCreateInfo *vp_info =
      builder->create_info->pViewportState;

   struct tu_cs cs;

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_VIEWPORT, 18))
      tu6_emit_viewport(&cs, vp_info->pViewports);

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_SCISSOR, 3))
      tu6_emit_scissor(&cs, vp_info->pScissors);
}
static void
tu_pipeline_builder_parse_rasterization(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   const VkPipelineRasterizationStateCreateInfo *rast_info =
      builder->create_info->pRasterizationState;

   assert(rast_info->polygonMode == VK_POLYGON_MODE_FILL);

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 7, &cs);

   tu_cs_emit_regs(&cs,
                   A6XX_GRAS_CL_CNTL(
                     .znear_clip_disable = rast_info->depthClampEnable,
                     .zfar_clip_disable = rast_info->depthClampEnable,
                     .unk5 = rast_info->depthClampEnable,
                     .zero_gb_scale_z = 1,
                     .vp_clip_code_ignore = 1));
   /* move to hw ctx init? */
   tu_cs_emit_regs(&cs, A6XX_GRAS_UNKNOWN_8001());
   tu_cs_emit_regs(&cs,
                   A6XX_GRAS_SU_POINT_MINMAX(.min = 1.0f / 16.0f, .max = 4092.0f),
                   A6XX_GRAS_SU_POINT_SIZE(1.0f));

   pipeline->rast.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &cs);

   pipeline->gras_su_cntl =
      tu6_gras_su_cntl(rast_info, builder->samples);

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_LINE_WIDTH, 2)) {
      pipeline->gras_su_cntl |=
         A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(rast_info->lineWidth / 2.0f);
      tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = pipeline->gras_su_cntl));
   }

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_DEPTH_BIAS, 4)) {
      tu6_emit_depth_bias(&cs, rast_info->depthBiasConstantFactor,
                          rast_info->depthBiasClamp,
                          rast_info->depthBiasSlopeFactor);
   }
}
static void
tu_pipeline_builder_parse_depth_stencil(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pDepthStencilState is a pointer to an instance of the
    *    VkPipelineDepthStencilStateCreateInfo structure, and is ignored if
    *    the pipeline has rasterization disabled or if the subpass of the
    *    render pass the pipeline is created against does not use a
    *    depth/stencil attachment.
    *
    * Disable both depth and stencil tests if there is no ds attachment;
    * disable the depth test if the ds attachment is S8_UINT, since S8_UINT
    * defines only the separate stencil attachment.
    */
   static const VkPipelineDepthStencilStateCreateInfo dummy_ds_info;
   const VkPipelineDepthStencilStateCreateInfo *ds_info =
      builder->depth_attachment_format != VK_FORMAT_UNDEFINED
         ? builder->create_info->pDepthStencilState
         : &dummy_ds_info;
   const VkPipelineDepthStencilStateCreateInfo *ds_info_depth =
      builder->depth_attachment_format != VK_FORMAT_S8_UINT
         ? ds_info : &dummy_ds_info;

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 6, &cs);

   /* move to hw ctx init? */
   tu_cs_emit_regs(&cs, A6XX_RB_ALPHA_CONTROL());
   tu6_emit_depth_control(&cs, ds_info_depth,
                          builder->create_info->pRasterizationState);
   tu6_emit_stencil_control(&cs, ds_info);

   pipeline->ds.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &cs);

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2)) {
      tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.mask = ds_info->front.compareMask & 0xff,
                                               .bfmask = ds_info->back.compareMask & 0xff));
   }

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2)) {
      tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.wrmask = ds_info->front.writeMask & 0xff,
                                                 .bfwrmask = ds_info->back.writeMask & 0xff));
   }

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2)) {
      tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.ref = ds_info->front.reference & 0xff,
                                              .bfref = ds_info->back.reference & 0xff));
   }
}
static void
tu_pipeline_builder_parse_multisample_and_color_blend(
   struct tu_pipeline_builder *builder, struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pMultisampleState is a pointer to an instance of the
    *    VkPipelineMultisampleStateCreateInfo, and is ignored if the pipeline
    *    has rasterization disabled.
    *
    * Also,
    *
    *    pColorBlendState is a pointer to an instance of the
    *    VkPipelineColorBlendStateCreateInfo structure, and is ignored if the
    *    pipeline has rasterization disabled or if the subpass of the render
    *    pass the pipeline is created against does not use any color
    *    attachments.
    *
    * We leave the relevant registers stale when rasterization is disabled.
    */
   if (builder->rasterizer_discard)
      return;

   static const VkPipelineColorBlendStateCreateInfo dummy_blend_info;
   const VkPipelineMultisampleStateCreateInfo *msaa_info =
      builder->create_info->pMultisampleState;
   const VkPipelineColorBlendStateCreateInfo *blend_info =
      builder->use_color_attachments ? builder->create_info->pColorBlendState
                                     : &dummy_blend_info;

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&pipeline->cs, MAX_RTS * 3 + 4, &cs);

   uint32_t blend_enable_mask;
   tu6_emit_rb_mrt_controls(&cs, blend_info,
                            builder->color_attachment_formats,
                            &blend_enable_mask);

   tu6_emit_blend_control(&cs, blend_enable_mask,
                          builder->use_dual_src_blend, msaa_info);

   pipeline->blend.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &cs);

   if (tu_pipeline_static_state(pipeline, &cs, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5)) {
      tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
      tu_cs_emit_array(&cs, (const uint32_t *) blend_info->blendConstants, 4);
   }

   const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =
      vk_find_struct_const(msaa_info->pNext, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
   const VkSampleLocationsInfoEXT *samp_loc = NULL;

   if (sample_locations && sample_locations->sampleLocationsEnable)
      samp_loc = &sample_locations->sampleLocationsInfo;

   if (tu_pipeline_static_state(pipeline, &cs, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS,
                                samp_loc ? 9 : 6)) {
      tu6_emit_sample_locations(&cs, samp_loc);
   }
}
static void
tu_pipeline_finish(struct tu_pipeline *pipeline,
                   struct tu_device *dev,
                   const VkAllocationCallbacks *alloc)
{
   tu_cs_finish(&pipeline->cs);

   if (pipeline->program.binary_bo.gem_handle)
      tu_bo_finish(dev, &pipeline->program.binary_bo);
}
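/* Top-level graphics pipeline build: create the object, compile and upload
 * shaders, then bake each piece of fixed-function state into its own IB.
 * If shader compilation or upload fails, the partially built pipeline is
 * destroyed here and *pipeline is reset to VK_NULL_HANDLE.
 */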
static VkResult
tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
                          struct tu_pipeline **pipeline)
{
   VkResult result = tu_pipeline_create(builder->device, builder->layout,
                                        false, builder->alloc, pipeline);
   if (result != VK_SUCCESS)
      return result;

   (*pipeline)->layout = builder->layout;

   /* compile and upload shaders */
   result = tu_pipeline_builder_compile_shaders(builder);
   if (result == VK_SUCCESS)
      result = tu_pipeline_builder_upload_shaders(builder, *pipeline);
   if (result != VK_SUCCESS) {
      tu_pipeline_finish(*pipeline, builder->device, builder->alloc);
      vk_free2(&builder->device->alloc, builder->alloc, *pipeline);
      *pipeline = VK_NULL_HANDLE;

      return result;
   }

   tu_pipeline_builder_parse_dynamic(builder, *pipeline);
   tu_pipeline_builder_parse_shader_stages(builder, *pipeline);
   tu_pipeline_builder_parse_vertex_input(builder, *pipeline);
   tu_pipeline_builder_parse_input_assembly(builder, *pipeline);
   tu_pipeline_builder_parse_viewport(builder, *pipeline);
   tu_pipeline_builder_parse_rasterization(builder, *pipeline);
   tu_pipeline_builder_parse_depth_stencil(builder, *pipeline);
   tu_pipeline_builder_parse_multisample_and_color_blend(builder, *pipeline);
   tu6_emit_load_state(*pipeline, false);

   /* we should have reserved enough space upfront such that the CS never
    * grows. */
   assert((*pipeline)->cs.bo_count == 1);

   return VK_SUCCESS;
}
static void
tu_pipeline_builder_finish(struct tu_pipeline_builder *builder)
{
   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!builder->shaders[i])
         continue;
      tu_shader_destroy(builder->device, builder->shaders[i], builder->alloc);
   }
}
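/* Gather what the builder needs from the create info. With rasterizer
 * discard the multisample and attachment state is ignored per the spec, so
 * only a default sample count is set; otherwise the render pass supplies
 * the depth/stencil and color attachment formats, and dual-source blend
 * reserves an extra FS output slot.
 */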
static void
tu_pipeline_builder_init_graphics(
   struct tu_pipeline_builder *builder,
   struct tu_device *dev,
   struct tu_pipeline_cache *cache,
   const VkGraphicsPipelineCreateInfo *create_info,
   const VkAllocationCallbacks *alloc)
{
   TU_FROM_HANDLE(tu_pipeline_layout, layout, create_info->layout);

   *builder = (struct tu_pipeline_builder) {
      .device = dev,
      .cache = cache,
      .create_info = create_info,
      .alloc = alloc,
      .layout = layout,
   };

   builder->rasterizer_discard =
      create_info->pRasterizationState->rasterizerDiscardEnable;

   if (builder->rasterizer_discard) {
      builder->samples = VK_SAMPLE_COUNT_1_BIT;
   } else {
      builder->samples = create_info->pMultisampleState->rasterizationSamples;

      const struct tu_render_pass *pass =
         tu_render_pass_from_handle(create_info->renderPass);
      const struct tu_subpass *subpass =
         &pass->subpasses[create_info->subpass];

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      builder->depth_attachment_format = (a != VK_ATTACHMENT_UNUSED) ?
         pass->attachments[a].format : VK_FORMAT_UNDEFINED;

      assert(subpass->color_count == 0 ||
             !create_info->pColorBlendState ||
             subpass->color_count == create_info->pColorBlendState->attachmentCount);
      builder->color_attachment_count = subpass->color_count;
      for (uint32_t i = 0; i < subpass->color_count; i++) {
         const uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         builder->color_attachment_formats[i] = pass->attachments[a].format;
         builder->use_color_attachments = true;
         builder->render_components |= 0xf << (i * 4);
      }

      if (tu_blend_state_is_dual_src(create_info->pColorBlendState)) {
         builder->color_attachment_count++;
         builder->use_dual_src_blend = true;
         /* dual source blending has an extra fs output in the 2nd slot */
         if (subpass->color_attachments[0].attachment != VK_ATTACHMENT_UNUSED)
            builder->render_components |= 0xf << 4;
      }
   }
}
static VkResult
tu_graphics_pipeline_create(VkDevice device,
                            VkPipelineCache pipelineCache,
                            const VkGraphicsPipelineCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_cache, cache, pipelineCache);

   struct tu_pipeline_builder builder;
   tu_pipeline_builder_init_graphics(&builder, dev, cache,
                                     pCreateInfo, pAllocator);

   struct tu_pipeline *pipeline = NULL;
   VkResult result = tu_pipeline_builder_build(&builder, &pipeline);
   tu_pipeline_builder_finish(&builder);

   if (result == VK_SUCCESS)
      *pPipeline = tu_pipeline_to_handle(pipeline);
   else
      *pPipeline = VK_NULL_HANDLE;

   return result;
}
VkResult
tu_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t count,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   VkResult final_result = VK_SUCCESS;

   for (uint32_t i = 0; i < count; i++) {
      VkResult result = tu_graphics_pipeline_create(device, pipelineCache,
                                                    &pCreateInfos[i], pAllocator,
                                                    &pPipelines[i]);

      if (result != VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}
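/* Upload a compute shader binary into the pipeline's binary BO; with a
 * single variant this is just a BO allocation, map, and memcpy.
 */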
static VkResult
tu_compute_upload_shader(VkDevice device,
                         struct tu_pipeline *pipeline,
                         struct ir3_shader_variant *v)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   struct tu_bo *bo = &pipeline->program.binary_bo;

   uint32_t shader_size = sizeof(uint32_t) * v->info.sizedwords;

   VkResult result = tu_bo_init_new(dev, bo, shader_size);
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(dev, bo);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo->map, v->bin, shader_size);

   return VK_SUCCESS;
}
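/* Compute pipelines reuse the graphics tu_pipeline object and sub-stream
 * CS, but with only one stage: compile the compute shader, record linkage
 * and workgroup size, upload the binary, then emit the CS config and
 * descriptor preload state.
 */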
static VkResult
tu_compute_pipeline_create(VkDevice device,
                           VkPipelineCache _cache,
                           const VkComputePipelineCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
   const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;
   VkResult result;

   struct tu_pipeline *pipeline;

   *pPipeline = VK_NULL_HANDLE;

   result = tu_pipeline_create(dev, layout, true, pAllocator, &pipeline);
   if (result != VK_SUCCESS)
      return result;

   pipeline->layout = layout;

   struct ir3_shader_key key = {};

   struct tu_shader *shader =
      tu_shader_create(dev, MESA_SHADER_COMPUTE, stage_info, layout, pAllocator);
   if (!shader) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   bool created;
   struct ir3_shader_variant *v =
      ir3_shader_get_variant(shader->ir3_shader, &key, false, &created);
   if (!v) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   tu_pipeline_set_linkage(&pipeline->program.link[MESA_SHADER_COMPUTE],
                           shader, v);

   result = tu_compute_upload_shader(device, pipeline, v);
   if (result != VK_SUCCESS)
      goto fail;

   for (int i = 0; i < 3; i++)
      pipeline->compute.local_size[i] = v->shader->nir->info.cs.local_size[i];

   struct tu_cs prog_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
   tu6_emit_cs_config(&prog_cs, shader, v, pipeline->program.binary_bo.iova);
   pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   tu6_emit_load_state(pipeline, true);

   *pPipeline = tu_pipeline_to_handle(pipeline);
   return VK_SUCCESS;

fail:
   if (shader)
      tu_shader_destroy(dev, shader, pAllocator);

   tu_pipeline_finish(pipeline, dev, pAllocator);
   vk_free2(&dev->alloc, pAllocator, pipeline);

   return result;
}
VkResult
tu_CreateComputePipelines(VkDevice device,
                          VkPipelineCache pipelineCache,
                          uint32_t count,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   VkResult final_result = VK_SUCCESS;

   for (uint32_t i = 0; i < count; i++) {
      VkResult result = tu_compute_pipeline_create(device, pipelineCache,
                                                   &pCreateInfos[i],
                                                   pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}
void
tu_DestroyPipeline(VkDevice _device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, dev, _device);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   if (!_pipeline)
      return;

   tu_pipeline_finish(pipeline, dev, pAllocator);
   vk_free2(&dev->alloc, pAllocator, pipeline);
}