/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "ir3/ir3_nir.h"
#include "main/menums.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"
#include "util/debug.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "vk_format.h"

/* Emit IB that preloads the descriptors that the shader uses */

static inline uint32_t
tu6_vkstage2opcode(VkShaderStageFlags stage)
{
   switch (stage) {
   case VK_SHADER_STAGE_VERTEX_BIT:
   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
   case VK_SHADER_STAGE_GEOMETRY_BIT:
      return CP_LOAD_STATE6_GEOM;
   case VK_SHADER_STAGE_FRAGMENT_BIT:
   case VK_SHADER_STAGE_COMPUTE_BIT:
      return CP_LOAD_STATE6_FRAG;
   default:
      unreachable("bad shader type");
   }
}

static enum a6xx_state_block
tu6_tex_stage2sb(VkShaderStageFlags stage)
{
   switch (stage) {
   case VK_SHADER_STAGE_VERTEX_BIT:
      return SB6_VS_TEX;
   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      return SB6_HS_TEX;
   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
      return SB6_DS_TEX;
   case VK_SHADER_STAGE_GEOMETRY_BIT:
      return SB6_GS_TEX;
   case VK_SHADER_STAGE_FRAGMENT_BIT:
      return SB6_FS_TEX;
   case VK_SHADER_STAGE_COMPUTE_BIT:
      return SB6_CS_TEX;
   default:
      unreachable("bad shader stage");
   }
}

static enum a6xx_state_block
tu6_ubo_stage2sb(VkShaderStageFlags stage)
{
   switch (stage) {
   case VK_SHADER_STAGE_VERTEX_BIT:
      return SB6_VS_SHADER;
   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      return SB6_HS_SHADER;
   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
      return SB6_DS_SHADER;
   case VK_SHADER_STAGE_GEOMETRY_BIT:
      return SB6_GS_SHADER;
   case VK_SHADER_STAGE_FRAGMENT_BIT:
      return SB6_FS_SHADER;
   case VK_SHADER_STAGE_COMPUTE_BIT:
      return SB6_CS_SHADER;
   default:
      unreachable("bad shader stage");
   }
}

static void
emit_load_state(struct tu_cs *cs, unsigned opcode, enum a6xx_state_type st,
                enum a6xx_state_block sb, unsigned base, unsigned offset,
                unsigned count)
{
   /* Note: just emit one packet, even if count overflows NUM_UNIT. It's not
    * clear if emitting more packets will even help anything. Presumably the
    * descriptor cache is relatively small, and these packets stop doing
    * anything when there are too many descriptors.
    */
   tu_cs_emit_pkt7(cs, opcode, 3);
   tu_cs_emit(cs,
              CP_LOAD_STATE6_0_STATE_TYPE(st) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_BINDLESS) |
              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(MIN2(count, 1024-1)));
   tu_cs_emit_qw(cs, offset | (base << 28));
}

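/* For illustration only (not part of the driver): with the bindless encoding
 * above, the single qword packs the descriptor-set index into the top bits
 * and the dword offset within the set below it. E.g. prefetching set 2 at a
 * binding offset of 0x40 dwords encodes as
 *
 *    0x40 | (2 << 28) = 0x20000040
 *
 * so the firmware can resolve the descriptors relative to BINDLESS_BASE[2].
 */
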
static unsigned
tu6_load_state_size(struct tu_pipeline_layout *layout, bool compute)
{
   const unsigned load_state_size = 4;
   unsigned size = 0;
   for (unsigned i = 0; i < layout->num_sets; i++) {
      struct tu_descriptor_set_layout *set_layout = layout->set[i].layout;
      for (unsigned j = 0; j < set_layout->binding_count; j++) {
         struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
         unsigned count = 0;
         /* Note: some users, like amber for example, pass in
          * VK_SHADER_STAGE_ALL which includes a bunch of extra bits, so
          * filter these out by using VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
          */
         VkShaderStageFlags stages = compute ?
            binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
            binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
         unsigned stage_count = util_bitcount(stages);
         switch (binding->type) {
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            /* IBO-backed resources only need one packet for all graphics stages */
            if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT)
               count += 1;
            if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
               count += 1;
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            /* Textures and UBOs need a packet for each stage */
            count = stage_count;
            break;
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            /* Because of how we pack combined images and samplers, we
             * currently can't use one packet for the whole array.
             */
            count = stage_count * binding->array_size * 2;
            break;
         default:
            unreachable("bad descriptor type");
         }
         size += count * load_state_size;
      }
   }
   return size;
}

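/* Worked example (illustrative only): a combined image/sampler array of 4
 * elements visible to VS and FS costs 2 stages * 4 elements * 2 packets = 16
 * packets, i.e. 16 * load_state_size = 64 dwords, whereas a storage buffer
 * visible to the same stages costs a single packet (4 dwords), since IBOs
 * are shared across the graphics stages.
 */
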
static void
tu6_emit_load_state(struct tu_pipeline *pipeline, bool compute)
{
   unsigned size = tu6_load_state_size(pipeline->layout, compute);
   if (size == 0)
      return;

   struct tu_cs cs;
   tu_cs_begin_sub_stream(&pipeline->cs, size, &cs);

   struct tu_pipeline_layout *layout = pipeline->layout;
   for (unsigned i = 0; i < layout->num_sets; i++) {
      /* From 13.2.7. Descriptor Set Binding:
       *
       *    A compatible descriptor set must be bound for all set numbers that
       *    any shaders in a pipeline access, at the time that a draw or
       *    dispatch command is recorded to execute using that pipeline.
       *    However, if none of the shaders in a pipeline statically use any
       *    bindings with a particular set number, then no descriptor set need
       *    be bound for that set number, even if the pipeline layout includes
       *    a non-trivial descriptor set layout for that set number.
       *
       * This means that descriptor sets unused by the pipeline may have a
       * garbage or 0 BINDLESS_BASE register, which will cause context faults
       * when prefetching descriptors from these sets. Skip prefetching for
       * descriptors from them to avoid this. This is also an optimization,
       * since these prefetches would be useless.
       */
      if (!(pipeline->active_desc_sets & (1u << i)))
         continue;

      struct tu_descriptor_set_layout *set_layout = layout->set[i].layout;
      for (unsigned j = 0; j < set_layout->binding_count; j++) {
         struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
         unsigned base = i;
         unsigned offset = binding->offset / 4;
         /* Note: some users, like amber for example, pass in
          * VK_SHADER_STAGE_ALL which includes a bunch of extra bits, so
          * filter these out by using VK_SHADER_STAGE_ALL_GRAPHICS explicitly.
          */
         VkShaderStageFlags stages = compute ?
            binding->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT :
            binding->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
         unsigned count = binding->array_size;
         if (count == 0 || stages == 0)
            continue;
         switch (binding->type) {
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
            base = MAX_SETS;
            offset = (layout->input_attachment_count +
                      layout->set[i].dynamic_offset_start +
                      binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
            /* fallthrough */
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            /* IBO-backed resources only need one packet for all graphics stages */
            if (stages & ~VK_SHADER_STAGE_COMPUTE_BIT) {
               emit_load_state(&cs, CP_LOAD_STATE6, ST6_SHADER, SB6_IBO,
                               base, offset, count);
            }
            if (stages & VK_SHADER_STAGE_COMPUTE_BIT) {
               emit_load_state(&cs, CP_LOAD_STATE6_FRAG, ST6_IBO, SB6_CS_SHADER,
                               base, offset, count);
            }
            break;
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            base = MAX_SETS;
            offset = (layout->set[i].input_attachment_start +
                      binding->input_attachment_offset) * A6XX_TEX_CONST_DWORDS;
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
            unsigned stage_log2;
            for_each_bit(stage_log2, stages) {
               VkShaderStageFlags stage = 1 << stage_log2;
               emit_load_state(&cs, tu6_vkstage2opcode(stage),
                               binding->type == VK_DESCRIPTOR_TYPE_SAMPLER ?
                               ST6_SHADER : ST6_CONSTANTS,
                               tu6_tex_stage2sb(stage), base, offset, count);
            }
            break;
         }
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            base = MAX_SETS;
            offset = (layout->input_attachment_count +
                      layout->set[i].dynamic_offset_start +
                      binding->dynamic_offset_offset) * A6XX_TEX_CONST_DWORDS;
            /* fallthrough */
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
            unsigned stage_log2;
            for_each_bit(stage_log2, stages) {
               VkShaderStageFlags stage = 1 << stage_log2;
               emit_load_state(&cs, tu6_vkstage2opcode(stage), ST6_UBO,
                               tu6_ubo_stage2sb(stage), base, offset, count);
            }
            break;
         }
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
            unsigned stage_log2;
            for_each_bit(stage_log2, stages) {
               VkShaderStageFlags stage = 1 << stage_log2;
               /* TODO: We could emit less CP_LOAD_STATE6 if we used
                * struct-of-arrays instead of array-of-structs.
                */
               for (unsigned i = 0; i < count; i++) {
                  unsigned tex_offset = offset + 2 * i * A6XX_TEX_CONST_DWORDS;
                  unsigned sam_offset = offset + (2 * i + 1) * A6XX_TEX_CONST_DWORDS;
                  emit_load_state(&cs, tu6_vkstage2opcode(stage),
                                  ST6_CONSTANTS, tu6_tex_stage2sb(stage),
                                  base, tex_offset, 1);
                  emit_load_state(&cs, tu6_vkstage2opcode(stage),
                                  ST6_SHADER, tu6_tex_stage2sb(stage),
                                  base, sam_offset, 1);
               }
            }
            break;
         }
         default:
            unreachable("bad descriptor type");
         }
      }
   }

   pipeline->load_state.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &cs);
}

struct tu_pipeline_builder
{
   struct tu_device *device;
   struct tu_pipeline_cache *cache;
   struct tu_pipeline_layout *layout;
   const VkAllocationCallbacks *alloc;
   const VkGraphicsPipelineCreateInfo *create_info;

   struct tu_shader *shaders[MESA_SHADER_STAGES];
   uint32_t shader_offsets[MESA_SHADER_STAGES];
   uint32_t binning_vs_offset;
   uint32_t shader_total_size;

   bool rasterizer_discard;
   /* these states are affected by rasterizer_discard */
   VkSampleCountFlagBits samples;
   bool use_color_attachments;
   bool use_dual_src_blend;
   uint32_t color_attachment_count;
   VkFormat color_attachment_formats[MAX_RTS];
   VkFormat depth_attachment_format;
   uint32_t render_components;
};

static enum tu_dynamic_state_bits
tu_dynamic_state_bit(VkDynamicState state)
{
   switch (state) {
   case VK_DYNAMIC_STATE_VIEWPORT:
      return TU_DYNAMIC_VIEWPORT;
   case VK_DYNAMIC_STATE_SCISSOR:
      return TU_DYNAMIC_SCISSOR;
   case VK_DYNAMIC_STATE_LINE_WIDTH:
      return TU_DYNAMIC_LINE_WIDTH;
   case VK_DYNAMIC_STATE_DEPTH_BIAS:
      return TU_DYNAMIC_DEPTH_BIAS;
   case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
      return TU_DYNAMIC_BLEND_CONSTANTS;
   case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
      return TU_DYNAMIC_DEPTH_BOUNDS;
   case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
      return TU_DYNAMIC_STENCIL_COMPARE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
      return TU_DYNAMIC_STENCIL_WRITE_MASK;
   case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
      return TU_DYNAMIC_STENCIL_REFERENCE;
   case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
      return TU_DYNAMIC_SAMPLE_LOCATIONS;
   default:
      unreachable("invalid dynamic state");
      return 0;
   }
}

static bool
tu_logic_op_reads_dst(VkLogicOp op)
{
   switch (op) {
   case VK_LOGIC_OP_CLEAR:
   case VK_LOGIC_OP_COPY:
   case VK_LOGIC_OP_COPY_INVERTED:
   case VK_LOGIC_OP_SET:
      return false;
   default:
      return true;
   }
}

static VkBlendFactor
tu_blend_factor_no_dst_alpha(VkBlendFactor factor)
{
   /* treat dst alpha as 1.0 and avoid reading it */
   switch (factor) {
   case VK_BLEND_FACTOR_DST_ALPHA:
      return VK_BLEND_FACTOR_ONE;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return VK_BLEND_FACTOR_ZERO;
   default:
      return factor;
   }
}

static bool tu_blend_factor_is_dual_src(VkBlendFactor factor)
{
   switch (factor) {
   case VK_BLEND_FACTOR_SRC1_COLOR:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
   case VK_BLEND_FACTOR_SRC1_ALPHA:
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
      return true;
   default:
      return false;
   }
}

static bool
tu_blend_state_is_dual_src(const VkPipelineColorBlendStateCreateInfo *info)
{
   if (!info)
      return false;

   for (unsigned i = 0; i < info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *blend = &info->pAttachments[i];
      if (tu_blend_factor_is_dual_src(blend->srcColorBlendFactor) ||
          tu_blend_factor_is_dual_src(blend->dstColorBlendFactor) ||
          tu_blend_factor_is_dual_src(blend->srcAlphaBlendFactor) ||
          tu_blend_factor_is_dual_src(blend->dstAlphaBlendFactor))
         return true;
   }

   return false;
}

static enum pc_di_primtype
tu6_primtype(VkPrimitiveTopology topology)
{
   switch (topology) {
   case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
      return DI_PT_POINTLIST;
   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
      return DI_PT_LINELIST;
   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
      return DI_PT_LINESTRIP;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
      return DI_PT_TRILIST;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
      return DI_PT_TRISTRIP;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
      return DI_PT_TRIFAN;
   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
      return DI_PT_LINE_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
      return DI_PT_LINESTRIP_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
      return DI_PT_TRI_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
      return DI_PT_TRISTRIP_ADJ;
   case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
   default:
      unreachable("invalid primitive topology");
      return DI_PT_NONE;
   }
}

static enum adreno_compare_func
tu6_compare_func(VkCompareOp op)
{
   switch (op) {
   case VK_COMPARE_OP_NEVER:
      return FUNC_NEVER;
   case VK_COMPARE_OP_LESS:
      return FUNC_LESS;
   case VK_COMPARE_OP_EQUAL:
      return FUNC_EQUAL;
   case VK_COMPARE_OP_LESS_OR_EQUAL:
      return FUNC_LEQUAL;
   case VK_COMPARE_OP_GREATER:
      return FUNC_GREATER;
   case VK_COMPARE_OP_NOT_EQUAL:
      return FUNC_NOTEQUAL;
   case VK_COMPARE_OP_GREATER_OR_EQUAL:
      return FUNC_GEQUAL;
   case VK_COMPARE_OP_ALWAYS:
      return FUNC_ALWAYS;
   default:
      unreachable("invalid VkCompareOp");
      return FUNC_NEVER;
   }
}

static enum adreno_stencil_op
tu6_stencil_op(VkStencilOp op)
{
   switch (op) {
   case VK_STENCIL_OP_KEEP:
      return STENCIL_KEEP;
   case VK_STENCIL_OP_ZERO:
      return STENCIL_ZERO;
   case VK_STENCIL_OP_REPLACE:
      return STENCIL_REPLACE;
   case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
      return STENCIL_INCR_CLAMP;
   case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
      return STENCIL_DECR_CLAMP;
   case VK_STENCIL_OP_INVERT:
      return STENCIL_INVERT;
   case VK_STENCIL_OP_INCREMENT_AND_WRAP:
      return STENCIL_INCR_WRAP;
   case VK_STENCIL_OP_DECREMENT_AND_WRAP:
      return STENCIL_DECR_WRAP;
   default:
      unreachable("invalid VkStencilOp");
      return STENCIL_KEEP;
   }
}

static enum a3xx_rop_code
tu6_rop(VkLogicOp op)
{
   switch (op) {
   case VK_LOGIC_OP_CLEAR:
      return ROP_CLEAR;
   case VK_LOGIC_OP_AND:
      return ROP_AND;
   case VK_LOGIC_OP_AND_REVERSE:
      return ROP_AND_REVERSE;
   case VK_LOGIC_OP_COPY:
      return ROP_COPY;
   case VK_LOGIC_OP_AND_INVERTED:
      return ROP_AND_INVERTED;
   case VK_LOGIC_OP_NO_OP:
      return ROP_NOOP;
   case VK_LOGIC_OP_XOR:
      return ROP_XOR;
   case VK_LOGIC_OP_OR:
      return ROP_OR;
   case VK_LOGIC_OP_NOR:
      return ROP_NOR;
   case VK_LOGIC_OP_EQUIVALENT:
      return ROP_XNOR;
   case VK_LOGIC_OP_INVERT:
      return ROP_INVERT;
   case VK_LOGIC_OP_OR_REVERSE:
      return ROP_OR_REVERSE;
   case VK_LOGIC_OP_COPY_INVERTED:
      return ROP_COPY_INVERTED;
   case VK_LOGIC_OP_OR_INVERTED:
      return ROP_OR_INVERTED;
   case VK_LOGIC_OP_NAND:
      return ROP_NAND;
   case VK_LOGIC_OP_SET:
      return ROP_SET;
   default:
      unreachable("invalid VkLogicOp");
      return ROP_COPY;
   }
}

static enum adreno_rb_blend_factor
tu6_blend_factor(VkBlendFactor factor)
{
   switch (factor) {
   case VK_BLEND_FACTOR_ZERO:
      return FACTOR_ZERO;
   case VK_BLEND_FACTOR_ONE:
      return FACTOR_ONE;
   case VK_BLEND_FACTOR_SRC_COLOR:
      return FACTOR_SRC_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
      return FACTOR_ONE_MINUS_SRC_COLOR;
   case VK_BLEND_FACTOR_DST_COLOR:
      return FACTOR_DST_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
      return FACTOR_ONE_MINUS_DST_COLOR;
   case VK_BLEND_FACTOR_SRC_ALPHA:
      return FACTOR_SRC_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
      return FACTOR_ONE_MINUS_SRC_ALPHA;
   case VK_BLEND_FACTOR_DST_ALPHA:
      return FACTOR_DST_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return FACTOR_ONE_MINUS_DST_ALPHA;
   case VK_BLEND_FACTOR_CONSTANT_COLOR:
      return FACTOR_CONSTANT_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
      return FACTOR_ONE_MINUS_CONSTANT_COLOR;
   case VK_BLEND_FACTOR_CONSTANT_ALPHA:
      return FACTOR_CONSTANT_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
      return FACTOR_ONE_MINUS_CONSTANT_ALPHA;
   case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
      return FACTOR_SRC_ALPHA_SATURATE;
   case VK_BLEND_FACTOR_SRC1_COLOR:
      return FACTOR_SRC1_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
      return FACTOR_ONE_MINUS_SRC1_COLOR;
   case VK_BLEND_FACTOR_SRC1_ALPHA:
      return FACTOR_SRC1_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
      return FACTOR_ONE_MINUS_SRC1_ALPHA;
   default:
      unreachable("invalid VkBlendFactor");
      return FACTOR_ZERO;
   }
}

static enum a3xx_rb_blend_opcode
tu6_blend_op(VkBlendOp op)
{
   switch (op) {
   case VK_BLEND_OP_ADD:
      return BLEND_DST_PLUS_SRC;
   case VK_BLEND_OP_SUBTRACT:
      return BLEND_SRC_MINUS_DST;
   case VK_BLEND_OP_REVERSE_SUBTRACT:
      return BLEND_DST_MINUS_SRC;
   case VK_BLEND_OP_MIN:
      return BLEND_MIN_DST_SRC;
   case VK_BLEND_OP_MAX:
      return BLEND_MAX_DST_SRC;
   default:
      unreachable("invalid VkBlendOp");
      return BLEND_DST_PLUS_SRC;
   }
}

static void
tu6_emit_xs_config(struct tu_cs *cs,
                   gl_shader_stage stage, /* xs->type, but xs may be NULL */
                   const struct ir3_shader_variant *xs,
                   uint64_t binary_iova)
{
   static const struct xs_config {
      uint16_t reg_sp_xs_ctrl;
      uint16_t reg_sp_xs_config;
      uint16_t reg_hlsq_xs_ctrl;
      uint16_t reg_sp_vs_obj_start;
      uint8_t opcode;
      enum a6xx_state_block sb : 8;
   } xs_config[] = {
      [MESA_SHADER_VERTEX] = {
         REG_A6XX_SP_VS_CTRL_REG0,
         REG_A6XX_SP_VS_CONFIG,
         REG_A6XX_HLSQ_VS_CNTL,
         REG_A6XX_SP_VS_OBJ_START_LO,
         CP_LOAD_STATE6_GEOM,
         SB6_VS_SHADER,
      },
      [MESA_SHADER_TESS_CTRL] = {
         REG_A6XX_SP_HS_CTRL_REG0,
         REG_A6XX_SP_HS_CONFIG,
         REG_A6XX_HLSQ_HS_CNTL,
         REG_A6XX_SP_HS_OBJ_START_LO,
         CP_LOAD_STATE6_GEOM,
         SB6_HS_SHADER,
      },
      [MESA_SHADER_TESS_EVAL] = {
         REG_A6XX_SP_DS_CTRL_REG0,
         REG_A6XX_SP_DS_CONFIG,
         REG_A6XX_HLSQ_DS_CNTL,
         REG_A6XX_SP_DS_OBJ_START_LO,
         CP_LOAD_STATE6_GEOM,
         SB6_DS_SHADER,
      },
      [MESA_SHADER_GEOMETRY] = {
         REG_A6XX_SP_GS_CTRL_REG0,
         REG_A6XX_SP_GS_CONFIG,
         REG_A6XX_HLSQ_GS_CNTL,
         REG_A6XX_SP_GS_OBJ_START_LO,
         CP_LOAD_STATE6_GEOM,
         SB6_GS_SHADER,
      },
      [MESA_SHADER_FRAGMENT] = {
         REG_A6XX_SP_FS_CTRL_REG0,
         REG_A6XX_SP_FS_CONFIG,
         REG_A6XX_HLSQ_FS_CNTL,
         REG_A6XX_SP_FS_OBJ_START_LO,
         CP_LOAD_STATE6_FRAG,
         SB6_FS_SHADER,
      },
      [MESA_SHADER_COMPUTE] = {
         REG_A6XX_SP_CS_CTRL_REG0,
         REG_A6XX_SP_CS_CONFIG,
         REG_A6XX_HLSQ_CS_CNTL,
         REG_A6XX_SP_CS_OBJ_START_LO,
         CP_LOAD_STATE6_FRAG,
         SB6_CS_SHADER,
      },
   };
   const struct xs_config *cfg = &xs_config[stage];

   if (!xs) {
      /* shader stage disabled */
      tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_config, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, cfg->reg_hlsq_xs_ctrl, 1);
      tu_cs_emit(cs, 0);
      return;
   }

   bool is_fs = xs->type == MESA_SHADER_FRAGMENT;
   enum a3xx_threadsize threadsize = FOUR_QUADS;

   /* TODO:
    * the "threadsize" field may have nothing to do with threadsize,
    * use a value that matches the blob until it is figured out
    */
   if (xs->type == MESA_SHADER_GEOMETRY)
      threadsize = TWO_QUADS;

   tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_ctrl, 1);
   tu_cs_emit(cs,
              A6XX_SP_VS_CTRL_REG0_THREADSIZE(threadsize) |
              A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(xs->info.max_reg + 1) |
              A6XX_SP_VS_CTRL_REG0_MERGEDREGS |
              A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(xs->branchstack) |
              COND(xs->need_pixlod, A6XX_SP_VS_CTRL_REG0_PIXLODENABLE) |
              COND(xs->need_fine_derivatives, A6XX_SP_VS_CTRL_REG0_DIFF_FINE) |
              /* only fragment shader sets VARYING bit */
              COND(xs->total_in && is_fs, A6XX_SP_FS_CTRL_REG0_VARYING) |
              /* unknown bit, seems unnecessary */
              COND(is_fs, 0x1000000));

   tu_cs_emit_pkt4(cs, cfg->reg_sp_xs_config, 2);
   tu_cs_emit(cs, A6XX_SP_VS_CONFIG_ENABLED |
                  COND(xs->bindless_tex, A6XX_SP_VS_CONFIG_BINDLESS_TEX) |
                  COND(xs->bindless_samp, A6XX_SP_VS_CONFIG_BINDLESS_SAMP) |
                  COND(xs->bindless_ibo, A6XX_SP_VS_CONFIG_BINDLESS_IBO) |
                  COND(xs->bindless_ubo, A6XX_SP_VS_CONFIG_BINDLESS_UBO) |
                  A6XX_SP_VS_CONFIG_NTEX(xs->num_samp) |
                  A6XX_SP_VS_CONFIG_NSAMP(xs->num_samp));
   tu_cs_emit(cs, xs->instrlen);

   tu_cs_emit_pkt4(cs, cfg->reg_hlsq_xs_ctrl, 1);
   tu_cs_emit(cs, A6XX_HLSQ_VS_CNTL_CONSTLEN(align(xs->constlen, 4)) |
                  A6XX_HLSQ_VS_CNTL_ENABLED);

   /* emit program binary
    * binary_iova should be aligned to 1 instrlen unit (128 bytes)
    */

   assert((binary_iova & 0x7f) == 0);

   tu_cs_emit_pkt4(cs, cfg->reg_sp_vs_obj_start, 2);
   tu_cs_emit_qw(cs, binary_iova);

   tu_cs_emit_pkt7(cs, cfg->opcode, 3);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
                  CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                  CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                  CP_LOAD_STATE6_0_STATE_BLOCK(cfg->sb) |
                  CP_LOAD_STATE6_0_NUM_UNIT(xs->instrlen));
   tu_cs_emit_qw(cs, binary_iova);

   /* emit immediates */

   const struct ir3_const_state *const_state = &xs->shader->const_state;
   uint32_t base = const_state->offsets.immediate;
   int size = const_state->immediates_count;

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, xs->constlen) - base;

   if (size <= 0)
      return;

   tu_cs_emit_pkt7(cs, cfg->opcode, 3 + size * 4);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(cfg->sb) |
              CP_LOAD_STATE6_0_NUM_UNIT(size));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (unsigned i = 0; i < size; i++) {
      tu_cs_emit(cs, const_state->immediates[i].val[0]);
      tu_cs_emit(cs, const_state->immediates[i].val[1]);
      tu_cs_emit(cs, const_state->immediates[i].val[2]);
      tu_cs_emit(cs, const_state->immediates[i].val[3]);
   }
}

static void
tu6_emit_cs_config(struct tu_cs *cs, const struct tu_shader *shader,
                   const struct ir3_shader_variant *v,
                   uint32_t binary_iova)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
   tu_cs_emit(cs, 0xff);

   tu6_emit_xs_config(cs, MESA_SHADER_COMPUTE, v, binary_iova);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   tu_cs_emit(cs, 0x41);

   uint32_t local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   uint32_t work_group_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CS_CNTL_0, 2);
   tu_cs_emit(cs,
              A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
              A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
              A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
              A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
   tu_cs_emit(cs, 0x2fc);       /* HLSQ_CS_UNKNOWN_B998 */
}

static void
tu6_emit_vs_system_values(struct tu_cs *cs,
                          const struct ir3_shader_variant *vs,
                          const struct ir3_shader_variant *gs,
                          bool primid_passthru)
{
   const uint32_t vertexid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
   const uint32_t instanceid_regid =
      ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);
   const uint32_t primitiveid_regid = gs ?
      ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID) :
      regid(63, 0);
   const uint32_t gsheader_regid = gs ?
      ir3_find_sysval_regid(gs, SYSTEM_VALUE_GS_HEADER_IR3) :
      regid(63, 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_1, 6);
   tu_cs_emit(cs, A6XX_VFD_CONTROL_1_REGID4VTX(vertexid_regid) |
                  A6XX_VFD_CONTROL_1_REGID4INST(instanceid_regid) |
                  A6XX_VFD_CONTROL_1_REGID4PRIMID(primitiveid_regid) |
                  0xfc000000);
   tu_cs_emit(cs, 0x0000fcfc);   /* VFD_CONTROL_2 */
   tu_cs_emit(cs, 0xfcfcfcfc);   /* VFD_CONTROL_3 */
   tu_cs_emit(cs, 0x000000fc);   /* VFD_CONTROL_4 */
   tu_cs_emit(cs, A6XX_VFD_CONTROL_5_REGID_GSHEADER(gsheader_regid) |
                  0xfc00);       /* VFD_CONTROL_5 */
   tu_cs_emit(cs, COND(primid_passthru, A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU));   /* VFD_CONTROL_6 */
}

/* Add any missing varyings needed for stream-out. Otherwise varyings not
 * used by fragment shader will be stripped out.
 */
static void
tu6_link_streamout(struct ir3_shader_linkage *l,
                   const struct ir3_shader_variant *v)
{
   const struct ir3_stream_output_info *info = &v->shader->stream_output;

   /*
    * First, any stream-out varyings not already in linkage map (ie. also
    * consumed by frag shader) need to be added:
    */
   for (unsigned i = 0; i < info->num_outputs; i++) {
      const struct ir3_stream_output *out = &info->output[i];
      unsigned compmask =
         (1 << (out->num_components + out->start_component)) - 1;
      unsigned k = out->register_index;
      unsigned idx, nextloc = 0;

      /* psize/pos need to be the last entries in linkage map, and will
       * get added link_stream_out, so skip over them:
       */
      if (v->outputs[k].slot == VARYING_SLOT_PSIZ ||
          v->outputs[k].slot == VARYING_SLOT_POS)
         continue;

      for (idx = 0; idx < l->cnt; idx++) {
         if (l->var[idx].regid == v->outputs[k].regid)
            break;
         nextloc = MAX2(nextloc, l->var[idx].loc + 4);
      }

      /* add if not already in linkage map: */
      if (idx == l->cnt)
         ir3_link_add(l, v->outputs[k].regid, compmask, nextloc);

      /* expand component-mask if needed, ie streaming out all components
       * but frag shader doesn't consume all components:
       */
      if (compmask & ~l->var[idx].compmask) {
         l->var[idx].compmask |= compmask;
         l->max_loc = MAX2(l->max_loc, l->var[idx].loc +
                           util_last_bit(l->var[idx].compmask));
      }
   }
}

static void
tu6_setup_streamout(const struct ir3_shader_variant *v,
                    struct ir3_shader_linkage *l, struct tu_streamout_state *tf)
{
   const struct ir3_stream_output_info *info = &v->shader->stream_output;

   memset(tf, 0, sizeof(*tf));

   tf->prog_count = align(l->max_loc, 2) / 2;

   debug_assert(tf->prog_count < ARRAY_SIZE(tf->prog));

   /* set stride info to the streamout state */
   for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++)
      tf->stride[i] = info->stride[i];

   for (unsigned i = 0; i < info->num_outputs; i++) {
      const struct ir3_stream_output *out = &info->output[i];
      unsigned k = out->register_index;
      unsigned idx;

      /* Skip it, if there's an unused reg in the middle of outputs. */
      if (v->outputs[k].regid == INVALID_REG)
         continue;

      tf->ncomp[out->output_buffer] += out->num_components;

      /* linkage map sorted by order frag shader wants things, so
       * a bit less ideal here..
       */
      for (idx = 0; idx < l->cnt; idx++)
         if (l->var[idx].regid == v->outputs[k].regid)
            break;

      debug_assert(idx < l->cnt);

      for (unsigned j = 0; j < out->num_components; j++) {
         unsigned c   = j + out->start_component;
         unsigned loc = l->var[idx].loc + c;
         unsigned off = j + out->dst_offset;  /* in dwords */

         if (loc & 1) {
            tf->prog[loc/2] |= A6XX_VPC_SO_PROG_B_EN |
                               A6XX_VPC_SO_PROG_B_BUF(out->output_buffer) |
                               A6XX_VPC_SO_PROG_B_OFF(off * 4);
         } else {
            tf->prog[loc/2] |= A6XX_VPC_SO_PROG_A_EN |
                               A6XX_VPC_SO_PROG_A_BUF(out->output_buffer) |
                               A6XX_VPC_SO_PROG_A_OFF(off * 4);
         }
      }
   }

   tf->vpc_so_buf_cntl = A6XX_VPC_SO_BUF_CNTL_ENABLE |
                         COND(tf->ncomp[0] > 0, A6XX_VPC_SO_BUF_CNTL_BUF0) |
                         COND(tf->ncomp[1] > 0, A6XX_VPC_SO_BUF_CNTL_BUF1) |
                         COND(tf->ncomp[2] > 0, A6XX_VPC_SO_BUF_CNTL_BUF2) |
                         COND(tf->ncomp[3] > 0, A6XX_VPC_SO_BUF_CNTL_BUF3);
}

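/* Note on the packing above: each VPC_SO_PROG dword describes two component
 * copies, an "A" half for an even VPC location and a "B" half for the odd
 * location right after it, which is why loc/2 indexes the prog[] array and
 * the parity of loc selects between the A and B fields.
 */
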
static void
tu6_emit_const(struct tu_cs *cs, uint32_t opcode, uint32_t base,
               enum a6xx_state_block block, uint32_t offset,
               uint32_t size, uint32_t *dwords) {
   assert(size % 4 == 0);

   tu_cs_emit_pkt7(cs, opcode, 3 + size);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(block) |
              CP_LOAD_STATE6_0_NUM_UNIT(size / 4));

   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
   dwords = (uint32_t *)&((uint8_t *)dwords)[offset];

   tu_cs_emit_array(cs, dwords, size);
}

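/* Note: "size" here is in dwords while CP_LOAD_STATE6 counts vec4 units,
 * hence the size % 4 assert and NUM_UNIT(size / 4) above. E.g. pushing 8
 * dwords of constants emits NUM_UNIT(2) followed by the 8 payload dwords.
 */
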
static void
tu6_emit_link_map(struct tu_cs *cs,
                  const struct ir3_shader_variant *producer,
                  const struct ir3_shader_variant *consumer) {
   const struct ir3_const_state *const_state = &consumer->shader->const_state;
   uint32_t base = const_state->offsets.primitive_map;
   uint32_t patch_locs[MAX_VARYING] = { }, num_loc;
   num_loc = ir3_link_geometry_stages(producer, consumer, patch_locs);
   int size = DIV_ROUND_UP(num_loc, 4);

   size = (MIN2(size + base, consumer->constlen) - base) * 4;
   if (size <= 0)
      return;

   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, base, SB6_GS_SHADER, 0, size,
                  patch_locs);
}

static uint16_t
gl_primitive_to_tess(uint16_t primitive) {
   switch (primitive) {
   case GL_POINTS:
      return TESS_POINTS;
   case GL_LINE_STRIP:
      return TESS_LINES;
   case GL_TRIANGLE_STRIP:
      return TESS_CW_TRIS;
   default:
      unreachable("");
   }
}

static void
tu6_emit_vpc(struct tu_cs *cs,
             const struct ir3_shader_variant *vs,
             const struct ir3_shader_variant *gs,
             const struct ir3_shader_variant *fs,
             struct tu_streamout_state *tf)
{
   const struct ir3_shader_variant *last_shader = gs ?: vs;
   struct ir3_shader_linkage linkage = { .primid_loc = 0xff };
   if (fs)
      ir3_link_shaders(&linkage, last_shader, fs, true);

   if (last_shader->shader->stream_output.num_outputs)
      tu6_link_streamout(&linkage, last_shader);

   /* We do this after linking shaders in order to know whether PrimID
    * passthrough needs to be enabled.
    */
   bool primid_passthru = linkage.primid_loc != 0xff;
   tu6_emit_vs_system_values(cs, vs, gs, primid_passthru);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VAR_DISABLE(0), 4);
   tu_cs_emit(cs, ~linkage.varmask[0]);
   tu_cs_emit(cs, ~linkage.varmask[1]);
   tu_cs_emit(cs, ~linkage.varmask[2]);
   tu_cs_emit(cs, ~linkage.varmask[3]);

   /* a6xx finds position/pointsize at the end */
   const uint32_t position_regid =
      ir3_find_output_regid(last_shader, VARYING_SLOT_POS);
   const uint32_t pointsize_regid =
      ir3_find_output_regid(last_shader, VARYING_SLOT_PSIZ);
   const uint32_t layer_regid = gs ?
      ir3_find_output_regid(gs, VARYING_SLOT_LAYER) : regid(63, 0);

   uint32_t pointsize_loc = 0xff, position_loc = 0xff, layer_loc = 0xff;
   if (layer_regid != regid(63, 0)) {
      layer_loc = linkage.max_loc;
      ir3_link_add(&linkage, layer_regid, 0x1, linkage.max_loc);
   }
   if (position_regid != regid(63, 0)) {
      position_loc = linkage.max_loc;
      ir3_link_add(&linkage, position_regid, 0xf, linkage.max_loc);
   }
   if (pointsize_regid != regid(63, 0)) {
      pointsize_loc = linkage.max_loc;
      ir3_link_add(&linkage, pointsize_regid, 0x1, linkage.max_loc);
   }

   if (last_shader->shader->stream_output.num_outputs)
      tu6_setup_streamout(last_shader, &linkage, tf);

   /* map outputs of the last shader to VPC */
   assert(linkage.cnt <= 32);
   const uint32_t sp_out_count = DIV_ROUND_UP(linkage.cnt, 2);
   const uint32_t sp_vpc_dst_count = DIV_ROUND_UP(linkage.cnt, 4);
   uint32_t sp_out[16];
   uint32_t sp_vpc_dst[8];
   for (uint32_t i = 0; i < linkage.cnt; i++) {
      ((uint16_t *) sp_out)[i] =
         A6XX_SP_VS_OUT_REG_A_REGID(linkage.var[i].regid) |
         A6XX_SP_VS_OUT_REG_A_COMPMASK(linkage.var[i].compmask);
      ((uint8_t *) sp_vpc_dst)[i] =
         A6XX_SP_VS_VPC_DST_REG_OUTLOC0(linkage.var[i].loc);
   }

   if (gs)
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_OUT_REG(0), sp_out_count);
   else
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_OUT_REG(0), sp_out_count);
   tu_cs_emit_array(cs, sp_out, sp_out_count);

   if (gs)
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_VPC_DST_REG(0), sp_vpc_dst_count);
   else
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_VPC_DST_REG(0), sp_vpc_dst_count);
   tu_cs_emit_array(cs, sp_vpc_dst, sp_vpc_dst_count);

   tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMID_CNTL, 1);
   tu_cs_emit(cs, COND(primid_passthru, A6XX_PC_PRIMID_CNTL_PRIMID_PASSTHRU));

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);
   tu_cs_emit(cs, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs ? fs->total_in : 0) |
                  COND(fs && fs->total_in, A6XX_VPC_CNTL_0_VARYING) |
                  A6XX_VPC_CNTL_0_PRIMIDLOC(linkage.primid_loc) |
                  A6XX_VPC_CNTL_0_UNKLOC(0xff));

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK, 1);
   tu_cs_emit(cs, A6XX_VPC_PACK_POSITIONLOC(position_loc) |
                  A6XX_VPC_PACK_PSIZELOC(pointsize_loc) |
                  A6XX_VPC_PACK_STRIDE_IN_VPC(linkage.max_loc));

   if (gs) {
      uint32_t vertices_out, invocations, output, vec4_size;
      /* this detects the tu_clear_blit path, which doesn't set ->nir */
      if (gs->shader->nir) {
         tu6_emit_link_map(cs, vs, gs);
         vertices_out = gs->shader->nir->info.gs.vertices_out - 1;
         output = gl_primitive_to_tess(gs->shader->nir->info.gs.output_primitive);
         invocations = gs->shader->nir->info.gs.invocations - 1;
         /* Size of per-primitive allocation in ldlw memory in vec4s. */
         vec4_size = gs->shader->nir->info.gs.vertices_in *
                     DIV_ROUND_UP(vs->shader->output_size, 4);
      } else {
         vertices_out = 3;
         output = TESS_CW_TRIS;
         invocations = 0;
         vec4_size = 0;
      }

      uint32_t primitive_regid =
         ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID);
      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK_GS, 1);
      tu_cs_emit(cs, A6XX_VPC_PACK_GS_POSITIONLOC(position_loc) |
                     A6XX_VPC_PACK_GS_PSIZELOC(pointsize_loc) |
                     A6XX_VPC_PACK_GS_STRIDE_IN_VPC(linkage.max_loc));

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9105, 1);
      tu_cs_emit(cs, A6XX_VPC_UNKNOWN_9105_LAYERLOC(layer_loc) | 0xff00);

      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_809C, 1);
      tu_cs_emit(cs, CONDREG(layer_regid,
                             A6XX_GRAS_UNKNOWN_809C_GS_WRITES_LAYER));

      uint32_t flags_regid = ir3_find_output_regid(gs,
                                                   VARYING_SLOT_GS_VERTEX_FLAGS_IR3);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_PRIMITIVE_CNTL_GS, 1);
      tu_cs_emit(cs, A6XX_SP_PRIMITIVE_CNTL_GS_GSOUT(linkage.cnt) |
                     A6XX_SP_PRIMITIVE_CNTL_GS_FLAGS_REGID(flags_regid));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_2, 1);
      tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_2_STRIDE_IN_VPC(linkage.max_loc) |
                     CONDREG(pointsize_regid, A6XX_PC_PRIMITIVE_CNTL_2_PSIZE) |
                     CONDREG(layer_regid, A6XX_PC_PRIMITIVE_CNTL_2_LAYER) |
                     CONDREG(primitive_regid, A6XX_PC_PRIMITIVE_CNTL_2_PRIMITIVE_ID));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_5, 1);
      tu_cs_emit(cs,
                 A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(vertices_out) |
                 A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(output) |
                 A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(invocations));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_3, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8003, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9100, 1);
      tu_cs_emit(cs, 0xff);

      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9102, 1);
      tu_cs_emit(cs, 0xffff00);

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 1);
      tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(vec4_size));

      tu_cs_emit_pkt4(cs, REG_A6XX_PC_UNKNOWN_9B07, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_PRIM_SIZE, 1);
      tu_cs_emit(cs, vs->shader->output_size);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_PRIMITIVE_CNTL, 1);
   tu_cs_emit(cs, A6XX_SP_PRIMITIVE_CNTL_VSOUT(linkage.cnt));

   tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_1, 1);
   tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(linkage.max_loc) |
                  (last_shader->writes_psize ? A6XX_PC_PRIMITIVE_CNTL_1_PSIZE : 0));
}

static int
tu6_vpc_varying_mode(const struct ir3_shader_variant *fs,
                     uint32_t index,
                     uint8_t *interp_mode,
                     uint8_t *ps_repl_mode)
{
   enum
   {
      INTERP_SMOOTH = 0,
      INTERP_FLAT = 1,
      INTERP_ZERO = 2,
      INTERP_ONE = 3,
   };
   enum
   {
      PS_REPL_NONE = 0,
      PS_REPL_S = 1,
      PS_REPL_T = 2,
      PS_REPL_ONE_MINUS_T = 3,
   };

   const uint32_t compmask = fs->inputs[index].compmask;

   /* NOTE: varyings are packed, so if compmask is 0xb then first, second, and
    * fourth component occupy three consecutive varying slots
    */
   int shift = 0;
   *interp_mode = 0;
   *ps_repl_mode = 0;
   if (fs->inputs[index].slot == VARYING_SLOT_PNTC) {
      if (compmask & 0x1) {
         *ps_repl_mode |= PS_REPL_S << shift;
         shift += 2;
      }
      if (compmask & 0x2) {
         *ps_repl_mode |= PS_REPL_T << shift;
         shift += 2;
      }
      if (compmask & 0x4) {
         *interp_mode |= INTERP_ZERO << shift;
         shift += 2;
      }
      if (compmask & 0x8) {
         *interp_mode |= INTERP_ONE << 6;
         shift += 2;
      }
   } else if ((fs->inputs[index].interpolate == INTERP_MODE_FLAT) ||
              fs->inputs[index].rasterflat) {
      for (int i = 0; i < 4; i++) {
         if (compmask & (1 << i)) {
            *interp_mode |= INTERP_FLAT << shift;
            shift += 2;
         }
      }
   }

   return shift;
}

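/* Illustrative example: a gl_PointCoord (VARYING_SLOT_PNTC) input with
 * compmask 0x3 produces ps_repl_mode = PS_REPL_S | (PS_REPL_T << 2) = 0x9,
 * telling the hardware to substitute the point-sprite S/T coordinates for
 * the first two components instead of interpolating them.
 */
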
static void
tu6_emit_vpc_varying_modes(struct tu_cs *cs,
                           const struct ir3_shader_variant *fs)
{
   uint32_t interp_modes[8] = { 0 };
   uint32_t ps_repl_modes[8] = { 0 };

   if (fs) {
      for (int i = -1;
           (i = ir3_next_varying(fs, i)) < (int) fs->inputs_count;) {

         /* get the mode for input i */
         uint8_t interp_mode;
         uint8_t ps_repl_mode;
         const int bits =
            tu6_vpc_varying_mode(fs, i, &interp_mode, &ps_repl_mode);

         /* OR the mode into the array */
         const uint32_t inloc = fs->inputs[i].inloc * 2;
         uint32_t n = inloc / 32;
         uint32_t shift = inloc % 32;
         interp_modes[n] |= interp_mode << shift;
         ps_repl_modes[n] |= ps_repl_mode << shift;
         if (shift + bits > 32) {
            n++;
            shift = 32 - shift;

            interp_modes[n] |= interp_mode >> shift;
            ps_repl_modes[n] |= ps_repl_mode >> shift;
         }
      }
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
   tu_cs_emit_array(cs, interp_modes, 8);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
   tu_cs_emit_array(cs, ps_repl_modes, 8);
}

static void
tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs)
{
   uint32_t face_regid, coord_regid, zwcoord_regid, samp_id_regid;
   uint32_t ij_pix_regid, ij_samp_regid, ij_cent_regid, ij_size_regid;
   uint32_t smask_in_regid;

   bool sample_shading = fs->per_samp; /* TODO | key->sample_shading; */
   bool enable_varyings = fs->total_in > 0;

   samp_id_regid   = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);
   smask_in_regid  = ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);
   face_regid      = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);
   coord_regid     = ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);
   zwcoord_regid   = VALIDREG(coord_regid) ? coord_regid + 2 : regid(63, 0);
   ij_pix_regid    = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL);
   ij_samp_regid   = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE);
   ij_cent_regid   = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID);
   ij_size_regid   = ir3_find_sysval_regid(fs, SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE);

   if (fs->num_sampler_prefetch > 0) {
      assert(VALIDREG(ij_pix_regid));
      /* also, it seems like ij_pix is *required* to be r0.x */
      assert(ij_pix_regid == regid(0, 0));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_PREFETCH_CNTL, 1 + fs->num_sampler_prefetch);
   tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CNTL_COUNT(fs->num_sampler_prefetch) |
                  A6XX_SP_FS_PREFETCH_CNTL_UNK4(regid(63, 0)) |
                  0x7000);    // XXX
   for (int i = 0; i < fs->num_sampler_prefetch; i++) {
      const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
      tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CMD_SRC(prefetch->src) |
                     A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(prefetch->samp_id) |
                     A6XX_SP_FS_PREFETCH_CMD_TEX_ID(prefetch->tex_id) |
                     A6XX_SP_FS_PREFETCH_CMD_DST(prefetch->dst) |
                     A6XX_SP_FS_PREFETCH_CMD_WRMASK(prefetch->wrmask) |
                     COND(prefetch->half_precision, A6XX_SP_FS_PREFETCH_CMD_HALF) |
                     A6XX_SP_FS_PREFETCH_CMD_CMD(prefetch->cmd));
   }

   if (fs->num_sampler_prefetch > 0) {
      tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(0), fs->num_sampler_prefetch);
      for (int i = 0; i < fs->num_sampler_prefetch; i++) {
         const struct ir3_sampler_prefetch *prefetch = &fs->sampler_prefetch[i];
         tu_cs_emit(cs,
                    A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(prefetch->samp_bindless_id) |
                    A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(prefetch->tex_bindless_id));
      }
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
   tu_cs_emit(cs, 0x7);
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(face_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(samp_id_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(smask_in_regid) |
                  A6XX_HLSQ_CONTROL_2_REG_SIZE(ij_size_regid));
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_PIXEL(ij_pix_regid) |
                  A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_CENTROID(ij_cent_regid) |
                  0xfc00fc00);
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(coord_regid) |
                  A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(zwcoord_regid) |
                  A6XX_HLSQ_CONTROL_4_REG_BARY_IJ_PIXEL_PERSAMP(ij_samp_regid) |
                  0x0000fc00);
   tu_cs_emit(cs, 0xfc);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UNKNOWN_B980, 1);
   tu_cs_emit(cs, enable_varyings ? 3 : 1);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CNTL, 1);
   tu_cs_emit(cs,
              CONDREG(ij_pix_regid, A6XX_GRAS_CNTL_VARYING) |
              CONDREG(ij_cent_regid, A6XX_GRAS_CNTL_CENTROID) |
              CONDREG(ij_samp_regid, A6XX_GRAS_CNTL_PERSAMP_VARYING) |
              COND(VALIDREG(ij_size_regid) && !sample_shading, A6XX_GRAS_CNTL_SIZE) |
              COND(VALIDREG(ij_size_regid) && sample_shading, A6XX_GRAS_CNTL_SIZE_PERSAMP) |
              COND(fs->fragcoord_compmask != 0, A6XX_GRAS_CNTL_SIZE |
                   A6XX_GRAS_CNTL_COORD_MASK(fs->fragcoord_compmask)) |
              COND(fs->frag_face, A6XX_GRAS_CNTL_SIZE));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_RENDER_CONTROL0, 2);
   tu_cs_emit(cs,
              CONDREG(ij_pix_regid, A6XX_RB_RENDER_CONTROL0_VARYING) |
              CONDREG(ij_cent_regid, A6XX_RB_RENDER_CONTROL0_CENTROID) |
              CONDREG(ij_samp_regid, A6XX_RB_RENDER_CONTROL0_PERSAMP_VARYING) |
              COND(enable_varyings, A6XX_RB_RENDER_CONTROL0_UNK10) |
              COND(VALIDREG(ij_size_regid) && !sample_shading, A6XX_RB_RENDER_CONTROL0_SIZE) |
              COND(VALIDREG(ij_size_regid) && sample_shading, A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP) |
              COND(fs->fragcoord_compmask != 0, A6XX_RB_RENDER_CONTROL0_SIZE |
                   A6XX_RB_RENDER_CONTROL0_COORD_MASK(fs->fragcoord_compmask)) |
              COND(fs->frag_face, A6XX_RB_RENDER_CONTROL0_SIZE));
   tu_cs_emit(cs,
              CONDREG(smask_in_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEMASK) |
              CONDREG(samp_id_regid, A6XX_RB_RENDER_CONTROL1_SAMPLEID) |
              CONDREG(ij_size_regid, A6XX_RB_RENDER_CONTROL1_SIZE) |
              COND(fs->frag_face, A6XX_RB_RENDER_CONTROL1_FACENESS));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CNTL, 1);
   tu_cs_emit(cs, COND(sample_shading, A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE));

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8101, 1);
   tu_cs_emit(cs, COND(sample_shading, 0x6));  // XXX

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CNTL, 1);
   tu_cs_emit(cs, COND(sample_shading, A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE));
}

static void
tu6_emit_fs_outputs(struct tu_cs *cs,
                    const struct ir3_shader_variant *fs,
                    uint32_t mrt_count, bool dual_src_blend,
                    uint32_t render_components)
{
   uint32_t smask_regid, posz_regid;

   posz_regid  = ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);
   smask_regid = ir3_find_output_regid(fs, FRAG_RESULT_SAMPLE_MASK);

   uint32_t fragdata_regid[8];
   if (fs->color0_mrt) {
      fragdata_regid[0] = ir3_find_output_regid(fs, FRAG_RESULT_COLOR);
      for (uint32_t i = 1; i < ARRAY_SIZE(fragdata_regid); i++)
         fragdata_regid[i] = fragdata_regid[0];
   } else {
      for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++)
         fragdata_regid[i] = ir3_find_output_regid(fs, FRAG_RESULT_DATA0 + i);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(posz_regid) |
                  A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(smask_regid) |
                  COND(dual_src_blend, A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE) |
                  0xfc000000);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);
   for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++) {
      // TODO we could have a mix of half and full precision outputs,
      // we really need to figure out half-precision from IR3_REG_HALF
      tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(fragdata_regid[i]) |
                     (false ? A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION : 0));
   }

   tu_cs_emit_regs(cs,
                   A6XX_SP_FS_RENDER_COMPONENTS(.dword = render_components));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);
   tu_cs_emit(cs, COND(fs->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |
                  COND(fs->writes_smask, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK) |
                  COND(dual_src_blend, A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE));
   tu_cs_emit(cs, A6XX_RB_FS_OUTPUT_CNTL1_MRT(mrt_count));

   tu_cs_emit_regs(cs,
                   A6XX_RB_RENDER_COMPONENTS(.dword = render_components));

   enum a6xx_ztest_mode zmode;

   if (fs->no_earlyz || fs->has_kill || fs->writes_pos) {
      zmode = A6XX_LATE_Z;
   } else {
      zmode = A6XX_EARLY_Z;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL, 1);
   tu_cs_emit(cs, A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(zmode));

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_PLANE_CNTL, 1);
   tu_cs_emit(cs, A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE(zmode));
}

static void
tu6_emit_geometry_consts(struct tu_cs *cs,
                         const struct ir3_shader_variant *vs,
                         const struct ir3_shader_variant *gs) {
   unsigned num_vertices = gs->shader->nir->info.gs.vertices_in;

   uint32_t params[4] = {
      vs->shader->output_size * num_vertices * 4,  /* primitive stride */
      vs->shader->output_size * 4,                 /* vertex stride */
      0,
      0,
   };
   uint32_t vs_base = vs->shader->const_state.offsets.primitive_param;
   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, vs_base, SB6_VS_SHADER, 0,
                  ARRAY_SIZE(params), params);

   uint32_t gs_base = gs->shader->const_state.offsets.primitive_param;
   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, gs_base, SB6_GS_SHADER, 0,
                  ARRAY_SIZE(params), params);
}

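/* Worked example (illustrative): with a VS output_size of 8 dwords and a GS
 * consuming triangles (vertices_in = 3), the params above are a primitive
 * stride of 8 * 3 * 4 = 96 bytes and a vertex stride of 8 * 4 = 32 bytes,
 * which both stages use to address per-vertex data in ldlw memory.
 */
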
/* get pointer to first variant, return NULL if shader is NULL */
static const struct ir3_shader_variant *
tu_shader_get_variant(const struct tu_shader *shader)
{
   return shader ? &shader->variants[0] : NULL;
}

static void
tu6_emit_program(struct tu_cs *cs,
                 struct tu_pipeline_builder *builder,
                 const struct tu_bo *binary_bo,
                 bool binning_pass,
                 struct tu_streamout_state *tf)
{
   const struct ir3_shader_variant *vs =
      tu_shader_get_variant(builder->shaders[MESA_SHADER_VERTEX]);
   const struct ir3_shader_variant *gs =
      tu_shader_get_variant(builder->shaders[MESA_SHADER_GEOMETRY]);
   const struct ir3_shader_variant *fs =
      tu_shader_get_variant(builder->shaders[MESA_SHADER_FRAGMENT]);
   gl_shader_stage stage = MESA_SHADER_VERTEX;

   STATIC_ASSERT(MESA_SHADER_VERTEX == 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
   tu_cs_emit(cs, 0xff); /* XXX */

   /* if we have streamout, use full VS in binning pass, as the
    * binning pass VS will have outputs on other than position/psize
    * stripped out
    *
    * GS also can have streamout, but we completely disable the
    * binning pass variant when GS is present because we don't
    * support compiling correct binning pass variants with GS
    */
   if (binning_pass && vs->shader->stream_output.num_outputs == 0 && !gs) {
      vs = &builder->shaders[MESA_SHADER_VERTEX]->variants[1];
      tu6_emit_xs_config(cs, stage, vs,
                         binary_bo->iova + builder->binning_vs_offset);
      stage++;
   }

   for (; stage < ARRAY_SIZE(builder->shaders); stage++) {
      const struct ir3_shader_variant *xs =
         tu_shader_get_variant(builder->shaders[stage]);

      if (stage == MESA_SHADER_FRAGMENT && binning_pass)
         fs = xs = NULL;

      tu6_emit_xs_config(cs, stage, xs,
                         binary_bo->iova + builder->shader_offsets[stage]);
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
   tu_cs_emit(cs, 0);

   tu6_emit_vpc(cs, vs, gs, fs, tf);
   tu6_emit_vpc_varying_modes(cs, fs);

   if (fs) {
      tu6_emit_fs_inputs(cs, fs);
      tu6_emit_fs_outputs(cs, fs, builder->color_attachment_count,
                          builder->use_dual_src_blend,
                          builder->render_components);
   } else {
      /* TODO: check if these can be skipped if fs is disabled */
      struct ir3_shader_variant dummy_variant = {};
      tu6_emit_fs_inputs(cs, &dummy_variant);
      tu6_emit_fs_outputs(cs, &dummy_variant, builder->color_attachment_count,
                          builder->use_dual_src_blend,
                          builder->render_components);
   }

   if (gs)
      tu6_emit_geometry_consts(cs, vs, gs);
}

static void
tu6_emit_vertex_input(struct tu_cs *cs,
                      const struct ir3_shader_variant *vs,
                      const VkPipelineVertexInputStateCreateInfo *info,
                      uint32_t *bindings_used)
{
   uint32_t vfd_decode_idx = 0;
   uint32_t binding_instanced = 0; /* bitmask of instanced bindings */

   for (uint32_t i = 0; i < info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *binding =
         &info->pVertexBindingDescriptions[i];

      tu_cs_emit_regs(cs,
                      A6XX_VFD_FETCH_STRIDE(binding->binding, binding->stride));

      if (binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
         binding_instanced |= 1 << binding->binding;

      *bindings_used |= 1 << binding->binding;
   }

   /* TODO: emit all VFD_DECODE/VFD_DEST_CNTL in same (two) pkt4 */

   for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *attr =
         &info->pVertexAttributeDescriptions[i];
      uint32_t input_idx;

      for (input_idx = 0; input_idx < vs->inputs_count; input_idx++) {
         if ((vs->inputs[input_idx].slot - VERT_ATTRIB_GENERIC0) == attr->location)
            break;
      }

      /* attribute not used, skip it */
      if (input_idx == vs->inputs_count)
         continue;

      const struct tu_native_format format = tu6_format_vtx(attr->format);
      tu_cs_emit_regs(cs,
                      A6XX_VFD_DECODE_INSTR(vfd_decode_idx,
                        .idx = attr->binding,
                        .offset = attr->offset,
                        .instanced = binding_instanced & (1 << attr->binding),
                        .format = format.fmt,
                        .swap = format.swap,
                        .unk30 = 1,
                        ._float = !vk_format_is_int(attr->format)),
                      A6XX_VFD_DECODE_STEP_RATE(vfd_decode_idx, 1));

      tu_cs_emit_regs(cs,
                      A6XX_VFD_DEST_CNTL_INSTR(vfd_decode_idx,
                        .writemask = vs->inputs[input_idx].compmask,
                        .regid = vs->inputs[input_idx].regid));

      vfd_decode_idx++;
   }

   tu_cs_emit_regs(cs,
                   A6XX_VFD_CONTROL_0(
                     .fetch_cnt = vfd_decode_idx, /* decode_cnt for binning pass ? */
                     .decode_cnt = vfd_decode_idx));
}

static uint32_t
tu6_guardband_adj(uint32_t v)
{
   if (v > 256)
      return (uint32_t)(511.0 - 65.0 * (log2(v) - 8.0));
   else
      return 511;
}

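/* Worked example: a 512-pixel viewport span gives
 * 511 - 65 * (log2(512) - 8) = 511 - 65 = 446, i.e. the guardband shrinks
 * logarithmically as the viewport grows.
 */
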
void
tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport)
{
   float offsets[3];
   float scales[3];
   scales[0] = viewport->width / 2.0f;
   scales[1] = viewport->height / 2.0f;
   scales[2] = viewport->maxDepth - viewport->minDepth;
   offsets[0] = viewport->x + scales[0];
   offsets[1] = viewport->y + scales[1];
   offsets[2] = viewport->minDepth;

   VkOffset2D min;
   VkOffset2D max;
   min.x = (int32_t) viewport->x;
   max.x = (int32_t) ceilf(viewport->x + viewport->width);
   if (viewport->height >= 0.0f) {
      min.y = (int32_t) viewport->y;
      max.y = (int32_t) ceilf(viewport->y + viewport->height);
   } else {
      min.y = (int32_t)(viewport->y + viewport->height);
      max.y = (int32_t) ceilf(viewport->y);
   }
   /* the spec allows viewport->height to be 0.0f */
   if (min.y == max.y)
      max.y++;
   assert(min.x >= 0 && min.x < max.x);
   assert(min.y >= 0 && min.y < max.y);

   VkExtent2D guardband_adj;
   guardband_adj.width = tu6_guardband_adj(max.x - min.x);
   guardband_adj.height = tu6_guardband_adj(max.y - min.y);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_VPORT_XOFFSET_0, 6);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XOFFSET_0(offsets[0]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_XSCALE_0(scales[0]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YOFFSET_0(offsets[1]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_YSCALE_0(scales[1]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZOFFSET_0(offsets[2]).value);
   tu_cs_emit(cs, A6XX_GRAS_CL_VPORT_ZSCALE_0(scales[2]).value);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0, 2);
   tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(min.x) |
                  A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(min.y));
   tu_cs_emit(cs, A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(max.x - 1) |
                  A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(max.y - 1));

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ, 1);
   tu_cs_emit(cs,
              A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(guardband_adj.width) |
              A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(guardband_adj.height));

   float z_clamp_min = MIN2(viewport->minDepth, viewport->maxDepth);
   float z_clamp_max = MAX2(viewport->minDepth, viewport->maxDepth);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_CL_Z_CLAMP_MIN(z_clamp_min),
                   A6XX_GRAS_CL_Z_CLAMP_MAX(z_clamp_max));

   tu_cs_emit_regs(cs,
                   A6XX_RB_Z_CLAMP_MIN(z_clamp_min),
                   A6XX_RB_Z_CLAMP_MAX(z_clamp_max));
}

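/* Illustrative numbers: a viewport of x=0, y=0, width=1920, height=1080 and
 * depth range [0,1] yields scales = {960, 540, 1} and offsets = {960, 540, 0},
 * mapping NDC (-1..1, -1..1, 0..1) onto window coordinates as the
 * GRAS_CL_VPORT registers expect.
 */
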
void
tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor)
{
   const VkOffset2D min = scissor->offset;
   const VkOffset2D max = {
      scissor->offset.x + scissor->extent.width,
      scissor->offset.y + scissor->extent.height,
   };

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0, 2);
   tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(min.x) |
                  A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(min.y));
   tu_cs_emit(cs, A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(max.x - 1) |
                  A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(max.y - 1));
}

void
tu6_emit_sample_locations(struct tu_cs *cs, const VkSampleLocationsInfoEXT *samp_loc)
{
   if (!samp_loc) {
      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 1);
      tu_cs_emit(cs, 0);

      tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 1);
      tu_cs_emit(cs, 0);
      return;
   }

   assert(samp_loc->sampleLocationsPerPixel == samp_loc->sampleLocationsCount);
   assert(samp_loc->sampleLocationGridSize.width == 1);
   assert(samp_loc->sampleLocationGridSize.height == 1);

   uint32_t sample_config =
      A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE;
   uint32_t sample_locations = 0;
   for (uint32_t i = 0; i < samp_loc->sampleLocationsCount; i++) {
      sample_locations |=
         (A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(samp_loc->pSampleLocations[i].x) |
          A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(samp_loc->pSampleLocations[i].y)) << i*8;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SAMPLE_CONFIG, 2);
   tu_cs_emit(cs, sample_config);
   tu_cs_emit(cs, sample_locations);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_SAMPLE_CONFIG, 2);
   tu_cs_emit(cs, sample_config);
   tu_cs_emit(cs, sample_locations);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_TP_SAMPLE_CONFIG, 2);
   tu_cs_emit(cs, sample_config);
   tu_cs_emit(cs, sample_locations);
}

static void
tu6_emit_gras_unknowns(struct tu_cs *cs)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8001, 1);
   tu_cs_emit(cs, 0x0);
}

static void
tu6_emit_point_size(struct tu_cs *cs)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POINT_MINMAX, 2);
   tu_cs_emit(cs, A6XX_GRAS_SU_POINT_MINMAX_MIN(1.0f / 16.0f) |
                  A6XX_GRAS_SU_POINT_MINMAX_MAX(4092.0f));
   tu_cs_emit(cs, A6XX_GRAS_SU_POINT_SIZE(1.0f).value);
}

static uint32_t
tu6_gras_su_cntl(const VkPipelineRasterizationStateCreateInfo *rast_info,
                 VkSampleCountFlagBits samples)
{
   uint32_t gras_su_cntl = 0;

   if (rast_info->cullMode & VK_CULL_MODE_FRONT_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_FRONT;
   if (rast_info->cullMode & VK_CULL_MODE_BACK_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_BACK;

   if (rast_info->frontFace == VK_FRONT_FACE_CLOCKWISE)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_FRONT_CW;

   /* don't set A6XX_GRAS_SU_CNTL_LINEHALFWIDTH */

   if (rast_info->depthBiasEnable)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_POLY_OFFSET;

   if (samples > VK_SAMPLE_COUNT_1_BIT)
      gras_su_cntl |= A6XX_GRAS_SU_CNTL_MSAA_ENABLE;

   return gras_su_cntl;
}

void
tu6_emit_gras_su_cntl(struct tu_cs *cs,
                      uint32_t gras_su_cntl,
                      float line_width)
{
   assert((gras_su_cntl & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK) == 0);
   gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(line_width / 2.0f);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_CNTL, 1);
   tu_cs_emit(cs, gras_su_cntl);
}

void
tu6_emit_depth_bias(struct tu_cs *cs,
                    float constant_factor,
                    float clamp,
                    float slope_factor)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE, 3);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_SCALE(slope_factor).value);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET(constant_factor).value);
   tu_cs_emit(cs, A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(clamp).value);
}

static void
tu6_emit_alpha_control_disable(struct tu_cs *cs)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_ALPHA_CONTROL, 1);
   tu_cs_emit(cs, 0);
}

static void
tu6_emit_depth_control(struct tu_cs *cs,
                       const VkPipelineDepthStencilStateCreateInfo *ds_info,
                       const VkPipelineRasterizationStateCreateInfo *rast_info)
{
   assert(!ds_info->depthBoundsTestEnable);

   uint32_t rb_depth_cntl = 0;
   if (ds_info->depthTestEnable) {
      rb_depth_cntl |=
         A6XX_RB_DEPTH_CNTL_Z_ENABLE |
         A6XX_RB_DEPTH_CNTL_ZFUNC(tu6_compare_func(ds_info->depthCompareOp)) |
         A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE;

      if (rast_info->depthClampEnable)
         rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE;

      if (ds_info->depthWriteEnable)
         rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_CNTL, 1);
   tu_cs_emit(cs, rb_depth_cntl);
}


static void
tu6_emit_stencil_control(struct tu_cs *cs,
                         const VkPipelineDepthStencilStateCreateInfo *ds_info)
{
   uint32_t rb_stencil_control = 0;
   if (ds_info->stencilTestEnable) {
      const VkStencilOpState *front = &ds_info->front;
      const VkStencilOpState *back = &ds_info->back;
      rb_stencil_control |=
         A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
         A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
         A6XX_RB_STENCIL_CONTROL_STENCIL_READ |
         A6XX_RB_STENCIL_CONTROL_FUNC(tu6_compare_func(front->compareOp)) |
         A6XX_RB_STENCIL_CONTROL_FAIL(tu6_stencil_op(front->failOp)) |
         A6XX_RB_STENCIL_CONTROL_ZPASS(tu6_stencil_op(front->passOp)) |
         A6XX_RB_STENCIL_CONTROL_ZFAIL(tu6_stencil_op(front->depthFailOp)) |
         A6XX_RB_STENCIL_CONTROL_FUNC_BF(tu6_compare_func(back->compareOp)) |
         A6XX_RB_STENCIL_CONTROL_FAIL_BF(tu6_stencil_op(back->failOp)) |
         A6XX_RB_STENCIL_CONTROL_ZPASS_BF(tu6_stencil_op(back->passOp)) |
         A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(tu6_stencil_op(back->depthFailOp));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_CONTROL, 1);
   tu_cs_emit(cs, rb_stencil_control);
}

static void
tu6_emit_stencil_compare_mask(struct tu_cs *cs, uint32_t front, uint32_t back)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCILMASK, 1);
   tu_cs_emit(
      cs, A6XX_RB_STENCILMASK_MASK(front) | A6XX_RB_STENCILMASK_BFMASK(back));
}

static void
tu6_emit_stencil_write_mask(struct tu_cs *cs, uint32_t front, uint32_t back)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCILWRMASK, 1);
   tu_cs_emit(cs, A6XX_RB_STENCILWRMASK_WRMASK(front) |
                  A6XX_RB_STENCILWRMASK_BFWRMASK(back));
}

static void
tu6_emit_stencil_reference(struct tu_cs *cs, uint32_t front, uint32_t back)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCILREF, 1);
   tu_cs_emit(cs,
              A6XX_RB_STENCILREF_REF(front) | A6XX_RB_STENCILREF_BFREF(back));
}

static uint32_t
tu6_rb_mrt_blend_control(const VkPipelineColorBlendAttachmentState *att,
                         bool has_alpha)
{
   const enum a3xx_rb_blend_opcode color_op = tu6_blend_op(att->colorBlendOp);
   const enum adreno_rb_blend_factor src_color_factor = tu6_blend_factor(
      has_alpha ? att->srcColorBlendFactor
                : tu_blend_factor_no_dst_alpha(att->srcColorBlendFactor));
   const enum adreno_rb_blend_factor dst_color_factor = tu6_blend_factor(
      has_alpha ? att->dstColorBlendFactor
                : tu_blend_factor_no_dst_alpha(att->dstColorBlendFactor));
   const enum a3xx_rb_blend_opcode alpha_op = tu6_blend_op(att->alphaBlendOp);
   const enum adreno_rb_blend_factor src_alpha_factor =
      tu6_blend_factor(att->srcAlphaBlendFactor);
   const enum adreno_rb_blend_factor dst_alpha_factor =
      tu6_blend_factor(att->dstAlphaBlendFactor);

   return A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(src_color_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(color_op) |
          A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(dst_color_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(src_alpha_factor) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(alpha_op) |
          A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(dst_alpha_factor);
}
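
/* When the attachment format has no alpha channel, destination-alpha reads
 * behave as 1.0, which is why tu_blend_factor_no_dst_alpha() is used above
 * to rewrite DST_ALPHA-based color factors into their ONE/ZERO equivalents.
 */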

static uint32_t
tu6_rb_mrt_control(const VkPipelineColorBlendAttachmentState *att,
                   uint32_t rb_mrt_control_rop,
                   bool is_int,
                   bool has_alpha)
{
   uint32_t rb_mrt_control =
      A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(att->colorWriteMask);

   /* ignore blending and logic op for integer attachments */
   if (is_int) {
      rb_mrt_control |= A6XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
      return rb_mrt_control;
   }

   rb_mrt_control |= rb_mrt_control_rop;

   if (att->blendEnable) {
      rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND;

      if (has_alpha)
         rb_mrt_control |= A6XX_RB_MRT_CONTROL_BLEND2;
   }

   return rb_mrt_control;
}
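
/* Integer attachments cannot blend, so they get a plain ROP_COPY raster op
 * above; BLEND2 presumably enables the separate alpha blend path, hence the
 * has_alpha guard.
 */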

static void
tu6_emit_rb_mrt_controls(struct tu_cs *cs,
                         const VkPipelineColorBlendStateCreateInfo *blend_info,
                         const VkFormat attachment_formats[MAX_RTS],
                         uint32_t *blend_enable_mask)
{
   *blend_enable_mask = 0;

   bool rop_reads_dst = false;
   uint32_t rb_mrt_control_rop = 0;
   if (blend_info->logicOpEnable) {
      rop_reads_dst = tu_logic_op_reads_dst(blend_info->logicOp);
      rb_mrt_control_rop =
         A6XX_RB_MRT_CONTROL_ROP_ENABLE |
         A6XX_RB_MRT_CONTROL_ROP_CODE(tu6_rop(blend_info->logicOp));
   }

   for (uint32_t i = 0; i < blend_info->attachmentCount; i++) {
      const VkPipelineColorBlendAttachmentState *att =
         &blend_info->pAttachments[i];
      const VkFormat format = attachment_formats[i];

      uint32_t rb_mrt_control = 0;
      uint32_t rb_mrt_blend_control = 0;
      if (format != VK_FORMAT_UNDEFINED) {
         const bool is_int = vk_format_is_int(format);
         const bool has_alpha = vk_format_has_alpha(format);

         rb_mrt_control =
            tu6_rb_mrt_control(att, rb_mrt_control_rop, is_int, has_alpha);
         rb_mrt_blend_control = tu6_rb_mrt_blend_control(att, has_alpha);

         if (att->blendEnable || rop_reads_dst)
            *blend_enable_mask |= 1 << i;
      }

      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_CONTROL(i), 2);
      tu_cs_emit(cs, rb_mrt_control);
      tu_cs_emit(cs, rb_mrt_blend_control);
   }
}

static void
tu6_emit_blend_control(struct tu_cs *cs,
                       uint32_t blend_enable_mask,
                       bool dual_src_blend,
                       const VkPipelineMultisampleStateCreateInfo *msaa_info)
{
   const uint32_t sample_mask =
      msaa_info->pSampleMask ? (*msaa_info->pSampleMask & 0xffff)
                             : ((1 << msaa_info->rasterizationSamples) - 1);

   tu_cs_emit_regs(cs,
                   A6XX_SP_BLEND_CNTL(.enabled = blend_enable_mask,
                                      .dual_color_in_enable = dual_src_blend,
                                      .alpha_to_coverage = msaa_info->alphaToCoverageEnable,
                                      .unk8 = true));

   /* set A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND only when enabled? */
   tu_cs_emit_regs(cs,
                   A6XX_RB_BLEND_CNTL(.enable_blend = blend_enable_mask,
                                      .independent_blend = true,
                                      .sample_mask = sample_mask,
                                      .dual_color_in_enable = dual_src_blend,
                                      .alpha_to_coverage = msaa_info->alphaToCoverageEnable,
                                      .alpha_to_one = msaa_info->alphaToOneEnable));
}
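
/* Note: with no explicit pSampleMask, all samples up to
 * rasterizationSamples are enabled: ((1 << n) - 1) builds the n-bit
 * all-ones mask programmed into RB_BLEND_CNTL above.
 */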

static void
tu6_emit_blend_constants(struct tu_cs *cs, const float constants[4])
{
   tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLEND_RED_F32, 4);
   tu_cs_emit_array(cs, (const uint32_t *) constants, 4);
}

static VkResult
tu_pipeline_create(struct tu_device *dev,
                   struct tu_pipeline_layout *layout,
                   bool compute,
                   const VkAllocationCallbacks *pAllocator,
                   struct tu_pipeline **out_pipeline)
{
   struct tu_pipeline *pipeline =
      vk_zalloc2(&dev->alloc, pAllocator, sizeof(*pipeline), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pipeline)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   tu_cs_init(&pipeline->cs, dev, TU_CS_MODE_SUB_STREAM, 2048);

   /* Reserve the space now such that tu_cs_begin_sub_stream never fails. Note
    * that LOAD_STATE can potentially take up a large amount of space so we
    * calculate its size explicitly.
    */
   unsigned load_state_size = tu6_load_state_size(layout, compute);
   VkResult result = tu_cs_reserve_space(&pipeline->cs, 2048 + load_state_size);
   if (result != VK_SUCCESS) {
      vk_free2(&dev->alloc, pAllocator, pipeline);
      return result;
   }

   *out_pipeline = pipeline;

   return VK_SUCCESS;
}
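
/* All later pipeline state is written through short-lived sub-streams of
 * pipeline->cs. The pattern used throughout the parse_* helpers below is:
 *
 *    struct tu_cs sub;
 *    tu_cs_begin_sub_stream(&pipeline->cs, <max dwords>, &sub);
 *    ... emit packets into sub ...
 *    state_ib = tu_cs_end_sub_stream(&pipeline->cs, &sub);
 *
 * Because the space is reserved up front here, these calls cannot fail.
 */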

static VkResult
tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder)
{
   const VkPipelineShaderStageCreateInfo *stage_infos[MESA_SHADER_STAGES] = {
      NULL
   };
   for (uint32_t i = 0; i < builder->create_info->stageCount; i++) {
      gl_shader_stage stage =
         vk_to_mesa_shader_stage(builder->create_info->pStages[i].stage);
      stage_infos[stage] = &builder->create_info->pStages[i];
   }

   struct tu_shader_compile_options options;
   tu_shader_compile_options_init(&options, builder->create_info);

   /* compile shaders in reverse order */
   struct tu_shader *next_stage_shader = NULL;
   for (gl_shader_stage stage = MESA_SHADER_STAGES - 1;
        stage > MESA_SHADER_NONE; stage--) {
      const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
      if (!stage_info && stage != MESA_SHADER_FRAGMENT)
         continue;

      struct tu_shader *shader =
         tu_shader_create(builder->device, stage, stage_info, builder->layout,
                          builder->alloc);
      if (!shader)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      VkResult result =
         tu_shader_compile(builder->device, shader, next_stage_shader,
                           &options, builder->alloc);
      if (result != VK_SUCCESS)
         return result;

      builder->shaders[stage] = shader;
      builder->shader_offsets[stage] = builder->shader_total_size;
      builder->shader_total_size +=
         sizeof(uint32_t) * shader->variants[0].info.sizedwords;

      next_stage_shader = shader;
   }

   if (builder->shaders[MESA_SHADER_VERTEX]->has_binning_pass) {
      const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
      const struct ir3_shader_variant *variant;

      if (vs->ir3_shader.stream_output.num_outputs)
         variant = &vs->variants[0];
      else
         variant = &vs->variants[1];

      builder->binning_vs_offset = builder->shader_total_size;
      builder->shader_total_size +=
         sizeof(uint32_t) * variant->info.sizedwords;
   }

   return VK_SUCCESS;
}
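
/* Note: walking the stages back-to-front means each shader is compiled with
 * its consumer (next_stage_shader) already available, which the compiler
 * needs for cross-stage linking.
 */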

static VkResult
tu_pipeline_builder_upload_shaders(struct tu_pipeline_builder *builder,
                                   struct tu_pipeline *pipeline)
{
   struct tu_bo *bo = &pipeline->program.binary_bo;

   VkResult result =
      tu_bo_init_new(builder->device, bo, builder->shader_total_size);
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(builder->device, bo);
   if (result != VK_SUCCESS)
      return result;

   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct tu_shader *shader = builder->shaders[i];
      if (!shader)
         continue;

      memcpy(bo->map + builder->shader_offsets[i], shader->binary,
             sizeof(uint32_t) * shader->variants[0].info.sizedwords);
   }

   if (builder->shaders[MESA_SHADER_VERTEX]->has_binning_pass) {
      const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
      const struct ir3_shader_variant *variant;
      void *bin;

      if (vs->ir3_shader.stream_output.num_outputs) {
         variant = &vs->variants[0];
         bin = vs->binary;
      } else {
         variant = &vs->variants[1];
         bin = vs->binning_binary;
      }

      memcpy(bo->map + builder->binning_vs_offset, bin,
             sizeof(uint32_t) * variant->info.sizedwords);
   }

   return VK_SUCCESS;
}
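
/* Note: when the VS uses stream output, the full variants[0] shader is also
 * used for binning (presumably because transform feedback runs during the
 * binning pass), so its binary is simply uploaded a second time; otherwise
 * the dedicated binning variant and binary are used.
 */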

static void
tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
                                  struct tu_pipeline *pipeline)
{
   const VkPipelineDynamicStateCreateInfo *dynamic_info =
      builder->create_info->pDynamicState;

   if (!dynamic_info)
      return;

   for (uint32_t i = 0; i < dynamic_info->dynamicStateCount; i++) {
      pipeline->dynamic_state.mask |=
         tu_dynamic_state_bit(dynamic_info->pDynamicStates[i]);
   }
}

static void
tu_pipeline_set_linkage(struct tu_program_descriptor_linkage *link,
                        struct tu_shader *shader,
                        struct ir3_shader_variant *v)
{
   link->ubo_state = v->shader->ubo_state;
   link->const_state = v->shader->const_state;
   link->constlen = v->constlen;
   link->push_consts = shader->push_consts;
}

static void
tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   struct tu_cs prog_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, false,
                    &pipeline->streamout);
   pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, true,
                    &pipeline->streamout);
   pipeline->program.binning_state_ib =
      tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   VkShaderStageFlags stages = 0;
   for (unsigned i = 0; i < builder->create_info->stageCount; i++) {
      stages |= builder->create_info->pStages[i].stage;
   }
   pipeline->active_stages = stages;

   uint32_t desc_sets = 0;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!builder->shaders[i])
         continue;

      tu_pipeline_set_linkage(&pipeline->program.link[i],
                              builder->shaders[i],
                              &builder->shaders[i]->variants[0]);
      desc_sets |= builder->shaders[i]->active_desc_sets;
   }
   pipeline->active_desc_sets = desc_sets;

   if (builder->shaders[MESA_SHADER_FRAGMENT]) {
      memcpy(pipeline->program.input_attachment_idx,
             builder->shaders[MESA_SHADER_FRAGMENT]->attachment_idx,
             sizeof(pipeline->program.input_attachment_idx));
   }
}
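
/* Note: two program IBs are built above from the same uploaded binaries:
 * state_ib for normal rendering and binning_state_ib with the binning
 * variant of the VS for the binning pass.
 */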

static void
tu_pipeline_builder_parse_vertex_input(struct tu_pipeline_builder *builder,
                                       struct tu_pipeline *pipeline)
{
   const VkPipelineVertexInputStateCreateInfo *vi_info =
      builder->create_info->pVertexInputState;
   const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];

   struct tu_cs vi_cs;
   tu_cs_begin_sub_stream(&pipeline->cs,
                          MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
   tu6_emit_vertex_input(&vi_cs, &vs->variants[0], vi_info,
                         &pipeline->vi.bindings_used);
   pipeline->vi.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);

   if (vs->has_binning_pass) {
      tu_cs_begin_sub_stream(&pipeline->cs,
                             MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
      tu6_emit_vertex_input(
         &vi_cs, &vs->variants[1], vi_info, &pipeline->vi.bindings_used);
      pipeline->vi.binning_state_ib =
         tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
   }
}

static void
tu_pipeline_builder_parse_input_assembly(struct tu_pipeline_builder *builder,
                                         struct tu_pipeline *pipeline)
{
   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      builder->create_info->pInputAssemblyState;

   pipeline->ia.primtype = tu6_primtype(ia_info->topology);
   pipeline->ia.primitive_restart = ia_info->primitiveRestartEnable;
}

static void
tu_pipeline_builder_parse_viewport(struct tu_pipeline_builder *builder,
                                   struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pViewportState is a pointer to an instance of the
    *    VkPipelineViewportStateCreateInfo structure, and is ignored if the
    *    pipeline has rasterization disabled."
    *
    * We leave the relevant registers stale in that case.
    */
   if (builder->rasterizer_discard)
      return;

   const VkPipelineViewportStateCreateInfo *vp_info =
      builder->create_info->pViewportState;

   struct tu_cs vp_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 21, &vp_cs);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_VIEWPORT)) {
      assert(vp_info->viewportCount == 1);
      tu6_emit_viewport(&vp_cs, vp_info->pViewports);
   }

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_SCISSOR)) {
      assert(vp_info->scissorCount == 1);
      tu6_emit_scissor(&vp_cs, vp_info->pScissors);
   }

   pipeline->vp.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vp_cs);
}

static void
tu_pipeline_builder_parse_rasterization(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   const VkPipelineRasterizationStateCreateInfo *rast_info =
      builder->create_info->pRasterizationState;

   assert(rast_info->polygonMode == VK_POLYGON_MODE_FILL);

   struct tu_cs rast_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 20, &rast_cs);

   tu_cs_emit_regs(&rast_cs,
                   A6XX_GRAS_CL_CNTL(
                      .znear_clip_disable = rast_info->depthClampEnable,
                      .zfar_clip_disable = rast_info->depthClampEnable,
                      .unk5 = rast_info->depthClampEnable,
                      .zero_gb_scale_z = 1,
                      .vp_clip_code_ignore = 1));
   /* move to hw ctx init? */
   tu6_emit_gras_unknowns(&rast_cs);
   tu6_emit_point_size(&rast_cs);

   const uint32_t gras_su_cntl =
      tu6_gras_su_cntl(rast_info, builder->samples);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH))
      tu6_emit_gras_su_cntl(&rast_cs, gras_su_cntl, rast_info->lineWidth);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_DEPTH_BIAS)) {
      tu6_emit_depth_bias(&rast_cs, rast_info->depthBiasConstantFactor,
                          rast_info->depthBiasClamp,
                          rast_info->depthBiasSlopeFactor);
   }

   pipeline->rast.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &rast_cs);

   pipeline->rast.gras_su_cntl = gras_su_cntl;
}

static void
tu_pipeline_builder_parse_depth_stencil(struct tu_pipeline_builder *builder,
                                        struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pDepthStencilState is a pointer to an instance of the
    *    VkPipelineDepthStencilStateCreateInfo structure, and is ignored if
    *    the pipeline has rasterization disabled or if the subpass of the
    *    render pass the pipeline is created against does not use a
    *    depth/stencil attachment.
    *
    * Disable both depth and stencil tests if there is no ds attachment, and
    * disable the depth test if the ds attachment is S8_UINT, since S8_UINT
    * defines only the separate stencil attachment.
    */
   static const VkPipelineDepthStencilStateCreateInfo dummy_ds_info;
   const VkPipelineDepthStencilStateCreateInfo *ds_info =
      builder->depth_attachment_format != VK_FORMAT_UNDEFINED
         ? builder->create_info->pDepthStencilState
         : &dummy_ds_info;
   const VkPipelineDepthStencilStateCreateInfo *ds_info_depth =
      builder->depth_attachment_format != VK_FORMAT_S8_UINT
         ? ds_info : &dummy_ds_info;

   struct tu_cs ds_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 12, &ds_cs);

   /* move to hw ctx init? */
   tu6_emit_alpha_control_disable(&ds_cs);

   tu6_emit_depth_control(&ds_cs, ds_info_depth,
                          builder->create_info->pRasterizationState);
   tu6_emit_stencil_control(&ds_cs, ds_info);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
      tu6_emit_stencil_compare_mask(&ds_cs, ds_info->front.compareMask,
                                    ds_info->back.compareMask);
   }
   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
      tu6_emit_stencil_write_mask(&ds_cs, ds_info->front.writeMask,
                                  ds_info->back.writeMask);
   }
   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
      tu6_emit_stencil_reference(&ds_cs, ds_info->front.reference,
                                 ds_info->back.reference);
   }

   pipeline->ds.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &ds_cs);
}
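
/* Note: dummy_ds_info is static and therefore zero-initialized, so
 * substituting it effectively disables the depth and stencil tests
 * (every enable flag in it reads as VK_FALSE).
 */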

static void
tu_pipeline_builder_parse_multisample_and_color_blend(
   struct tu_pipeline_builder *builder, struct tu_pipeline *pipeline)
{
   /* The spec says:
    *
    *    pMultisampleState is a pointer to an instance of the
    *    VkPipelineMultisampleStateCreateInfo, and is ignored if the pipeline
    *    has rasterization disabled.
    *
    * Also:
    *
    *    pColorBlendState is a pointer to an instance of the
    *    VkPipelineColorBlendStateCreateInfo structure, and is ignored if the
    *    pipeline has rasterization disabled or if the subpass of the render
    *    pass the pipeline is created against does not use any color
    *    attachments.
    *
    * We leave the relevant registers stale when rasterization is disabled.
    */
   if (builder->rasterizer_discard)
      return;

   static const VkPipelineColorBlendStateCreateInfo dummy_blend_info;
   const VkPipelineMultisampleStateCreateInfo *msaa_info =
      builder->create_info->pMultisampleState;
   const VkPipelineColorBlendStateCreateInfo *blend_info =
      builder->use_color_attachments ? builder->create_info->pColorBlendState
                                     : &dummy_blend_info;

   struct tu_cs blend_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, MAX_RTS * 3 + 18, &blend_cs);

   uint32_t blend_enable_mask;
   tu6_emit_rb_mrt_controls(&blend_cs, blend_info,
                            builder->color_attachment_formats,
                            &blend_enable_mask);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_BLEND_CONSTANTS))
      tu6_emit_blend_constants(&blend_cs, blend_info->blendConstants);

   if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_SAMPLE_LOCATIONS)) {
      const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =
         vk_find_struct_const(msaa_info->pNext,
                              PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
      const VkSampleLocationsInfoEXT *samp_loc = NULL;

      if (sample_locations && sample_locations->sampleLocationsEnable)
         samp_loc = &sample_locations->sampleLocationsInfo;

      tu6_emit_sample_locations(&blend_cs, samp_loc);
   }

   tu6_emit_blend_control(&blend_cs, blend_enable_mask,
                          builder->use_dual_src_blend, msaa_info);

   pipeline->blend.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &blend_cs);
}

static void
tu_pipeline_finish(struct tu_pipeline *pipeline,
                   struct tu_device *dev,
                   const VkAllocationCallbacks *alloc)
{
   tu_cs_finish(&pipeline->cs);

   if (pipeline->program.binary_bo.gem_handle)
      tu_bo_finish(dev, &pipeline->program.binary_bo);
}

static VkResult
tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
                          struct tu_pipeline **pipeline)
{
   VkResult result = tu_pipeline_create(builder->device, builder->layout,
                                        false, builder->alloc, pipeline);
   if (result != VK_SUCCESS)
      return result;

   (*pipeline)->layout = builder->layout;

   /* compile and upload shaders */
   result = tu_pipeline_builder_compile_shaders(builder);
   if (result == VK_SUCCESS)
      result = tu_pipeline_builder_upload_shaders(builder, *pipeline);
   if (result != VK_SUCCESS) {
      tu_pipeline_finish(*pipeline, builder->device, builder->alloc);
      vk_free2(&builder->device->alloc, builder->alloc, *pipeline);
      *pipeline = VK_NULL_HANDLE;

      return result;
   }

   tu_pipeline_builder_parse_dynamic(builder, *pipeline);
   tu_pipeline_builder_parse_shader_stages(builder, *pipeline);
   tu_pipeline_builder_parse_vertex_input(builder, *pipeline);
   tu_pipeline_builder_parse_input_assembly(builder, *pipeline);
   tu_pipeline_builder_parse_viewport(builder, *pipeline);
   tu_pipeline_builder_parse_rasterization(builder, *pipeline);
   tu_pipeline_builder_parse_depth_stencil(builder, *pipeline);
   tu_pipeline_builder_parse_multisample_and_color_blend(builder, *pipeline);
   tu6_emit_load_state(*pipeline, false);

   /* we should have reserved enough space upfront such that the CS never
    * grows
    */
   assert((*pipeline)->cs.bo_count == 1);

   return VK_SUCCESS;
}

static void
tu_pipeline_builder_finish(struct tu_pipeline_builder *builder)
{
   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!builder->shaders[i])
         continue;
      tu_shader_destroy(builder->device, builder->shaders[i], builder->alloc);
   }
}

static void
tu_pipeline_builder_init_graphics(
   struct tu_pipeline_builder *builder,
   struct tu_device *dev,
   struct tu_pipeline_cache *cache,
   const VkGraphicsPipelineCreateInfo *create_info,
   const VkAllocationCallbacks *alloc)
{
   TU_FROM_HANDLE(tu_pipeline_layout, layout, create_info->layout);

   *builder = (struct tu_pipeline_builder) {
      .device = dev,
      .cache = cache,
      .create_info = create_info,
      .alloc = alloc,
      .layout = layout,
   };

   builder->rasterizer_discard =
      create_info->pRasterizationState->rasterizerDiscardEnable;

   if (builder->rasterizer_discard) {
      builder->samples = VK_SAMPLE_COUNT_1_BIT;
   } else {
      builder->samples = create_info->pMultisampleState->rasterizationSamples;

      const struct tu_render_pass *pass =
         tu_render_pass_from_handle(create_info->renderPass);
      const struct tu_subpass *subpass =
         &pass->subpasses[create_info->subpass];

      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      builder->depth_attachment_format = (a != VK_ATTACHMENT_UNUSED) ?
         pass->attachments[a].format : VK_FORMAT_UNDEFINED;

      assert(subpass->color_count == 0 ||
             !create_info->pColorBlendState ||
             subpass->color_count == create_info->pColorBlendState->attachmentCount);
      builder->color_attachment_count = subpass->color_count;
      for (uint32_t i = 0; i < subpass->color_count; i++) {
         const uint32_t a = subpass->color_attachments[i].attachment;
         if (a == VK_ATTACHMENT_UNUSED)
            continue;

         builder->color_attachment_formats[i] = pass->attachments[a].format;
         builder->use_color_attachments = true;
         builder->render_components |= 0xf << (i * 4);
      }

      if (tu_blend_state_is_dual_src(create_info->pColorBlendState)) {
         builder->color_attachment_count++;
         builder->use_dual_src_blend = true;
         /* dual source blending has an extra fs output in the 2nd slot */
         if (subpass->color_attachments[0].attachment != VK_ATTACHMENT_UNUSED)
            builder->render_components |= 0xf << 4;
      }
   }
}
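
/* Note: render_components packs one 4-bit RGBA write mask per MRT slot
 * (0xf << (i * 4)); the extra nibble written for dual-source blend covers
 * the second fragment output that the blend unit consumes.
 */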

static VkResult
tu_graphics_pipeline_create(VkDevice device,
                            VkPipelineCache pipelineCache,
                            const VkGraphicsPipelineCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_cache, cache, pipelineCache);

   struct tu_pipeline_builder builder;
   tu_pipeline_builder_init_graphics(&builder, dev, cache,
                                     pCreateInfo, pAllocator);

   struct tu_pipeline *pipeline = NULL;
   VkResult result = tu_pipeline_builder_build(&builder, &pipeline);
   tu_pipeline_builder_finish(&builder);

   if (result == VK_SUCCESS)
      *pPipeline = tu_pipeline_to_handle(pipeline);
   else
      *pPipeline = VK_NULL_HANDLE;

   return result;
}

VkResult
tu_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t count,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   VkResult final_result = VK_SUCCESS;

   for (uint32_t i = 0; i < count; i++) {
      VkResult result = tu_graphics_pipeline_create(device, pipelineCache,
                                                    &pCreateInfos[i], pAllocator,
                                                    &pPipelines[i]);

      if (result != VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}
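
/* Note: creation deliberately continues past a failed pipeline, as the spec
 * allows; failed entries are left as VK_NULL_HANDLE by
 * tu_graphics_pipeline_create() and the most recent error is returned.
 */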

static VkResult
tu_compute_upload_shader(VkDevice device,
                         struct tu_pipeline *pipeline,
                         struct tu_shader *shader)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   struct tu_bo *bo = &pipeline->program.binary_bo;
   struct ir3_shader_variant *v = &shader->variants[0];

   uint32_t shader_size = sizeof(uint32_t) * v->info.sizedwords;
   VkResult result =
      tu_bo_init_new(dev, bo, shader_size);
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(dev, bo);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo->map, shader->binary, shader_size);

   return VK_SUCCESS;
}

static VkResult
tu_compute_pipeline_create(VkDevice device,
                           VkPipelineCache _cache,
                           const VkComputePipelineCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipeline)
{
   TU_FROM_HANDLE(tu_device, dev, device);
   TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
   const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;
   VkResult result;

   struct tu_pipeline *pipeline;

   *pPipeline = VK_NULL_HANDLE;

   result = tu_pipeline_create(dev, layout, true, pAllocator, &pipeline);
   if (result != VK_SUCCESS)
      return result;

   pipeline->layout = layout;

   struct tu_shader_compile_options options;
   tu_shader_compile_options_init(&options, NULL);

   struct tu_shader *shader =
      tu_shader_create(dev, MESA_SHADER_COMPUTE, stage_info, layout, pAllocator);
   if (!shader) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   result = tu_shader_compile(dev, shader, NULL, &options, pAllocator);
   if (result != VK_SUCCESS)
      goto fail;

   struct ir3_shader_variant *v = &shader->variants[0];

   tu_pipeline_set_linkage(&pipeline->program.link[MESA_SHADER_COMPUTE],
                           shader, v);

   result = tu_compute_upload_shader(device, pipeline, shader);
   if (result != VK_SUCCESS)
      goto fail;

   for (int i = 0; i < 3; i++)
      pipeline->compute.local_size[i] = v->shader->nir->info.cs.local_size[i];

   struct tu_cs prog_cs;
   tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
   tu6_emit_cs_config(&prog_cs, shader, v, pipeline->program.binary_bo.iova);
   pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);

   tu6_emit_load_state(pipeline, true);

   *pPipeline = tu_pipeline_to_handle(pipeline);

   return VK_SUCCESS;

fail:
   if (shader)
      tu_shader_destroy(dev, shader, pAllocator);

   tu_pipeline_finish(pipeline, dev, pAllocator);
   vk_free2(&dev->alloc, pAllocator, pipeline);

   return result;
}

VkResult
tu_CreateComputePipelines(VkDevice device,
                          VkPipelineCache pipelineCache,
                          uint32_t count,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   VkResult final_result = VK_SUCCESS;

   for (uint32_t i = 0; i < count; i++) {
      VkResult result = tu_compute_pipeline_create(device, pipelineCache,
                                                   &pCreateInfos[i],
                                                   pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}

void
tu_DestroyPipeline(VkDevice _device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, dev, _device);
   TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);

   if (!_pipeline)
      return;

   tu_pipeline_finish(pipeline, dev, pAllocator);
   vk_free2(&dev->alloc, pAllocator, pipeline);
}