/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "nir/nir_builder.h"
#include "nir/nir_xfb_info.h"
#include "spirv/nir_spirv.h"

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"
struct radv_blend_state {
	uint32_t blend_enable_4bit;
	uint32_t need_src_alpha;

	uint32_t cb_color_control;
	uint32_t cb_target_mask;
	uint32_t cb_target_enabled_4bit;
	uint32_t sx_mrt_blend_opt[8];
	uint32_t cb_blend_control[8];

	uint32_t spi_shader_col_format;
	uint32_t col_format_is_int8;
	uint32_t col_format_is_int10;
	uint32_t cb_shader_mask;
	uint32_t db_alpha_to_mask;

	uint32_t commutative_4bit;

	bool single_cb_enable;
	bool mrt0_is_dual_src;
};
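/* Note on the *_4bit fields above: each color target (MRT) owns one nibble,
 * so bit (4 * i + c) refers to channel c of MRT i.  This is why the code
 * below tests expressions like "blend_enable_4bit & (0xfu << (i * 4))" to ask
 * whether blending touches any channel of MRT i.
 */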
struct radv_dsa_order_invariance {
	/* Whether the final result in Z/S buffers is guaranteed to be
	 * invariant under changes to the order in which fragments arrive.
	 */
	bool zs;

	/* Whether the set of fragments that pass the combined Z/S test is
	 * guaranteed to be invariant under changes to the order in which
	 * fragments arrive.
	 */
	bool pass_set;
};
static const VkPipelineMultisampleStateCreateInfo *
radv_pipeline_get_multisample_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
		return pCreateInfo->pMultisampleState;
	return NULL;
}

static const VkPipelineTessellationStateCreateInfo *
radv_pipeline_get_tessellation_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		if (pCreateInfo->pStages[i].stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ||
		    pCreateInfo->pStages[i].stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) {
			return pCreateInfo->pTessellationState;
		}
	}
	return NULL;
}

static const VkPipelineDepthStencilStateCreateInfo *
radv_pipeline_get_depth_stencil_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;

	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
	    subpass->depth_stencil_attachment)
		return pCreateInfo->pDepthStencilState;
	return NULL;
}

static const VkPipelineColorBlendStateCreateInfo *
radv_pipeline_get_color_blend_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;

	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
	    subpass->has_color_att)
		return pCreateInfo->pColorBlendState;
	return NULL;
}
bool radv_pipeline_has_ngg(const struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *variant = NULL;
	if (pipeline->shaders[MESA_SHADER_GEOMETRY])
		variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
	else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
		variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
	else if (pipeline->shaders[MESA_SHADER_VERTEX])
		variant = pipeline->shaders[MESA_SHADER_VERTEX];
	else
		return false;
	return variant->info.is_ngg;
}

bool radv_pipeline_has_ngg_passthrough(const struct radv_pipeline *pipeline)
{
	assert(radv_pipeline_has_ngg(pipeline));

	struct radv_shader_variant *variant = NULL;
	if (pipeline->shaders[MESA_SHADER_GEOMETRY])
		variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
	else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
		variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
	else if (pipeline->shaders[MESA_SHADER_VERTEX])
		variant = pipeline->shaders[MESA_SHADER_VERTEX];
	else
		return false;
	return variant->info.is_ngg_passthrough;
}

bool radv_pipeline_has_gs_copy_shader(const struct radv_pipeline *pipeline)
{
	if (!radv_pipeline_has_gs(pipeline))
		return false;

	/* The GS copy shader is required if the pipeline has GS on GFX6-GFX9.
	 * On GFX10, it might be required in rare cases if it's not possible to
	 * enable NGG.
	 */
	if (radv_pipeline_has_ngg(pipeline))
		return false;

	assert(pipeline->gs_copy_shader);
	return true;
}
static void
radv_pipeline_destroy(struct radv_device *device,
		      struct radv_pipeline *pipeline,
		      const VkAllocationCallbacks *allocator)
{
	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i)
		if (pipeline->shaders[i])
			radv_shader_variant_destroy(device, pipeline->shaders[i]);

	if (pipeline->gs_copy_shader)
		radv_shader_variant_destroy(device, pipeline->gs_copy_shader);

	free(pipeline->cs.buf);

	vk_object_base_finish(&pipeline->base);
	vk_free2(&device->vk.alloc, allocator, pipeline);
}

void radv_DestroyPipeline(
	VkDevice                                    _device,
	VkPipeline                                  _pipeline,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	if (!pipeline)
		return;

	radv_pipeline_destroy(device, pipeline, pAllocator);
}

static uint32_t get_hash_flags(struct radv_device *device)
{
	uint32_t hash_flags = 0;

	if (device->instance->debug_flags & RADV_DEBUG_NO_NGG)
		hash_flags |= RADV_HASH_SHADER_NO_NGG;
	if (device->physical_device->cs_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_CS_WAVE32;
	if (device->physical_device->ps_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_PS_WAVE32;
	if (device->physical_device->ge_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_GE_WAVE32;
	if (device->physical_device->use_llvm)
		hash_flags |= RADV_HASH_SHADER_LLVM;
	return hash_flags;
}
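/* Note: these flags are presumably folded into the shader hash so that
 * binaries compiled with different wave sizes, or with the LLVM backend,
 * do not collide in the pipeline/disk cache.
 */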
static void
radv_pipeline_init_scratch(struct radv_device *device,
			   struct radv_pipeline *pipeline)
{
	unsigned scratch_bytes_per_wave = 0;
	unsigned max_waves = 0;
	unsigned min_waves = 1;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i] &&
		    pipeline->shaders[i]->config.scratch_bytes_per_wave) {
			unsigned max_stage_waves = device->scratch_waves;

			scratch_bytes_per_wave = MAX2(scratch_bytes_per_wave,
			                              pipeline->shaders[i]->config.scratch_bytes_per_wave);

			max_stage_waves = MIN2(max_stage_waves,
			                       4 * device->physical_device->rad_info.num_good_compute_units *
			                       (256 / pipeline->shaders[i]->config.num_vgprs));
			max_waves = MAX2(max_waves, max_stage_waves);
		}
	}

	if (pipeline->shaders[MESA_SHADER_COMPUTE]) {
		unsigned group_size = pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[0] *
		                      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[1] *
		                      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[2];
		min_waves = MAX2(min_waves, round_up_u32(group_size, 64));
	}

	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
	pipeline->max_waves = max_waves;
}
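/* Note on the max_waves computation above: GCN/RDNA compute units contain
 * 4 SIMDs with (roughly) a 256-entry VGPR file each, so "256 / num_vgprs" is
 * approximately how many waves a SIMD can hold for this shader, and
 * "4 * num_good_compute_units * (256 / num_vgprs)" approximates the
 * device-wide wave capacity used to bound scratch allocation.
 */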
static uint32_t si_translate_blend_logic_op(VkLogicOp op)
{
	switch (op) {
	case VK_LOGIC_OP_CLEAR:
		return V_028808_ROP3_CLEAR;
	case VK_LOGIC_OP_AND:
		return V_028808_ROP3_AND;
	case VK_LOGIC_OP_AND_REVERSE:
		return V_028808_ROP3_AND_REVERSE;
	case VK_LOGIC_OP_COPY:
		return V_028808_ROP3_COPY;
	case VK_LOGIC_OP_AND_INVERTED:
		return V_028808_ROP3_AND_INVERTED;
	case VK_LOGIC_OP_NO_OP:
		return V_028808_ROP3_NO_OP;
	case VK_LOGIC_OP_XOR:
		return V_028808_ROP3_XOR;
	case VK_LOGIC_OP_OR:
		return V_028808_ROP3_OR;
	case VK_LOGIC_OP_NOR:
		return V_028808_ROP3_NOR;
	case VK_LOGIC_OP_EQUIVALENT:
		return V_028808_ROP3_EQUIVALENT;
	case VK_LOGIC_OP_INVERT:
		return V_028808_ROP3_INVERT;
	case VK_LOGIC_OP_OR_REVERSE:
		return V_028808_ROP3_OR_REVERSE;
	case VK_LOGIC_OP_COPY_INVERTED:
		return V_028808_ROP3_COPY_INVERTED;
	case VK_LOGIC_OP_OR_INVERTED:
		return V_028808_ROP3_OR_INVERTED;
	case VK_LOGIC_OP_NAND:
		return V_028808_ROP3_NAND;
	case VK_LOGIC_OP_SET:
		return V_028808_ROP3_SET;
	default:
		unreachable("Unhandled logic op");
	}
}
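/* Note: the V_028808_ROP3_* values follow the standard 3-input raster-op
 * (ROP3) encoding, which is why every VkLogicOp has a direct one-to-one
 * translation here.
 */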
static uint32_t si_translate_blend_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case VK_BLEND_OP_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case VK_BLEND_OP_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case VK_BLEND_OP_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		return 0;
	}
}

static uint32_t si_translate_blend_factor(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case VK_BLEND_FACTOR_ONE:
		return V_028780_BLEND_ONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case VK_BLEND_FACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case VK_BLEND_FACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case VK_BLEND_FACTOR_CONSTANT_COLOR:
		return V_028780_BLEND_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
		return V_028780_BLEND_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case VK_BLEND_FACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case VK_BLEND_FACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		return 0;
	}
}

static uint32_t si_translate_blend_opt_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028760_OPT_COMB_ADD;
	case VK_BLEND_OP_SUBTRACT:
		return V_028760_OPT_COMB_SUBTRACT;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028760_OPT_COMB_REVSUBTRACT;
	case VK_BLEND_OP_MIN:
		return V_028760_OPT_COMB_MIN;
	case VK_BLEND_OP_MAX:
		return V_028760_OPT_COMB_MAX;
	default:
		return V_028760_OPT_COMB_BLEND_DISABLED;
	}
}

static uint32_t si_translate_blend_opt_factor(VkBlendFactor factor, bool is_alpha)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_ALL;
	case VK_BLEND_FACTOR_ONE:
		return V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0
				: V_028760_BLEND_OPT_PRESERVE_C1_IGNORE_C0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1
				: V_028760_BLEND_OPT_PRESERVE_C0_IGNORE_C1;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE
				: V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;
	default:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
	}
}
/**
 * Get rid of DST in the blend factors by commuting the operands:
 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
 */
static void si_blend_remove_dst(unsigned *func, unsigned *src_factor,
				unsigned *dst_factor, unsigned expected_dst,
				unsigned replacement_src)
{
	if (*src_factor == expected_dst &&
	    *dst_factor == VK_BLEND_FACTOR_ZERO) {
		*src_factor = VK_BLEND_FACTOR_ZERO;
		*dst_factor = replacement_src;

		/* Commuting the operands requires reversing subtractions. */
		if (*func == VK_BLEND_OP_SUBTRACT)
			*func = VK_BLEND_OP_REVERSE_SUBTRACT;
		else if (*func == VK_BLEND_OP_REVERSE_SUBTRACT)
			*func = VK_BLEND_OP_SUBTRACT;
	}
}

static bool si_blend_factor_uses_dst(unsigned factor)
{
	return factor == VK_BLEND_FACTOR_DST_COLOR ||
	       factor == VK_BLEND_FACTOR_DST_ALPHA ||
	       factor == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
	       factor == VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA ||
	       factor == VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
}

static bool is_dual_src(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_SRC1_COLOR:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
	case VK_BLEND_FACTOR_SRC1_ALPHA:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return true;
	default:
		return false;
	}
}
static unsigned radv_choose_spi_color_format(VkFormat vk_format,
					     bool blend_enable,
					     bool blend_need_alpha)
{
	const struct vk_format_description *desc = vk_format_description(vk_format);
	struct ac_spi_color_formats formats = {};
	unsigned format, ntype, swap;

	format = radv_translate_colorformat(vk_format);
	ntype = radv_translate_color_numformat(vk_format, desc,
					       vk_format_get_first_non_void_channel(vk_format));
	swap = radv_translate_colorswap(vk_format, false);

	ac_choose_spi_color_formats(format, swap, ntype, false, &formats);

	if (blend_enable && blend_need_alpha)
		return formats.blend_alpha;
	else if (blend_need_alpha)
		return formats.alpha;
	else if (blend_enable)
		return formats.blend;
	else
		return formats.normal;
}
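/* Note: ac_choose_spi_color_formats() fills four variants of the export
 * format (normal, alpha, blend, blend_alpha); the branches above simply pick
 * the variant matching whether blending is enabled and whether the blend
 * equation needs destination alpha.
 */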
static bool
format_is_int8(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);
	int channel = vk_format_get_first_non_void_channel(format);

	return channel >= 0 && desc->channel[channel].pure_integer &&
	       desc->channel[channel].size == 8;
}

static bool
format_is_int10(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);

	if (desc->nr_channels != 4)
		return false;
	for (unsigned i = 0; i < 4; i++) {
		if (desc->channel[i].pure_integer && desc->channel[i].size == 10)
			return true;
	}
	return false;
}
static void
radv_pipeline_compute_spi_color_formats(struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					struct radv_blend_state *blend)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned col_format = 0, is_int8 = 0, is_int10 = 0;
	unsigned num_targets;

	for (unsigned i = 0; i < (blend->single_cb_enable ? 1 : subpass->color_count); ++i) {
		unsigned cf;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED ||
		    !(blend->cb_target_mask & (0xfu << (i * 4)))) {
			cf = V_028714_SPI_SHADER_ZERO;
		} else {
			struct radv_render_pass_attachment *attachment = pass->attachments + subpass->color_attachments[i].attachment;
			bool blend_enable =
				blend->blend_enable_4bit & (0xfu << (i * 4));

			cf = radv_choose_spi_color_format(attachment->format,
							  blend_enable,
							  blend->need_src_alpha & (1 << i));

			if (format_is_int8(attachment->format))
				is_int8 |= 1 << i;
			if (format_is_int10(attachment->format))
				is_int10 |= 1 << i;
		}

		col_format |= cf << (4 * i);
	}

	if (!(col_format & 0xf) && blend->need_src_alpha & (1 << 0)) {
		/* When a subpass doesn't have any color attachments, write the
		 * alpha channel of MRT0 when alpha coverage is enabled because
		 * the depth attachment needs it.
		 */
		col_format |= V_028714_SPI_SHADER_32_AR;
	}

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	num_targets = (util_last_bit(col_format) + 3) / 4;
	for (unsigned i = 0; i < num_targets; i++) {
		if (!(col_format & (0xf << (i * 4)))) {
			col_format |= V_028714_SPI_SHADER_32_R << (i * 4);
		}
	}

	/* The output for dual source blending should have the same format as
	 * the first output.
	 */
	if (blend->mrt0_is_dual_src)
		col_format |= (col_format & 0xf) << 4;

	blend->spi_shader_col_format = col_format;
	blend->col_format_is_int8 = is_int8;
	blend->col_format_is_int10 = is_int10;
}
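/* Note: col_format packs one 4-bit SPI export format per MRT (MRT i lives in
 * bits [4*i+3 : 4*i]), which is why unused targets that precede a used one
 * are patched to SPI_SHADER_32_R above instead of being left as zero.
 */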
/**
 * Ordered so that for each i,
 * radv_format_meta_fs_key(radv_fs_key_format_exemplars[i]) == i.
 */
const VkFormat radv_fs_key_format_exemplars[NUM_META_FS_KEYS] = {
	VK_FORMAT_R32_SFLOAT,
	VK_FORMAT_R32G32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UNORM,
	VK_FORMAT_R16G16B16A16_UNORM,
	VK_FORMAT_R16G16B16A16_SNORM,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_A2R10G10B10_UINT_PACK32,
	VK_FORMAT_A2R10G10B10_SINT_PACK32,
};

unsigned radv_format_meta_fs_key(VkFormat format)
{
	unsigned col_format = radv_choose_spi_color_format(format, false, false);

	assert(col_format != V_028714_SPI_SHADER_32_AR);
	if (col_format >= V_028714_SPI_SHADER_32_AR)
		--col_format; /* Skip V_028714_SPI_SHADER_32_AR since there is no such VkFormat */

	--col_format; /* Skip V_028714_SPI_SHADER_ZERO */
	bool is_int8 = format_is_int8(format);
	bool is_int10 = format_is_int10(format);

	return col_format + (is_int8 ? 3 : is_int10 ? 5 : 0);
}
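/* Note: the +3 / +5 offsets select the int8 / int10 variants of a given
 * export format; radv_fs_key_format_exemplars above is laid out so that this
 * arithmetic maps each exemplar back to its own index.
 */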
static void
radv_blend_check_commutativity(struct radv_blend_state *blend,
			       VkBlendOp op, VkBlendFactor src,
			       VkBlendFactor dst, unsigned chanmask)
{
	/* Src factor is allowed when it does not depend on Dst. */
	static const uint32_t src_allowed =
		(1u << VK_BLEND_FACTOR_ONE) |
		(1u << VK_BLEND_FACTOR_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA_SATURATE) |
		(1u << VK_BLEND_FACTOR_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC1_ALPHA) |
		(1u << VK_BLEND_FACTOR_ZERO) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);

	if (dst == VK_BLEND_FACTOR_ONE &&
	    (src_allowed & (1u << src))) {
		/* Addition is commutative, but floating point addition isn't
		 * associative: subtle changes can be introduced via different
		 * rounding. Be conservative, only enable for min and max.
		 */
		if (op == VK_BLEND_OP_MAX || op == VK_BLEND_OP_MIN)
			blend->commutative_4bit |= chanmask;
	}
}
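/* Note: chanmask is a nibble-aligned channel mask for the MRT being
 * processed, so commutative_4bit ends up using the same one-nibble-per-target
 * layout as the other *_4bit fields in struct radv_blend_state.
 */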
static struct radv_blend_state
radv_pipeline_init_blend_state(struct radv_pipeline *pipeline,
			       const VkGraphicsPipelineCreateInfo *pCreateInfo,
			       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineColorBlendStateCreateInfo *vkblend = radv_pipeline_get_color_blend_state(pCreateInfo);
	const VkPipelineMultisampleStateCreateInfo *vkms = radv_pipeline_get_multisample_state(pCreateInfo);
	struct radv_blend_state blend = {0};
	unsigned mode = V_028808_CB_NORMAL;
	int i;

	if (extra && extra->custom_blend_mode) {
		blend.single_cb_enable = true;
		mode = extra->custom_blend_mode;
	}

	blend.cb_color_control = 0;
	if (vkblend) {
		if (vkblend->logicOpEnable)
			blend.cb_color_control |= S_028808_ROP3(si_translate_blend_logic_op(vkblend->logicOp));
		else
			blend.cb_color_control |= S_028808_ROP3(V_028808_ROP3_COPY);
	}

	blend.db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(3) |
		S_028B70_ALPHA_TO_MASK_OFFSET1(1) |
		S_028B70_ALPHA_TO_MASK_OFFSET2(0) |
		S_028B70_ALPHA_TO_MASK_OFFSET3(2) |
		S_028B70_OFFSET_ROUND(1);

	if (vkms && vkms->alphaToCoverageEnable) {
		blend.db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);
		blend.need_src_alpha |= 0x1;
	}

	blend.cb_target_mask = 0;
	if (vkblend) {
		for (i = 0; i < vkblend->attachmentCount; i++) {
			const VkPipelineColorBlendAttachmentState *att = &vkblend->pAttachments[i];
			unsigned blend_cntl = 0;
			unsigned srcRGB_opt, dstRGB_opt, srcA_opt, dstA_opt;
			VkBlendOp eqRGB = att->colorBlendOp;
			VkBlendFactor srcRGB = att->srcColorBlendFactor;
			VkBlendFactor dstRGB = att->dstColorBlendFactor;
			VkBlendOp eqA = att->alphaBlendOp;
			VkBlendFactor srcA = att->srcAlphaBlendFactor;
			VkBlendFactor dstA = att->dstAlphaBlendFactor;

			blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);

			if (!att->colorWriteMask)
				continue;

			blend.cb_target_mask |= (unsigned)att->colorWriteMask << (4 * i);
			blend.cb_target_enabled_4bit |= 0xf << (4 * i);
			if (!att->blendEnable) {
				blend.cb_blend_control[i] = blend_cntl;
				continue;
			}

			if (is_dual_src(srcRGB) || is_dual_src(dstRGB) || is_dual_src(srcA) || is_dual_src(dstA))
				if (i == 0)
					blend.mrt0_is_dual_src = true;

			if (eqRGB == VK_BLEND_OP_MIN || eqRGB == VK_BLEND_OP_MAX) {
				srcRGB = VK_BLEND_FACTOR_ONE;
				dstRGB = VK_BLEND_FACTOR_ONE;
			}
			if (eqA == VK_BLEND_OP_MIN || eqA == VK_BLEND_OP_MAX) {
				srcA = VK_BLEND_FACTOR_ONE;
				dstA = VK_BLEND_FACTOR_ONE;
			}

			radv_blend_check_commutativity(&blend, eqRGB, srcRGB, dstRGB,
						       0x7 << (4 * i));
			radv_blend_check_commutativity(&blend, eqA, srcA, dstA,
						       0x8 << (4 * i));

			/* Blending optimizations for RB+.
			 * These transformations don't change the behavior.
			 *
			 * First, get rid of DST in the blend factors:
			 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
			 */
			si_blend_remove_dst(&eqRGB, &srcRGB, &dstRGB,
					    VK_BLEND_FACTOR_DST_COLOR,
					    VK_BLEND_FACTOR_SRC_COLOR);

			si_blend_remove_dst(&eqA, &srcA, &dstA,
					    VK_BLEND_FACTOR_DST_COLOR,
					    VK_BLEND_FACTOR_SRC_COLOR);

			si_blend_remove_dst(&eqA, &srcA, &dstA,
					    VK_BLEND_FACTOR_DST_ALPHA,
					    VK_BLEND_FACTOR_SRC_ALPHA);

			/* Look up the ideal settings from tables. */
			srcRGB_opt = si_translate_blend_opt_factor(srcRGB, false);
			dstRGB_opt = si_translate_blend_opt_factor(dstRGB, false);
			srcA_opt = si_translate_blend_opt_factor(srcA, true);
			dstA_opt = si_translate_blend_opt_factor(dstA, true);

			/* Handle interdependencies. */
			if (si_blend_factor_uses_dst(srcRGB))
				dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
			if (si_blend_factor_uses_dst(srcA))
				dstA_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;

			if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE &&
			    (dstRGB == VK_BLEND_FACTOR_ZERO ||
			     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
			     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE))
				dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;

			/* Set the final value. */
			blend.sx_mrt_blend_opt[i] =
				S_028760_COLOR_SRC_OPT(srcRGB_opt) |
				S_028760_COLOR_DST_OPT(dstRGB_opt) |
				S_028760_COLOR_COMB_FCN(si_translate_blend_opt_function(eqRGB)) |
				S_028760_ALPHA_SRC_OPT(srcA_opt) |
				S_028760_ALPHA_DST_OPT(dstA_opt) |
				S_028760_ALPHA_COMB_FCN(si_translate_blend_opt_function(eqA));
			blend_cntl |= S_028780_ENABLE(1);

			blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
			blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(srcRGB));
			blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(dstRGB));
			if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
				blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
				blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
				blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(srcA));
				blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(dstA));
			}
			blend.cb_blend_control[i] = blend_cntl;

			blend.blend_enable_4bit |= 0xfu << (i * 4);

			if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
			    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
			    srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
			    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
			    srcRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ||
			    dstRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
				blend.need_src_alpha |= 1 << i;
		}
		for (i = vkblend->attachmentCount; i < 8; i++) {
			blend.cb_blend_control[i] = 0;
			blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);
		}
	}

	if (pipeline->device->physical_device->rad_info.has_rbplus) {
		/* Disable RB+ blend optimizations for dual source blending. */
		if (blend.mrt0_is_dual_src) {
			for (i = 0; i < 8; i++) {
				blend.sx_mrt_blend_opt[i] =
					S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_NONE) |
					S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_NONE);
			}
		}

		/* RB+ doesn't work with dual source blending, logic op and
		 * RESOLVE.
		 */
		if (blend.mrt0_is_dual_src ||
		    (vkblend && vkblend->logicOpEnable) ||
		    mode == V_028808_CB_RESOLVE)
			blend.cb_color_control |= S_028808_DISABLE_DUAL_QUAD(1);
	}

	if (blend.cb_target_mask)
		blend.cb_color_control |= S_028808_MODE(mode);
	else
		blend.cb_color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	radv_pipeline_compute_spi_color_formats(pipeline, pCreateInfo, &blend);
	return blend;
}
static uint32_t si_translate_fill(VkPolygonMode func)
{
	switch (func) {
	case VK_POLYGON_MODE_FILL:
		return V_028814_X_DRAW_TRIANGLES;
	case VK_POLYGON_MODE_LINE:
		return V_028814_X_DRAW_LINES;
	case VK_POLYGON_MODE_POINT:
		return V_028814_X_DRAW_POINTS;
	default:
		assert(0);
		return V_028814_X_DRAW_POINTS;
	}
}
static uint8_t radv_pipeline_get_ps_iter_samples(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
	uint32_t ps_iter_samples = 1;
	uint32_t num_samples;

	/* From the Vulkan 1.1.129 spec, 26.7. Sample Shading:
	 *
	 * "If the VK_AMD_mixed_attachment_samples extension is enabled and the
	 *  subpass uses color attachments, totalSamples is the number of
	 *  samples of the color attachments. Otherwise, totalSamples is the
	 *  value of VkPipelineMultisampleStateCreateInfo::rasterizationSamples
	 *  specified at pipeline creation time."
	 */
	if (subpass->has_color_att) {
		num_samples = subpass->color_sample_count;
	} else {
		num_samples = vkms->rasterizationSamples;
	}

	if (vkms->sampleShadingEnable) {
		ps_iter_samples = ceilf(vkms->minSampleShading * num_samples);
		ps_iter_samples = util_next_power_of_two(ps_iter_samples);
	}
	return ps_iter_samples;
}
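/* Worked example: with minSampleShading = 0.5 and 8 total samples,
 * ceilf(0.5 * 8) = 4 and util_next_power_of_two(4) = 4, so ps_iter_samples
 * is 4 (the fragment shader is invoked 4 times per pixel).
 */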
static bool
radv_is_depth_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->depthTestEnable &&
	       pCreateInfo->depthWriteEnable &&
	       pCreateInfo->depthCompareOp != VK_COMPARE_OP_NEVER;
}

static bool
radv_writes_stencil(const VkStencilOpState *state)
{
	return state->writeMask &&
	       (state->failOp != VK_STENCIL_OP_KEEP ||
		state->passOp != VK_STENCIL_OP_KEEP ||
		state->depthFailOp != VK_STENCIL_OP_KEEP);
}

static bool
radv_is_stencil_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->stencilTestEnable &&
	       (radv_writes_stencil(&pCreateInfo->front) ||
		radv_writes_stencil(&pCreateInfo->back));
}

static bool
radv_is_ds_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return radv_is_depth_write_enabled(pCreateInfo) ||
	       radv_is_stencil_write_enabled(pCreateInfo);
}

static bool
radv_order_invariant_stencil_op(VkStencilOp op)
{
	/* REPLACE is normally order invariant, except when the stencil
	 * reference value is written by the fragment shader. Tracking this
	 * interaction does not seem worth the effort, so be conservative.
	 */
	return op != VK_STENCIL_OP_INCREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_DECREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_REPLACE;
}

static bool
radv_order_invariant_stencil_state(const VkStencilOpState *state)
{
	/* Compute whether, assuming Z writes are disabled, this stencil state
	 * is order invariant in the sense that the set of passing fragments as
	 * well as the final stencil buffer result does not depend on the order
	 * of fragments.
	 */
	return !state->writeMask ||
	       /* The following assumes that Z writes are disabled. */
	       (state->compareOp == VK_COMPARE_OP_ALWAYS &&
		radv_order_invariant_stencil_op(state->passOp) &&
		radv_order_invariant_stencil_op(state->depthFailOp)) ||
	       (state->compareOp == VK_COMPARE_OP_NEVER &&
		radv_order_invariant_stencil_op(state->failOp));
}
static bool
radv_pipeline_has_dynamic_ds_states(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	VkDynamicState ds_states[] = {
		VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,
		VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
		VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT,
		VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT,
		VK_DYNAMIC_STATE_STENCIL_OP_EXT,
	};

	if (pCreateInfo->pDynamicState) {
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t i = 0; i < count; i++) {
			for (uint32_t j = 0; j < ARRAY_SIZE(ds_states); j++) {
				if (pCreateInfo->pDynamicState->pDynamicStates[i] == ds_states[j])
					return true;
			}
		}
	}

	return false;
}
static bool
radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
				struct radv_blend_state *blend,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	const VkPipelineDepthStencilStateCreateInfo *vkds = radv_pipeline_get_depth_stencil_state(pCreateInfo);
	const VkPipelineColorBlendStateCreateInfo *vkblend = radv_pipeline_get_color_blend_state(pCreateInfo);
	unsigned colormask = blend->cb_target_enabled_4bit;

	if (!pipeline->device->physical_device->out_of_order_rast_allowed)
		return false;

	/* Be conservative if a logic operation is enabled with color buffers. */
	if (colormask && vkblend && vkblend->logicOpEnable)
		return false;

	/* Be conservative if an extended dynamic depth/stencil state is
	 * enabled because the driver can't update out-of-order rasterization
	 * dynamically.
	 */
	if (radv_pipeline_has_dynamic_ds_states(pCreateInfo))
		return false;

	/* Default depth/stencil invariance when no attachment is bound. */
	struct radv_dsa_order_invariance dsa_order_invariant = {
		.zs = true, .pass_set = true
	};

	if (vkds) {
		struct radv_render_pass_attachment *attachment =
			pass->attachments + subpass->depth_stencil_attachment->attachment;
		bool has_stencil = vk_format_is_stencil(attachment->format);
		struct radv_dsa_order_invariance order_invariance[2];
		struct radv_shader_variant *ps =
			pipeline->shaders[MESA_SHADER_FRAGMENT];

		/* Compute depth/stencil order invariance in order to know if
		 * it's safe to enable out-of-order.
		 */
		bool zfunc_is_ordered =
			vkds->depthCompareOp == VK_COMPARE_OP_NEVER ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS_OR_EQUAL ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER_OR_EQUAL;

		bool nozwrite_and_order_invariant_stencil =
			!radv_is_ds_write_enabled(vkds) ||
			(!radv_is_depth_write_enabled(vkds) &&
			 radv_order_invariant_stencil_state(&vkds->front) &&
			 radv_order_invariant_stencil_state(&vkds->back));

		order_invariance[1].zs =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 zfunc_is_ordered);
		order_invariance[0].zs =
			!radv_is_depth_write_enabled(vkds) || zfunc_is_ordered;

		order_invariance[1].pass_set =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 (vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			  vkds->depthCompareOp == VK_COMPARE_OP_NEVER));
		order_invariance[0].pass_set =
			!radv_is_depth_write_enabled(vkds) ||
			(vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			 vkds->depthCompareOp == VK_COMPARE_OP_NEVER);

		dsa_order_invariant = order_invariance[has_stencil];
		if (!dsa_order_invariant.zs)
			return false;

		/* The set of PS invocations is always order invariant,
		 * except when early Z/S tests are requested.
		 */
		if (ps &&
		    ps->info.ps.writes_memory &&
		    ps->info.ps.early_fragment_test &&
		    !dsa_order_invariant.pass_set)
			return false;

		/* Determine if out-of-order rasterization should be disabled
		 * when occlusion queries are used.
		 */
		pipeline->graphics.disable_out_of_order_rast_for_occlusion =
			!dsa_order_invariant.pass_set;
	}

	/* No color buffers are enabled for writing. */
	if (!colormask)
		return true;

	unsigned blendmask = colormask & blend->blend_enable_4bit;

	if (blendmask) {
		/* Only commutative blending. */
		if (blendmask & ~blend->commutative_4bit)
			return false;

		if (!dsa_order_invariant.pass_set)
			return false;
	}

	if (colormask & ~blendmask)
		return false;

	return true;
}
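/* Summary of the checks above: out-of-order rasterization is only reported
 * as safe when the depth/stencil result is order-invariant and, if any color
 * targets are written, every written target is blended with a commutative
 * equation (written-but-unblended targets disqualify it).
 */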
static const VkConservativeRasterizationModeEXT
radv_get_conservative_raster_mode(const VkPipelineRasterizationStateCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationConservativeStateCreateInfoEXT *conservative_raster =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT);

	if (!conservative_raster)
		return VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT;
	return conservative_raster->conservativeRasterizationMode;
}
static void
radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
				     struct radv_blend_state *blend,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = radv_pipeline_get_multisample_state(pCreateInfo);
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	unsigned num_tile_pipes = pipeline->device->physical_device->rad_info.num_tile_pipes;
	const VkConservativeRasterizationModeEXT mode =
		radv_get_conservative_raster_mode(pCreateInfo->pRasterizationState);
	bool out_of_order_rast = false;
	int ps_iter_samples = 1;
	uint32_t mask = 0xffff;

	if (vkms) {
		ms->num_samples = vkms->rasterizationSamples;

		/* From the Vulkan 1.1.129 spec, 26.7. Sample Shading:
		 *
		 * "Sample shading is enabled for a graphics pipeline:
		 *
		 * - If the interface of the fragment shader entry point of the
		 *   graphics pipeline includes an input variable decorated
		 *   with SampleId or SamplePosition. In this case
		 *   minSampleShadingFactor takes the value 1.0.
		 * - Else if the sampleShadingEnable member of the
		 *   VkPipelineMultisampleStateCreateInfo structure specified
		 *   when creating the graphics pipeline is set to VK_TRUE. In
		 *   this case minSampleShadingFactor takes the value of
		 *   VkPipelineMultisampleStateCreateInfo::minSampleShading.
		 *
		 * Otherwise, sample shading is considered disabled."
		 */
		if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.force_persample) {
			ps_iter_samples = ms->num_samples;
		} else {
			ps_iter_samples = radv_pipeline_get_ps_iter_samples(pCreateInfo);
		}
	} else {
		ms->num_samples = 1;
	}

	const struct VkPipelineRasterizationStateRasterizationOrderAMD *raster_order =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext, PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD);
	if (raster_order && raster_order->rasterizationOrder == VK_RASTERIZATION_ORDER_RELAXED_AMD) {
		/* Out-of-order rasterization is explicitly enabled by the
		 * application.
		 */
		out_of_order_rast = true;
	} else {
		/* Determine if the driver can enable out-of-order
		 * rasterization internally.
		 */
		out_of_order_rast =
			radv_pipeline_out_of_order_rast(pipeline, blend, pCreateInfo);
	}

	ms->pa_sc_aa_config = 0;
	ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
		      S_028804_INCOHERENT_EQAA_READS(1) |
		      S_028804_INTERPOLATE_COMP_Z(1) |
		      S_028804_STATIC_ANCHOR_ASSOCIATIONS(1);

	/* Adjust MSAA state if conservative rasterization is enabled. */
	if (mode != VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT) {
		ms->pa_sc_aa_config |= S_028BE0_AA_MASK_CENTROID_DTMN(1);

		ms->db_eqaa |= S_028804_ENABLE_POSTZ_OVERRASTERIZATION(1) |
			       S_028804_OVERRASTERIZATION_AMOUNT(4);
	}

	ms->pa_sc_mode_cntl_1 =
		S_028A4C_WALK_FENCE_ENABLE(1) | //TODO linear dst fixes
		S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
		S_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(out_of_order_rast) |
		S_028A4C_OUT_OF_ORDER_WATER_MARK(0x7) |
		S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1) |
		S_028A4C_SUPERTILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_TILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE(1) |
		S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		S_028A4C_FORCE_EOV_REZ_ENABLE(1);
	ms->pa_sc_mode_cntl_0 = S_028A48_ALTERNATE_RBS_PER_TILE(pipeline->device->physical_device->rad_info.chip_class >= GFX9) |
				S_028A48_VPORT_SCISSOR_ENABLE(1);

	const VkPipelineRasterizationLineStateCreateInfoEXT *rast_line =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
				     PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
	if (rast_line) {
		ms->pa_sc_mode_cntl_0 |= S_028A48_LINE_STIPPLE_ENABLE(rast_line->stippledLineEnable);
		if (rast_line->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT) {
			/* From the Vulkan spec 1.1.129:
			 *
			 * "When VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT lines
			 *  are being rasterized, sample locations may all be
			 *  treated as being at the pixel center (this may
			 *  affect attribute and depth interpolation)."
			 */
			ms->num_samples = 1;
		}
	}

	if (ms->num_samples > 1) {
		RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
		struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
		uint32_t z_samples = subpass->depth_stencil_attachment ? subpass->depth_sample_count : ms->num_samples;
		unsigned log_samples = util_logbase2(ms->num_samples);
		unsigned log_z_samples = util_logbase2(z_samples);
		unsigned log_ps_iter_samples = util_logbase2(ps_iter_samples);
		ms->pa_sc_mode_cntl_0 |= S_028A48_MSAA_ENABLE(1);
		ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_z_samples) |
			       S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
			       S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
			       S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
		ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
				       S_028BE0_MAX_SAMPLE_DIST(radv_get_default_max_sample_dist(log_samples)) |
				       S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples) | /* CM_R_028BE0_PA_SC_AA_CONFIG */
				       S_028BE0_COVERED_CENTROID_IS_CENTER(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3);
		ms->pa_sc_mode_cntl_1 |= S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
		if (ps_iter_samples > 1)
			pipeline->graphics.spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
	}

	if (vkms && vkms->pSampleMask) {
		mask = vkms->pSampleMask[0] & 0xffff;
	}

	ms->pa_sc_aa_mask[0] = mask | (mask << 16);
	ms->pa_sc_aa_mask[1] = mask | (mask << 16);
}
static bool
radv_prim_can_use_guardband(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return false;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return true;
	default:
		unreachable("unhandled primitive type");
	}
}

static uint32_t
si_conv_gl_prim_to_gs_out(unsigned gl_prim)
{
	switch (gl_prim) {
	case 0: /* GL_POINTS */
		return V_028A6C_POINTLIST;
	case 1: /* GL_LINES */
	case 3: /* GL_LINE_STRIP */
	case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
	case 0x8E7A: /* GL_ISOLINES */
		return V_028A6C_LINESTRIP;

	case 4: /* GL_TRIANGLES */
	case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
	case 5: /* GL_TRIANGLE_STRIP */
	case 7: /* GL_QUADS */
		return V_028A6C_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}

static uint32_t
si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_028A6C_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_028A6C_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_028A6C_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}
static unsigned radv_dynamic_state_mask(VkDynamicState state)
{
	switch (state) {
	case VK_DYNAMIC_STATE_VIEWPORT:
	case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:
		return RADV_DYNAMIC_VIEWPORT;
	case VK_DYNAMIC_STATE_SCISSOR:
	case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:
		return RADV_DYNAMIC_SCISSOR;
	case VK_DYNAMIC_STATE_LINE_WIDTH:
		return RADV_DYNAMIC_LINE_WIDTH;
	case VK_DYNAMIC_STATE_DEPTH_BIAS:
		return RADV_DYNAMIC_DEPTH_BIAS;
	case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
		return RADV_DYNAMIC_BLEND_CONSTANTS;
	case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
		return RADV_DYNAMIC_DEPTH_BOUNDS;
	case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
		return RADV_DYNAMIC_STENCIL_COMPARE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
		return RADV_DYNAMIC_STENCIL_WRITE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
		return RADV_DYNAMIC_STENCIL_REFERENCE;
	case VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT:
		return RADV_DYNAMIC_DISCARD_RECTANGLE;
	case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
		return RADV_DYNAMIC_SAMPLE_LOCATIONS;
	case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT:
		return RADV_DYNAMIC_LINE_STIPPLE;
	case VK_DYNAMIC_STATE_CULL_MODE_EXT:
		return RADV_DYNAMIC_CULL_MODE;
	case VK_DYNAMIC_STATE_FRONT_FACE_EXT:
		return RADV_DYNAMIC_FRONT_FACE;
	case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
		return RADV_DYNAMIC_PRIMITIVE_TOPOLOGY;
	case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT:
		return RADV_DYNAMIC_DEPTH_TEST_ENABLE;
	case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT:
		return RADV_DYNAMIC_DEPTH_WRITE_ENABLE;
	case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT:
		return RADV_DYNAMIC_DEPTH_COMPARE_OP;
	case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT:
		return RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE;
	case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT:
		return RADV_DYNAMIC_STENCIL_TEST_ENABLE;
	case VK_DYNAMIC_STATE_STENCIL_OP_EXT:
		return RADV_DYNAMIC_STENCIL_OP;
	case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT:
		return RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE;
	default:
		unreachable("Unhandled dynamic state");
	}
}
static uint32_t radv_pipeline_needed_dynamic_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t states = RADV_DYNAMIC_ALL;

	/* If rasterization is disabled we do not care about any of the
	 * dynamic states, since they are all rasterization related only,
	 * except primitive topology and vertex binding stride.
	 */
	if (pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
		return RADV_DYNAMIC_PRIMITIVE_TOPOLOGY |
		       RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE;

	if (!pCreateInfo->pRasterizationState->depthBiasEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BIAS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->depthBoundsTestEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BOUNDS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->stencilTestEnable)
		states &= ~(RADV_DYNAMIC_STENCIL_COMPARE_MASK |
			    RADV_DYNAMIC_STENCIL_WRITE_MASK |
			    RADV_DYNAMIC_STENCIL_REFERENCE);

	if (!vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_DISCARD_RECTANGLE;

	if (!pCreateInfo->pMultisampleState ||
	    !vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
				  PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_SAMPLE_LOCATIONS;

	if (!pCreateInfo->pRasterizationState ||
	    !vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
				  PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_LINE_STIPPLE;

	/* TODO: blend constants & line width. */

	return states;
}
static struct radv_ia_multi_vgt_param_helpers
radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline)
{
	struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param = {0};
	const struct radv_device *device = pipeline->device;

	if (radv_pipeline_has_tess(pipeline))
		ia_multi_vgt_param.primgroup_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
	else if (radv_pipeline_has_gs(pipeline))
		ia_multi_vgt_param.primgroup_size = 64;
	else
		ia_multi_vgt_param.primgroup_size = 128; /* recommended without a GS */

	/* GS requirement. */
	ia_multi_vgt_param.partial_es_wave = false;
	if (radv_pipeline_has_gs(pipeline) && device->physical_device->rad_info.chip_class <= GFX8)
		if (SI_GS_PER_ES / ia_multi_vgt_param.primgroup_size >= pipeline->device->gs_table_depth - 3)
			ia_multi_vgt_param.partial_es_wave = true;

	ia_multi_vgt_param.ia_switch_on_eoi = false;
	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.prim_id_input)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_gs(pipeline) &&
	    pipeline->shaders[MESA_SHADER_GEOMETRY]->info.uses_prim_id)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_tess(pipeline)) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.uses_prim_id ||
		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.uses_prim_id)
			ia_multi_vgt_param.ia_switch_on_eoi = true;
	}

	ia_multi_vgt_param.partial_vs_wave = false;
	if (radv_pipeline_has_tess(pipeline)) {
		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((device->physical_device->rad_info.family == CHIP_TAHITI ||
		     device->physical_device->rad_info.family == CHIP_PITCAIRN ||
		     device->physical_device->rad_info.family == CHIP_BONAIRE) &&
		    radv_pipeline_has_gs(pipeline))
			ia_multi_vgt_param.partial_vs_wave = true;
		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (device->physical_device->rad_info.has_distributed_tess) {
			if (radv_pipeline_has_gs(pipeline)) {
				if (device->physical_device->rad_info.chip_class <= GFX8)
					ia_multi_vgt_param.partial_es_wave = true;
			} else {
				ia_multi_vgt_param.partial_vs_wave = true;
			}
		}
	}

	if (radv_pipeline_has_gs(pipeline)) {
		/* On these chips there is the possibility of a hang if the
		 * pipeline uses a GS and partial_vs_wave is not set.
		 *
		 * This mostly does not hit 4-SE chips, as those typically set
		 * ia_switch_on_eoi and then partial_vs_wave is set for pipelines
		 * with GS due to another workaround.
		 *
		 * Reproducer: https://bugs.freedesktop.org/show_bug.cgi?id=109242
		 */
		if (device->physical_device->rad_info.family == CHIP_TONGA ||
		    device->physical_device->rad_info.family == CHIP_FIJI ||
		    device->physical_device->rad_info.family == CHIP_POLARIS10 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS11 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS12 ||
		    device->physical_device->rad_info.family == CHIP_VEGAM) {
			ia_multi_vgt_param.partial_vs_wave = true;
		}
	}

	ia_multi_vgt_param.base =
		S_028AA8_PRIMGROUP_SIZE(ia_multi_vgt_param.primgroup_size - 1) |
		/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
		S_028AA8_MAX_PRIMGRP_IN_WAVE(device->physical_device->rad_info.chip_class == GFX8 ? 2 : 0) |
		S_030960_EN_INST_OPT_BASIC(device->physical_device->rad_info.chip_class >= GFX9) |
		S_030960_EN_INST_OPT_ADV(device->physical_device->rad_info.chip_class >= GFX9);

	return ia_multi_vgt_param;
}
static void
radv_pipeline_init_input_assembly_state(struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineInputAssemblyStateCreateInfo *ia_state = pCreateInfo->pInputAssemblyState;
	struct radv_shader_variant *tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];
	struct radv_shader_variant *gs = pipeline->shaders[MESA_SHADER_GEOMETRY];

	pipeline->graphics.prim_restart_enable = !!ia_state->primitiveRestartEnable;
	pipeline->graphics.can_use_guardband = radv_prim_can_use_guardband(ia_state->topology);

	if (radv_pipeline_has_gs(pipeline)) {
		if (si_conv_gl_prim_to_gs_out(gs->info.gs.output_prim) == V_028A6C_TRISTRIP)
			pipeline->graphics.can_use_guardband = true;
	} else if (radv_pipeline_has_tess(pipeline)) {
		if (!tes->info.tes.point_mode &&
		    si_conv_gl_prim_to_gs_out(tes->info.tes.primitive_mode) == V_028A6C_TRISTRIP)
			pipeline->graphics.can_use_guardband = true;
	}

	if (extra && extra->use_rectlist) {
		pipeline->graphics.can_use_guardband = true;
	}

	pipeline->graphics.ia_multi_vgt_param =
		radv_compute_ia_multi_vgt_param_helpers(pipeline);
}
static void
radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
				 const VkGraphicsPipelineCreateInfo *pCreateInfo,
				 const struct radv_graphics_pipeline_create_info *extra)
{
	uint32_t needed_states = radv_pipeline_needed_dynamic_state(pCreateInfo);
	uint32_t states = needed_states;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

	pipeline->dynamic_state = default_dynamic_state;
	pipeline->graphics.needed_dynamic_state = needed_states;

	if (pCreateInfo->pDynamicState) {
		/* Remove all of the states that are marked as dynamic */
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t s = 0; s < count; s++)
			states &= ~radv_dynamic_state_mask(pCreateInfo->pDynamicState->pDynamicStates[s]);
	}

	struct radv_dynamic_state *dynamic = &pipeline->dynamic_state;

	if (needed_states & RADV_DYNAMIC_VIEWPORT) {
		assert(pCreateInfo->pViewportState);

		dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
		if (states & RADV_DYNAMIC_VIEWPORT) {
			typed_memcpy(dynamic->viewport.viewports,
				     pCreateInfo->pViewportState->pViewports,
				     pCreateInfo->pViewportState->viewportCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SCISSOR) {
		dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
		if (states & RADV_DYNAMIC_SCISSOR) {
			typed_memcpy(dynamic->scissor.scissors,
				     pCreateInfo->pViewportState->pScissors,
				     pCreateInfo->pViewportState->scissorCount);
		}
	}

	if (states & RADV_DYNAMIC_LINE_WIDTH) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
	}

	if (states & RADV_DYNAMIC_DEPTH_BIAS) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->depth_bias.bias =
			pCreateInfo->pRasterizationState->depthBiasConstantFactor;
		dynamic->depth_bias.clamp =
			pCreateInfo->pRasterizationState->depthBiasClamp;
		dynamic->depth_bias.slope =
			pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
	}

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pColorBlendState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is
	 *    created against does not use any color attachments.
	 */
	if (subpass->has_color_att && states & RADV_DYNAMIC_BLEND_CONSTANTS) {
		assert(pCreateInfo->pColorBlendState);
		typed_memcpy(dynamic->blend_constants,
			     pCreateInfo->pColorBlendState->blendConstants, 4);
	}

	if (states & RADV_DYNAMIC_CULL_MODE) {
		dynamic->cull_mode =
			pCreateInfo->pRasterizationState->cullMode;
	}

	if (states & RADV_DYNAMIC_FRONT_FACE) {
		dynamic->front_face =
			pCreateInfo->pRasterizationState->frontFace;
	}

	if (states & RADV_DYNAMIC_PRIMITIVE_TOPOLOGY) {
		dynamic->primitive_topology =
			si_translate_prim(pCreateInfo->pInputAssemblyState->topology);
		if (extra && extra->use_rectlist) {
			dynamic->primitive_topology = V_008958_DI_PT_RECTLIST;
		}
	}

	/* If there is no depthstencil attachment, then don't read
	 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
	 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
	 * no need to override the depthstencil defaults in
	 * radv_pipeline::dynamic_state when there is no depthstencil attachment.
	 *
	 * Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pDepthStencilState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is created
	 *    against does not use a depth/stencil attachment.
	 */
	if (needed_states && subpass->depth_stencil_attachment) {
		assert(pCreateInfo->pDepthStencilState);

		if (states & RADV_DYNAMIC_DEPTH_BOUNDS) {
			dynamic->depth_bounds.min =
				pCreateInfo->pDepthStencilState->minDepthBounds;
			dynamic->depth_bounds.max =
				pCreateInfo->pDepthStencilState->maxDepthBounds;
		}

		if (states & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
			dynamic->stencil_compare_mask.front =
				pCreateInfo->pDepthStencilState->front.compareMask;
			dynamic->stencil_compare_mask.back =
				pCreateInfo->pDepthStencilState->back.compareMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
			dynamic->stencil_write_mask.front =
				pCreateInfo->pDepthStencilState->front.writeMask;
			dynamic->stencil_write_mask.back =
				pCreateInfo->pDepthStencilState->back.writeMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_REFERENCE) {
			dynamic->stencil_reference.front =
				pCreateInfo->pDepthStencilState->front.reference;
			dynamic->stencil_reference.back =
				pCreateInfo->pDepthStencilState->back.reference;
		}

		if (states & RADV_DYNAMIC_DEPTH_TEST_ENABLE) {
			dynamic->depth_test_enable =
				pCreateInfo->pDepthStencilState->depthTestEnable;
		}

		if (states & RADV_DYNAMIC_DEPTH_WRITE_ENABLE) {
			dynamic->depth_write_enable =
				pCreateInfo->pDepthStencilState->depthWriteEnable;
		}

		if (states & RADV_DYNAMIC_DEPTH_COMPARE_OP) {
			dynamic->depth_compare_op =
				pCreateInfo->pDepthStencilState->depthCompareOp;
		}

		if (states & RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE) {
			dynamic->depth_bounds_test_enable =
				pCreateInfo->pDepthStencilState->depthBoundsTestEnable;
		}

		if (states & RADV_DYNAMIC_STENCIL_TEST_ENABLE) {
			dynamic->stencil_test_enable =
				pCreateInfo->pDepthStencilState->stencilTestEnable;
		}

		if (states & RADV_DYNAMIC_STENCIL_OP) {
			dynamic->stencil_op.front.compare_op =
				pCreateInfo->pDepthStencilState->front.compareOp;
			dynamic->stencil_op.front.fail_op =
				pCreateInfo->pDepthStencilState->front.failOp;
			dynamic->stencil_op.front.pass_op =
				pCreateInfo->pDepthStencilState->front.passOp;
			dynamic->stencil_op.front.depth_fail_op =
				pCreateInfo->pDepthStencilState->front.depthFailOp;

			dynamic->stencil_op.back.compare_op =
				pCreateInfo->pDepthStencilState->back.compareOp;
			dynamic->stencil_op.back.fail_op =
				pCreateInfo->pDepthStencilState->back.failOp;
			dynamic->stencil_op.back.pass_op =
				pCreateInfo->pDepthStencilState->back.passOp;
			dynamic->stencil_op.back.depth_fail_op =
				pCreateInfo->pDepthStencilState->back.depthFailOp;
		}
	}

	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
	if (needed_states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
		dynamic->discard_rectangle.count = discard_rectangle_info->discardRectangleCount;
		if (states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
			typed_memcpy(dynamic->discard_rectangle.rectangles,
				     discard_rectangle_info->pDiscardRectangles,
				     discard_rectangle_info->discardRectangleCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
		const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_info =
			vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
					     PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
		/* If sampleLocationsEnable is VK_FALSE, the default sample
		 * locations are used and the values specified in
		 * sampleLocationsInfo are ignored.
		 */
		if (sample_location_info->sampleLocationsEnable) {
			const VkSampleLocationsInfoEXT *pSampleLocationsInfo =
				&sample_location_info->sampleLocationsInfo;

			assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);

			dynamic->sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
			dynamic->sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
			dynamic->sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
			typed_memcpy(&dynamic->sample_location.locations[0],
				     pSampleLocationsInfo->pSampleLocations,
				     pSampleLocationsInfo->sampleLocationsCount);
		}
	}

	const VkPipelineRasterizationLineStateCreateInfoEXT *rast_line_info =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
				     PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
	if (needed_states & RADV_DYNAMIC_LINE_STIPPLE) {
		dynamic->line_stipple.factor = rast_line_info->lineStippleFactor;
		dynamic->line_stipple.pattern = rast_line_info->lineStipplePattern;
	}

	if (!(states & RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE))
		pipeline->graphics.uses_dynamic_stride = true;

	pipeline->dynamic_state.mask = states;
}
static void
radv_pipeline_init_raster_state(struct radv_pipeline *pipeline,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationStateCreateInfo *raster_info =
		pCreateInfo->pRasterizationState;

	pipeline->graphics.pa_su_sc_mode_cntl =
		S_028814_FACE(raster_info->frontFace) |
		S_028814_CULL_FRONT(!!(raster_info->cullMode & VK_CULL_MODE_FRONT_BIT)) |
		S_028814_CULL_BACK(!!(raster_info->cullMode & VK_CULL_MODE_BACK_BIT)) |
		S_028814_POLY_MODE(raster_info->polygonMode != VK_POLYGON_MODE_FILL) |
		S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(raster_info->polygonMode)) |
		S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(raster_info->polygonMode)) |
		S_028814_POLY_OFFSET_FRONT_ENABLE(raster_info->depthBiasEnable ? 1 : 0) |
		S_028814_POLY_OFFSET_BACK_ENABLE(raster_info->depthBiasEnable ? 1 : 0) |
		S_028814_POLY_OFFSET_PARA_ENABLE(raster_info->depthBiasEnable ? 1 : 0);
}
static void
radv_pipeline_init_depth_stencil_state(struct radv_pipeline *pipeline,
				       const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineDepthStencilStateCreateInfo *ds_info
		= radv_pipeline_get_depth_stencil_state(pCreateInfo);
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	struct radv_render_pass_attachment *attachment = NULL;
	uint32_t db_depth_control = 0;

	if (subpass->depth_stencil_attachment)
		attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

	bool has_depth_attachment = attachment && vk_format_is_depth(attachment->format);
	bool has_stencil_attachment = attachment && vk_format_is_stencil(attachment->format);

	if (ds_info) {
		if (has_depth_attachment) {
			db_depth_control = S_028800_Z_ENABLE(ds_info->depthTestEnable ? 1 : 0) |
					   S_028800_Z_WRITE_ENABLE(ds_info->depthWriteEnable ? 1 : 0) |
					   S_028800_ZFUNC(ds_info->depthCompareOp) |
					   S_028800_DEPTH_BOUNDS_ENABLE(ds_info->depthBoundsTestEnable ? 1 : 0);
		}

		if (has_stencil_attachment && ds_info->stencilTestEnable) {
			db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
			db_depth_control |= S_028800_STENCILFUNC(ds_info->front.compareOp);
			db_depth_control |= S_028800_STENCILFUNC_BF(ds_info->back.compareOp);
		}
	}

	pipeline->graphics.db_depth_control = db_depth_control;
}
static void
gfx9_get_gs_info(const struct radv_pipeline_key *key,
		 const struct radv_pipeline *pipeline,
		 nir_shader **nir,
		 struct radv_shader_info *infos,
		 struct gfx9_gs_info *out)
{
	struct radv_shader_info *gs_info = &infos[MESA_SHADER_GEOMETRY];
	struct radv_es_output_info *es_info;
	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		es_info = nir[MESA_SHADER_TESS_CTRL] ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	else
		es_info = nir[MESA_SHADER_TESS_CTRL] ?
			&infos[MESA_SHADER_TESS_EVAL].tes.es_info :
			&infos[MESA_SHADER_VERTEX].vs.es_info;

	unsigned gs_num_invocations = MAX2(gs_info->gs.invocations, 1);
	bool uses_adjacency;
	switch(key->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space. */
	const unsigned max_lds_size = 8 * 1024;
	const unsigned esgs_itemsize = es_info->esgs_itemsize / 4;
	unsigned esgs_lds_size;

	/* All these are per subgroup: */
	const unsigned max_out_prims = 32 * 1024;
	const unsigned max_es_verts = 255;
	const unsigned ideal_gs_prims = 64;
	unsigned max_gs_prims, gs_prims;
	unsigned min_es_verts, es_verts, worst_case_es_verts;

	if (uses_adjacency || gs_num_invocations > 1)
		max_gs_prims = 127 / gs_num_invocations;
	else
		max_gs_prims = 255;

	/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
	 * Make sure we don't go over the maximum value.
	 */
	if (gs_info->gs.vertices_out > 0) {
		max_gs_prims = MIN2(max_gs_prims,
				    max_out_prims /
				    (gs_info->gs.vertices_out * gs_num_invocations));
	}
	assert(max_gs_prims > 0);

	/* If the primitive has adjacency, halve the number of vertices
	 * that will be reused in multiple primitives.
	 */
	min_es_verts = gs_info->gs.vertices_in / (uses_adjacency ? 2 : 1);

	gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
	worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

	/* Compute ESGS LDS size based on the worst case number of ES vertices
	 * needed to create the target number of GS prims per subgroup.
	 */
	esgs_lds_size = esgs_itemsize * worst_case_es_verts;

	/* If total LDS usage is too big, refactor partitions based on ratio
	 * of ESGS item sizes.
	 */
	if (esgs_lds_size > max_lds_size) {
		/* Our target GS Prims Per Subgroup was too large. Calculate
		 * the maximum number of GS Prims Per Subgroup that will fit
		 * into LDS, capped by the maximum that the hardware can support.
		 */
		gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
				max_gs_prims);
		assert(gs_prims > 0);
		worst_case_es_verts = MIN2(min_es_verts * gs_prims,
					   max_es_verts);

		esgs_lds_size = esgs_itemsize * worst_case_es_verts;
		assert(esgs_lds_size <= max_lds_size);
	}

	/* Now calculate remaining ESGS information. */
	if (esgs_lds_size)
		es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
	else
		es_verts = max_es_verts;

	/* Vertices for adjacency primitives are not always reused, so restore
	 * it for ES_VERTS_PER_SUBGRP.
	 */
	min_es_verts = gs_info->gs.vertices_in;

	/* For normal primitives, the VGT only checks if they are past the ES
	 * verts per subgroup after allocating a full GS primitive and if they
	 * are, kick off a new subgroup. But if those additional ES verts are
	 * unique (e.g. not reused) we need to make sure there is enough LDS
	 * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
	 */
	es_verts -= min_es_verts - 1;

	uint32_t es_verts_per_subgroup = es_verts;
	uint32_t gs_prims_per_subgroup = gs_prims;
	uint32_t gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
	uint32_t max_prims_per_subgroup = gs_inst_prims_in_subgroup * gs_info->gs.vertices_out;
	out->lds_size = align(esgs_lds_size, 128) / 128;
	out->vgt_gs_onchip_cntl = S_028A44_ES_VERTS_PER_SUBGRP(es_verts_per_subgroup) |
				  S_028A44_GS_PRIMS_PER_SUBGRP(gs_prims_per_subgroup) |
				  S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_inst_prims_in_subgroup);
	out->vgt_gs_max_prims_per_subgroup = S_028A94_MAX_PRIMS_PER_SUBGROUP(max_prims_per_subgroup);
	out->vgt_esgs_ring_itemsize = esgs_itemsize;
	assert(max_prims_per_subgroup <= max_out_prims);
}
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
static unsigned
radv_get_num_input_vertices(nir_shader **nir)
{
	if (nir[MESA_SHADER_GEOMETRY]) {
		nir_shader *gs = nir[MESA_SHADER_GEOMETRY];

		return gs->info.gs.vertices_in;
	}

	if (nir[MESA_SHADER_TESS_CTRL]) {
		nir_shader *tes = nir[MESA_SHADER_TESS_EVAL];

		if (tes->info.tess.point_mode)
			return 1;
		if (tes->info.tess.primitive_mode == GL_ISOLINES)
			return 2;
		return 3;
	}

	return 3;
}
static void
gfx10_get_ngg_info(const struct radv_pipeline_key *key,
		   struct radv_pipeline *pipeline,
		   nir_shader **nir,
		   struct radv_shader_info *infos,
		   struct gfx10_ngg_info *ngg)
{
	struct radv_shader_info *gs_info = &infos[MESA_SHADER_GEOMETRY];
	struct radv_es_output_info *es_info =
		nir[MESA_SHADER_TESS_CTRL] ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	unsigned gs_type = nir[MESA_SHADER_GEOMETRY] ? MESA_SHADER_GEOMETRY : MESA_SHADER_VERTEX;
	unsigned max_verts_per_prim = radv_get_num_input_vertices(nir);
	unsigned min_verts_per_prim =
		gs_type == MESA_SHADER_GEOMETRY ? max_verts_per_prim : 1;
	unsigned gs_num_invocations = nir[MESA_SHADER_GEOMETRY] ? MAX2(gs_info->gs.invocations, 1) : 1;
	bool uses_adjacency;
	switch(key->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       the maximum.
	 */
	const unsigned max_lds_size = 8 * 1024 - 768;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	const unsigned min_esverts = pipeline->device->physical_device->rad_info.chip_class >= GFX10_3 ? 29 : 24;
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 256;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on based on the primitive type of
	 * the draw:
	 *  - at most 252 for any line input primitive type
	 *  - at most 251 for any quad input primitive type
	 *  - at most 251 for triangle strips with adjacency (this happens to
	 *    be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == MESA_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_info->gs.vertices_out * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_info->gs.vertices_out;
		}

		esvert_lds_size = es_info->esgs_itemsize / 4;
		gsprim_lds_size = (gs_info->gs.gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* VS and TES. */
		/* LDS size for passing data from GS to ES. */
		struct radv_streamout_info *so_info = nir[MESA_SHADER_TESS_CTRL]
						      ? &infos[MESA_SHADER_TESS_EVAL].so
						      : &infos[MESA_SHADER_VERTEX].so;

		if (so_info->num_outputs)
			esvert_lds_size = 4 * so_info->num_outputs + 1;

		/* GS stores Primitive IDs (one DWORD) into LDS at the address
		 * corresponding to the ES thread of the provoking vertex. All
		 * ES threads load and export PrimitiveID for their thread.
		 */
		if (!nir[MESA_SHADER_TESS_CTRL] &&
		    infos[MESA_SHADER_VERTEX].vs.outinfo.export_prim_id)
			esvert_lds_size = MAX2(esvert_lds_size, 1);
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, uses_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		unsigned wavesize;

		if (gs_type == MESA_SHADER_GEOMETRY) {
			wavesize = gs_info->wave_size;
		} else {
			wavesize = nir[MESA_SHADER_TESS_CTRL]
				   ? infos[MESA_SHADER_TESS_EVAL].wave_size
				   : infos[MESA_SHADER_VERTEX].wave_size;
		}

		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, uses_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_info->gs.vertices_out :
		gs_type == MESA_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_info->gs.vertices_out :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == MESA_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_info->gs.vertices_out;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	ngg->hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	ngg->max_gsprims = max_gsprims;
	ngg->max_out_verts = max_out_vertices;
	ngg->prim_amp_factor = prim_amp_factor;
	ngg->max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
	ngg->ngg_emit_size = max_gsprims * gsprim_lds_size;
	ngg->esgs_ring_size = 4 * max_esverts * esvert_lds_size;

	if (gs_type == MESA_SHADER_GEOMETRY) {
		ngg->vgt_esgs_ring_itemsize = es_info->esgs_itemsize / 4;
	} else {
		ngg->vgt_esgs_ring_itemsize = 1;
	}

	pipeline->graphics.esgs_ring_size = ngg->esgs_ring_size;

	assert(ngg->hw_max_esverts >= min_esverts); /* HW limitation */
}
static void
radv_pipeline_init_gs_ring_state(struct radv_pipeline *pipeline,
				 const struct gfx9_gs_info *gs)
{
	struct radv_device *device = pipeline->device;
	unsigned num_se = device->physical_device->rad_info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	/* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
	 * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
	 */
	unsigned gs_vertex_reuse =
		(device->physical_device->rad_info.chip_class >= GFX8 ? 32 : 16) * num_se;
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
	struct radv_shader_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;

	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(gs->vgt_esgs_ring_itemsize * 4 * gs_vertex_reuse *
					    wave_size, alignment);
	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
		gs->vgt_esgs_ring_itemsize * 4 * gs_info->gs.vertices_in;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
		gs_info->gs.max_gsvs_emit_size;

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
		pipeline->graphics.esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);

	pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}
struct radv_shader_variant *
radv_get_shader(const struct radv_pipeline *pipeline,
		gl_shader_stage stage)
{
	if (stage == MESA_SHADER_VERTEX) {
		if (pipeline->shaders[MESA_SHADER_VERTEX])
			return pipeline->shaders[MESA_SHADER_VERTEX];
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
			return pipeline->shaders[MESA_SHADER_TESS_CTRL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	} else if (stage == MESA_SHADER_TESS_EVAL) {
		if (!radv_pipeline_has_tess(pipeline))
			return NULL;
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
			return pipeline->shaders[MESA_SHADER_TESS_EVAL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	}
	return pipeline->shaders[stage];
}
static const struct radv_vs_output_info *get_vs_output_info(const struct radv_pipeline *pipeline)
{
	if (radv_pipeline_has_gs(pipeline))
		if (radv_pipeline_has_ngg(pipeline))
			return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.vs.outinfo;
		else
			return &pipeline->gs_copy_shader->info.vs.outinfo;
	else if (radv_pipeline_has_tess(pipeline))
		return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.outinfo;
	else
		return &pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.outinfo;
}
static void
radv_link_shaders(struct radv_pipeline *pipeline, nir_shader **shaders)
{
	nir_shader *ordered_shaders[MESA_SHADER_STAGES];
	int shader_count = 0;

	if(shaders[MESA_SHADER_FRAGMENT]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_FRAGMENT];
	}
	if(shaders[MESA_SHADER_GEOMETRY]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_GEOMETRY];
	}
	if(shaders[MESA_SHADER_TESS_EVAL]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_EVAL];
	}
	if(shaders[MESA_SHADER_TESS_CTRL]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_CTRL];
	}
	if(shaders[MESA_SHADER_VERTEX]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_VERTEX];
	}

	if (shader_count > 1) {
		unsigned first = ordered_shaders[shader_count - 1]->info.stage;
		unsigned last = ordered_shaders[0]->info.stage;

		if (ordered_shaders[0]->info.stage == MESA_SHADER_FRAGMENT &&
		    ordered_shaders[1]->info.has_transform_feedback_varyings)
			nir_link_xfb_varyings(ordered_shaders[1], ordered_shaders[0]);

		for (int i = 0; i < shader_count; ++i) {
			nir_variable_mode mask = 0;

			if (ordered_shaders[i]->info.stage != first)
				mask = mask | nir_var_shader_in;

			if (ordered_shaders[i]->info.stage != last)
				mask = mask | nir_var_shader_out;

			nir_lower_io_to_scalar_early(ordered_shaders[i], mask);
			radv_optimize_nir(ordered_shaders[i], false, false);
		}
	}

	for (int i = 1; i < shader_count; ++i) {
		nir_lower_io_arrays_to_elements(ordered_shaders[i],
						ordered_shaders[i - 1]);

		if (nir_link_opt_varyings(ordered_shaders[i],
					  ordered_shaders[i - 1]))
			radv_optimize_nir(ordered_shaders[i - 1], false, false);

		nir_remove_dead_variables(ordered_shaders[i],
					  nir_var_shader_out, NULL);
		nir_remove_dead_variables(ordered_shaders[i - 1],
					  nir_var_shader_in, NULL);

		bool progress = nir_remove_unused_varyings(ordered_shaders[i],
							   ordered_shaders[i - 1]);

		nir_compact_varyings(ordered_shaders[i],
				     ordered_shaders[i - 1], true);

		if (progress) {
			if (nir_lower_global_vars_to_local(ordered_shaders[i])) {
				ac_lower_indirect_derefs(ordered_shaders[i],
							 pipeline->device->physical_device->rad_info.chip_class);
			}
			radv_optimize_nir(ordered_shaders[i], false, false);

			if (nir_lower_global_vars_to_local(ordered_shaders[i - 1])) {
				ac_lower_indirect_derefs(ordered_shaders[i - 1],
							 pipeline->device->physical_device->rad_info.chip_class);
			}
			radv_optimize_nir(ordered_shaders[i - 1], false, false);
		}
	}
}
static void
radv_set_linked_driver_locations(struct radv_pipeline *pipeline, nir_shader **shaders,
				 struct radv_shader_info infos[MESA_SHADER_STAGES])
{
	bool has_tess = shaders[MESA_SHADER_TESS_CTRL];
	bool has_gs = shaders[MESA_SHADER_GEOMETRY];

	if (!has_tess && !has_gs)
		return;

	unsigned vs_info_idx = MESA_SHADER_VERTEX;
	unsigned tes_info_idx = MESA_SHADER_TESS_EVAL;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
		/* These are merged into the next stage */
		vs_info_idx = has_tess ? MESA_SHADER_TESS_CTRL : MESA_SHADER_GEOMETRY;
		tes_info_idx = has_gs ? MESA_SHADER_GEOMETRY : MESA_SHADER_TESS_EVAL;
	}

	if (has_tess) {
		nir_linked_io_var_info vs2tcs =
			nir_assign_linked_io_var_locations(shaders[MESA_SHADER_VERTEX], shaders[MESA_SHADER_TESS_CTRL]);
		nir_linked_io_var_info tcs2tes =
			nir_assign_linked_io_var_locations(shaders[MESA_SHADER_TESS_CTRL], shaders[MESA_SHADER_TESS_EVAL]);

		infos[vs_info_idx].vs.num_linked_outputs = vs2tcs.num_linked_io_vars;
		infos[MESA_SHADER_TESS_CTRL].tcs.num_linked_inputs = vs2tcs.num_linked_io_vars;
		infos[MESA_SHADER_TESS_CTRL].tcs.num_linked_outputs = tcs2tes.num_linked_io_vars;
		infos[MESA_SHADER_TESS_CTRL].tcs.num_linked_patch_outputs = tcs2tes.num_linked_patch_io_vars;
		infos[tes_info_idx].tes.num_linked_inputs = tcs2tes.num_linked_io_vars;
		infos[tes_info_idx].tes.num_linked_patch_inputs = tcs2tes.num_linked_patch_io_vars;

		if (has_gs) {
			nir_linked_io_var_info tes2gs =
				nir_assign_linked_io_var_locations(shaders[MESA_SHADER_TESS_EVAL], shaders[MESA_SHADER_GEOMETRY]);

			infos[tes_info_idx].tes.num_linked_outputs = tes2gs.num_linked_io_vars;
			infos[MESA_SHADER_GEOMETRY].gs.num_linked_inputs = tes2gs.num_linked_io_vars;
		}
	} else if (has_gs) {
		nir_linked_io_var_info vs2gs =
			nir_assign_linked_io_var_locations(shaders[MESA_SHADER_VERTEX], shaders[MESA_SHADER_GEOMETRY]);

		infos[vs_info_idx].vs.num_linked_outputs = vs2gs.num_linked_io_vars;
		infos[MESA_SHADER_GEOMETRY].gs.num_linked_inputs = vs2gs.num_linked_io_vars;
	}
}
static uint32_t
radv_get_attrib_stride(const VkPipelineVertexInputStateCreateInfo *input_state,
		       uint32_t attrib_binding)
{
	for (uint32_t i = 0; i < input_state->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *input_binding =
			&input_state->pVertexBindingDescriptions[i];

		if (input_binding->binding == attrib_binding)
			return input_binding->stride;
	}

	return 0;
}
static struct radv_pipeline_key
radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
				    const VkGraphicsPipelineCreateInfo *pCreateInfo,
				    const struct radv_blend_state *blend)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	const VkPipelineVertexInputStateCreateInfo *input_state =
		pCreateInfo->pVertexInputState;
	const VkPipelineVertexInputDivisorStateCreateInfoEXT *divisor_state =
		vk_find_struct_const(input_state->pNext, PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);

	struct radv_pipeline_key key;
	memset(&key, 0, sizeof(key));

	if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
		key.optimisations_disabled = 1;

	key.has_multiview_view_index = !!subpass->view_mask;

	uint32_t binding_input_rate = 0;
	uint32_t instance_rate_divisors[MAX_VERTEX_ATTRIBS];
	for (unsigned i = 0; i < input_state->vertexBindingDescriptionCount; ++i) {
		if (input_state->pVertexBindingDescriptions[i].inputRate) {
			unsigned binding = input_state->pVertexBindingDescriptions[i].binding;
			binding_input_rate |= 1u << binding;
			instance_rate_divisors[binding] = 1;
		}
	}
	if (divisor_state) {
		for (unsigned i = 0; i < divisor_state->vertexBindingDivisorCount; ++i) {
			instance_rate_divisors[divisor_state->pVertexBindingDivisors[i].binding] =
				divisor_state->pVertexBindingDivisors[i].divisor;
		}
	}

	for (unsigned i = 0; i < input_state->vertexAttributeDescriptionCount; ++i) {
		const VkVertexInputAttributeDescription *desc =
			&input_state->pVertexAttributeDescriptions[i];
		const struct vk_format_description *format_desc;
		unsigned location = desc->location;
		unsigned binding = desc->binding;
		unsigned num_format, data_format;
		int first_non_void;

		if (binding_input_rate & (1u << binding)) {
			key.instance_rate_inputs |= 1u << location;
			key.instance_rate_divisors[location] = instance_rate_divisors[binding];
		}

		format_desc = vk_format_description(desc->format);
		first_non_void = vk_format_get_first_non_void_channel(desc->format);

		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);

		key.vertex_attribute_formats[location] = data_format | (num_format << 4);
		key.vertex_attribute_bindings[location] = desc->binding;
		key.vertex_attribute_offsets[location] = desc->offset;
		key.vertex_attribute_strides[location] = radv_get_attrib_stride(input_state, desc->binding);

		if (pipeline->device->physical_device->rad_info.chip_class <= GFX8 &&
		    pipeline->device->physical_device->rad_info.family != CHIP_STONEY) {
			VkFormat format = input_state->pVertexAttributeDescriptions[i].format;
			uint64_t adjust;
			switch(format) {
			case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
			case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
				adjust = RADV_ALPHA_ADJUST_SNORM;
				break;
			case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
			case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
				adjust = RADV_ALPHA_ADJUST_SSCALED;
				break;
			case VK_FORMAT_A2R10G10B10_SINT_PACK32:
			case VK_FORMAT_A2B10G10R10_SINT_PACK32:
				adjust = RADV_ALPHA_ADJUST_SINT;
				break;
			default:
				adjust = 0;
				break;
			}
			key.vertex_alpha_adjust |= adjust << (2 * location);
		}

		switch (desc->format) {
		case VK_FORMAT_B8G8R8A8_UNORM:
		case VK_FORMAT_B8G8R8A8_SNORM:
		case VK_FORMAT_B8G8R8A8_USCALED:
		case VK_FORMAT_B8G8R8A8_SSCALED:
		case VK_FORMAT_B8G8R8A8_UINT:
		case VK_FORMAT_B8G8R8A8_SINT:
		case VK_FORMAT_B8G8R8A8_SRGB:
		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
			key.vertex_post_shuffle |= 1 << location;
			break;
		default:
			break;
		}
	}

	const VkPipelineTessellationStateCreateInfo *tess =
		radv_pipeline_get_tessellation_state(pCreateInfo);
	if (tess)
		key.tess_input_vertices = tess->patchControlPoints;

	const VkPipelineMultisampleStateCreateInfo *vkms =
		radv_pipeline_get_multisample_state(pCreateInfo);
	if (vkms && vkms->rasterizationSamples > 1) {
		uint32_t num_samples = vkms->rasterizationSamples;
		uint32_t ps_iter_samples = radv_pipeline_get_ps_iter_samples(pCreateInfo);
		key.num_samples = num_samples;
		key.log2_ps_iter_samples = util_logbase2(ps_iter_samples);
	}

	key.col_format = blend->spi_shader_col_format;
	key.is_dual_src = blend->mrt0_is_dual_src;
	if (pipeline->device->physical_device->rad_info.chip_class < GFX8) {
		key.is_int8 = blend->col_format_is_int8;
		key.is_int10 = blend->col_format_is_int10;
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10)
		key.topology = pCreateInfo->pInputAssemblyState->topology;

	return key;
}
static bool
radv_nir_stage_uses_xfb(const nir_shader *nir)
{
	nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
	bool uses_xfb = !!xfb;

	ralloc_free(xfb);
	return uses_xfb;
}
static void
radv_fill_shader_keys(struct radv_device *device,
		      struct radv_shader_variant_key *keys,
		      const struct radv_pipeline_key *key,
		      nir_shader **nir)
{
	keys[MESA_SHADER_VERTEX].vs.instance_rate_inputs = key->instance_rate_inputs;
	keys[MESA_SHADER_VERTEX].vs.alpha_adjust = key->vertex_alpha_adjust;
	keys[MESA_SHADER_VERTEX].vs.post_shuffle = key->vertex_post_shuffle;
	for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; ++i) {
		keys[MESA_SHADER_VERTEX].vs.instance_rate_divisors[i] = key->instance_rate_divisors[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_formats[i] = key->vertex_attribute_formats[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_bindings[i] = key->vertex_attribute_bindings[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_offsets[i] = key->vertex_attribute_offsets[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_strides[i] = key->vertex_attribute_strides[i];
	}
	keys[MESA_SHADER_VERTEX].vs.outprim = si_conv_prim_to_gs_out(key->topology);

	if (nir[MESA_SHADER_TESS_CTRL]) {
		keys[MESA_SHADER_VERTEX].vs_common_out.as_ls = true;
		keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = 0;
		keys[MESA_SHADER_TESS_CTRL].tcs.input_vertices = key->tess_input_vertices;
		keys[MESA_SHADER_TESS_CTRL].tcs.primitive_mode = nir[MESA_SHADER_TESS_EVAL]->info.tess.primitive_mode;

		keys[MESA_SHADER_TESS_CTRL].tcs.tes_reads_tess_factors = !!(nir[MESA_SHADER_TESS_EVAL]->info.inputs_read & (VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER));
	}

	if (nir[MESA_SHADER_GEOMETRY]) {
		if (nir[MESA_SHADER_TESS_CTRL])
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_es = true;
		else
			keys[MESA_SHADER_VERTEX].vs_common_out.as_es = true;
	}

	if (device->physical_device->use_ngg) {
		if (nir[MESA_SHADER_TESS_CTRL]) {
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = true;
		} else {
			keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = true;
		}

		if (nir[MESA_SHADER_TESS_CTRL] &&
		    nir[MESA_SHADER_GEOMETRY] &&
		    nir[MESA_SHADER_GEOMETRY]->info.gs.invocations *
		    nir[MESA_SHADER_GEOMETRY]->info.gs.vertices_out > 256) {
			/* Fallback to the legacy path if tessellation is
			 * enabled with extreme geometry because
			 * EN_MAX_VERT_OUT_PER_GS_INSTANCE doesn't work and it
			 * might hang.
			 */
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
		}

		if (!device->physical_device->use_ngg_gs) {
			if (nir[MESA_SHADER_GEOMETRY]) {
				if (nir[MESA_SHADER_TESS_CTRL])
					keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
				else
					keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = false;
			}
		}

		gl_shader_stage last_xfb_stage = MESA_SHADER_VERTEX;

		for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
			if (nir[i])
				last_xfb_stage = i;
		}

		bool uses_xfb = nir[last_xfb_stage] &&
				radv_nir_stage_uses_xfb(nir[last_xfb_stage]);

		if (!device->physical_device->use_ngg_streamout && uses_xfb) {
			if (nir[MESA_SHADER_TESS_CTRL])
				keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
			else
				keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = false;
		}

		/* Determine if the pipeline is eligible for the NGG passthrough
		 * mode. It can't be enabled for geometry shaders, for NGG
		 * streamout or for vertex shaders that export the primitive ID
		 * (this is checked later because we don't have the info here.)
		 */
		if (!nir[MESA_SHADER_GEOMETRY] && !uses_xfb) {
			if (nir[MESA_SHADER_TESS_CTRL] &&
			    keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg) {
				keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg_passthrough = true;
			} else if (nir[MESA_SHADER_VERTEX] &&
				   keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg) {
				keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg_passthrough = true;
			}
		}
	}

	for(int i = 0; i < MESA_SHADER_STAGES; ++i)
		keys[i].has_multiview_view_index = key->has_multiview_view_index;

	keys[MESA_SHADER_FRAGMENT].fs.col_format = key->col_format;
	keys[MESA_SHADER_FRAGMENT].fs.is_int8 = key->is_int8;
	keys[MESA_SHADER_FRAGMENT].fs.is_int10 = key->is_int10;
	keys[MESA_SHADER_FRAGMENT].fs.log2_ps_iter_samples = key->log2_ps_iter_samples;
	keys[MESA_SHADER_FRAGMENT].fs.num_samples = key->num_samples;
	keys[MESA_SHADER_FRAGMENT].fs.is_dual_src = key->is_dual_src;

	if (nir[MESA_SHADER_COMPUTE]) {
		keys[MESA_SHADER_COMPUTE].cs.subgroup_size = key->compute_subgroup_size;
	}
}
static uint8_t
radv_get_wave_size(struct radv_device *device,
		   const VkPipelineShaderStageCreateInfo *pStage,
		   gl_shader_stage stage,
		   const struct radv_shader_variant_key *key)
{
	if (stage == MESA_SHADER_GEOMETRY && !key->vs_common_out.as_ngg)
		return 64;
	else if (stage == MESA_SHADER_COMPUTE) {
		if (key->cs.subgroup_size) {
			/* Return the required subgroup size if specified. */
			return key->cs.subgroup_size;
		}
		return device->physical_device->cs_wave_size;
	}
	else if (stage == MESA_SHADER_FRAGMENT)
		return device->physical_device->ps_wave_size;
	else
		return device->physical_device->ge_wave_size;
}
static uint8_t
radv_get_ballot_bit_size(struct radv_device *device,
			 const VkPipelineShaderStageCreateInfo *pStage,
			 gl_shader_stage stage,
			 const struct radv_shader_variant_key *key)
{
	if (stage == MESA_SHADER_COMPUTE && key->cs.subgroup_size)
		return key->cs.subgroup_size;
	return 64;
}
static void
radv_fill_shader_info(struct radv_pipeline *pipeline,
		      const VkPipelineShaderStageCreateInfo **pStages,
		      struct radv_shader_variant_key *keys,
		      struct radv_shader_info *infos,
		      nir_shader **nir)
{
	unsigned active_stages = 0;
	unsigned filled_stages = 0;

	for (int i = 0; i < MESA_SHADER_STAGES; i++) {
		if (nir[i])
			active_stages |= (1 << i);
	}

	if (nir[MESA_SHADER_FRAGMENT]) {
		radv_nir_shader_info_init(&infos[MESA_SHADER_FRAGMENT]);
		radv_nir_shader_info_pass(nir[MESA_SHADER_FRAGMENT],
					  pipeline->layout,
					  &keys[MESA_SHADER_FRAGMENT],
					  &infos[MESA_SHADER_FRAGMENT],
					  radv_use_llvm_for_stage(pipeline->device, MESA_SHADER_FRAGMENT));

		/* TODO: These are no longer used as keys we should refactor this */
		keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id =
			infos[MESA_SHADER_FRAGMENT].ps.prim_id_input;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_layer_id =
			infos[MESA_SHADER_FRAGMENT].ps.layer_input;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_clip_dists =
			!!infos[MESA_SHADER_FRAGMENT].ps.num_input_clips_culls;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_viewport_index =
			infos[MESA_SHADER_FRAGMENT].ps.viewport_index_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_prim_id =
			infos[MESA_SHADER_FRAGMENT].ps.prim_id_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_layer_id =
			infos[MESA_SHADER_FRAGMENT].ps.layer_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_clip_dists =
			!!infos[MESA_SHADER_FRAGMENT].ps.num_input_clips_culls;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_viewport_index =
			infos[MESA_SHADER_FRAGMENT].ps.viewport_index_input;

		/* NGG passthrough mode can't be enabled for vertex shaders
		 * that export the primitive ID.
		 *
		 * TODO: I should really refactor the keys logic.
		 */
		if (nir[MESA_SHADER_VERTEX] &&
		    keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id) {
			keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg_passthrough = false;
		}

		filled_stages |= (1 << MESA_SHADER_FRAGMENT);
	}

	if (nir[MESA_SHADER_TESS_CTRL]) {
		infos[MESA_SHADER_TESS_CTRL].tcs.tes_inputs_read =
			nir[MESA_SHADER_TESS_EVAL]->info.inputs_read;
		infos[MESA_SHADER_TESS_CTRL].tcs.tes_patch_inputs_read =
			nir[MESA_SHADER_TESS_EVAL]->info.patch_inputs_read;
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9 &&
	    nir[MESA_SHADER_TESS_CTRL]) {
		struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
		struct radv_shader_variant_key key = keys[MESA_SHADER_TESS_CTRL];
		key.tcs.vs_key = keys[MESA_SHADER_VERTEX].vs;

		radv_nir_shader_info_init(&infos[MESA_SHADER_TESS_CTRL]);

		for (int i = 0; i < 2; i++) {
			radv_nir_shader_info_pass(combined_nir[i],
						  pipeline->layout, &key,
						  &infos[MESA_SHADER_TESS_CTRL],
						  radv_use_llvm_for_stage(pipeline->device, MESA_SHADER_TESS_CTRL));
		}

		keys[MESA_SHADER_TESS_EVAL].tes.num_patches =
			infos[MESA_SHADER_TESS_CTRL].tcs.num_patches;
		keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs =
			util_last_bit64(infos[MESA_SHADER_TESS_CTRL].tcs.outputs_written);

		filled_stages |= (1 << MESA_SHADER_VERTEX);
		filled_stages |= (1 << MESA_SHADER_TESS_CTRL);
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9 &&
	    nir[MESA_SHADER_GEOMETRY]) {
		gl_shader_stage pre_stage = nir[MESA_SHADER_TESS_EVAL] ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
		struct nir_shader *combined_nir[] = {nir[pre_stage], nir[MESA_SHADER_GEOMETRY]};

		radv_nir_shader_info_init(&infos[MESA_SHADER_GEOMETRY]);

		for (int i = 0; i < 2; i++) {
			radv_nir_shader_info_pass(combined_nir[i],
						  pipeline->layout,
						  &keys[pre_stage],
						  &infos[MESA_SHADER_GEOMETRY],
						  radv_use_llvm_for_stage(pipeline->device, MESA_SHADER_GEOMETRY));
		}

		filled_stages |= (1 << pre_stage);
		filled_stages |= (1 << MESA_SHADER_GEOMETRY);
	}

	active_stages ^= filled_stages;
	while (active_stages) {
		int i = u_bit_scan(&active_stages);

		if (i == MESA_SHADER_TESS_CTRL) {
			keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs =
				util_last_bit64(infos[MESA_SHADER_VERTEX].vs.ls_outputs_written);
		}

		if (i == MESA_SHADER_TESS_EVAL) {
			keys[MESA_SHADER_TESS_EVAL].tes.num_patches =
				infos[MESA_SHADER_TESS_CTRL].tcs.num_patches;
			keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs =
				util_last_bit64(infos[MESA_SHADER_TESS_CTRL].tcs.outputs_written);
		}

		radv_nir_shader_info_init(&infos[i]);
		radv_nir_shader_info_pass(nir[i], pipeline->layout,
					  &keys[i], &infos[i],
					  radv_use_llvm_for_stage(pipeline->device, i));
	}

	for (int i = 0; i < MESA_SHADER_STAGES; i++) {
		if (nir[i]) {
			infos[i].wave_size =
				radv_get_wave_size(pipeline->device, pStages[i],
						   i, &keys[i]);
			infos[i].ballot_bit_size =
				radv_get_ballot_bit_size(pipeline->device,
							 pStages[i], i,
							 &keys[i]);
		}
	}
}
static void
merge_tess_info(struct shader_info *tes_info,
		const struct shader_info *tcs_info)
{
	/* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
	 *
	 *    "PointMode. Controls generation of points rather than triangles
	 *     or lines. This functionality defaults to disabled, and is
	 *     enabled if either shader stage includes the execution mode.
	 *
	 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
	 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
	 * and OutputVertices, it says:
	 *
	 *    "One mode must be set in at least one of the tessellation
	 *     shader stages."
	 *
	 * So, the fields can be set in either the TCS or TES, but they must
	 * agree if set in both. Our backend looks at TES, so bitwise-or in
	 * the values from the TCS.
	 */
	assert(tcs_info->tess.tcs_vertices_out == 0 ||
	       tes_info->tess.tcs_vertices_out == 0 ||
	       tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
	tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;

	assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
	       tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
	       tcs_info->tess.spacing == tes_info->tess.spacing);
	tes_info->tess.spacing |= tcs_info->tess.spacing;

	assert(tcs_info->tess.primitive_mode == 0 ||
	       tes_info->tess.primitive_mode == 0 ||
	       tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
	tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
	tes_info->tess.ccw |= tcs_info->tess.ccw;
	tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}
static
void radv_init_feedback(const VkPipelineCreationFeedbackCreateInfoEXT *ext)
{
	if (!ext)
		return;

	if (ext->pPipelineCreationFeedback) {
		ext->pPipelineCreationFeedback->flags = 0;
		ext->pPipelineCreationFeedback->duration = 0;
	}

	for (unsigned i = 0; i < ext->pipelineStageCreationFeedbackCount; ++i) {
		ext->pPipelineStageCreationFeedbacks[i].flags = 0;
		ext->pPipelineStageCreationFeedbacks[i].duration = 0;
	}
}
static
void radv_start_feedback(VkPipelineCreationFeedbackEXT *feedback)
{
	if (!feedback)
		return;

	feedback->duration -= radv_get_current_time();
	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
}
static
void radv_stop_feedback(VkPipelineCreationFeedbackEXT *feedback, bool cache_hit)
{
	if (!feedback)
		return;

	feedback->duration += radv_get_current_time();
	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT |
		(cache_hit ? VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT : 0);
}
VkResult radv_create_shaders(struct radv_pipeline *pipeline,
			     struct radv_device *device,
			     struct radv_pipeline_cache *cache,
			     const struct radv_pipeline_key *key,
			     const VkPipelineShaderStageCreateInfo **pStages,
			     const VkPipelineCreateFlags flags,
			     VkPipelineCreationFeedbackEXT *pipeline_feedback,
			     VkPipelineCreationFeedbackEXT **stage_feedbacks)
{
	struct radv_shader_module fs_m = {0};
	struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
	nir_shader *nir[MESA_SHADER_STAGES] = {0};
	struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
	struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{{0}}}}};
	struct radv_shader_info infos[MESA_SHADER_STAGES] = {0};
	unsigned char hash[20], gs_copy_hash[20];
	bool keep_executable_info = (flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) || device->keep_shader_info;
	bool keep_statistic_info = (flags & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR) ||
				   (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) ||
				   device->keep_shader_info;

	radv_start_feedback(pipeline_feedback);

	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pStages[i]) {
			modules[i] = radv_shader_module_from_handle(pStages[i]->module);
			if (modules[i]->nir)
				_mesa_sha1_compute(modules[i]->nir->info.name,
						   strlen(modules[i]->nir->info.name),
						   modules[i]->sha1);

			pipeline->active_stages |= mesa_to_vk_shader_stage(i);
		}
	}

	radv_hash_shaders(hash, pStages, pipeline->layout, key, get_hash_flags(device));
	memcpy(gs_copy_hash, hash, 20);
	gs_copy_hash[0] ^= 1;

	bool found_in_application_cache = true;
	if (modules[MESA_SHADER_GEOMETRY] && !keep_executable_info && !keep_statistic_info) {
		struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
		radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants,
								&found_in_application_cache);
		pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
	}

	if (!keep_executable_info && !keep_statistic_info &&
	    radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
							    &found_in_application_cache) &&
	    (!modules[MESA_SHADER_GEOMETRY] || pipeline->gs_copy_shader)) {
		radv_stop_feedback(pipeline_feedback, found_in_application_cache);
		return VK_SUCCESS;
	}

	if (flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT) {
		radv_stop_feedback(pipeline_feedback, found_in_application_cache);
		return VK_PIPELINE_COMPILE_REQUIRED_EXT;
	}

	if (!modules[MESA_SHADER_FRAGMENT] && !modules[MESA_SHADER_COMPUTE]) {
		nir_builder fs_b;
		nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
		fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "noop_fs");
		fs_m.nir = fs_b.shader;
		modules[MESA_SHADER_FRAGMENT] = &fs_m;
	}

	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
		const VkPipelineShaderStageCreateInfo *stage = pStages[i];
		unsigned subgroup_size = 64, ballot_bit_size = 64;

		if (!modules[i])
			continue;

		radv_start_feedback(stage_feedbacks[i]);

		if (key->compute_subgroup_size) {
			/* Only compute shaders currently support requiring a
			 * specific subgroup size.
			 */
			assert(i == MESA_SHADER_COMPUTE);
			subgroup_size = key->compute_subgroup_size;
			ballot_bit_size = key->compute_subgroup_size;
		}

		nir[i] = radv_shader_compile_to_nir(device, modules[i],
						    stage ? stage->pName : "main", i,
						    stage ? stage->pSpecializationInfo : NULL,
						    flags, pipeline->layout,
						    subgroup_size, ballot_bit_size);

		/* We don't want to alter meta shaders IR directly so clone it
		 * first.
		 */
		if (nir[i]->info.name) {
			nir[i] = nir_shader_clone(NULL, nir[i]);
		}

		radv_stop_feedback(stage_feedbacks[i], false);
	}

	if (nir[MESA_SHADER_TESS_CTRL]) {
		nir_lower_patch_vertices(nir[MESA_SHADER_TESS_EVAL], nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
		merge_tess_info(&nir[MESA_SHADER_TESS_EVAL]->info, &nir[MESA_SHADER_TESS_CTRL]->info);
	}

	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_link_shaders(pipeline, nir);

	radv_set_linked_driver_locations(pipeline, nir, infos);

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (nir[i]) {
			/* do this again since information such as outputs_read can be out-of-date */
			nir_shader_gather_info(nir[i], nir_shader_get_entrypoint(nir[i]));

			if (radv_use_llvm_for_stage(device, i)) {
				NIR_PASS_V(nir[i], nir_lower_bool_to_int32);
			} else {
				NIR_PASS_V(nir[i], nir_lower_non_uniform_access,
					   nir_lower_non_uniform_ubo_access |
					   nir_lower_non_uniform_ssbo_access |
					   nir_lower_non_uniform_texture_access |
					   nir_lower_non_uniform_image_access);
			}
			NIR_PASS_V(nir[i], nir_lower_memory_model);
		}
	}

	if (nir[MESA_SHADER_FRAGMENT])
		radv_lower_fs_io(nir[MESA_SHADER_FRAGMENT]);

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (radv_can_dump_shader(device, modules[i], false))
			nir_print_shader(nir[i], stderr);
	}

	radv_fill_shader_keys(device, keys, key, nir);

	radv_fill_shader_info(pipeline, pStages, keys, infos, nir);

	if ((nir[MESA_SHADER_VERTEX] &&
	     keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg) ||
	    (nir[MESA_SHADER_TESS_EVAL] &&
	     keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg)) {
		struct gfx10_ngg_info *ngg_info;

		if (nir[MESA_SHADER_GEOMETRY])
			ngg_info = &infos[MESA_SHADER_GEOMETRY].ngg_info;
		else if (nir[MESA_SHADER_TESS_CTRL])
			ngg_info = &infos[MESA_SHADER_TESS_EVAL].ngg_info;
		else
			ngg_info = &infos[MESA_SHADER_VERTEX].ngg_info;

		gfx10_get_ngg_info(key, pipeline, nir, infos, ngg_info);
	} else if (nir[MESA_SHADER_GEOMETRY]) {
		struct gfx9_gs_info *gs_info =
			&infos[MESA_SHADER_GEOMETRY].gs_ring_info;

		gfx9_get_gs_info(key, pipeline, nir, infos, gs_info);
	}

	if(modules[MESA_SHADER_GEOMETRY]) {
		struct radv_shader_binary *gs_copy_binary = NULL;
		if (!pipeline->gs_copy_shader &&
		    !radv_pipeline_has_ngg(pipeline)) {
			struct radv_shader_info info = {};
			struct radv_shader_variant_key key = {};

			key.has_multiview_view_index =
				keys[MESA_SHADER_GEOMETRY].has_multiview_view_index;

			radv_nir_shader_info_pass(nir[MESA_SHADER_GEOMETRY],
						  pipeline->layout, &key,
						  &info,
						  radv_use_llvm_for_stage(pipeline->device, MESA_SHADER_GEOMETRY));
			info.wave_size = 64; /* Wave32 not supported. */
			info.ballot_bit_size = 64;

			pipeline->gs_copy_shader = radv_create_gs_copy_shader(
					device, nir[MESA_SHADER_GEOMETRY], &info,
					&gs_copy_binary, keep_executable_info, keep_statistic_info,
					keys[MESA_SHADER_GEOMETRY].has_multiview_view_index);
		}

		if (!keep_executable_info && !keep_statistic_info && pipeline->gs_copy_shader) {
			struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
			struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};

			binaries[MESA_SHADER_GEOMETRY] = gs_copy_binary;
			variants[MESA_SHADER_GEOMETRY] = pipeline->gs_copy_shader;

			radv_pipeline_cache_insert_shaders(device, cache,
							   gs_copy_hash,
							   variants,
							   binaries);
		}
		free(gs_copy_binary);
	}

	if (nir[MESA_SHADER_FRAGMENT]) {
		if (!pipeline->shaders[MESA_SHADER_FRAGMENT]) {
			radv_start_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT]);

			pipeline->shaders[MESA_SHADER_FRAGMENT] =
				radv_shader_variant_compile(device, modules[MESA_SHADER_FRAGMENT], &nir[MESA_SHADER_FRAGMENT], 1,
							    pipeline->layout, keys + MESA_SHADER_FRAGMENT,
							    infos + MESA_SHADER_FRAGMENT,
							    keep_executable_info, keep_statistic_info,
							    &binaries[MESA_SHADER_FRAGMENT]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT], false);
		}
	}

	if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_TESS_CTRL]) {
		if (!pipeline->shaders[MESA_SHADER_TESS_CTRL]) {
			struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
			struct radv_shader_variant_key key = keys[MESA_SHADER_TESS_CTRL];
			key.tcs.vs_key = keys[MESA_SHADER_VERTEX].vs;

			radv_start_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL]);

			pipeline->shaders[MESA_SHADER_TESS_CTRL] = radv_shader_variant_compile(device, modules[MESA_SHADER_TESS_CTRL], combined_nir, 2,
											       pipeline->layout,
											       &key, &infos[MESA_SHADER_TESS_CTRL], keep_executable_info,
											       keep_statistic_info, &binaries[MESA_SHADER_TESS_CTRL]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL], false);
		}
		modules[MESA_SHADER_VERTEX] = NULL;
		keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
		keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.outputs_written);
	}

	if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_GEOMETRY]) {
		gl_shader_stage pre_stage = modules[MESA_SHADER_TESS_EVAL] ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
		if (!pipeline->shaders[MESA_SHADER_GEOMETRY]) {
			struct nir_shader *combined_nir[] = {nir[pre_stage], nir[MESA_SHADER_GEOMETRY]};

			radv_start_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY]);

			pipeline->shaders[MESA_SHADER_GEOMETRY] = radv_shader_variant_compile(device, modules[MESA_SHADER_GEOMETRY], combined_nir, 2,
											      pipeline->layout,
											      &keys[pre_stage], &infos[MESA_SHADER_GEOMETRY], keep_executable_info,
											      keep_statistic_info, &binaries[MESA_SHADER_GEOMETRY]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY], false);
		}
		modules[pre_stage] = NULL;
	}

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if(modules[i] && !pipeline->shaders[i]) {
			if (i == MESA_SHADER_TESS_CTRL) {
				keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = util_last_bit64(pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.ls_outputs_written);
			}
			if (i == MESA_SHADER_TESS_EVAL) {
				keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
				keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.outputs_written);
			}

			radv_start_feedback(stage_feedbacks[i]);

			pipeline->shaders[i] = radv_shader_variant_compile(device, modules[i], &nir[i], 1,
									   pipeline->layout,
									   keys + i, infos + i, keep_executable_info,
									   keep_statistic_info, &binaries[i]);

			radv_stop_feedback(stage_feedbacks[i], false);
		}
	}

	if (!keep_executable_info && !keep_statistic_info) {
		radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
						   binaries);
	}

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		free(binaries[i]);
		if (nir[i]) {
			ralloc_free(nir[i]);

			if (radv_can_dump_shader_stats(device, modules[i])) {
				radv_dump_shader_stats(device, pipeline, i, stderr);
			}
		}
	}

	if (fs_m.nir)
		ralloc_free(fs_m.nir);

	radv_stop_feedback(pipeline_feedback, false);
	return VK_SUCCESS;
}
static uint32_t
radv_pipeline_stage_to_user_data_0(struct radv_pipeline *pipeline,
                                   gl_shader_stage stage, enum chip_class chip_class)
{
    bool has_gs = radv_pipeline_has_gs(pipeline);
    bool has_tess = radv_pipeline_has_tess(pipeline);
    bool has_ngg = radv_pipeline_has_ngg(pipeline);

    switch (stage) {
    case MESA_SHADER_FRAGMENT:
        return R_00B030_SPI_SHADER_USER_DATA_PS_0;
    case MESA_SHADER_VERTEX:
        if (has_tess) {
            if (chip_class >= GFX10) {
                return R_00B430_SPI_SHADER_USER_DATA_HS_0;
            } else if (chip_class == GFX9) {
                return R_00B430_SPI_SHADER_USER_DATA_LS_0;
            } else {
                return R_00B530_SPI_SHADER_USER_DATA_LS_0;
            }
        }

        if (has_gs) {
            if (chip_class >= GFX10) {
                return R_00B230_SPI_SHADER_USER_DATA_GS_0;
            } else {
                return R_00B330_SPI_SHADER_USER_DATA_ES_0;
            }
        }

        if (has_ngg)
            return R_00B230_SPI_SHADER_USER_DATA_GS_0;

        return R_00B130_SPI_SHADER_USER_DATA_VS_0;
    case MESA_SHADER_GEOMETRY:
        return chip_class == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
                                    R_00B230_SPI_SHADER_USER_DATA_GS_0;
    case MESA_SHADER_COMPUTE:
        return R_00B900_COMPUTE_USER_DATA_0;
    case MESA_SHADER_TESS_CTRL:
        return chip_class == GFX9 ? R_00B430_SPI_SHADER_USER_DATA_LS_0 :
                                    R_00B430_SPI_SHADER_USER_DATA_HS_0;
    case MESA_SHADER_TESS_EVAL:
        if (has_gs) {
            return chip_class >= GFX10 ? R_00B230_SPI_SHADER_USER_DATA_GS_0 :
                                         R_00B330_SPI_SHADER_USER_DATA_ES_0;
        } else if (has_ngg) {
            return R_00B230_SPI_SHADER_USER_DATA_GS_0;
        } else {
            return R_00B130_SPI_SHADER_USER_DATA_VS_0;
        }
    default:
        unreachable("unknown shader");
    }
}
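/* Bin-size selection for primitive binning on GFX9. The entry tables map the
 * render-backend/shader-engine configuration and the bytes written per pixel
 * to a recommended bin extent; the smaller of the color and depth/stencil
 * results is used.
 */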
struct radv_bin_size_entry {
    unsigned bpp;
    VkExtent2D extent;
};

static VkExtent2D
radv_gfx9_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
    /* Bin size lookup tables, indexed as
     * [log_num_rb_per_se][log_num_se][bytes-per-pixel bucket], with
     * "One/Two/Four shader engines" sub-tables. The individual entries were
     * lost in this copy; only the { UINT_MAX, { 0, 0}} terminators of each
     * sub-table survive, so the table bodies are elided here.
     */
    static const struct radv_bin_size_entry color_size_table[][3][9] = {
        /* ... color bin-size entries elided ... */
    };
    static const struct radv_bin_size_entry ds_size_table[][3][9] = {
        /* ... depth/stencil bin-size entries elided ... */
    };

    RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
    struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
    VkExtent2D extent = {512, 512};

    unsigned log_num_rb_per_se =
        util_logbase2_ceil(pipeline->device->physical_device->rad_info.num_render_backends /
                           pipeline->device->physical_device->rad_info.max_se);
    unsigned log_num_se = util_logbase2_ceil(pipeline->device->physical_device->rad_info.max_se);

    unsigned total_samples = 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline->graphics.ms.pa_sc_aa_config);
    unsigned ps_iter_samples = 1u << G_028804_PS_ITER_SAMPLES(pipeline->graphics.ms.db_eqaa);
    unsigned effective_samples = total_samples;
    unsigned color_bytes_per_pixel = 0;

    const VkPipelineColorBlendStateCreateInfo *vkblend =
        radv_pipeline_get_color_blend_state(pCreateInfo);

    for (unsigned i = 0; i < subpass->color_count; i++) {
        if (!vkblend->pAttachments[i].colorWriteMask)
            continue;

        if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
            continue;

        VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
        color_bytes_per_pixel += vk_format_get_blocksize(format);
    }

    /* MSAA images typically don't use all samples all the time. */
    if (effective_samples >= 2 && ps_iter_samples <= 1)
        effective_samples = 2;
    color_bytes_per_pixel *= effective_samples;

    const struct radv_bin_size_entry *color_entry = color_size_table[log_num_rb_per_se][log_num_se];
    while (color_entry[1].bpp <= color_bytes_per_pixel)
        ++color_entry;

    extent = color_entry->extent;

    if (subpass->depth_stencil_attachment) {
        struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

        /* Coefficients taken from AMDVLK */
        unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
        unsigned stencil_coeff = vk_format_is_stencil(attachment->format) ? 1 : 0;
        unsigned ds_bytes_per_pixel = 4 * (depth_coeff + stencil_coeff) * total_samples;

        const struct radv_bin_size_entry *ds_entry = ds_size_table[log_num_rb_per_se][log_num_se];
        while (ds_entry[1].bpp <= ds_bytes_per_pixel)
            ++ds_entry;

        if (ds_entry->extent.width * ds_entry->extent.height < extent.width * extent.height)
            extent = ds_entry->extent;
    }

    return extent;
}
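/* GFX10 variant: bin sizes are derived from the CB/DB/FMASK cache tag
 * capacities instead of lookup tables.
 */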
static VkExtent2D
radv_gfx10_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
    RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
    struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
    VkExtent2D extent = {512, 512};

    const unsigned db_tag_size = 64;
    const unsigned db_tag_count = 312;
    const unsigned color_tag_size = 1024;
    const unsigned color_tag_count = 31;
    const unsigned fmask_tag_size = 256;
    const unsigned fmask_tag_count = 44;

    const unsigned rb_count = pipeline->device->physical_device->rad_info.num_render_backends;
    const unsigned pipe_count = MAX2(rb_count, pipeline->device->physical_device->rad_info.num_sdp_interfaces);

    const unsigned db_tag_part = (db_tag_count * rb_count / pipe_count) * db_tag_size * pipe_count;
    const unsigned color_tag_part = (color_tag_count * rb_count / pipe_count) * color_tag_size * pipe_count;
    const unsigned fmask_tag_part = (fmask_tag_count * rb_count / pipe_count) * fmask_tag_size * pipe_count;

    const unsigned total_samples = 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline->graphics.ms.pa_sc_aa_config);
    const unsigned samples_log = util_logbase2_ceil(total_samples);

    unsigned color_bytes_per_pixel = 0;
    unsigned fmask_bytes_per_pixel = 0;

    const VkPipelineColorBlendStateCreateInfo *vkblend =
        radv_pipeline_get_color_blend_state(pCreateInfo);

    for (unsigned i = 0; i < subpass->color_count; i++) {
        if (!vkblend->pAttachments[i].colorWriteMask)
            continue;

        if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
            continue;

        VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
        color_bytes_per_pixel += vk_format_get_blocksize(format);

        if (total_samples > 1) {
            assert(samples_log <= 3);
            const unsigned fmask_array[] = {0, 1, 1, 4};
            fmask_bytes_per_pixel += fmask_array[samples_log];
        }
    }

    color_bytes_per_pixel *= total_samples;

    color_bytes_per_pixel = MAX2(color_bytes_per_pixel, 1);

    const unsigned color_pixel_count_log = util_logbase2(color_tag_part / color_bytes_per_pixel);
    extent.width = 1ull << ((color_pixel_count_log + 1) / 2);
    extent.height = 1ull << (color_pixel_count_log / 2);

    if (fmask_bytes_per_pixel) {
        const unsigned fmask_pixel_count_log = util_logbase2(fmask_tag_part / fmask_bytes_per_pixel);

        const VkExtent2D fmask_extent = (VkExtent2D){
            .width = 1ull << ((fmask_pixel_count_log + 1) / 2),
            .height = 1ull << (color_pixel_count_log / 2)
        };

        if (fmask_extent.width * fmask_extent.height < extent.width * extent.height)
            extent = fmask_extent;
    }

    if (subpass->depth_stencil_attachment) {
        struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

        /* Coefficients taken from AMDVLK */
        unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
        unsigned stencil_coeff = vk_format_is_stencil(attachment->format) ? 1 : 0;
        unsigned db_bytes_per_pixel = (depth_coeff + stencil_coeff) * total_samples;

        const unsigned db_pixel_count_log = util_logbase2(db_tag_part / db_bytes_per_pixel);

        const VkExtent2D db_extent = (VkExtent2D){
            .width = 1ull << ((db_pixel_count_log + 1) / 2),
            .height = 1ull << (color_pixel_count_log / 2)
        };

        if (db_extent.width * db_extent.height < extent.width * extent.height)
            extent = db_extent;
    }

    extent.width = MAX2(extent.width, 128);
    extent.height = MAX2(extent.width, 64);

    return extent;
}
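/* Program PA_SC_BINNER_CNTL_0 and DB_DFSM_CONTROL for the case where binning
 * is disabled; GFX10 still needs the new scan converter configured with
 * explicit bin extents.
 */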
static void
radv_pipeline_init_disabled_binning_state(struct radv_pipeline *pipeline,
                                          const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
    uint32_t pa_sc_binner_cntl_0 =
        S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
        S_028C44_DISABLE_START_OF_PRIM(1);
    uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);

    if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
        RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
        struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
        const VkPipelineColorBlendStateCreateInfo *vkblend =
            radv_pipeline_get_color_blend_state(pCreateInfo);
        unsigned min_bytes_per_pixel = 0;

        for (unsigned i = 0; i < subpass->color_count; i++) {
            if (!vkblend->pAttachments[i].colorWriteMask)
                continue;

            if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
                continue;

            VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
            unsigned bytes = vk_format_get_blocksize(format);
            if (!min_bytes_per_pixel || bytes < min_bytes_per_pixel)
                min_bytes_per_pixel = bytes;
        }

        pa_sc_binner_cntl_0 =
            S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_NEW_SC) |
            S_028C44_BIN_SIZE_X(0) |
            S_028C44_BIN_SIZE_Y(0) |
            S_028C44_BIN_SIZE_X_EXTEND(2) | /* 128 */
            S_028C44_BIN_SIZE_Y_EXTEND(min_bytes_per_pixel <= 4 ? 2 : 1) | /* 128 or 64 */
            S_028C44_DISABLE_START_OF_PRIM(1);
    }

    pipeline->graphics.binning.pa_sc_binner_cntl_0 = pa_sc_binner_cntl_0;
    pipeline->graphics.binning.db_dfsm_control = db_dfsm_control;
}
struct radv_binning_settings
radv_get_binning_settings(const struct radv_physical_device *pdev)
{
    struct radv_binning_settings settings;
    if (pdev->rad_info.has_dedicated_vram) {
        if (pdev->rad_info.num_render_backends > 4) {
            settings.context_states_per_bin = 1;
            settings.persistent_states_per_bin = 1;
        } else {
            settings.context_states_per_bin = 3;
            settings.persistent_states_per_bin = 8;
        }
        settings.fpovs_per_batch = 63;
    } else {
        /* The context states are affected by the scissor bug. */
        settings.context_states_per_bin = 6;
        /* 32 causes hangs for RAVEN. */
        settings.persistent_states_per_bin = 16;
        settings.fpovs_per_batch = 63;
    }

    if (pdev->rad_info.has_gfx9_scissor_bug)
        settings.context_states_per_bin = 1;

    return settings;
}
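/* Enable primitive binning when the device allows it and a non-zero bin size
 * was computed for this chip; otherwise fall back to the disabled-binning
 * state above.
 */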
static void
radv_pipeline_init_binning_state(struct radv_pipeline *pipeline,
                                 const VkGraphicsPipelineCreateInfo *pCreateInfo,
                                 const struct radv_blend_state *blend)
{
    if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
        return;

    VkExtent2D bin_size;
    if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
        bin_size = radv_gfx10_compute_bin_size(pipeline, pCreateInfo);
    } else if (pipeline->device->physical_device->rad_info.chip_class == GFX9) {
        bin_size = radv_gfx9_compute_bin_size(pipeline, pCreateInfo);
    } else
        unreachable("Unhandled generation for binning bin size calculation");

    if (pipeline->device->pbb_allowed && bin_size.width && bin_size.height) {
        struct radv_binning_settings settings =
            radv_get_binning_settings(pipeline->device->physical_device);

        bool disable_start_of_prim = true;
        uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);

        const struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];

        if (pipeline->device->dfsm_allowed && ps &&
            !ps->info.ps.can_discard &&
            !ps->info.ps.writes_memory &&
            blend->cb_target_enabled_4bit) {
            db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_AUTO);
            disable_start_of_prim = (blend->blend_enable_4bit & blend->cb_target_enabled_4bit) != 0;
        }

        const uint32_t pa_sc_binner_cntl_0 =
            S_028C44_BINNING_MODE(V_028C44_BINNING_ALLOWED) |
            S_028C44_BIN_SIZE_X(bin_size.width == 16) |
            S_028C44_BIN_SIZE_Y(bin_size.height == 16) |
            S_028C44_BIN_SIZE_X_EXTEND(util_logbase2(MAX2(bin_size.width, 32)) - 5) |
            S_028C44_BIN_SIZE_Y_EXTEND(util_logbase2(MAX2(bin_size.height, 32)) - 5) |
            S_028C44_CONTEXT_STATES_PER_BIN(settings.context_states_per_bin - 1) |
            S_028C44_PERSISTENT_STATES_PER_BIN(settings.persistent_states_per_bin - 1) |
            S_028C44_DISABLE_START_OF_PRIM(disable_start_of_prim) |
            S_028C44_FPOVS_PER_BATCH(settings.fpovs_per_batch) |
            S_028C44_OPTIMAL_BIN_SELECTION(1);

        pipeline->graphics.binning.pa_sc_binner_cntl_0 = pa_sc_binner_cntl_0;
        pipeline->graphics.binning.db_dfsm_control = db_dfsm_control;
    } else
        radv_pipeline_init_disabled_binning_state(pipeline, pCreateInfo);
}
static void
radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *ctx_cs,
                                           const struct radv_pipeline *pipeline,
                                           const VkGraphicsPipelineCreateInfo *pCreateInfo,
                                           const struct radv_graphics_pipeline_create_info *extra)
{
    const VkPipelineDepthStencilStateCreateInfo *vkds = radv_pipeline_get_depth_stencil_state(pCreateInfo);
    RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
    struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
    struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
    struct radv_render_pass_attachment *attachment = NULL;
    uint32_t db_render_control = 0, db_render_override2 = 0;
    uint32_t db_render_override = 0;

    if (subpass->depth_stencil_attachment)
        attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

    bool has_depth_attachment = attachment && vk_format_is_depth(attachment->format);

    if (vkds && has_depth_attachment) {
        /* from amdvlk: For 4xAA and 8xAA need to decompress on flush for better performance */
        db_render_override2 |= S_028010_DECOMPRESS_Z_ON_FLUSH(attachment->samples > 2);

        if (pipeline->device->physical_device->rad_info.chip_class >= GFX10_3)
            db_render_override2 |= S_028010_CENTROID_COMPUTATION_MODE(2);
    }

    if (attachment && extra) {
        db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(extra->db_depth_clear);
        db_render_control |= S_028000_STENCIL_CLEAR_ENABLE(extra->db_stencil_clear);

        db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->resummarize_enable);
        db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->depth_compress_disable);
        db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->stencil_compress_disable);
        db_render_override2 |= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra->db_depth_disable_expclear);
        db_render_override2 |= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra->db_stencil_disable_expclear);
    }

    db_render_override |= S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
                          S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE);

    if (!pCreateInfo->pRasterizationState->depthClampEnable &&
        ps->info.ps.writes_z) {
        /* From VK_EXT_depth_range_unrestricted spec:
         *
         * "The behavior described in Primitive Clipping still applies.
         *  If depth clamping is disabled the depth values are still
         *  clipped to 0 ≤ zc ≤ wc before the viewport transform. If
         *  depth clamping is enabled the above equation is ignored and
         *  the depth values are instead clamped to the VkViewport
         *  minDepth and maxDepth values, which in the case of this
         *  extension can be outside of the 0.0 to 1.0 range."
         */
        db_render_override |= S_02800C_DISABLE_VIEWPORT_CLAMP(1);
    }

    radeon_set_context_reg(ctx_cs, R_028000_DB_RENDER_CONTROL, db_render_control);
    radeon_set_context_reg(ctx_cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
    radeon_set_context_reg(ctx_cs, R_028010_DB_RENDER_OVERRIDE2, db_render_override2);
}
static void
radv_pipeline_generate_blend_state(struct radeon_cmdbuf *ctx_cs,
                                   const struct radv_pipeline *pipeline,
                                   const struct radv_blend_state *blend)
{
    radeon_set_context_reg_seq(ctx_cs, R_028780_CB_BLEND0_CONTROL, 8);
    radeon_emit_array(ctx_cs, blend->cb_blend_control, 8);

    radeon_set_context_reg(ctx_cs, R_028808_CB_COLOR_CONTROL, blend->cb_color_control);
    radeon_set_context_reg(ctx_cs, R_028B70_DB_ALPHA_TO_MASK, blend->db_alpha_to_mask);

    if (pipeline->device->physical_device->rad_info.has_rbplus) {
        radeon_set_context_reg_seq(ctx_cs, R_028760_SX_MRT0_BLEND_OPT, 8);
        radeon_emit_array(ctx_cs, blend->sx_mrt_blend_opt, 8);
    }

    radeon_set_context_reg(ctx_cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

    radeon_set_context_reg(ctx_cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
    radeon_set_context_reg(ctx_cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);
}
static void
radv_pipeline_generate_raster_state(struct radeon_cmdbuf *ctx_cs,
                                    const struct radv_pipeline *pipeline,
                                    const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
    const VkPipelineRasterizationStateCreateInfo *vkraster = pCreateInfo->pRasterizationState;
    const VkConservativeRasterizationModeEXT mode =
        radv_get_conservative_raster_mode(vkraster);
    uint32_t pa_sc_conservative_rast = S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1);
    bool depth_clip_disable = vkraster->depthClampEnable;

    const VkPipelineRasterizationDepthClipStateCreateInfoEXT *depth_clip_state =
        vk_find_struct_const(vkraster->pNext, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
    if (depth_clip_state) {
        depth_clip_disable = !depth_clip_state->depthClipEnable;
    }

    radeon_set_context_reg(ctx_cs, R_028810_PA_CL_CLIP_CNTL,
                           S_028810_DX_CLIP_SPACE_DEF(1) | // vulkan uses DX conventions.
                           S_028810_ZCLIP_NEAR_DISABLE(depth_clip_disable ? 1 : 0) |
                           S_028810_ZCLIP_FAR_DISABLE(depth_clip_disable ? 1 : 0) |
                           S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
                           S_028810_DX_LINEAR_ATTR_CLIP_ENA(1));

    radeon_set_context_reg(ctx_cs, R_028BDC_PA_SC_LINE_CNTL,
                           S_028BDC_DX10_DIAMOND_TEST_ENA(1));

    /* Conservative rasterization. */
    if (mode != VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT) {
        pa_sc_conservative_rast = S_028C4C_PREZ_AA_MASK_ENABLE(1) |
                                  S_028C4C_POSTZ_AA_MASK_ENABLE(1) |
                                  S_028C4C_CENTROID_SAMPLE_OVERRIDE(1);

        if (mode == VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT) {
            pa_sc_conservative_rast |=
                S_028C4C_OVER_RAST_ENABLE(1) |
                S_028C4C_OVER_RAST_SAMPLE_SELECT(0) |
                S_028C4C_UNDER_RAST_ENABLE(0) |
                S_028C4C_UNDER_RAST_SAMPLE_SELECT(1) |
                S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(1);
        } else {
            assert(mode == VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT);
            pa_sc_conservative_rast |=
                S_028C4C_OVER_RAST_ENABLE(0) |
                S_028C4C_OVER_RAST_SAMPLE_SELECT(1) |
                S_028C4C_UNDER_RAST_ENABLE(1) |
                S_028C4C_UNDER_RAST_SAMPLE_SELECT(0) |
                S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(0);
        }
    }

    radeon_set_context_reg(ctx_cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
                           pa_sc_conservative_rast);
}
static void
radv_pipeline_generate_multisample_state(struct radeon_cmdbuf *ctx_cs,
                                         const struct radv_pipeline *pipeline)
{
    const struct radv_multisample_state *ms = &pipeline->graphics.ms;

    radeon_set_context_reg_seq(ctx_cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
    radeon_emit(ctx_cs, ms->pa_sc_aa_mask[0]);
    radeon_emit(ctx_cs, ms->pa_sc_aa_mask[1]);

    radeon_set_context_reg(ctx_cs, R_028804_DB_EQAA, ms->db_eqaa);
    radeon_set_context_reg(ctx_cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
    radeon_set_context_reg(ctx_cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
    radeon_set_context_reg(ctx_cs, R_028BE0_PA_SC_AA_CONFIG, ms->pa_sc_aa_config);

    /* The exclusion bits can be set to improve rasterization efficiency
     * if no sample lies on the pixel boundary (-8 sample offset). It's
     * currently always TRUE because the driver doesn't support 16 samples.
     */
    bool exclusion = pipeline->device->physical_device->rad_info.chip_class >= GFX7;
    radeon_set_context_reg(ctx_cs, R_02882C_PA_SU_PRIM_FILTER_CNTL,
                           S_02882C_XMAX_RIGHT_EXCLUSION(exclusion) |
                           S_02882C_YMAX_BOTTOM_EXCLUSION(exclusion));

    /* GFX9: Flush DFSM when the AA mode changes. */
    if (pipeline->device->dfsm_allowed) {
        radeon_emit(ctx_cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(ctx_cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
    }
}
static void
radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
                                   const struct radv_pipeline *pipeline)
{
    const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
    const struct radv_shader_variant *vs =
        pipeline->shaders[MESA_SHADER_TESS_EVAL] ?
        pipeline->shaders[MESA_SHADER_TESS_EVAL] :
        pipeline->shaders[MESA_SHADER_VERTEX];
    unsigned vgt_primitiveid_en = 0;
    uint32_t vgt_gs_mode = 0;

    if (radv_pipeline_has_ngg(pipeline))
        return;

    if (radv_pipeline_has_gs(pipeline)) {
        const struct radv_shader_variant *gs =
            pipeline->shaders[MESA_SHADER_GEOMETRY];

        vgt_gs_mode = ac_vgt_gs_mode(gs->info.gs.vertices_out,
                                     pipeline->device->physical_device->rad_info.chip_class);
    } else if (outinfo->export_prim_id || vs->info.uses_prim_id) {
        vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
        vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
    }

    radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
    radeon_set_context_reg(ctx_cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
}
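/* Emit the register state for a shader running on the hardware VS stage:
 * program address, resource words, parameter/position export formats and the
 * clip/cull distance configuration.
 */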
static void
radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *ctx_cs,
                             struct radeon_cmdbuf *cs,
                             const struct radv_pipeline *pipeline,
                             const struct radv_shader_variant *shader)
{
    uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

    radeon_set_sh_reg_seq(cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
    radeon_emit(cs, va >> 8);
    radeon_emit(cs, S_00B124_MEM_BASE(va >> 40));
    radeon_emit(cs, shader->config.rsrc1);
    radeon_emit(cs, shader->config.rsrc2);

    const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
    unsigned clip_dist_mask, cull_dist_mask, total_mask;
    clip_dist_mask = outinfo->clip_dist_mask;
    cull_dist_mask = outinfo->cull_dist_mask;
    total_mask = clip_dist_mask | cull_dist_mask;
    bool misc_vec_ena = outinfo->writes_pointsize ||
                        outinfo->writes_layer ||
                        outinfo->writes_viewport_index;
    unsigned spi_vs_out_config, nparams;

    /* VS is required to export at least one param. */
    nparams = MAX2(outinfo->param_exports, 1);
    spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);

    if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
        spi_vs_out_config |= S_0286C4_NO_PC_EXPORT(outinfo->param_exports == 0);
    }

    radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG, spi_vs_out_config);

    radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
                           S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
                           S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
                                                       V_02870C_SPI_SHADER_4COMP :
                                                       V_02870C_SPI_SHADER_NONE) |
                           S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
                                                       V_02870C_SPI_SHADER_4COMP :
                                                       V_02870C_SPI_SHADER_NONE) |
                           S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
                                                       V_02870C_SPI_SHADER_4COMP :
                                                       V_02870C_SPI_SHADER_NONE));

    radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
                           S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
                           S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
                           S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
                           S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
                           S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
                           S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
                           S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
                           S_02881C_BYPASS_PRIM_RATE_COMBINER(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3) |
                           S_02881C_BYPASS_VTX_RATE_COMBINER(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3) |
                           cull_dist_mask << 8 |
                           clip_dist_mask);

    if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
        radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF,
                               outinfo->writes_viewport_index);
}
static void
radv_pipeline_generate_hw_es(struct radeon_cmdbuf *cs,
                             const struct radv_pipeline *pipeline,
                             const struct radv_shader_variant *shader)
{
    uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

    radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
    radeon_emit(cs, va >> 8);
    radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
    radeon_emit(cs, shader->config.rsrc1);
    radeon_emit(cs, shader->config.rsrc2);
}
static void
radv_pipeline_generate_hw_ls(struct radeon_cmdbuf *cs,
                             const struct radv_pipeline *pipeline,
                             const struct radv_shader_variant *shader)
{
    unsigned num_lds_blocks = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_lds_blocks;
    uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
    uint32_t rsrc2 = shader->config.rsrc2;

    radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
    radeon_emit(cs, va >> 8);
    radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));

    rsrc2 |= S_00B52C_LDS_SIZE(num_lds_blocks);
    if (pipeline->device->physical_device->rad_info.chip_class == GFX7 &&
        pipeline->device->physical_device->rad_info.family != CHIP_HAWAII)
        radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);

    radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
    radeon_emit(cs, shader->config.rsrc1);
    radeon_emit(cs, rsrc2);
}
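/* Emit the register state for an NGG (primitive shader) variant: program
 * address, export formats, primitive ID handling, and the GS_ONCHIP /
 * GE_NGG_SUBGRP / GE_CNTL subgroup configuration.
 */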
static void
radv_pipeline_generate_hw_ngg(struct radeon_cmdbuf *ctx_cs,
                              struct radeon_cmdbuf *cs,
                              const struct radv_pipeline *pipeline,
                              const struct radv_shader_variant *shader)
{
    uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
    gl_shader_stage es_type =
        radv_pipeline_has_tess(pipeline) ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
    struct radv_shader_variant *es =
        es_type == MESA_SHADER_TESS_EVAL ? pipeline->shaders[MESA_SHADER_TESS_EVAL] : pipeline->shaders[MESA_SHADER_VERTEX];
    const struct gfx10_ngg_info *ngg_state = &shader->info.ngg_info;

    radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
    radeon_emit(cs, va >> 8);
    radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
    radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
    radeon_emit(cs, shader->config.rsrc1);
    radeon_emit(cs, shader->config.rsrc2);

    const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
    unsigned clip_dist_mask, cull_dist_mask, total_mask;
    clip_dist_mask = outinfo->clip_dist_mask;
    cull_dist_mask = outinfo->cull_dist_mask;
    total_mask = clip_dist_mask | cull_dist_mask;
    bool misc_vec_ena = outinfo->writes_pointsize ||
                        outinfo->writes_layer ||
                        outinfo->writes_viewport_index;
    bool es_enable_prim_id = outinfo->export_prim_id ||
                             (es && es->info.uses_prim_id);
    bool break_wave_at_eoi = false;
    unsigned ge_cntl;
    unsigned nparams;

    if (es_type == MESA_SHADER_TESS_EVAL) {
        struct radv_shader_variant *gs =
            pipeline->shaders[MESA_SHADER_GEOMETRY];

        if (es_enable_prim_id || (gs && gs->info.uses_prim_id))
            break_wave_at_eoi = true;
    }

    nparams = MAX2(outinfo->param_exports, 1);
    radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
                           S_0286C4_VS_EXPORT_COUNT(nparams - 1) |
                           S_0286C4_NO_PC_EXPORT(outinfo->param_exports == 0));

    radeon_set_context_reg(ctx_cs, R_028708_SPI_SHADER_IDX_FORMAT,
                           S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP));
    radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
                           S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
                           S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
                                                       V_02870C_SPI_SHADER_4COMP :
                                                       V_02870C_SPI_SHADER_NONE) |
                           S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
                                                       V_02870C_SPI_SHADER_4COMP :
                                                       V_02870C_SPI_SHADER_NONE) |
                           S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
                                                       V_02870C_SPI_SHADER_4COMP :
                                                       V_02870C_SPI_SHADER_NONE));

    radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
                           S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
                           S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
                           S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
                           S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
                           S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
                           S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
                           S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
                           S_02881C_BYPASS_PRIM_RATE_COMBINER(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3) |
                           S_02881C_BYPASS_VTX_RATE_COMBINER(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3) |
                           cull_dist_mask << 8 |
                           clip_dist_mask);

    radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN,
                           S_028A84_PRIMITIVEID_EN(es_enable_prim_id) |
                           S_028A84_NGG_DISABLE_PROVOK_REUSE(outinfo->export_prim_id));

    radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
                           ngg_state->vgt_esgs_ring_itemsize);

    /* NGG specific registers. */
    struct radv_shader_variant *gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
    uint32_t gs_num_invocations = gs ? gs->info.gs.invocations : 1;

    radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
                           S_028A44_ES_VERTS_PER_SUBGRP(ngg_state->hw_max_esverts) |
                           S_028A44_GS_PRIMS_PER_SUBGRP(ngg_state->max_gsprims) |
                           S_028A44_GS_INST_PRIMS_IN_SUBGRP(ngg_state->max_gsprims * gs_num_invocations));
    radeon_set_context_reg(ctx_cs, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
                           S_0287FC_MAX_VERTS_PER_SUBGROUP(ngg_state->max_out_verts));
    radeon_set_context_reg(ctx_cs, R_028B4C_GE_NGG_SUBGRP_CNTL,
                           S_028B4C_PRIM_AMP_FACTOR(ngg_state->prim_amp_factor) |
                           S_028B4C_THDS_PER_SUBGRP(0)); /* for fast launch */
    radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
                           S_028B90_CNT(gs_num_invocations) |
                           S_028B90_ENABLE(gs_num_invocations > 1) |
                           S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(ngg_state->max_vert_out_per_gs_instance));

    /* User edge flags are set by the pos exports. If user edge flags are
     * not used, we must use hw-generated edge flags and pass them via
     * the prim export to prevent drawing lines on internal edges of
     * decomposed primitives (such as quads) with polygon mode = lines.
     *
     * TODO: We should combine hw-generated edge flags with user edge
     *       flags in the shader.
     */
    radeon_set_context_reg(ctx_cs, R_028838_PA_CL_NGG_CNTL,
                           S_028838_INDEX_BUF_EDGE_FLAG_ENA(!radv_pipeline_has_tess(pipeline) &&
                                                            !radv_pipeline_has_gs(pipeline)) |
                           /* Reuse for NGG. */
                           S_028838_VERTEX_REUSE_DEPTH(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3 ? 30 : 0));

    ge_cntl = S_03096C_PRIM_GRP_SIZE(ngg_state->max_gsprims) |
              S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
              S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);

    /* Bug workaround for a possible hang with non-tessellation cases.
     * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0
     *
     * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
     */
    if (pipeline->device->physical_device->rad_info.chip_class == GFX10 &&
        !radv_pipeline_has_tess(pipeline) &&
        ngg_state->hw_max_esverts != 256) {
        ge_cntl &= C_03096C_VERT_GRP_SIZE;

        if (ngg_state->hw_max_esverts > 5) {
            ge_cntl |= S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts - 5);
        }
    }

    radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL, ge_cntl);
}
static void
radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
                             const struct radv_pipeline *pipeline,
                             const struct radv_shader_variant *shader)
{
    uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

    if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
        if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
            radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
            radeon_emit(cs, va >> 8);
            radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));
        } else {
            radeon_set_sh_reg_seq(cs, R_00B410_SPI_SHADER_PGM_LO_LS, 2);
            radeon_emit(cs, va >> 8);
            radeon_emit(cs, S_00B414_MEM_BASE(va >> 40));
        }

        radeon_set_sh_reg_seq(cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
        radeon_emit(cs, shader->config.rsrc1);
        radeon_emit(cs, shader->config.rsrc2);
    } else {
        radeon_set_sh_reg_seq(cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
        radeon_emit(cs, va >> 8);
        radeon_emit(cs, S_00B424_MEM_BASE(va >> 40));
        radeon_emit(cs, shader->config.rsrc1);
        radeon_emit(cs, shader->config.rsrc2);
    }
}
static void
radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
                                     struct radeon_cmdbuf *cs,
                                     const struct radv_pipeline *pipeline)
{
    struct radv_shader_variant *vs;

    /* Skip shaders merged into HS/GS */
    vs = pipeline->shaders[MESA_SHADER_VERTEX];
    if (!vs)
        return;

    if (vs->info.vs.as_ls)
        radv_pipeline_generate_hw_ls(cs, pipeline, vs);
    else if (vs->info.vs.as_es)
        radv_pipeline_generate_hw_es(cs, pipeline, vs);
    else if (vs->info.is_ngg)
        radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, vs);
    else
        radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, vs);
}
static void
radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *ctx_cs,
                                    struct radeon_cmdbuf *cs,
                                    const struct radv_pipeline *pipeline)
{
    struct radv_shader_variant *tes, *tcs;

    tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
    tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];

    if (tes) {
        if (tes->info.is_ngg) {
            radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, tes);
        } else if (tes->info.tes.as_es)
            radv_pipeline_generate_hw_es(cs, pipeline, tes);
        else
            radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, tes);
    }

    radv_pipeline_generate_hw_hs(cs, pipeline, tcs);

    if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 &&
        !radv_pipeline_has_gs(pipeline) && !radv_pipeline_has_ngg(pipeline)) {
        radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
                               S_028A44_ES_VERTS_PER_SUBGRP(250) |
                               S_028A44_GS_PRIMS_PER_SUBGRP(126) |
                               S_028A44_GS_INST_PRIMS_IN_SUBGRP(126));
    }
}
static void
radv_pipeline_generate_tess_state(struct radeon_cmdbuf *ctx_cs,
                                  const struct radv_pipeline *pipeline,
                                  const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
    struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);
    unsigned type = 0, partitioning = 0, topology = 0, distribution_mode = 0;
    unsigned num_tcs_input_cp, num_tcs_output_cp, num_patches;
    unsigned ls_hs_config;

    num_tcs_input_cp = pCreateInfo->pTessellationState->patchControlPoints;
    num_tcs_output_cp = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.tcs_vertices_out; //TCS VERTICES OUT
    num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;

    ls_hs_config = S_028B58_NUM_PATCHES(num_patches) |
                   S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
                   S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);

    if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
        radeon_set_context_reg_idx(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
                                   2, ls_hs_config);
    } else {
        radeon_set_context_reg(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
                               ls_hs_config);
    }

    switch (tes->info.tes.primitive_mode) {
    case GL_TRIANGLES:
        type = V_028B6C_TESS_TRIANGLE;
        break;
    case GL_QUADS:
        type = V_028B6C_TESS_QUAD;
        break;
    case GL_ISOLINES:
        type = V_028B6C_TESS_ISOLINE;
        break;
    }

    switch (tes->info.tes.spacing) {
    case TESS_SPACING_EQUAL:
        partitioning = V_028B6C_PART_INTEGER;
        break;
    case TESS_SPACING_FRACTIONAL_ODD:
        partitioning = V_028B6C_PART_FRAC_ODD;
        break;
    case TESS_SPACING_FRACTIONAL_EVEN:
        partitioning = V_028B6C_PART_FRAC_EVEN;
        break;
    default:
        break;
    }

    bool ccw = tes->info.tes.ccw;
    const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
        vk_find_struct_const(pCreateInfo->pTessellationState,
                             PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);

    if (domain_origin_state && domain_origin_state->domainOrigin != VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
        ccw = !ccw;

    if (tes->info.tes.point_mode)
        topology = V_028B6C_OUTPUT_POINT;
    else if (tes->info.tes.primitive_mode == GL_ISOLINES)
        topology = V_028B6C_OUTPUT_LINE;
    else if (ccw)
        topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
    else
        topology = V_028B6C_OUTPUT_TRIANGLE_CW;

    if (pipeline->device->physical_device->rad_info.has_distributed_tess) {
        if (pipeline->device->physical_device->rad_info.family == CHIP_FIJI ||
            pipeline->device->physical_device->rad_info.family >= CHIP_POLARIS10)
            distribution_mode = V_028B6C_TRAPEZOIDS;
        else
            distribution_mode = V_028B6C_DONUTS;
    } else
        distribution_mode = V_028B6C_NO_DIST;

    radeon_set_context_reg(ctx_cs, R_028B6C_VGT_TF_PARAM,
                           S_028B6C_TYPE(type) |
                           S_028B6C_PARTITIONING(partitioning) |
                           S_028B6C_TOPOLOGY(topology) |
                           S_028B6C_DISTRIBUTION_MODE(distribution_mode));
}
static void
radv_pipeline_generate_hw_gs(struct radeon_cmdbuf *ctx_cs,
                             struct radeon_cmdbuf *cs,
                             const struct radv_pipeline *pipeline,
                             const struct radv_shader_variant *gs)
{
    const struct gfx9_gs_info *gs_state = &gs->info.gs_ring_info;
    unsigned gs_max_out_vertices;
    const uint8_t *num_components;
    uint8_t max_stream;
    unsigned offset;
    uint64_t va;

    gs_max_out_vertices = gs->info.gs.vertices_out;
    max_stream = gs->info.gs.max_stream;
    num_components = gs->info.gs.num_stream_output_components;

    offset = num_components[0] * gs_max_out_vertices;

    radeon_set_context_reg_seq(ctx_cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
    radeon_emit(ctx_cs, offset);
    if (max_stream >= 1)
        offset += num_components[1] * gs_max_out_vertices;
    radeon_emit(ctx_cs, offset);
    if (max_stream >= 2)
        offset += num_components[2] * gs_max_out_vertices;
    radeon_emit(ctx_cs, offset);
    if (max_stream >= 3)
        offset += num_components[3] * gs_max_out_vertices;
    radeon_set_context_reg(ctx_cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);

    radeon_set_context_reg_seq(ctx_cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
    radeon_emit(ctx_cs, num_components[0]);
    radeon_emit(ctx_cs, (max_stream >= 1) ? num_components[1] : 0);
    radeon_emit(ctx_cs, (max_stream >= 2) ? num_components[2] : 0);
    radeon_emit(ctx_cs, (max_stream >= 3) ? num_components[3] : 0);

    uint32_t gs_num_invocations = gs->info.gs.invocations;
    radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
                           S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
                           S_028B90_ENABLE(gs_num_invocations > 0));

    radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
                           gs_state->vgt_esgs_ring_itemsize);

    va = radv_buffer_get_va(gs->bo) + gs->bo_offset;

    if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
        if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
            radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
            radeon_emit(cs, va >> 8);
            radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
        } else {
            radeon_set_sh_reg_seq(cs, R_00B210_SPI_SHADER_PGM_LO_ES, 2);
            radeon_emit(cs, va >> 8);
            radeon_emit(cs, S_00B214_MEM_BASE(va >> 40));
        }

        radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
        radeon_emit(cs, gs->config.rsrc1);
        radeon_emit(cs, gs->config.rsrc2 | S_00B22C_LDS_SIZE(gs_state->lds_size));

        radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL, gs_state->vgt_gs_onchip_cntl);
        radeon_set_context_reg(ctx_cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, gs_state->vgt_gs_max_prims_per_subgroup);
    } else {
        radeon_set_sh_reg_seq(cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
        radeon_emit(cs, va >> 8);
        radeon_emit(cs, S_00B224_MEM_BASE(va >> 40));
        radeon_emit(cs, gs->config.rsrc1);
        radeon_emit(cs, gs->config.rsrc2);
    }

    radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, pipeline->gs_copy_shader);
}
static void
radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *ctx_cs,
                                       struct radeon_cmdbuf *cs,
                                       const struct radv_pipeline *pipeline)
{
    struct radv_shader_variant *gs;

    gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
    if (!gs)
        return;

    if (gs->info.is_ngg)
        radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, gs);
    else
        radv_pipeline_generate_hw_gs(ctx_cs, cs, pipeline, gs);

    radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT,
                           gs->info.gs.vertices_out);
}
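/* Translate a parameter export offset into an SPI_PS_INPUT_CNTL value,
 * handling flat/explicit interpolation, FP16 attributes and DEFAULT_VAL
 * constants.
 */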
static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade,
                                   bool explicit, bool float16)
{
    uint32_t ps_input_cntl;
    if (offset <= AC_EXP_PARAM_OFFSET_31) {
        ps_input_cntl = S_028644_OFFSET(offset);
        if (flat_shade || explicit)
            ps_input_cntl |= S_028644_FLAT_SHADE(1);
        if (explicit) {
            /* Force parameter cache to be read in passthrough
             * mode.
             */
            ps_input_cntl |= S_028644_OFFSET(1 << 5);
        }
        if (float16) {
            ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
                             S_028644_ATTR0_VALID(1);
        }
    } else {
        /* The input is a DEFAULT_VAL constant. */
        assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
               offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
        offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
        ps_input_cntl = S_028644_OFFSET(0x20) |
                        S_028644_DEFAULT_VAL(offset);
    }
    return ps_input_cntl;
}
static void
radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
                                 const struct radv_pipeline *pipeline)
{
    struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
    const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
    uint32_t ps_input_cntl[32];

    unsigned ps_offset = 0;

    if (ps->info.ps.prim_id_input) {
        unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID];
        if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
            ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false, false);
            ++ps_offset;
        }
    }

    if (ps->info.ps.layer_input ||
        ps->info.needs_multiview_view_index) {
        unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_LAYER];
        if (vs_offset != AC_EXP_PARAM_UNDEFINED)
            ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false, false);
        else
            ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true, false, false);
        ++ps_offset;
    }

    if (ps->info.ps.viewport_index_input) {
        unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VIEWPORT];
        if (vs_offset != AC_EXP_PARAM_UNDEFINED)
            ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false, false);
        else
            ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true, false, false);
        ++ps_offset;
    }

    if (ps->info.ps.has_pcoord) {
        unsigned val;
        val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
        ps_input_cntl[ps_offset] = val;
        ps_offset++;
    }

    if (ps->info.ps.num_input_clips_culls) {
        unsigned vs_offset;

        vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0];
        if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
            ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false, false);
            ++ps_offset;
        }

        vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1];
        if (vs_offset != AC_EXP_PARAM_UNDEFINED &&
            ps->info.ps.num_input_clips_culls > 4) {
            ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false, false);
            ++ps_offset;
        }
    }

    for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.ps.input_mask; ++i) {
        unsigned vs_offset;
        bool flat_shade;
        bool explicit;
        bool float16;
        if (!(ps->info.ps.input_mask & (1u << i)))
            continue;

        vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VAR0 + i];
        if (vs_offset == AC_EXP_PARAM_UNDEFINED) {
            ps_input_cntl[ps_offset] = S_028644_OFFSET(0x20);
            ++ps_offset;
            continue;
        }

        flat_shade = !!(ps->info.ps.flat_shaded_mask & (1u << ps_offset));
        explicit = !!(ps->info.ps.explicit_shaded_mask & (1u << ps_offset));
        float16 = !!(ps->info.ps.float16_shaded_mask & (1u << ps_offset));

        ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, flat_shade, explicit, float16);
        ++ps_offset;
    }

    if (ps_offset) {
        radeon_set_context_reg_seq(ctx_cs, R_028644_SPI_PS_INPUT_CNTL_0, ps_offset);
        for (unsigned i = 0; i < ps_offset; i++) {
            radeon_emit(ctx_cs, ps_input_cntl[i]);
        }
    }
}
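/* Build DB_SHADER_CONTROL from the fragment shader info: Z/stencil/sample-mask
 * exports, conservative Z export, Z order, and the RB+ interaction.
 */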
static uint32_t
radv_compute_db_shader_control(const struct radv_device *device,
                               const struct radv_pipeline *pipeline,
                               const struct radv_shader_variant *ps)
{
    unsigned conservative_z_export = V_02880C_EXPORT_ANY_Z;
    unsigned z_order;
    if (ps->info.ps.early_fragment_test || !ps->info.ps.writes_memory)
        z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
    else
        z_order = V_02880C_LATE_Z;

    if (ps->info.ps.depth_layout == FRAG_DEPTH_LAYOUT_GREATER)
        conservative_z_export = V_02880C_EXPORT_GREATER_THAN_Z;
    else if (ps->info.ps.depth_layout == FRAG_DEPTH_LAYOUT_LESS)
        conservative_z_export = V_02880C_EXPORT_LESS_THAN_Z;

    bool disable_rbplus = device->physical_device->rad_info.has_rbplus &&
                          !device->physical_device->rad_info.rbplus_allowed;

    /* It shouldn't be needed to export gl_SampleMask when MSAA is disabled
     * but this appears to break Project Cars (DXVK). See
     * https://bugs.freedesktop.org/show_bug.cgi?id=109401
     */
    bool mask_export_enable = ps->info.ps.writes_sample_mask;

    return  S_02880C_Z_EXPORT_ENABLE(ps->info.ps.writes_z) |
            S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.ps.writes_stencil) |
            S_02880C_KILL_ENABLE(!!ps->info.ps.can_discard) |
            S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
            S_02880C_CONSERVATIVE_Z_EXPORT(conservative_z_export) |
            S_02880C_Z_ORDER(z_order) |
            S_02880C_DEPTH_BEFORE_SHADER(ps->info.ps.early_fragment_test) |
            S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(ps->info.ps.post_depth_coverage) |
            S_02880C_EXEC_ON_HIER_FAIL(ps->info.ps.writes_memory) |
            S_02880C_EXEC_ON_NOOP(ps->info.ps.writes_memory) |
            S_02880C_DUAL_QUAD_DISABLE(disable_rbplus);
}
static void
radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *ctx_cs,
                                       struct radeon_cmdbuf *cs,
                                       struct radv_pipeline *pipeline)
{
    struct radv_shader_variant *ps;
    uint64_t va;
    assert(pipeline->shaders[MESA_SHADER_FRAGMENT]);

    ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
    va = radv_buffer_get_va(ps->bo) + ps->bo_offset;

    radeon_set_sh_reg_seq(cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
    radeon_emit(cs, va >> 8);
    radeon_emit(cs, S_00B024_MEM_BASE(va >> 40));
    radeon_emit(cs, ps->config.rsrc1);
    radeon_emit(cs, ps->config.rsrc2);

    radeon_set_context_reg(ctx_cs, R_02880C_DB_SHADER_CONTROL,
                           radv_compute_db_shader_control(pipeline->device,
                                                          pipeline, ps));

    radeon_set_context_reg(ctx_cs, R_0286CC_SPI_PS_INPUT_ENA,
                           ps->config.spi_ps_input_ena);

    radeon_set_context_reg(ctx_cs, R_0286D0_SPI_PS_INPUT_ADDR,
                           ps->config.spi_ps_input_addr);

    radeon_set_context_reg(ctx_cs, R_0286D8_SPI_PS_IN_CONTROL,
                           S_0286D8_NUM_INTERP(ps->info.ps.num_interp) |
                           S_0286D8_PS_W32_EN(ps->info.wave_size == 32));

    radeon_set_context_reg(ctx_cs, R_0286E0_SPI_BARYC_CNTL, pipeline->graphics.spi_baryc_cntl);

    radeon_set_context_reg(ctx_cs, R_028710_SPI_SHADER_Z_FORMAT,
                           ac_get_spi_shader_z_format(ps->info.ps.writes_z,
                                                      ps->info.ps.writes_stencil,
                                                      ps->info.ps.writes_sample_mask));

    if (pipeline->device->dfsm_allowed) {
        /* optimise this? */
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
    }
}
static void
radv_pipeline_generate_vgt_vertex_reuse(struct radeon_cmdbuf *ctx_cs,
                                        const struct radv_pipeline *pipeline)
{
    if (pipeline->device->physical_device->rad_info.family < CHIP_POLARIS10 ||
        pipeline->device->physical_device->rad_info.chip_class >= GFX10)
        return;

    unsigned vtx_reuse_depth = 30;
    if (radv_pipeline_has_tess(pipeline) &&
        radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD) {
        vtx_reuse_depth = 14;
    }
    radeon_set_context_reg(ctx_cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
                           S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
}
static void
radv_pipeline_generate_vgt_shader_config(struct radeon_cmdbuf *ctx_cs,
                                         const struct radv_pipeline *pipeline)
{
    uint32_t stages = 0;
    if (radv_pipeline_has_tess(pipeline)) {
        stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
                  S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);

        if (radv_pipeline_has_gs(pipeline))
            stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
                      S_028B54_GS_EN(1);
        else if (radv_pipeline_has_ngg(pipeline))
            stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
        else
            stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
    } else if (radv_pipeline_has_gs(pipeline)) {
        stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
                  S_028B54_GS_EN(1);
    } else if (radv_pipeline_has_ngg(pipeline)) {
        stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
    }

    if (radv_pipeline_has_ngg(pipeline)) {
        stages |= S_028B54_PRIMGEN_EN(1);
        if (pipeline->streamout_shader)
            stages |= S_028B54_NGG_WAVE_ID_EN(1);
        if (radv_pipeline_has_ngg_passthrough(pipeline))
            stages |= S_028B54_PRIMGEN_PASSTHRU_EN(1);
    } else if (radv_pipeline_has_gs(pipeline)) {
        stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
    }

    if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
        stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);

    if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
        uint8_t hs_size = 64, gs_size = 64, vs_size = 64;

        if (radv_pipeline_has_tess(pipeline))
            hs_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.wave_size;

        if (pipeline->shaders[MESA_SHADER_GEOMETRY]) {
            vs_size = gs_size = pipeline->shaders[MESA_SHADER_GEOMETRY]->info.wave_size;
            if (pipeline->gs_copy_shader)
                vs_size = pipeline->gs_copy_shader->info.wave_size;
        } else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
            vs_size = pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.wave_size;
        else if (pipeline->shaders[MESA_SHADER_VERTEX])
            vs_size = pipeline->shaders[MESA_SHADER_VERTEX]->info.wave_size;

        if (radv_pipeline_has_ngg(pipeline))
            gs_size = vs_size;

        /* legacy GS only supports Wave64 */
        stages |= S_028B54_HS_W32_EN(hs_size == 32 ? 1 : 0) |
                  S_028B54_GS_W32_EN(gs_size == 32 ? 1 : 0) |
                  S_028B54_VS_W32_EN(vs_size == 32 ? 1 : 0);
    }

    radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, stages);
}
static void
radv_pipeline_generate_cliprect_rule(struct radeon_cmdbuf *ctx_cs,
                                     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
    const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
        vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
    uint32_t cliprect_rule = 0;

    if (!discard_rectangle_info) {
        cliprect_rule = 0xffff;
    } else {
        for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
            /* Interpret i as a bitmask, and then set the bit in
             * the mask if that combination of rectangles in which
             * the pixel is contained should pass the cliprect
             * test.
             */
            unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);

            if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
                !relevant_subset)
                continue;

            if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
                relevant_subset)
                continue;

            cliprect_rule |= 1u << i;
        }
    }

    radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, cliprect_rule);
}
static void
gfx10_pipeline_generate_ge_cntl(struct radeon_cmdbuf *ctx_cs,
                                struct radv_pipeline *pipeline)
{
    bool break_wave_at_eoi = false;
    unsigned primgroup_size;
    unsigned vertgroup_size = 256; /* 256 = disable vertex grouping */

    if (radv_pipeline_has_tess(pipeline)) {
        primgroup_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
    } else if (radv_pipeline_has_gs(pipeline)) {
        const struct gfx9_gs_info *gs_state =
            &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs_ring_info;
        unsigned vgt_gs_onchip_cntl = gs_state->vgt_gs_onchip_cntl;
        primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
    } else {
        primgroup_size = 128; /* recommended without a GS and tess */
    }

    if (radv_pipeline_has_tess(pipeline)) {
        if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.uses_prim_id ||
            radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.uses_prim_id)
            break_wave_at_eoi = true;
    }

    radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL,
                           S_03096C_PRIM_GRP_SIZE(primgroup_size) |
                           S_03096C_VERT_GRP_SIZE(vertgroup_size) |
                           S_03096C_PACKET_TO_ONE_PA(0) /* line stipple */ |
                           S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi));
}
static void
radv_pipeline_generate_vgt_gs_out(struct radeon_cmdbuf *ctx_cs,
                                  const struct radv_pipeline *pipeline,
                                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                                  const struct radv_graphics_pipeline_create_info *extra)
{
    uint32_t gs_out;

    if (radv_pipeline_has_gs(pipeline)) {
        gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
    } else if (radv_pipeline_has_tess(pipeline)) {
        if (pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.point_mode) {
            gs_out = V_028A6C_POINTLIST;
        } else {
            gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.primitive_mode);
        }
    } else {
        gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
    }

    if (extra && extra->use_rectlist) {
        gs_out = V_028A6C_TRISTRIP;
        if (radv_pipeline_has_ngg(pipeline))
            gs_out = V_028A6C_RECTLIST;
    }

    radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);
}
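/* Build the pipeline's PM4 command streams: ctx_cs holds context registers
 * (hashed afterwards for redundancy elimination at bind time), cs holds the
 * SH registers.
 */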
static void
radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
                           const VkGraphicsPipelineCreateInfo *pCreateInfo,
                           const struct radv_graphics_pipeline_create_info *extra,
                           const struct radv_blend_state *blend)
{
    struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
    struct radeon_cmdbuf *cs = &pipeline->cs;

    /* The value 64 for cs->max_dw is an assumption where the original
     * initializer was lost in this copy; only ctx_cs->max_dw survived. */
    cs->max_dw = 64;
    ctx_cs->max_dw = 256;
    cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw));
    ctx_cs->buf = cs->buf + cs->max_dw;

    radv_pipeline_generate_depth_stencil_state(ctx_cs, pipeline, pCreateInfo, extra);
    radv_pipeline_generate_blend_state(ctx_cs, pipeline, blend);
    radv_pipeline_generate_raster_state(ctx_cs, pipeline, pCreateInfo);
    radv_pipeline_generate_multisample_state(ctx_cs, pipeline);
    radv_pipeline_generate_vgt_gs_mode(ctx_cs, pipeline);
    radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline);

    if (radv_pipeline_has_tess(pipeline)) {
        radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline);
        radv_pipeline_generate_tess_state(ctx_cs, pipeline, pCreateInfo);
    }

    radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline);
    radv_pipeline_generate_fragment_shader(ctx_cs, cs, pipeline);
    radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
    radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
    radv_pipeline_generate_vgt_shader_config(ctx_cs, pipeline);
    radv_pipeline_generate_cliprect_rule(ctx_cs, pCreateInfo);
    radv_pipeline_generate_vgt_gs_out(ctx_cs, pipeline, pCreateInfo, extra);

    if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 && !radv_pipeline_has_ngg(pipeline))
        gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline);

    pipeline->ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);

    assert(ctx_cs->cdw <= ctx_cs->max_dw);
    assert(cs->cdw <= cs->max_dw);
}
static void
radv_pipeline_init_vertex_input_state(struct radv_pipeline *pipeline,
                                      const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
    const VkPipelineVertexInputStateCreateInfo *vi_info =
        pCreateInfo->pVertexInputState;

    for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
        const VkVertexInputBindingDescription *desc =
            &vi_info->pVertexBindingDescriptions[i];

        pipeline->binding_stride[desc->binding] = desc->stride;
        pipeline->num_vertex_bindings =
            MAX2(pipeline->num_vertex_bindings, desc->binding + 1);
    }
}

static struct radv_shader_variant *
radv_pipeline_get_streamout_shader(struct radv_pipeline *pipeline)
{
	int i;

	for (i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
		struct radv_shader_variant *shader =
			radv_get_shader(pipeline, i);

		if (shader && shader->info.so.num_outputs > 0)
			return shader;
	}

	return NULL;
}

static void
radv_pipeline_init_shader_stages_state(struct radv_pipeline *pipeline)
{
	struct radv_device *device = pipeline->device;

	for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
		pipeline->user_data_0[i] =
			radv_pipeline_stage_to_user_data_0(pipeline, i,
							   device->physical_device->rad_info.chip_class);

		if (pipeline->shaders[i]) {
			pipeline->need_indirect_descriptor_sets |= pipeline->shaders[i]->info.need_indirect_descriptor_sets;
		}
	}

	struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX,
							       AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	if (loc->sgpr_idx != -1) {
		pipeline->graphics.vtx_base_sgpr = pipeline->user_data_0[MESA_SHADER_VERTEX];
		pipeline->graphics.vtx_base_sgpr += loc->sgpr_idx * 4;
		if (radv_get_shader(pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id)
			pipeline->graphics.vtx_emit_num = 3;
		else
			pipeline->graphics.vtx_emit_num = 2;
	}
}
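
/* Top-level initialization of a graphics pipeline: compile the shaders,
 * derive the fixed-function state from the create info, then bake the
 * register state into PM4 via radv_pipeline_generate_pm4().
 */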
static VkResult
radv_pipeline_init(struct radv_pipeline *pipeline,
		   struct radv_device *device,
		   struct radv_pipeline_cache *cache,
		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   const struct radv_graphics_pipeline_create_info *extra)
{
	VkResult result;

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	struct radv_blend_state blend = radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;

	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
		pStages[stage] = &pCreateInfo->pStages[i];
		if (creation_feedback)
			stage_feedbacks[stage] = &creation_feedback->pPipelineStageCreationFeedbacks[i];
	}

	struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend);

	result = radv_create_shaders(pipeline, device, cache, &key, pStages,
				     pCreateInfo->flags, pipeline_feedback,
				     stage_feedbacks);
	if (result != VK_SUCCESS)
		return result;

	pipeline->graphics.spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	radv_pipeline_init_multisample_state(pipeline, &blend, pCreateInfo);
	radv_pipeline_init_input_assembly_state(pipeline, pCreateInfo, extra);
	radv_pipeline_init_dynamic_state(pipeline, pCreateInfo, extra);
	radv_pipeline_init_raster_state(pipeline, pCreateInfo);
	radv_pipeline_init_depth_stencil_state(pipeline, pCreateInfo);

	/* Ensure that some export memory is always allocated, for two reasons:
	 *
	 * 1) Correctness: The hardware ignores the EXEC mask if no export
	 *    memory is allocated, so KILL and alpha test do not work correctly
	 *    without this.
	 *
	 * 2) Performance: Every shader needs at least a NULL export, even when
	 *    it writes no color/depth output. The NULL export instruction
	 *    stalls without this setting.
	 *
	 * Don't add this to CB_SHADER_MASK.
	 *
	 * GFX10 supports pixel shaders without exports by setting both the
	 * color and Z formats to SPI_SHADER_ZERO. The hw will skip export
	 * instructions if any are present.
	 */
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	if ((pipeline->device->physical_device->rad_info.chip_class <= GFX9 ||
	     ps->info.ps.can_discard) &&
	    !blend.spi_shader_col_format) {
		if (!ps->info.ps.writes_z &&
		    !ps->info.ps.writes_stencil &&
		    !ps->info.ps.writes_sample_mask)
			blend.spi_shader_col_format = V_028714_SPI_SHADER_32_R;
	}
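
	/* For instance, a depth-only pipeline whose fragment shader can
	 * discard ends up exporting a single 32-bit red channel
	 * (SPI_SHADER_32_R) purely so that export memory exists; the export
	 * is never consumed by a color buffer because it is deliberately left
	 * out of CB_SHADER_MASK, as explained above.
	 */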

	blend.cb_shader_mask = ps->info.ps.cb_shader_mask;

	if (extra &&
	    (extra->custom_blend_mode == V_028808_CB_ELIMINATE_FAST_CLEAR ||
	     extra->custom_blend_mode == V_028808_CB_FMASK_DECOMPRESS ||
	     extra->custom_blend_mode == V_028808_CB_DCC_DECOMPRESS ||
	     extra->custom_blend_mode == V_028808_CB_RESOLVE)) {
		/* According to the CB spec, CB_SHADER_MASK should be set to
		 * enable writes to all four channels of MRT0.
		 */
		blend.cb_shader_mask = 0xf;
	}

	pipeline->graphics.col_format = blend.spi_shader_col_format;
	pipeline->graphics.cb_target_mask = blend.cb_target_mask;

	if (radv_pipeline_has_gs(pipeline) && !radv_pipeline_has_ngg(pipeline)) {
		struct radv_shader_variant *gs =
			pipeline->shaders[MESA_SHADER_GEOMETRY];

		radv_pipeline_init_gs_ring_state(pipeline, &gs->info.gs_ring_info);
	}

	if (radv_pipeline_has_tess(pipeline)) {
		pipeline->graphics.tess_patch_control_points =
			pCreateInfo->pTessellationState->patchControlPoints;
	}

	radv_pipeline_init_vertex_input_state(pipeline, pCreateInfo);
	radv_pipeline_init_binning_state(pipeline, pCreateInfo, &blend);
	radv_pipeline_init_shader_stages_state(pipeline);
	radv_pipeline_init_scratch(device, pipeline);

	/* Find the last vertex shader stage that eventually uses streamout. */
	pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);

	radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend);

	return result;
}

VkResult
radv_graphics_pipeline_create(
	VkDevice _device,
	VkPipelineCache _cache,
	const VkGraphicsPipelineCreateInfo *pCreateInfo,
	const struct radv_graphics_pipeline_create_info *extra,
	const VkAllocationCallbacks *pAllocator,
	VkPipeline *pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	vk_object_base_init(&device->vk, &pipeline->base,
			    VK_OBJECT_TYPE_PIPELINE);

	result = radv_pipeline_init(pipeline, device, cache,
				    pCreateInfo, extra);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}

VkResult radv_CreateGraphicsPipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkGraphicsPipelineCreateInfo*         pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_graphics_pipeline_create(_device,
						  pipelineCache,
						  &pCreateInfos[i],
						  NULL, pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;

			if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT)
				break;
		}
	}

	for (; i < count; ++i)
		pPipelines[i] = VK_NULL_HANDLE;

	return result;
}

static void
radv_pipeline_generate_hw_cs(struct radeon_cmdbuf *cs,
			     const struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	struct radv_device *device = pipeline->device;

	radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B834_DATA(va >> 40));

	radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);
	if (device->physical_device->rad_info.chip_class >= GFX10) {
		radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, shader->config.rsrc3);
	}
}

static void
radv_pipeline_generate_compute_state(struct radeon_cmdbuf *cs,
				     const struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	struct radv_device *device = pipeline->device;
	unsigned threads_per_threadgroup;
	unsigned threadgroups_per_cu = 1;
	unsigned waves_per_threadgroup;
	unsigned max_waves_per_sh = 0;

	/* Calculate best compute resource limits. */
	threads_per_threadgroup = shader->info.cs.block_size[0] *
				  shader->info.cs.block_size[1] *
				  shader->info.cs.block_size[2];
	waves_per_threadgroup = DIV_ROUND_UP(threads_per_threadgroup,
					     shader->info.wave_size);

	if (device->physical_device->rad_info.chip_class >= GFX10 &&
	    waves_per_threadgroup == 1)
		threadgroups_per_cu = 2;
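
	/* Worked example (illustrative numbers): an 8x8x1 block is 64
	 * threads; with wave64 that is DIV_ROUND_UP(64, 64) = 1 wave per
	 * threadgroup, so on GFX10 two threadgroups may share a CU, while
	 * with wave32 it is 2 waves and threadgroups_per_cu stays 1.
	 */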

	radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  ac_get_compute_resource_limits(&device->physical_device->rad_info,
							 waves_per_threadgroup,
							 max_waves_per_sh,
							 threadgroups_per_cu));

	radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(shader->info.cs.block_size[0]));
	radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(shader->info.cs.block_size[1]));
	radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(shader->info.cs.block_size[2]));
}

static void
radv_compute_generate_pm4(struct radv_pipeline *pipeline)
{
	struct radv_device *device = pipeline->device;
	struct radeon_cmdbuf *cs = &pipeline->cs;

	cs->max_dw = device->physical_device->rad_info.chip_class >= GFX10 ? 19 : 16;
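	/* The three extra dwords on GFX10 presumably cover the additional
	 * COMPUTE_PGM_RSRC3 write emitted in radv_pipeline_generate_hw_cs
	 * (SET_SH_REG header, register offset, one value).
	 */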
	cs->buf = malloc(cs->max_dw * 4);

	radv_pipeline_generate_hw_cs(cs, pipeline);
	radv_pipeline_generate_compute_state(cs, pipeline);

	assert(pipeline->cs.cdw <= pipeline->cs.max_dw);
}

static struct radv_pipeline_key
radv_generate_compute_pipeline_key(struct radv_pipeline *pipeline,
				   const VkComputePipelineCreateInfo *pCreateInfo)
{
	const VkPipelineShaderStageCreateInfo *stage = &pCreateInfo->stage;
	struct radv_pipeline_key key;
	memset(&key, 0, sizeof(key));

	if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
		key.optimisations_disabled = 1;

	const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *subgroup_size =
		vk_find_struct_const(stage->pNext,
				     PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);

	if (subgroup_size) {
		assert(subgroup_size->requiredSubgroupSize == 32 ||
		       subgroup_size->requiredSubgroupSize == 64);
		key.compute_subgroup_size = subgroup_size->requiredSubgroupSize;
	}

	return key;
}

static VkResult radv_compute_pipeline_create(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkComputePipelineCreateInfo*          pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	vk_object_base_init(&device->vk, &pipeline->base,
			    VK_OBJECT_TYPE_PIPELINE);

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;
	if (creation_feedback)
		stage_feedbacks[MESA_SHADER_COMPUTE] = &creation_feedback->pPipelineStageCreationFeedbacks[0];

	pStages[MESA_SHADER_COMPUTE] = &pCreateInfo->stage;

	struct radv_pipeline_key key =
		radv_generate_compute_pipeline_key(pipeline, pCreateInfo);

	result = radv_create_shaders(pipeline, device, cache, &key, pStages,
				     pCreateInfo->flags, pipeline_feedback,
				     stage_feedbacks);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	pipeline->user_data_0[MESA_SHADER_COMPUTE] = radv_pipeline_stage_to_user_data_0(pipeline, MESA_SHADER_COMPUTE, device->physical_device->rad_info.chip_class);
	pipeline->need_indirect_descriptor_sets |= pipeline->shaders[MESA_SHADER_COMPUTE]->info.need_indirect_descriptor_sets;
	radv_pipeline_init_scratch(device, pipeline);

	radv_compute_generate_pm4(pipeline);

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}

VkResult radv_CreateComputePipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkComputePipelineCreateInfo*          pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_compute_pipeline_create(_device, pipelineCache,
						 &pCreateInfos[i],
						 pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;

			if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT)
				break;
		}
	}

	for (; i < count; ++i)
		pPipelines[i] = VK_NULL_HANDLE;

	return result;
}

static uint32_t radv_get_executable_count(const struct radv_pipeline *pipeline)
{
	uint32_t ret = 0;
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!pipeline->shaders[i])
			continue;

		/* A non-NGG GS also exposes its separate GS copy shader. */
		if (i == MESA_SHADER_GEOMETRY &&
		    !radv_pipeline_has_ngg(pipeline)) {
			ret += 2u;
		} else {
			ret += 1u;
		}
	}
	return ret;
}

static struct radv_shader_variant *
radv_get_shader_from_executable_index(const struct radv_pipeline *pipeline, int index, gl_shader_stage *stage)
{
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!pipeline->shaders[i])
			continue;
		if (!index) {
			*stage = i;
			return pipeline->shaders[i];
		}

		--index;

		if (i == MESA_SHADER_GEOMETRY &&
		    !radv_pipeline_has_ngg(pipeline)) {
			if (!index) {
				*stage = i;
				return pipeline->gs_copy_shader;
			}
			--index;
		}
	}

	*stage = MESA_SHADER_COMPUTE;
	return NULL;
}

/* Basically strlcpy (which does not exist on linux) specialized for
 * filling the fixed-size name/description fields used below.
 */
static void desc_copy(char *desc, const char *src) {
	int len = strlen(src);
	assert(len < VK_MAX_DESCRIPTION_SIZE);
	memcpy(desc, src, len);
	memset(desc + len, 0, VK_MAX_DESCRIPTION_SIZE - len);
}
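
/* e.g. desc_copy(pProperties[0].name, "Vertex Shader") writes the string and
 * zero-fills the rest of the VK_MAX_DESCRIPTION_SIZE array, which is what the
 * pipeline-executable queries below rely on when reporting names and
 * descriptions.
 */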

VkResult radv_GetPipelineExecutablePropertiesKHR(
	VkDevice                                    _device,
	const VkPipelineInfoKHR*                    pPipelineInfo,
	uint32_t*                                   pExecutableCount,
	VkPipelineExecutablePropertiesKHR*          pProperties)
{
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelineInfo->pipeline);
	const uint32_t total_count = radv_get_executable_count(pipeline);

	if (!pProperties) {
		*pExecutableCount = total_count;
		return VK_SUCCESS;
	}

	const uint32_t count = MIN2(total_count, *pExecutableCount);
	for (unsigned i = 0, executable_idx = 0;
	     i < MESA_SHADER_STAGES && executable_idx < count; ++i) {
		if (!pipeline->shaders[i])
			continue;

		pProperties[executable_idx].stages = mesa_to_vk_shader_stage(i);
		const char *name = NULL;
		const char *description = NULL;

		switch (i) {
		case MESA_SHADER_VERTEX:
			name = "Vertex Shader";
			description = "Vulkan Vertex Shader";
			break;
		case MESA_SHADER_TESS_CTRL:
			if (!pipeline->shaders[MESA_SHADER_VERTEX]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_VERTEX_BIT;
				name = "Vertex + Tessellation Control Shaders";
				description = "Combined Vulkan Vertex and Tessellation Control Shaders";
			} else {
				name = "Tessellation Control Shader";
				description = "Vulkan Tessellation Control Shader";
			}
			break;
		case MESA_SHADER_TESS_EVAL:
			name = "Tessellation Evaluation Shader";
			description = "Vulkan Tessellation Evaluation Shader";
			break;
		case MESA_SHADER_GEOMETRY:
			if (radv_pipeline_has_tess(pipeline) && !pipeline->shaders[MESA_SHADER_TESS_EVAL]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
				name = "Tessellation Evaluation + Geometry Shaders";
				description = "Combined Vulkan Tessellation Evaluation and Geometry Shaders";
			} else if (!radv_pipeline_has_tess(pipeline) && !pipeline->shaders[MESA_SHADER_VERTEX]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_VERTEX_BIT;
				name = "Vertex + Geometry Shader";
				description = "Combined Vulkan Vertex and Geometry Shaders";
			} else {
				name = "Geometry Shader";
				description = "Vulkan Geometry Shader";
			}
			break;
		case MESA_SHADER_FRAGMENT:
			name = "Fragment Shader";
			description = "Vulkan Fragment Shader";
			break;
		case MESA_SHADER_COMPUTE:
			name = "Compute Shader";
			description = "Vulkan Compute Shader";
			break;
		}

		pProperties[executable_idx].subgroupSize = pipeline->shaders[i]->info.wave_size;
		desc_copy(pProperties[executable_idx].name, name);
		desc_copy(pProperties[executable_idx].description, description);
		++executable_idx;

		if (i == MESA_SHADER_GEOMETRY &&
		    !radv_pipeline_has_ngg(pipeline)) {
			assert(pipeline->gs_copy_shader);
			if (executable_idx >= count)
				break;

			pProperties[executable_idx].stages = VK_SHADER_STAGE_GEOMETRY_BIT;
			pProperties[executable_idx].subgroupSize = 64;
			desc_copy(pProperties[executable_idx].name, "GS Copy Shader");
			desc_copy(pProperties[executable_idx].description,
				  "Extra shader stage that loads the GS output ringbuffer into the rasterizer");
			++executable_idx;
		}
	}

	VkResult result = *pExecutableCount < total_count ? VK_INCOMPLETE : VK_SUCCESS;
	*pExecutableCount = count;
	return result;
}

VkResult radv_GetPipelineExecutableStatisticsKHR(
	VkDevice                                    _device,
	const VkPipelineExecutableInfoKHR*          pExecutableInfo,
	uint32_t*                                   pStatisticCount,
	VkPipelineExecutableStatisticKHR*           pStatistics)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
	gl_shader_stage stage;
	struct radv_shader_variant *shader = radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);

	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
	unsigned max_waves = radv_get_max_waves(device, shader, stage);

	VkPipelineExecutableStatisticKHR *s = pStatistics;
	VkPipelineExecutableStatisticKHR *end = s + (pStatistics ? *pStatisticCount : 0);
	VkResult result = VK_SUCCESS;

	if (s < end) {
		desc_copy(s->name, "SGPRs");
		desc_copy(s->description, "Number of SGPR registers allocated per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.num_sgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "VGPRs");
		desc_copy(s->description, "Number of VGPR registers allocated per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.num_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Spilled SGPRs");
		desc_copy(s->description, "Number of SGPR registers spilled per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.spilled_sgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Spilled VGPRs");
		desc_copy(s->description, "Number of VGPR registers spilled per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.spilled_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "PrivMem VGPRs");
		desc_copy(s->description, "Number of VGPRs stored in private memory per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->info.private_mem_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Code size");
		desc_copy(s->description, "Code size in bytes");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->exec_size;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "LDS size");
		desc_copy(s->description, "LDS size in bytes per workgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.lds_size * lds_increment;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Scratch size");
		desc_copy(s->description, "Private memory in bytes per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.scratch_bytes_per_wave;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Subgroups per SIMD");
		desc_copy(s->description, "The maximum number of subgroups in flight on a SIMD unit");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = max_waves;
	}
	++s;

	if (shader->statistics) {
		for (unsigned i = 0; i < shader->statistics->count; i++) {
			struct radv_compiler_statistic_info *info = &shader->statistics->infos[i];
			uint32_t value = shader->statistics->values[i];
			if (s < end) {
				desc_copy(s->name, info->name);
				desc_copy(s->description, info->desc);
				s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
				s->value.u64 = value;
			}
			++s;
		}
	}

	if (!pStatistics)
		*pStatisticCount = s - pStatistics;
	else if (s > end) {
		*pStatisticCount = end - pStatistics;
		result = VK_INCOMPLETE;
	} else {
		*pStatisticCount = s - pStatistics;
	}

	return result;
}

static VkResult radv_copy_representation(void *data, size_t *data_size, const char *src)
{
	size_t total_size = strlen(src) + 1;

	if (!data) {
		*data_size = total_size;
		return VK_SUCCESS;
	}

	size_t size = MIN2(total_size, *data_size);

	memcpy(data, src, size);
	if (size)
		*((char*)data + size - 1) = 0;
	return size < total_size ? VK_INCOMPLETE : VK_SUCCESS;
}
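
/* radv_copy_representation() follows the usual Vulkan two-call idiom for the
 * pData/dataSize pairs: a NULL pData reports the required size, otherwise the
 * string is truncated to dataSize (and still NUL-terminated) and
 * VK_INCOMPLETE is returned when it did not fit.
 */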

VkResult radv_GetPipelineExecutableInternalRepresentationsKHR(
	VkDevice                                    _device,
	const VkPipelineExecutableInfoKHR*          pExecutableInfo,
	uint32_t*                                   pInternalRepresentationCount,
	VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
{
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
	gl_shader_stage stage;
	struct radv_shader_variant *shader = radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);

	VkPipelineExecutableInternalRepresentationKHR *p = pInternalRepresentations;
	VkPipelineExecutableInternalRepresentationKHR *end = p + (pInternalRepresentations ? *pInternalRepresentationCount : 0);
	VkResult result = VK_SUCCESS;

	/* optimized NIR */
	if (p < end) {
		desc_copy(p->name, "NIR Shader(s)");
		desc_copy(p->description, "The optimized NIR shader(s)");
		if (radv_copy_representation(p->pData, &p->dataSize, shader->nir_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	/* backend IR */
	if (p < end) {
		if (radv_use_llvm_for_stage(pipeline->device, stage)) {
			desc_copy(p->name, "LLVM IR");
			desc_copy(p->description, "The LLVM IR after some optimizations");
		} else {
			desc_copy(p->name, "ACO IR");
			desc_copy(p->description, "The ACO IR after some optimizations");
		}
		if (radv_copy_representation(p->pData, &p->dataSize, shader->ir_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	/* disassembly */
	if (p < end) {
		desc_copy(p->name, "Assembly");
		desc_copy(p->description, "Final Assembly");
		if (radv_copy_representation(p->pData, &p->dataSize, shader->disasm_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	if (!pInternalRepresentations)
		*pInternalRepresentationCount = p - pInternalRepresentations;
	else if (p > end) {
		result = VK_INCOMPLETE;
		*pInternalRepresentationCount = end - pInternalRepresentations;
	} else {
		*pInternalRepresentationCount = p - pInternalRepresentations