/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "nir/nir_builder.h"
#include "nir/nir_xfb_info.h"
#include "spirv/nir_spirv.h"

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"
#include "main/menums.h"
struct radv_blend_state {
	uint32_t blend_enable_4bit;
	uint32_t need_src_alpha;

	uint32_t cb_color_control;
	uint32_t cb_target_mask;
	uint32_t cb_target_enabled_4bit;
	uint32_t sx_mrt_blend_opt[8];
	uint32_t cb_blend_control[8];

	uint32_t spi_shader_col_format;
	uint32_t cb_shader_mask;
	uint32_t db_alpha_to_mask;

	uint32_t commutative_4bit;

	bool single_cb_enable;
	bool mrt0_is_dual_src;
};
struct radv_dsa_order_invariance {
	/* Whether the final result in Z/S buffers is guaranteed to be
	 * invariant under changes to the order in which fragments arrive.
	 */
	bool zs;

	/* Whether the set of fragments that pass the combined Z/S test is
	 * guaranteed to be invariant under changes to the order in which
	 * fragments arrive.
	 */
	bool pass_set;
};
struct radv_tessellation_state {
	uint32_t ls_hs_config;
};
static const VkPipelineMultisampleStateCreateInfo *
radv_pipeline_get_multisample_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
		return pCreateInfo->pMultisampleState;
	return NULL;
}
static const VkPipelineTessellationStateCreateInfo *
radv_pipeline_get_tessellation_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		if (pCreateInfo->pStages[i].stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ||
		    pCreateInfo->pStages[i].stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) {
			return pCreateInfo->pTessellationState;
		}
	}
	return NULL;
}
static const VkPipelineDepthStencilStateCreateInfo *
radv_pipeline_get_depth_stencil_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;

	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
	    subpass->depth_stencil_attachment)
		return pCreateInfo->pDepthStencilState;
	return NULL;
}
static const VkPipelineColorBlendStateCreateInfo *
radv_pipeline_get_color_blend_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;

	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
	    subpass->has_color_att)
		return pCreateInfo->pColorBlendState;
	return NULL;
}
bool radv_pipeline_has_ngg(const struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *variant = NULL;
	if (pipeline->shaders[MESA_SHADER_GEOMETRY])
		variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
	else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
		variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
	else if (pipeline->shaders[MESA_SHADER_VERTEX])
		variant = pipeline->shaders[MESA_SHADER_VERTEX];
	else
		return false;

	return variant->info.is_ngg;
}
bool radv_pipeline_has_ngg_passthrough(const struct radv_pipeline *pipeline)
{
	assert(radv_pipeline_has_ngg(pipeline));

	struct radv_shader_variant *variant = NULL;
	if (pipeline->shaders[MESA_SHADER_GEOMETRY])
		variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
	else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
		variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
	else if (pipeline->shaders[MESA_SHADER_VERTEX])
		variant = pipeline->shaders[MESA_SHADER_VERTEX];
	else
		return false;

	return variant->info.is_ngg_passthrough;
}
bool radv_pipeline_has_gs_copy_shader(const struct radv_pipeline *pipeline)
{
	if (!radv_pipeline_has_gs(pipeline))
		return false;

	/* The GS copy shader is required if the pipeline has GS on GFX6-GFX9.
	 * On GFX10, it might be required in rare cases if it's not possible to
	 * enable NGG.
	 */
	if (radv_pipeline_has_ngg(pipeline))
		return false;

	assert(pipeline->gs_copy_shader);
	return true;
}
static void
radv_pipeline_destroy(struct radv_device *device,
                      struct radv_pipeline *pipeline,
                      const VkAllocationCallbacks *allocator)
{
	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i)
		if (pipeline->shaders[i])
			radv_shader_variant_destroy(device, pipeline->shaders[i]);

	if (pipeline->gs_copy_shader)
		radv_shader_variant_destroy(device, pipeline->gs_copy_shader);

	free(pipeline->cs.buf);
	vk_free2(&device->alloc, allocator, pipeline);
}
void radv_DestroyPipeline(
	VkDevice                    _device,
	VkPipeline                  _pipeline,
	const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	if (!_pipeline)
		return;

	radv_pipeline_destroy(device, pipeline, pAllocator);
}
static uint32_t get_hash_flags(struct radv_device *device)
{
	uint32_t hash_flags = 0;

	if (device->instance->debug_flags & RADV_DEBUG_NO_NGG)
		hash_flags |= RADV_HASH_SHADER_NO_NGG;
	if (device->physical_device->cs_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_CS_WAVE32;
	if (device->physical_device->ps_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_PS_WAVE32;
	if (device->physical_device->ge_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_GE_WAVE32;
	if (device->physical_device->use_aco)
		hash_flags |= RADV_HASH_SHADER_ACO;
	return hash_flags;
}
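
/* Gather the scratch (spill) requirements of all compiled stages: keep the
 * largest per-wave scratch size, bound the wave count by CU count and VGPR
 * usage, and raise the minimum wave count so a full compute workgroup fits.
 */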
static void
radv_pipeline_scratch_init(struct radv_device *device,
                           struct radv_pipeline *pipeline)
{
	unsigned scratch_bytes_per_wave = 0;
	unsigned max_waves = 0;
	unsigned min_waves = 1;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i] &&
		    pipeline->shaders[i]->config.scratch_bytes_per_wave) {
			unsigned max_stage_waves = device->scratch_waves;

			scratch_bytes_per_wave = MAX2(scratch_bytes_per_wave,
			                              pipeline->shaders[i]->config.scratch_bytes_per_wave);

			max_stage_waves = MIN2(max_stage_waves,
			          4 * device->physical_device->rad_info.num_good_compute_units *
			          (256 / pipeline->shaders[i]->config.num_vgprs));
			max_waves = MAX2(max_waves, max_stage_waves);
		}
	}

	if (pipeline->shaders[MESA_SHADER_COMPUTE]) {
		unsigned group_size = pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[0] *
		                      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[1] *
		                      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[2];
		min_waves = MAX2(min_waves, round_up_u32(group_size, 64));
	}

	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
	pipeline->max_waves = max_waves;
}
static uint32_t si_translate_blend_logic_op(VkLogicOp op)
{
	switch (op) {
	case VK_LOGIC_OP_CLEAR:
		return V_028808_ROP3_CLEAR;
	case VK_LOGIC_OP_AND:
		return V_028808_ROP3_AND;
	case VK_LOGIC_OP_AND_REVERSE:
		return V_028808_ROP3_AND_REVERSE;
	case VK_LOGIC_OP_COPY:
		return V_028808_ROP3_COPY;
	case VK_LOGIC_OP_AND_INVERTED:
		return V_028808_ROP3_AND_INVERTED;
	case VK_LOGIC_OP_NO_OP:
		return V_028808_ROP3_NO_OP;
	case VK_LOGIC_OP_XOR:
		return V_028808_ROP3_XOR;
	case VK_LOGIC_OP_OR:
		return V_028808_ROP3_OR;
	case VK_LOGIC_OP_NOR:
		return V_028808_ROP3_NOR;
	case VK_LOGIC_OP_EQUIVALENT:
		return V_028808_ROP3_EQUIVALENT;
	case VK_LOGIC_OP_INVERT:
		return V_028808_ROP3_INVERT;
	case VK_LOGIC_OP_OR_REVERSE:
		return V_028808_ROP3_OR_REVERSE;
	case VK_LOGIC_OP_COPY_INVERTED:
		return V_028808_ROP3_COPY_INVERTED;
	case VK_LOGIC_OP_OR_INVERTED:
		return V_028808_ROP3_OR_INVERTED;
	case VK_LOGIC_OP_NAND:
		return V_028808_ROP3_NAND;
	case VK_LOGIC_OP_SET:
		return V_028808_ROP3_SET;
	default:
		unreachable("Unhandled logic op");
	}
}
static uint32_t si_translate_blend_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case VK_BLEND_OP_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case VK_BLEND_OP_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case VK_BLEND_OP_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		return 0;
	}
}
static uint32_t si_translate_blend_factor(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case VK_BLEND_FACTOR_ONE:
		return V_028780_BLEND_ONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case VK_BLEND_FACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case VK_BLEND_FACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case VK_BLEND_FACTOR_CONSTANT_COLOR:
		return V_028780_BLEND_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
		return V_028780_BLEND_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case VK_BLEND_FACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case VK_BLEND_FACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		return 0;
	}
}
static uint32_t si_translate_blend_opt_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028760_OPT_COMB_ADD;
	case VK_BLEND_OP_SUBTRACT:
		return V_028760_OPT_COMB_SUBTRACT;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028760_OPT_COMB_REVSUBTRACT;
	case VK_BLEND_OP_MIN:
		return V_028760_OPT_COMB_MIN;
	case VK_BLEND_OP_MAX:
		return V_028760_OPT_COMB_MAX;
	default:
		return V_028760_OPT_COMB_BLEND_DISABLED;
	}
}
static uint32_t si_translate_blend_opt_factor(VkBlendFactor factor, bool is_alpha)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_ALL;
	case VK_BLEND_FACTOR_ONE:
		return V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0
				: V_028760_BLEND_OPT_PRESERVE_C1_IGNORE_C0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1
				: V_028760_BLEND_OPT_PRESERVE_C0_IGNORE_C1;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE
				: V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;
	default:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
	}
}
/**
 * Get rid of DST in the blend factors by commuting the operands:
 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
 */
static void si_blend_remove_dst(unsigned *func, unsigned *src_factor,
                                unsigned *dst_factor, unsigned expected_dst,
                                unsigned replacement_src)
{
	if (*src_factor == expected_dst &&
	    *dst_factor == VK_BLEND_FACTOR_ZERO) {
		*src_factor = VK_BLEND_FACTOR_ZERO;
		*dst_factor = replacement_src;

		/* Commuting the operands requires reversing subtractions. */
		if (*func == VK_BLEND_OP_SUBTRACT)
			*func = VK_BLEND_OP_REVERSE_SUBTRACT;
		else if (*func == VK_BLEND_OP_REVERSE_SUBTRACT)
			*func = VK_BLEND_OP_SUBTRACT;
	}
}
static bool si_blend_factor_uses_dst(unsigned factor)
{
	return factor == VK_BLEND_FACTOR_DST_COLOR ||
	       factor == VK_BLEND_FACTOR_DST_ALPHA ||
	       factor == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
	       factor == VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA ||
	       factor == VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
}
static bool is_dual_src(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_SRC1_COLOR:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
	case VK_BLEND_FACTOR_SRC1_ALPHA:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return true;
	default:
		return false;
	}
}
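
/* Pick the SPI color export format for one attachment. Four candidates are
 * computed (normal, alpha-exporting, blendable, blendable+alpha) and the
 * returned one depends on whether blending is enabled and whether the alpha
 * channel must be exported (e.g. for alpha-to-coverage).
 */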
static unsigned si_choose_spi_color_format(VkFormat vk_format,
					   bool blend_enable,
					   bool blend_need_alpha)
{
	const struct vk_format_description *desc = vk_format_description(vk_format);
	unsigned format, ntype, swap;

	/* Alpha is needed for alpha-to-coverage.
	 * Blending may be with or without alpha.
	 */
	unsigned normal = 0;      /* most optimal, may not support blending or export alpha */
	unsigned alpha = 0;       /* exports alpha, but may not support blending */
	unsigned blend = 0;       /* supports blending, but may not export alpha */
	unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

	format = radv_translate_colorformat(vk_format);
	ntype = radv_translate_color_numformat(vk_format, desc,
					       vk_format_get_first_non_void_channel(vk_format));
	swap = radv_translate_colorswap(vk_format, false);

	/* Choose the SPI color formats. These are required values for Stoney/RB+.
	 * Other chips have multiple choices, though they are not necessarily better.
	 */
	switch (format) {
	case V_028C70_COLOR_5_6_5:
	case V_028C70_COLOR_1_5_5_5:
	case V_028C70_COLOR_5_5_5_1:
	case V_028C70_COLOR_4_4_4_4:
	case V_028C70_COLOR_10_11_11:
	case V_028C70_COLOR_11_11_10:
	case V_028C70_COLOR_8:
	case V_028C70_COLOR_8_8:
	case V_028C70_COLOR_8_8_8_8:
	case V_028C70_COLOR_10_10_10_2:
	case V_028C70_COLOR_2_10_10_10:
		if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_16:
	case V_028C70_COLOR_16_16:
	case V_028C70_COLOR_16_16_16_16:
		if (ntype == V_028C70_NUMBER_UNORM ||
		    ntype == V_028C70_NUMBER_SNORM) {
			/* UNORM16 and SNORM16 don't support blending */
			if (ntype == V_028C70_NUMBER_UNORM)
				normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
			else
				normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

			/* Use 32 bits per channel for blending. */
			if (format == V_028C70_COLOR_16) {
				if (swap == V_028C70_SWAP_STD) { /* R */
					blend = V_028714_SPI_SHADER_32_R;
					blend_alpha = V_028714_SPI_SHADER_32_AR;
				} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else if (format == V_028C70_COLOR_16_16) {
				if (swap == V_028C70_SWAP_STD) { /* RG */
					blend = V_028714_SPI_SHADER_32_GR;
					blend_alpha = V_028714_SPI_SHADER_32_ABGR;
				} else if (swap == V_028C70_SWAP_ALT) /* RA */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else /* 16_16_16_16 */
				blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_FLOAT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32:
		if (swap == V_028C70_SWAP_STD) { /* R */
			blend = normal = V_028714_SPI_SHADER_32_R;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
		} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32:
		if (swap == V_028C70_SWAP_STD) { /* RG */
			blend = normal = V_028714_SPI_SHADER_32_GR;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (swap == V_028C70_SWAP_ALT) /* RA */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32_32_32:
	case V_028C70_COLOR_8_24:
	case V_028C70_COLOR_24_8:
	case V_028C70_COLOR_X24_8_32_FLOAT:
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
		break;

	default:
		unreachable("unhandled blend format");
	}

	if (blend_enable && blend_need_alpha)
		return blend_alpha;
	else if(blend_need_alpha)
		return alpha;
	else if(blend_enable)
		return blend;
	else
		return normal;
}
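
/* Derive SPI_SHADER_COL_FORMAT and the CB shader mask for every color target
 * of the subpass, plugging holes with 32_R (required to avoid hangs) and
 * duplicating MRT0's format when dual-source blending is used.
 */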
static void
radv_pipeline_compute_spi_color_formats(struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					struct radv_blend_state *blend)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned col_format = 0;
	unsigned num_targets;

	for (unsigned i = 0; i < (blend->single_cb_enable ? 1 : subpass->color_count); ++i) {
		unsigned cf;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
			cf = V_028714_SPI_SHADER_ZERO;
		} else {
			struct radv_render_pass_attachment *attachment = pass->attachments + subpass->color_attachments[i].attachment;
			bool blend_enable =
				blend->blend_enable_4bit & (0xfu << (i * 4));

			cf = si_choose_spi_color_format(attachment->format,
							blend_enable,
							blend->need_src_alpha & (1 << i));
		}

		col_format |= cf << (4 * i);
	}

	if (!(col_format & 0xf) && blend->need_src_alpha & (1 << 0)) {
		/* When a subpass doesn't have any color attachments, write the
		 * alpha channel of MRT0 when alpha coverage is enabled because
		 * the depth attachment needs it.
		 */
		col_format |= V_028714_SPI_SHADER_32_AR;
	}

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	num_targets = (util_last_bit(col_format) + 3) / 4;
	for (unsigned i = 0; i < num_targets; i++) {
		if (!(col_format & (0xf << (i * 4)))) {
			col_format |= V_028714_SPI_SHADER_32_R << (i * 4);
		}
	}

	/* The output for dual source blending should have the same format as
	 * the first output.
	 */
	if (blend->mrt0_is_dual_src)
		col_format |= (col_format & 0xf) << 4;

	blend->cb_shader_mask = ac_get_cb_shader_mask(col_format);
	blend->spi_shader_col_format = col_format;
}
static bool
format_is_int8(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);
	int channel = vk_format_get_first_non_void_channel(format);

	return channel >= 0 && desc->channel[channel].pure_integer &&
	       desc->channel[channel].size == 8;
}
static bool
format_is_int10(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);

	if (desc->nr_channels != 4)
		return false;
	for (unsigned i = 0; i < 4; i++) {
		if (desc->channel[i].pure_integer && desc->channel[i].size == 10)
			return true;
	}
	return false;
}
/*
 * Ordered so that for each i,
 * radv_format_meta_fs_key(radv_fs_key_format_exemplars[i]) == i.
 */
const VkFormat radv_fs_key_format_exemplars[NUM_META_FS_KEYS] = {
	VK_FORMAT_R32_SFLOAT,
	VK_FORMAT_R32G32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UNORM,
	VK_FORMAT_R16G16B16A16_UNORM,
	VK_FORMAT_R16G16B16A16_SNORM,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_A2R10G10B10_UINT_PACK32,
	VK_FORMAT_A2R10G10B10_SINT_PACK32,
};
unsigned radv_format_meta_fs_key(VkFormat format)
{
	unsigned col_format = si_choose_spi_color_format(format, false, false);

	assert(col_format != V_028714_SPI_SHADER_32_AR);
	if (col_format >= V_028714_SPI_SHADER_32_AR)
		--col_format; /* Skip V_028714_SPI_SHADER_32_AR since there is no such VkFormat */

	--col_format; /* Skip V_028714_SPI_SHADER_ZERO */
	bool is_int8 = format_is_int8(format);
	bool is_int10 = format_is_int10(format);

	return col_format + (is_int8 ? 3 : is_int10 ? 5 : 0);
}
static void
radv_pipeline_compute_get_int_clamp(const VkGraphicsPipelineCreateInfo *pCreateInfo,
				    unsigned *is_int8, unsigned *is_int10)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	*is_int8 = 0;
	*is_int10 = 0;

	for (unsigned i = 0; i < subpass->color_count; ++i) {
		struct radv_render_pass_attachment *attachment;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
			continue;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		if (format_is_int8(attachment->format))
			*is_int8 |= 1 << i;
		if (format_is_int10(attachment->format))
			*is_int10 |= 1 << i;
	}
}
static void
radv_blend_check_commutativity(struct radv_blend_state *blend,
			       VkBlendOp op, VkBlendFactor src,
			       VkBlendFactor dst, unsigned chanmask)
{
	/* Src factor is allowed when it does not depend on Dst. */
	static const uint32_t src_allowed =
		(1u << VK_BLEND_FACTOR_ONE) |
		(1u << VK_BLEND_FACTOR_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA_SATURATE) |
		(1u << VK_BLEND_FACTOR_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC1_ALPHA) |
		(1u << VK_BLEND_FACTOR_ZERO) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);

	if (dst == VK_BLEND_FACTOR_ONE &&
	    (src_allowed & (1u << src))) {
		/* Addition is commutative, but floating point addition isn't
		 * associative: subtle changes can be introduced via different
		 * rounding. Be conservative, only enable for min and max.
		 */
		if (op == VK_BLEND_OP_MAX || op == VK_BLEND_OP_MIN)
			blend->commutative_4bit |= chanmask;
	}
}
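
/* Translate VkPipelineColorBlendStateCreateInfo into CB_COLOR_CONTROL,
 * CB_BLEND_CONTROL and the RB+ SX_MRT_BLEND_OPT values, and record which
 * MRTs need blending, src alpha or dual-source handling.
 */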
static struct radv_blend_state
radv_pipeline_init_blend_state(struct radv_pipeline *pipeline,
			       const VkGraphicsPipelineCreateInfo *pCreateInfo,
			       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineColorBlendStateCreateInfo *vkblend = radv_pipeline_get_color_blend_state(pCreateInfo);
	const VkPipelineMultisampleStateCreateInfo *vkms = radv_pipeline_get_multisample_state(pCreateInfo);
	struct radv_blend_state blend = {0};
	unsigned mode = V_028808_CB_NORMAL;
	int i;

	if (extra && extra->custom_blend_mode) {
		blend.single_cb_enable = true;
		mode = extra->custom_blend_mode;
	}

	blend.cb_color_control = 0;
	if (vkblend) {
		if (vkblend->logicOpEnable)
			blend.cb_color_control |= S_028808_ROP3(si_translate_blend_logic_op(vkblend->logicOp));
		else
			blend.cb_color_control |= S_028808_ROP3(V_028808_ROP3_COPY);
	}

	blend.db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(3) |
		S_028B70_ALPHA_TO_MASK_OFFSET1(1) |
		S_028B70_ALPHA_TO_MASK_OFFSET2(0) |
		S_028B70_ALPHA_TO_MASK_OFFSET3(2) |
		S_028B70_OFFSET_ROUND(1);

	if (vkms && vkms->alphaToCoverageEnable) {
		blend.db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);
		blend.need_src_alpha |= 0x1;
	}

	blend.cb_target_mask = 0;
	if (vkblend) {
		for (i = 0; i < vkblend->attachmentCount; i++) {
			const VkPipelineColorBlendAttachmentState *att = &vkblend->pAttachments[i];
			unsigned blend_cntl = 0;
			unsigned srcRGB_opt, dstRGB_opt, srcA_opt, dstA_opt;
			VkBlendOp eqRGB = att->colorBlendOp;
			VkBlendFactor srcRGB = att->srcColorBlendFactor;
			VkBlendFactor dstRGB = att->dstColorBlendFactor;
			VkBlendOp eqA = att->alphaBlendOp;
			VkBlendFactor srcA = att->srcAlphaBlendFactor;
			VkBlendFactor dstA = att->dstAlphaBlendFactor;

			blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);

			if (!att->colorWriteMask)
				continue;

			blend.cb_target_mask |= (unsigned)att->colorWriteMask << (4 * i);
			blend.cb_target_enabled_4bit |= 0xf << (4 * i);
			if (!att->blendEnable) {
				blend.cb_blend_control[i] = blend_cntl;
				continue;
			}

			if (is_dual_src(srcRGB) || is_dual_src(dstRGB) || is_dual_src(srcA) || is_dual_src(dstA))
				if (i == 0)
					blend.mrt0_is_dual_src = true;

			if (eqRGB == VK_BLEND_OP_MIN || eqRGB == VK_BLEND_OP_MAX) {
				srcRGB = VK_BLEND_FACTOR_ONE;
				dstRGB = VK_BLEND_FACTOR_ONE;
			}
			if (eqA == VK_BLEND_OP_MIN || eqA == VK_BLEND_OP_MAX) {
				srcA = VK_BLEND_FACTOR_ONE;
				dstA = VK_BLEND_FACTOR_ONE;
			}

			radv_blend_check_commutativity(&blend, eqRGB, srcRGB, dstRGB,
						       0x7 << (4 * i));
			radv_blend_check_commutativity(&blend, eqA, srcA, dstA,
						       0x8 << (4 * i));

			/* Blending optimizations for RB+.
			 * These transformations don't change the behavior.
			 *
			 * First, get rid of DST in the blend factors:
			 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
			 */
			si_blend_remove_dst(&eqRGB, &srcRGB, &dstRGB,
					    VK_BLEND_FACTOR_DST_COLOR,
					    VK_BLEND_FACTOR_SRC_COLOR);

			si_blend_remove_dst(&eqA, &srcA, &dstA,
					    VK_BLEND_FACTOR_DST_COLOR,
					    VK_BLEND_FACTOR_SRC_COLOR);

			si_blend_remove_dst(&eqA, &srcA, &dstA,
					    VK_BLEND_FACTOR_DST_ALPHA,
					    VK_BLEND_FACTOR_SRC_ALPHA);

			/* Look up the ideal settings from tables. */
			srcRGB_opt = si_translate_blend_opt_factor(srcRGB, false);
			dstRGB_opt = si_translate_blend_opt_factor(dstRGB, false);
			srcA_opt = si_translate_blend_opt_factor(srcA, true);
			dstA_opt = si_translate_blend_opt_factor(dstA, true);

			/* Handle interdependencies. */
			if (si_blend_factor_uses_dst(srcRGB))
				dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
			if (si_blend_factor_uses_dst(srcA))
				dstA_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;

			if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE &&
			    (dstRGB == VK_BLEND_FACTOR_ZERO ||
			     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
			     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE))
				dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;

			/* Set the final value. */
			blend.sx_mrt_blend_opt[i] =
				S_028760_COLOR_SRC_OPT(srcRGB_opt) |
				S_028760_COLOR_DST_OPT(dstRGB_opt) |
				S_028760_COLOR_COMB_FCN(si_translate_blend_opt_function(eqRGB)) |
				S_028760_ALPHA_SRC_OPT(srcA_opt) |
				S_028760_ALPHA_DST_OPT(dstA_opt) |
				S_028760_ALPHA_COMB_FCN(si_translate_blend_opt_function(eqA));
			blend_cntl |= S_028780_ENABLE(1);

			blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
			blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(srcRGB));
			blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(dstRGB));
			if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
				blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
				blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
				blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(srcA));
				blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(dstA));
			}
			blend.cb_blend_control[i] = blend_cntl;

			blend.blend_enable_4bit |= 0xfu << (i * 4);

			if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
			    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
			    srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
			    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
			    srcRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ||
			    dstRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
				blend.need_src_alpha |= 1 << i;
		}
		for (i = vkblend->attachmentCount; i < 8; i++) {
			blend.cb_blend_control[i] = 0;
			blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);
		}
	}

	if (pipeline->device->physical_device->rad_info.has_rbplus) {
		/* Disable RB+ blend optimizations for dual source blending. */
		if (blend.mrt0_is_dual_src) {
			for (i = 0; i < 8; i++) {
				blend.sx_mrt_blend_opt[i] =
					S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_NONE) |
					S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_NONE);
			}
		}

		/* RB+ doesn't work with dual source blending, logic op and
		 * RESOLVE.
		 */
		if (blend.mrt0_is_dual_src ||
		    (vkblend && vkblend->logicOpEnable) ||
		    mode == V_028808_CB_RESOLVE)
			blend.cb_color_control |= S_028808_DISABLE_DUAL_QUAD(1);
	}

	if (blend.cb_target_mask)
		blend.cb_color_control |= S_028808_MODE(mode);
	else
		blend.cb_color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	radv_pipeline_compute_spi_color_formats(pipeline, pCreateInfo, &blend);
	return blend;
}
static uint32_t si_translate_stencil_op(enum VkStencilOp op)
{
	switch (op) {
	case VK_STENCIL_OP_KEEP:
		return V_02842C_STENCIL_KEEP;
	case VK_STENCIL_OP_ZERO:
		return V_02842C_STENCIL_ZERO;
	case VK_STENCIL_OP_REPLACE:
		return V_02842C_STENCIL_REPLACE_TEST;
	case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
		return V_02842C_STENCIL_ADD_CLAMP;
	case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
		return V_02842C_STENCIL_SUB_CLAMP;
	case VK_STENCIL_OP_INVERT:
		return V_02842C_STENCIL_INVERT;
	case VK_STENCIL_OP_INCREMENT_AND_WRAP:
		return V_02842C_STENCIL_ADD_WRAP;
	case VK_STENCIL_OP_DECREMENT_AND_WRAP:
		return V_02842C_STENCIL_SUB_WRAP;
	default:
		return 0;
	}
}
static uint32_t si_translate_fill(VkPolygonMode func)
{
	switch (func) {
	case VK_POLYGON_MODE_FILL:
		return V_028814_X_DRAW_TRIANGLES;
	case VK_POLYGON_MODE_LINE:
		return V_028814_X_DRAW_LINES;
	case VK_POLYGON_MODE_POINT:
		return V_028814_X_DRAW_POINTS;
	default:
		assert(0);
		return V_028814_X_DRAW_POINTS;
	}
}
static uint8_t radv_pipeline_get_ps_iter_samples(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
	uint32_t ps_iter_samples = 1;
	uint32_t num_samples;

	/* From the Vulkan 1.1.129 spec, 26.7. Sample Shading:
	 *
	 * "If the VK_AMD_mixed_attachment_samples extension is enabled and the
	 *  subpass uses color attachments, totalSamples is the number of
	 *  samples of the color attachments. Otherwise, totalSamples is the
	 *  value of VkPipelineMultisampleStateCreateInfo::rasterizationSamples
	 *  specified at pipeline creation time."
	 */
	if (subpass->has_color_att) {
		num_samples = subpass->color_sample_count;
	} else {
		num_samples = vkms->rasterizationSamples;
	}

	if (vkms->sampleShadingEnable) {
		ps_iter_samples = ceil(vkms->minSampleShading * num_samples);
		ps_iter_samples = util_next_power_of_two(ps_iter_samples);
	}
	return ps_iter_samples;
}
static bool
radv_is_depth_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->depthTestEnable &&
	       pCreateInfo->depthWriteEnable &&
	       pCreateInfo->depthCompareOp != VK_COMPARE_OP_NEVER;
}
static bool
radv_writes_stencil(const VkStencilOpState *state)
{
	return state->writeMask &&
	       (state->failOp != VK_STENCIL_OP_KEEP ||
		state->passOp != VK_STENCIL_OP_KEEP ||
		state->depthFailOp != VK_STENCIL_OP_KEEP);
}
static bool
radv_is_stencil_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->stencilTestEnable &&
	       (radv_writes_stencil(&pCreateInfo->front) ||
		radv_writes_stencil(&pCreateInfo->back));
}
static bool
radv_is_ds_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return radv_is_depth_write_enabled(pCreateInfo) ||
	       radv_is_stencil_write_enabled(pCreateInfo);
}
static bool
radv_order_invariant_stencil_op(VkStencilOp op)
{
	/* REPLACE is normally order invariant, except when the stencil
	 * reference value is written by the fragment shader. Tracking this
	 * interaction does not seem worth the effort, so be conservative.
	 */
	return op != VK_STENCIL_OP_INCREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_DECREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_REPLACE;
}
static bool
radv_order_invariant_stencil_state(const VkStencilOpState *state)
{
	/* Compute whether, assuming Z writes are disabled, this stencil state
	 * is order invariant in the sense that the set of passing fragments as
	 * well as the final stencil buffer result does not depend on the order
	 * of fragments.
	 */
	return !state->writeMask ||
	       /* The following assumes that Z writes are disabled. */
	       (state->compareOp == VK_COMPARE_OP_ALWAYS &&
		radv_order_invariant_stencil_op(state->passOp) &&
		radv_order_invariant_stencil_op(state->depthFailOp)) ||
	       (state->compareOp == VK_COMPARE_OP_NEVER &&
		radv_order_invariant_stencil_op(state->failOp));
}
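
/* Decide whether out-of-order rasterization can be enabled automatically:
 * the final Z/S and color results must be invariant under the order in
 * which fragments arrive, given the current depth/stencil and blend state.
 */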
static bool
radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
				struct radv_blend_state *blend,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	const VkPipelineDepthStencilStateCreateInfo *vkds = radv_pipeline_get_depth_stencil_state(pCreateInfo);
	const VkPipelineColorBlendStateCreateInfo *vkblend = radv_pipeline_get_color_blend_state(pCreateInfo);
	unsigned colormask = blend->cb_target_enabled_4bit;

	if (!pipeline->device->physical_device->out_of_order_rast_allowed)
		return false;

	/* Be conservative if a logic operation is enabled with color buffers. */
	if (colormask && vkblend && vkblend->logicOpEnable)
		return false;

	/* Default depth/stencil invariance when no attachment is bound. */
	struct radv_dsa_order_invariance dsa_order_invariant = {
		.zs = true, .pass_set = true
	};

	if (vkds) {
		struct radv_render_pass_attachment *attachment =
			pass->attachments + subpass->depth_stencil_attachment->attachment;
		bool has_stencil = vk_format_is_stencil(attachment->format);
		struct radv_dsa_order_invariance order_invariance[2];
		struct radv_shader_variant *ps =
			pipeline->shaders[MESA_SHADER_FRAGMENT];

		/* Compute depth/stencil order invariance in order to know if
		 * it's safe to enable out-of-order.
		 */
		bool zfunc_is_ordered =
			vkds->depthCompareOp == VK_COMPARE_OP_NEVER ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS_OR_EQUAL ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER_OR_EQUAL;

		bool nozwrite_and_order_invariant_stencil =
			!radv_is_ds_write_enabled(vkds) ||
			(!radv_is_depth_write_enabled(vkds) &&
			 radv_order_invariant_stencil_state(&vkds->front) &&
			 radv_order_invariant_stencil_state(&vkds->back));

		order_invariance[1].zs =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 zfunc_is_ordered);
		order_invariance[0].zs =
			!radv_is_depth_write_enabled(vkds) || zfunc_is_ordered;

		order_invariance[1].pass_set =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 (vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			  vkds->depthCompareOp == VK_COMPARE_OP_NEVER));
		order_invariance[0].pass_set =
			!radv_is_depth_write_enabled(vkds) ||
			(vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			 vkds->depthCompareOp == VK_COMPARE_OP_NEVER);

		dsa_order_invariant = order_invariance[has_stencil];
		if (!dsa_order_invariant.zs)
			return false;

		/* The set of PS invocations is always order invariant,
		 * except when early Z/S tests are requested.
		 */
		if (ps &&
		    ps->info.ps.writes_memory &&
		    ps->info.ps.early_fragment_test &&
		    !dsa_order_invariant.pass_set)
			return false;

		/* Determine if out-of-order rasterization should be disabled
		 * when occlusion queries are used.
		 */
		pipeline->graphics.disable_out_of_order_rast_for_occlusion =
			!dsa_order_invariant.pass_set;
	}

	/* No color buffers are enabled for writing. */
	if (!colormask)
		return true;

	unsigned blendmask = colormask & blend->blend_enable_4bit;

	if (blendmask) {
		/* Only commutative blending. */
		if (blendmask & ~blend->commutative_4bit)
			return false;

		if (!dsa_order_invariant.pass_set)
			return false;
	}

	if (colormask & ~blendmask)
		return false;

	return true;
}
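
/* Fill struct radv_multisample_state: sample counts, EQAA and PS-iter
 * settings, the sample mask, and whether out-of-order rasterization is
 * enabled (explicitly via VK_AMD_rasterization_order or automatically).
 */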
static void
radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
				     struct radv_blend_state *blend,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = radv_pipeline_get_multisample_state(pCreateInfo);
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	unsigned num_tile_pipes = pipeline->device->physical_device->rad_info.num_tile_pipes;
	bool out_of_order_rast = false;
	int ps_iter_samples = 1;
	uint32_t mask = 0xffff;

	if (vkms) {
		ms->num_samples = vkms->rasterizationSamples;

		/* From the Vulkan 1.1.129 spec, 26.7. Sample Shading:
		 *
		 * "Sample shading is enabled for a graphics pipeline:
		 *
		 * - If the interface of the fragment shader entry point of the
		 *   graphics pipeline includes an input variable decorated
		 *   with SampleId or SamplePosition. In this case
		 *   minSampleShadingFactor takes the value 1.0.
		 * - Else if the sampleShadingEnable member of the
		 *   VkPipelineMultisampleStateCreateInfo structure specified
		 *   when creating the graphics pipeline is set to VK_TRUE. In
		 *   this case minSampleShadingFactor takes the value of
		 *   VkPipelineMultisampleStateCreateInfo::minSampleShading.
		 *
		 * Otherwise, sample shading is considered disabled."
		 */
		if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.force_persample) {
			ps_iter_samples = ms->num_samples;
		} else {
			ps_iter_samples = radv_pipeline_get_ps_iter_samples(pCreateInfo);
		}
	} else {
		ms->num_samples = 1;
	}

	const struct VkPipelineRasterizationStateRasterizationOrderAMD *raster_order =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext, PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD);
	if (raster_order && raster_order->rasterizationOrder == VK_RASTERIZATION_ORDER_RELAXED_AMD) {
		/* Out-of-order rasterization is explicitly enabled by the
		 * application.
		 */
		out_of_order_rast = true;
	} else {
		/* Determine if the driver can enable out-of-order
		 * rasterization internally.
		 */
		out_of_order_rast =
			radv_pipeline_out_of_order_rast(pipeline, blend, pCreateInfo);
	}

	ms->pa_sc_line_cntl = S_028BDC_DX10_DIAMOND_TEST_ENA(1);
	ms->pa_sc_aa_config = 0;
	ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
		      S_028804_INCOHERENT_EQAA_READS(1) |
		      S_028804_INTERPOLATE_COMP_Z(1) |
		      S_028804_STATIC_ANCHOR_ASSOCIATIONS(1);
	ms->pa_sc_mode_cntl_1 =
		S_028A4C_WALK_FENCE_ENABLE(1) | //TODO linear dst fixes
		S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
		S_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(out_of_order_rast) |
		S_028A4C_OUT_OF_ORDER_WATER_MARK(0x7) |
		S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1) |
		S_028A4C_SUPERTILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_TILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE(1) |
		S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		S_028A4C_FORCE_EOV_REZ_ENABLE(1);
	ms->pa_sc_mode_cntl_0 = S_028A48_ALTERNATE_RBS_PER_TILE(pipeline->device->physical_device->rad_info.chip_class >= GFX9) |
				S_028A48_VPORT_SCISSOR_ENABLE(1);

	const VkPipelineRasterizationLineStateCreateInfoEXT *rast_line =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
				     PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
	if (rast_line) {
		ms->pa_sc_mode_cntl_0 |= S_028A48_LINE_STIPPLE_ENABLE(rast_line->stippledLineEnable);
		if (rast_line->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT) {
			/* From the Vulkan spec 1.1.129:
			 *
			 * "When VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT lines
			 *  are being rasterized, sample locations may all be
			 *  treated as being at the pixel center (this may
			 *  affect attribute and depth interpolation)."
			 */
			ms->num_samples = 1;
		}
	}

	if (ms->num_samples > 1) {
		RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
		struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
		uint32_t z_samples = subpass->depth_stencil_attachment ? subpass->depth_sample_count : ms->num_samples;
		unsigned log_samples = util_logbase2(ms->num_samples);
		unsigned log_z_samples = util_logbase2(z_samples);
		unsigned log_ps_iter_samples = util_logbase2(ps_iter_samples);
		ms->pa_sc_mode_cntl_0 |= S_028A48_MSAA_ENABLE(1);
		ms->pa_sc_line_cntl |= S_028BDC_EXPAND_LINE_WIDTH(1); /* CM_R_028BDC_PA_SC_LINE_CNTL */
		ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_z_samples) |
			       S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
			       S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
			       S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
		ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
				       S_028BE0_MAX_SAMPLE_DIST(radv_get_default_max_sample_dist(log_samples)) |
				       S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples); /* CM_R_028BE0_PA_SC_AA_CONFIG */
		ms->pa_sc_mode_cntl_1 |= S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
		if (ps_iter_samples > 1)
			pipeline->graphics.spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
	}

	if (vkms && vkms->pSampleMask) {
		mask = vkms->pSampleMask[0] & 0xffff;
	}

	ms->pa_sc_aa_mask[0] = mask | (mask << 16);
	ms->pa_sc_aa_mask[1] = mask | (mask << 16);
}
static bool
radv_prim_can_use_guardband(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return false;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return true;
	default:
		unreachable("unhandled primitive type");
	}
}
static unsigned
si_translate_prim(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
		return V_008958_DI_PT_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
		return V_008958_DI_PT_LINELIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
		return V_008958_DI_PT_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
		return V_008958_DI_PT_TRILIST;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
		return V_008958_DI_PT_TRISTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
		return V_008958_DI_PT_TRIFAN;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_LINELIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_LINESTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_TRILIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_TRISTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_008958_DI_PT_PATCH;
	default:
		assert(0);
		return 0;
	}
}
static uint32_t
si_conv_gl_prim_to_gs_out(unsigned gl_prim)
{
	switch (gl_prim) {
	case 0: /* GL_POINTS */
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case 1: /* GL_LINES */
	case 3: /* GL_LINE_STRIP */
	case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
	case 0x8E7A: /* GL_ISOLINES */
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;

	case 4: /* GL_TRIANGLES */
	case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
	case 5: /* GL_TRIANGLE_STRIP */
	case 7: /* GL_QUADS */
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}
static uint32_t
si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}
static unsigned radv_dynamic_state_mask(VkDynamicState state)
{
	switch (state) {
	case VK_DYNAMIC_STATE_VIEWPORT:
		return RADV_DYNAMIC_VIEWPORT;
	case VK_DYNAMIC_STATE_SCISSOR:
		return RADV_DYNAMIC_SCISSOR;
	case VK_DYNAMIC_STATE_LINE_WIDTH:
		return RADV_DYNAMIC_LINE_WIDTH;
	case VK_DYNAMIC_STATE_DEPTH_BIAS:
		return RADV_DYNAMIC_DEPTH_BIAS;
	case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
		return RADV_DYNAMIC_BLEND_CONSTANTS;
	case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
		return RADV_DYNAMIC_DEPTH_BOUNDS;
	case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
		return RADV_DYNAMIC_STENCIL_COMPARE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
		return RADV_DYNAMIC_STENCIL_WRITE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
		return RADV_DYNAMIC_STENCIL_REFERENCE;
	case VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT:
		return RADV_DYNAMIC_DISCARD_RECTANGLE;
	case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
		return RADV_DYNAMIC_SAMPLE_LOCATIONS;
	case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT:
		return RADV_DYNAMIC_LINE_STIPPLE;
	default:
		unreachable("Unhandled dynamic state");
	}
}
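
/* Compute which dynamic states are relevant for this pipeline; states tied
 * to disabled features or absent create-info structs are masked out.
 */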
static uint32_t radv_pipeline_needed_dynamic_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t states = RADV_DYNAMIC_ALL;

	/* If rasterization is disabled we do not care about any of the dynamic states,
	 * since they are all rasterization related only. */
	if (pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
		return 0;

	if (!pCreateInfo->pRasterizationState->depthBiasEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BIAS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->depthBoundsTestEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BOUNDS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->stencilTestEnable)
		states &= ~(RADV_DYNAMIC_STENCIL_COMPARE_MASK |
			    RADV_DYNAMIC_STENCIL_WRITE_MASK |
			    RADV_DYNAMIC_STENCIL_REFERENCE);

	if (!vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_DISCARD_RECTANGLE;

	if (!pCreateInfo->pMultisampleState ||
	    !vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
				  PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_SAMPLE_LOCATIONS;

	if (!pCreateInfo->pRasterizationState ||
	    !vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
				  PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_LINE_STIPPLE;

	/* TODO: blend constants & line width. */

	return states;
}
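
/* Copy the static values of every state that is not marked dynamic from
 * pCreateInfo into pipeline->dynamic_state, starting from the defaults.
 */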
static void
radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
				 const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t needed_states = radv_pipeline_needed_dynamic_state(pCreateInfo);
	uint32_t states = needed_states;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

	pipeline->dynamic_state = default_dynamic_state;
	pipeline->graphics.needed_dynamic_state = needed_states;

	if (pCreateInfo->pDynamicState) {
		/* Remove all of the states that are marked as dynamic */
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t s = 0; s < count; s++)
			states &= ~radv_dynamic_state_mask(pCreateInfo->pDynamicState->pDynamicStates[s]);
	}

	struct radv_dynamic_state *dynamic = &pipeline->dynamic_state;

	if (needed_states & RADV_DYNAMIC_VIEWPORT) {
		assert(pCreateInfo->pViewportState);

		dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
		if (states & RADV_DYNAMIC_VIEWPORT) {
			typed_memcpy(dynamic->viewport.viewports,
				     pCreateInfo->pViewportState->pViewports,
				     pCreateInfo->pViewportState->viewportCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SCISSOR) {
		dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
		if (states & RADV_DYNAMIC_SCISSOR) {
			typed_memcpy(dynamic->scissor.scissors,
				     pCreateInfo->pViewportState->pScissors,
				     pCreateInfo->pViewportState->scissorCount);
		}
	}

	if (states & RADV_DYNAMIC_LINE_WIDTH) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
	}

	if (states & RADV_DYNAMIC_DEPTH_BIAS) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->depth_bias.bias =
			pCreateInfo->pRasterizationState->depthBiasConstantFactor;
		dynamic->depth_bias.clamp =
			pCreateInfo->pRasterizationState->depthBiasClamp;
		dynamic->depth_bias.slope =
			pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
	}

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pColorBlendState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is
	 *    created against does not use any color attachments.
	 */
	if (subpass->has_color_att && states & RADV_DYNAMIC_BLEND_CONSTANTS) {
		assert(pCreateInfo->pColorBlendState);
		typed_memcpy(dynamic->blend_constants,
			     pCreateInfo->pColorBlendState->blendConstants, 4);
	}

	/* If there is no depthstencil attachment, then don't read
	 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
	 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
	 * no need to override the depthstencil defaults in
	 * radv_pipeline::dynamic_state when there is no depthstencil attachment.
	 *
	 * Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pDepthStencilState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is created
	 *    against does not use a depth/stencil attachment.
	 */
	if (needed_states && subpass->depth_stencil_attachment) {
		assert(pCreateInfo->pDepthStencilState);

		if (states & RADV_DYNAMIC_DEPTH_BOUNDS) {
			dynamic->depth_bounds.min =
				pCreateInfo->pDepthStencilState->minDepthBounds;
			dynamic->depth_bounds.max =
				pCreateInfo->pDepthStencilState->maxDepthBounds;
		}

		if (states & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
			dynamic->stencil_compare_mask.front =
				pCreateInfo->pDepthStencilState->front.compareMask;
			dynamic->stencil_compare_mask.back =
				pCreateInfo->pDepthStencilState->back.compareMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
			dynamic->stencil_write_mask.front =
				pCreateInfo->pDepthStencilState->front.writeMask;
			dynamic->stencil_write_mask.back =
				pCreateInfo->pDepthStencilState->back.writeMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_REFERENCE) {
			dynamic->stencil_reference.front =
				pCreateInfo->pDepthStencilState->front.reference;
			dynamic->stencil_reference.back =
				pCreateInfo->pDepthStencilState->back.reference;
		}
	}

	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
	if (needed_states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
		dynamic->discard_rectangle.count = discard_rectangle_info->discardRectangleCount;
		if (states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
			typed_memcpy(dynamic->discard_rectangle.rectangles,
				     discard_rectangle_info->pDiscardRectangles,
				     discard_rectangle_info->discardRectangleCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
		const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_info =
			vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
					     PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
		/* If sampleLocationsEnable is VK_FALSE, the default sample
		 * locations are used and the values specified in
		 * sampleLocationsInfo are ignored.
		 */
		if (sample_location_info->sampleLocationsEnable) {
			const VkSampleLocationsInfoEXT *pSampleLocationsInfo =
				&sample_location_info->sampleLocationsInfo;

			assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);

			dynamic->sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
			dynamic->sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
			dynamic->sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
			typed_memcpy(&dynamic->sample_location.locations[0],
				     pSampleLocationsInfo->pSampleLocations,
				     pSampleLocationsInfo->sampleLocationsCount);
		}
	}

	const VkPipelineRasterizationLineStateCreateInfoEXT *rast_line_info =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
				     PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
	if (needed_states & RADV_DYNAMIC_LINE_STIPPLE) {
		dynamic->line_stipple.factor = rast_line_info->lineStippleFactor;
		dynamic->line_stipple.pattern = rast_line_info->lineStipplePattern;
	}

	pipeline->dynamic_state.mask = states;
}
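
/* Size the on-chip (LDS) ESGS ring for merged GS on GFX9+: compute how many
 * ES vertices and GS primitives fit in one subgroup given the ESGS item
 * size and the LDS budget, and emit the corresponding VGT register values.
 */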
static void
gfx9_get_gs_info(const struct radv_pipeline_key *key,
		 const struct radv_pipeline *pipeline,
		 nir_shader **nir,
		 struct radv_shader_info *infos,
		 struct gfx9_gs_info *out)
{
	struct radv_shader_info *gs_info = &infos[MESA_SHADER_GEOMETRY];
	struct radv_es_output_info *es_info;
	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		es_info = nir[MESA_SHADER_TESS_CTRL] ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	else
		es_info = nir[MESA_SHADER_TESS_CTRL] ?
			&infos[MESA_SHADER_TESS_EVAL].tes.es_info :
			&infos[MESA_SHADER_VERTEX].vs.es_info;

	unsigned gs_num_invocations = MAX2(gs_info->gs.invocations, 1);
	bool uses_adjacency;
	switch(key->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space. */
	const unsigned max_lds_size = 8 * 1024;
	const unsigned esgs_itemsize = es_info->esgs_itemsize / 4;
	unsigned esgs_lds_size;

	/* All these are per subgroup: */
	const unsigned max_out_prims = 32 * 1024;
	const unsigned max_es_verts = 255;
	const unsigned ideal_gs_prims = 64;
	unsigned max_gs_prims, gs_prims;
	unsigned min_es_verts, es_verts, worst_case_es_verts;

	if (uses_adjacency || gs_num_invocations > 1)
		max_gs_prims = 127 / gs_num_invocations;
	else
		max_gs_prims = 255;

	/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
	 * Make sure we don't go over the maximum value.
	 */
	if (gs_info->gs.vertices_out > 0) {
		max_gs_prims = MIN2(max_gs_prims,
				    max_out_prims /
				    (gs_info->gs.vertices_out * gs_num_invocations));
	}
	assert(max_gs_prims > 0);

	/* If the primitive has adjacency, halve the number of vertices
	 * that will be reused in multiple primitives.
	 */
	min_es_verts = gs_info->gs.vertices_in / (uses_adjacency ? 2 : 1);

	gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
	worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

	/* Compute ESGS LDS size based on the worst case number of ES vertices
	 * needed to create the target number of GS prims per subgroup.
	 */
	esgs_lds_size = esgs_itemsize * worst_case_es_verts;

	/* If total LDS usage is too big, refactor partitions based on ratio
	 * of ESGS item sizes.
	 */
	if (esgs_lds_size > max_lds_size) {
		/* Our target GS Prims Per Subgroup was too large. Calculate
		 * the maximum number of GS Prims Per Subgroup that will fit
		 * into LDS, capped by the maximum that the hardware can support.
		 */
		gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
				max_gs_prims);
		assert(gs_prims > 0);
		worst_case_es_verts = MIN2(min_es_verts * gs_prims,
					   max_es_verts);

		esgs_lds_size = esgs_itemsize * worst_case_es_verts;
		assert(esgs_lds_size <= max_lds_size);
	}

	/* Now calculate remaining ESGS information. */
	if (esgs_lds_size)
		es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
	else
		es_verts = max_es_verts;

	/* Vertices for adjacency primitives are not always reused, so restore
	 * it for ES_VERTS_PER_SUBGRP.
	 */
	min_es_verts = gs_info->gs.vertices_in;

	/* For normal primitives, the VGT only checks if they are past the ES
	 * verts per subgroup after allocating a full GS primitive and if they
	 * are, kick off a new subgroup. But if those additional ES verts are
	 * unique (e.g. not reused) we need to make sure there is enough LDS
	 * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
	 */
	es_verts -= min_es_verts - 1;

	uint32_t es_verts_per_subgroup = es_verts;
	uint32_t gs_prims_per_subgroup = gs_prims;
	uint32_t gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
	uint32_t max_prims_per_subgroup = gs_inst_prims_in_subgroup * gs_info->gs.vertices_out;
	out->lds_size = align(esgs_lds_size, 128) / 128;
	out->vgt_gs_onchip_cntl = S_028A44_ES_VERTS_PER_SUBGRP(es_verts_per_subgroup) |
				  S_028A44_GS_PRIMS_PER_SUBGRP(gs_prims_per_subgroup) |
				  S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_inst_prims_in_subgroup);
	out->vgt_gs_max_prims_per_subgroup = S_028A94_MAX_PRIMS_PER_SUBGROUP(max_prims_per_subgroup);
	out->vgt_esgs_ring_itemsize = esgs_itemsize;
	assert(max_prims_per_subgroup <= max_out_prims);
}

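/*
 * Worked example (added commentary, not part of the original source): with
 * esgs_itemsize = 4 dwords, triangle input (vertices_in = 3, no adjacency)
 * and the ideal 64 GS prims per subgroup, worst_case_es_verts = 3 * 64 = 192,
 * so esgs_lds_size = 4 * 192 = 768 dwords, comfortably below the 8K dword
 * budget, and no repartitioning is required.
 */
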
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}

static unsigned
radv_get_num_input_vertices(nir_shader **nir)
{
	if (nir[MESA_SHADER_GEOMETRY]) {
		nir_shader *gs = nir[MESA_SHADER_GEOMETRY];

		return gs->info.gs.vertices_in;
	}

	if (nir[MESA_SHADER_TESS_CTRL]) {
		nir_shader *tes = nir[MESA_SHADER_TESS_EVAL];

		if (tes->info.tess.point_mode)
			return 1;
		if (tes->info.tess.primitive_mode == GL_ISOLINES)
			return 2;
		return 3;
	}

	return 3;
}

static void
gfx10_get_ngg_info(const struct radv_pipeline_key *key,
		   struct radv_pipeline *pipeline,
		   nir_shader **nir,
		   struct radv_shader_info *infos,
		   struct gfx10_ngg_info *ngg)
{
	struct radv_shader_info *gs_info = &infos[MESA_SHADER_GEOMETRY];
	struct radv_es_output_info *es_info =
		nir[MESA_SHADER_TESS_CTRL] ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	unsigned gs_type = nir[MESA_SHADER_GEOMETRY] ? MESA_SHADER_GEOMETRY : MESA_SHADER_VERTEX;
	unsigned max_verts_per_prim = radv_get_num_input_vertices(nir);
	unsigned min_verts_per_prim =
		gs_type == MESA_SHADER_GEOMETRY ? max_verts_per_prim : 1;
	unsigned gs_num_invocations = nir[MESA_SHADER_GEOMETRY] ? MAX2(gs_info->gs.invocations, 1) : 1;
	bool uses_adjacency;
	switch(key->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       8K dwords.
	 */
	const unsigned max_lds_size = 8 * 1024 - 768;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 256;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on based on the primitive type of
	 * the draw:
	 *  - at most 252 for any line input primitive type
	 *  - at most 251 for any quad input primitive type
	 *  - at most 251 for triangle strips with adjacency (this happens to
	 *    be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == MESA_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_info->gs.vertices_out * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_info->gs.vertices_out;
		}

		esvert_lds_size = es_info->esgs_itemsize / 4;
		gsprim_lds_size = (gs_info->gs.gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* LDS size for passing data from GS to ES. */
		struct radv_streamout_info *so_info = nir[MESA_SHADER_TESS_CTRL]
			? &infos[MESA_SHADER_TESS_EVAL].so
			: &infos[MESA_SHADER_VERTEX].so;

		if (so_info->num_outputs)
			esvert_lds_size = 4 * so_info->num_outputs + 1;

		/* GS stores Primitive IDs (one DWORD) into LDS at the address
		 * corresponding to the ES thread of the provoking vertex. All
		 * ES threads load and export PrimitiveID for their thread.
		 */
		if (!nir[MESA_SHADER_TESS_CTRL] &&
		    infos[MESA_SHADER_VERTEX].vs.outinfo.export_prim_id)
			esvert_lds_size = MAX2(esvert_lds_size, 1);
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, uses_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		unsigned wavesize;

		if (gs_type == MESA_SHADER_GEOMETRY) {
			wavesize = gs_info->wave_size;
		} else {
			wavesize = nir[MESA_SHADER_TESS_CTRL]
				? infos[MESA_SHADER_TESS_EVAL].wave_size
				: infos[MESA_SHADER_VERTEX].wave_size;
		}

		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, uses_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_info->gs.vertices_out :
		gs_type == MESA_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_info->gs.vertices_out :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == MESA_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_info->gs.vertices_out;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	ngg->hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	ngg->max_gsprims = max_gsprims;
	ngg->max_out_verts = max_out_vertices;
	ngg->prim_amp_factor = prim_amp_factor;
	ngg->max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
	ngg->ngg_emit_size = max_gsprims * gsprim_lds_size;
	ngg->esgs_ring_size = 4 * max_esverts * esvert_lds_size;

	if (gs_type == MESA_SHADER_GEOMETRY) {
		ngg->vgt_esgs_ring_itemsize = es_info->esgs_itemsize / 4;
	} else {
		ngg->vgt_esgs_ring_itemsize = 1;
	}

	pipeline->graphics.esgs_ring_size = ngg->esgs_ring_size;

	assert(ngg->hw_max_esverts >= 24); /* HW limitation */
}

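/*
 * Worked example (added commentary, not part of the original source): with
 * esvert_lds_size = 4 and gsprim_lds_size = 8 dwords, the starting values
 * max_esverts = 256 and max_gsprims = 128 give lds_total = 256 * 4 + 128 * 8
 * = 2048 dwords, already below target_lds_size (8K - 768 dwords), so the
 * proportional scale-down is a no-op and only the wave-size rounding loop
 * adjusts the values.
 */
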
static void
calculate_gs_ring_sizes(struct radv_pipeline *pipeline,
			const struct gfx9_gs_info *gs)
{
	struct radv_device *device = pipeline->device;
	unsigned num_se = device->physical_device->rad_info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	/* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
	 * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
	 */
	unsigned gs_vertex_reuse =
		(device->physical_device->rad_info.chip_class >= GFX8 ? 32 : 16) * num_se;
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
	struct radv_shader_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;

	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(gs->vgt_esgs_ring_itemsize * 4 * gs_vertex_reuse *
					    wave_size, alignment);
	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
		gs->vgt_esgs_ring_itemsize * 4 * gs_info->gs.vertices_in;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
		gs_info->gs.max_gsvs_emit_size;

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
		pipeline->graphics.esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);

	pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}

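/*
 * Worked example (added commentary, not part of the original source): on a
 * 4-SE GFX8 part with vgt_esgs_ring_itemsize = 1 dword and 3 vertices per GS
 * input primitive, the recommended ESGS ring size is
 * 128 waves * 2 * 64 lanes * 4 bytes * 3 = 196608 bytes before alignment; the
 * GSVS ring is derived the same way from max_gsvs_emit_size.
 */
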
static void si_multiwave_lds_size_workaround(struct radv_device *device,
					     unsigned *lds_size)
{
	/* If tessellation is all offchip and on-chip GS isn't used, this
	 * workaround is not needed.
	 */
	return;

	/* SPI barrier management bug:
	 *   Make sure we have at least 4k of LDS in use to avoid the bug.
	 *   It applies to workgroup sizes of more than one wavefront.
	 */
	if (device->physical_device->rad_info.family == CHIP_BONAIRE ||
	    device->physical_device->rad_info.family == CHIP_KABINI)
		*lds_size = MAX2(*lds_size, 8);
}

struct radv_shader_variant *
radv_get_shader(struct radv_pipeline *pipeline,
		gl_shader_stage stage)
{
	if (stage == MESA_SHADER_VERTEX) {
		if (pipeline->shaders[MESA_SHADER_VERTEX])
			return pipeline->shaders[MESA_SHADER_VERTEX];
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
			return pipeline->shaders[MESA_SHADER_TESS_CTRL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	} else if (stage == MESA_SHADER_TESS_EVAL) {
		if (!radv_pipeline_has_tess(pipeline))
			return NULL;
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
			return pipeline->shaders[MESA_SHADER_TESS_EVAL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	}
	return pipeline->shaders[stage];
}

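/*
 * Usage note (added commentary, not part of the original source): on GFX9+
 * the vertex and tess-eval stages may be merged into the TCS/GS variants, so
 * callers use radv_get_shader() to obtain "the variant that executes this
 * stage" instead of indexing pipeline->shaders[] directly; e.g.
 * radv_get_shader(pipeline, MESA_SHADER_VERTEX) can return the merged
 * TESS_CTRL variant.
 */
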
static struct radv_tessellation_state
calculate_tess_state(struct radv_pipeline *pipeline,
		     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	unsigned num_tcs_input_cp;
	unsigned num_tcs_output_cp;
	unsigned lds_size;
	unsigned num_patches;
	struct radv_tessellation_state tess = {0};

	num_tcs_input_cp = pCreateInfo->pTessellationState->patchControlPoints;
	num_tcs_output_cp = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.tcs_vertices_out; //TCS VERTICES OUT
	num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;

	lds_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.lds_size;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
		assert(lds_size <= 65536);
		lds_size = align(lds_size, 512) / 512;
	} else {
		assert(lds_size <= 32768);
		lds_size = align(lds_size, 256) / 256;
	}
	si_multiwave_lds_size_workaround(pipeline->device, &lds_size);

	tess.lds_size = lds_size;

	tess.ls_hs_config = S_028B58_NUM_PATCHES(num_patches) |
		S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
		S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
	tess.num_patches = num_patches;

	struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);
	unsigned type = 0, partitioning = 0, topology = 0, distribution_mode = 0;

	switch (tes->info.tes.primitive_mode) {
	case GL_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case GL_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	case GL_ISOLINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	}

	switch (tes->info.tes.spacing) {
	case TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	case TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	default:
		break;
	}

	bool ccw = tes->info.tes.ccw;
	const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
		vk_find_struct_const(pCreateInfo->pTessellationState,
				     PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);

	if (domain_origin_state && domain_origin_state->domainOrigin != VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
		ccw = !ccw;

	if (tes->info.tes.point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes->info.tes.primitive_mode == GL_ISOLINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (ccw)
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	if (pipeline->device->physical_device->rad_info.has_distributed_tess) {
		if (pipeline->device->physical_device->rad_info.family == CHIP_FIJI ||
		    pipeline->device->physical_device->rad_info.family >= CHIP_POLARIS10)
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
		else
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
	} else
		distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;

	tess.tf_param = S_028B6C_TYPE(type) |
		S_028B6C_PARTITIONING(partitioning) |
		S_028B6C_TOPOLOGY(topology) |
		S_028B6C_DISTRIBUTION_MODE(distribution_mode);

	return tess;
}

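/*
 * Worked example (added commentary, not part of the original source): with 3
 * patch control points in, 3 TCS vertices out and num_patches = 8, the field
 * packs as ls_hs_config = S_028B58_NUM_PATCHES(8) |
 * S_028B58_HS_NUM_INPUT_CP(3) | S_028B58_HS_NUM_OUTPUT_CP(3).
 */
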
static const struct radv_prim_vertex_count prim_size_table[] = {
	[V_008958_DI_PT_NONE] = {0, 0},
	[V_008958_DI_PT_POINTLIST] = {1, 1},
	[V_008958_DI_PT_LINELIST] = {2, 2},
	[V_008958_DI_PT_LINESTRIP] = {2, 1},
	[V_008958_DI_PT_TRILIST] = {3, 3},
	[V_008958_DI_PT_TRIFAN] = {3, 1},
	[V_008958_DI_PT_TRISTRIP] = {3, 1},
	[V_008958_DI_PT_LINELIST_ADJ] = {4, 4},
	[V_008958_DI_PT_LINESTRIP_ADJ] = {4, 1},
	[V_008958_DI_PT_TRILIST_ADJ] = {6, 6},
	[V_008958_DI_PT_TRISTRIP_ADJ] = {6, 2},
	[V_008958_DI_PT_RECTLIST] = {3, 3},
	[V_008958_DI_PT_LINELOOP] = {2, 1},
	[V_008958_DI_PT_POLYGON] = {3, 1},
	[V_008958_DI_PT_2D_TRI_STRIP] = {0, 0},
};

static const struct radv_vs_output_info *get_vs_output_info(const struct radv_pipeline *pipeline)
{
	if (radv_pipeline_has_gs(pipeline))
		if (radv_pipeline_has_ngg(pipeline))
			return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.vs.outinfo;
		else
			return &pipeline->gs_copy_shader->info.vs.outinfo;
	else if (radv_pipeline_has_tess(pipeline))
		return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.outinfo;
	else
		return &pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.outinfo;
}

static void
radv_link_shaders(struct radv_pipeline *pipeline, nir_shader **shaders)
{
	nir_shader* ordered_shaders[MESA_SHADER_STAGES];
	int shader_count = 0;

	if(shaders[MESA_SHADER_FRAGMENT]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_FRAGMENT];
	}
	if(shaders[MESA_SHADER_GEOMETRY]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_GEOMETRY];
	}
	if(shaders[MESA_SHADER_TESS_EVAL]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_EVAL];
	}
	if(shaders[MESA_SHADER_TESS_CTRL]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_CTRL];
	}
	if(shaders[MESA_SHADER_VERTEX]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_VERTEX];
	}

	if (shader_count > 1) {
		unsigned first = ordered_shaders[shader_count - 1]->info.stage;
		unsigned last = ordered_shaders[0]->info.stage;

		if (ordered_shaders[0]->info.stage == MESA_SHADER_FRAGMENT &&
		    ordered_shaders[1]->info.has_transform_feedback_varyings)
			nir_link_xfb_varyings(ordered_shaders[1], ordered_shaders[0]);

		for (int i = 0; i < shader_count; ++i) {
			nir_variable_mode mask = 0;

			if (ordered_shaders[i]->info.stage != first)
				mask = mask | nir_var_shader_in;

			if (ordered_shaders[i]->info.stage != last)
				mask = mask | nir_var_shader_out;

			nir_lower_io_to_scalar_early(ordered_shaders[i], mask);
			radv_optimize_nir(ordered_shaders[i], false, false);
		}
	}

	for (int i = 1; i < shader_count; ++i) {
		nir_lower_io_arrays_to_elements(ordered_shaders[i],
						ordered_shaders[i - 1]);

		if (nir_link_opt_varyings(ordered_shaders[i],
					  ordered_shaders[i - 1]))
			radv_optimize_nir(ordered_shaders[i - 1], false, false);

		nir_remove_dead_variables(ordered_shaders[i],
					  nir_var_shader_out);
		nir_remove_dead_variables(ordered_shaders[i - 1],
					  nir_var_shader_in);

		bool progress = nir_remove_unused_varyings(ordered_shaders[i],
							   ordered_shaders[i - 1]);

		nir_compact_varyings(ordered_shaders[i],
				     ordered_shaders[i - 1], true);

		if (progress) {
			if (nir_lower_global_vars_to_local(ordered_shaders[i])) {
				ac_lower_indirect_derefs(ordered_shaders[i],
							 pipeline->device->physical_device->rad_info.chip_class);
			}
			radv_optimize_nir(ordered_shaders[i], false, false);

			if (nir_lower_global_vars_to_local(ordered_shaders[i - 1])) {
				ac_lower_indirect_derefs(ordered_shaders[i - 1],
							 pipeline->device->physical_device->rad_info.chip_class);
			}
			radv_optimize_nir(ordered_shaders[i - 1], false, false);
		}
	}
}

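/*
 * Added commentary (not part of the original source): ordered_shaders[] is
 * filled from the last stage to the first (fragment at index 0, vertex last),
 * so the loop above always links a producer, ordered_shaders[i], into the
 * consumer that directly follows it, ordered_shaders[i - 1].
 */
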
static uint32_t
radv_get_attrib_stride(const VkPipelineVertexInputStateCreateInfo *input_state,
		       uint32_t attrib_binding)
{
	for (uint32_t i = 0; i < input_state->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *input_binding =
			&input_state->pVertexBindingDescriptions[i];

		if (input_binding->binding == attrib_binding)
			return input_binding->stride;
	}

	return 0;
}

static struct radv_pipeline_key
radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
				    const VkGraphicsPipelineCreateInfo *pCreateInfo,
				    const struct radv_blend_state *blend,
				    bool has_view_index)
{
	const VkPipelineVertexInputStateCreateInfo *input_state =
		pCreateInfo->pVertexInputState;
	const VkPipelineVertexInputDivisorStateCreateInfoEXT *divisor_state =
		vk_find_struct_const(input_state->pNext, PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);

	struct radv_pipeline_key key;
	memset(&key, 0, sizeof(key));

	if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
		key.optimisations_disabled = 1;

	key.has_multiview_view_index = has_view_index;

	uint32_t binding_input_rate = 0;
	uint32_t instance_rate_divisors[MAX_VERTEX_ATTRIBS];
	for (unsigned i = 0; i < input_state->vertexBindingDescriptionCount; ++i) {
		if (input_state->pVertexBindingDescriptions[i].inputRate) {
			unsigned binding = input_state->pVertexBindingDescriptions[i].binding;
			binding_input_rate |= 1u << binding;
			instance_rate_divisors[binding] = 1;
		}
	}
	if (divisor_state) {
		for (unsigned i = 0; i < divisor_state->vertexBindingDivisorCount; ++i) {
			instance_rate_divisors[divisor_state->pVertexBindingDivisors[i].binding] =
				divisor_state->pVertexBindingDivisors[i].divisor;
		}
	}

	for (unsigned i = 0; i < input_state->vertexAttributeDescriptionCount; ++i) {
		const VkVertexInputAttributeDescription *desc =
			&input_state->pVertexAttributeDescriptions[i];
		const struct vk_format_description *format_desc;
		unsigned location = desc->location;
		unsigned binding = desc->binding;
		unsigned num_format, data_format;
		int first_non_void;

		if (binding_input_rate & (1u << binding)) {
			key.instance_rate_inputs |= 1u << location;
			key.instance_rate_divisors[location] = instance_rate_divisors[binding];
		}

		format_desc = vk_format_description(desc->format);
		first_non_void = vk_format_get_first_non_void_channel(desc->format);

		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);

		key.vertex_attribute_formats[location] = data_format | (num_format << 4);
		key.vertex_attribute_bindings[location] = desc->binding;
		key.vertex_attribute_offsets[location] = desc->offset;
		key.vertex_attribute_strides[location] = radv_get_attrib_stride(input_state, desc->binding);

		if (pipeline->device->physical_device->rad_info.chip_class <= GFX8 &&
		    pipeline->device->physical_device->rad_info.family != CHIP_STONEY) {
			VkFormat format = input_state->pVertexAttributeDescriptions[i].format;
			uint64_t adjust;
			switch(format) {
			case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
			case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
				adjust = RADV_ALPHA_ADJUST_SNORM;
				break;
			case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
			case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
				adjust = RADV_ALPHA_ADJUST_SSCALED;
				break;
			case VK_FORMAT_A2R10G10B10_SINT_PACK32:
			case VK_FORMAT_A2B10G10R10_SINT_PACK32:
				adjust = RADV_ALPHA_ADJUST_SINT;
				break;
			default:
				adjust = 0;
				break;
			}
			key.vertex_alpha_adjust |= adjust << (2 * location);
		}

		switch (desc->format) {
		case VK_FORMAT_B8G8R8A8_UNORM:
		case VK_FORMAT_B8G8R8A8_SNORM:
		case VK_FORMAT_B8G8R8A8_USCALED:
		case VK_FORMAT_B8G8R8A8_SSCALED:
		case VK_FORMAT_B8G8R8A8_UINT:
		case VK_FORMAT_B8G8R8A8_SINT:
		case VK_FORMAT_B8G8R8A8_SRGB:
		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
			key.vertex_post_shuffle |= 1 << location;
			break;
		default:
			break;
		}
	}

	const VkPipelineTessellationStateCreateInfo *tess =
		radv_pipeline_get_tessellation_state(pCreateInfo);
	if (tess)
		key.tess_input_vertices = tess->patchControlPoints;

	const VkPipelineMultisampleStateCreateInfo *vkms =
		radv_pipeline_get_multisample_state(pCreateInfo);
	if (vkms && vkms->rasterizationSamples > 1) {
		uint32_t num_samples = vkms->rasterizationSamples;
		uint32_t ps_iter_samples = radv_pipeline_get_ps_iter_samples(pCreateInfo);
		key.num_samples = num_samples;
		key.log2_ps_iter_samples = util_logbase2(ps_iter_samples);
	}

	key.col_format = blend->spi_shader_col_format;
	if (pipeline->device->physical_device->rad_info.chip_class < GFX8)
		radv_pipeline_compute_get_int_clamp(pCreateInfo, &key.is_int8, &key.is_int10);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10)
		key.topology = pCreateInfo->pInputAssemblyState->topology;

	return key;
}

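/*
 * Worked example (added commentary, not part of the original source): an
 * attribute whose buffer data format translates to 14 and whose num format
 * translates to 7 is stored as vertex_attribute_formats[location] =
 * 14 | (7 << 4) = 0x7e; consumers later split the two nibbles apart again.
 */
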
static bool
radv_nir_stage_uses_xfb(const nir_shader *nir)
{
	nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
	bool uses_xfb = !!xfb;

	ralloc_free(xfb);
	return uses_xfb;
}

static void
radv_fill_shader_keys(struct radv_device *device,
		      struct radv_shader_variant_key *keys,
		      const struct radv_pipeline_key *key,
		      nir_shader **nir)
{
	keys[MESA_SHADER_VERTEX].vs.instance_rate_inputs = key->instance_rate_inputs;
	keys[MESA_SHADER_VERTEX].vs.alpha_adjust = key->vertex_alpha_adjust;
	keys[MESA_SHADER_VERTEX].vs.post_shuffle = key->vertex_post_shuffle;
	for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; ++i) {
		keys[MESA_SHADER_VERTEX].vs.instance_rate_divisors[i] = key->instance_rate_divisors[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_formats[i] = key->vertex_attribute_formats[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_bindings[i] = key->vertex_attribute_bindings[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_offsets[i] = key->vertex_attribute_offsets[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_strides[i] = key->vertex_attribute_strides[i];
	}
	keys[MESA_SHADER_VERTEX].vs.outprim = si_conv_prim_to_gs_out(key->topology);

	if (nir[MESA_SHADER_TESS_CTRL]) {
		keys[MESA_SHADER_VERTEX].vs_common_out.as_ls = true;
		keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = 0;
		keys[MESA_SHADER_TESS_CTRL].tcs.input_vertices = key->tess_input_vertices;
		keys[MESA_SHADER_TESS_CTRL].tcs.primitive_mode = nir[MESA_SHADER_TESS_EVAL]->info.tess.primitive_mode;

		keys[MESA_SHADER_TESS_CTRL].tcs.tes_reads_tess_factors = !!(nir[MESA_SHADER_TESS_EVAL]->info.inputs_read & (VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER));
	}

	if (nir[MESA_SHADER_GEOMETRY]) {
		if (nir[MESA_SHADER_TESS_CTRL])
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_es = true;
		else
			keys[MESA_SHADER_VERTEX].vs_common_out.as_es = true;
	}

	if (device->physical_device->use_ngg) {
		if (nir[MESA_SHADER_TESS_CTRL]) {
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = true;
		} else {
			keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = true;
		}

		if (nir[MESA_SHADER_TESS_CTRL] &&
		    nir[MESA_SHADER_GEOMETRY] &&
		    nir[MESA_SHADER_GEOMETRY]->info.gs.invocations *
		    nir[MESA_SHADER_GEOMETRY]->info.gs.vertices_out > 256) {
			/* Fallback to the legacy path if tessellation is
			 * enabled with extreme geometry because
			 * EN_MAX_VERT_OUT_PER_GS_INSTANCE doesn't work and it
			 * might hang.
			 */
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
		}

		gl_shader_stage last_xfb_stage = MESA_SHADER_VERTEX;

		for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
			if (nir[i])
				last_xfb_stage = i;
		}

		bool uses_xfb = nir[last_xfb_stage] &&
				radv_nir_stage_uses_xfb(nir[last_xfb_stage]);

		if (!device->physical_device->use_ngg_streamout && uses_xfb) {
			if (nir[MESA_SHADER_TESS_CTRL])
				keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
			else
				keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = false;
		}

		/* Determine if the pipeline is eligible for the NGG passthrough
		 * mode. It can't be enabled for geometry shaders, for NGG
		 * streamout or for vertex shaders that export the primitive ID
		 * (this is checked later because we don't have the info here.)
		 */
		if (!nir[MESA_SHADER_GEOMETRY] && !uses_xfb) {
			if (nir[MESA_SHADER_TESS_CTRL] &&
			    keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg) {
				keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg_passthrough = true;
			} else if (nir[MESA_SHADER_VERTEX] &&
				   keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg) {
				keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg_passthrough = true;
			}
		}
	}

	for(int i = 0; i < MESA_SHADER_STAGES; ++i)
		keys[i].has_multiview_view_index = key->has_multiview_view_index;

	keys[MESA_SHADER_FRAGMENT].fs.col_format = key->col_format;
	keys[MESA_SHADER_FRAGMENT].fs.is_int8 = key->is_int8;
	keys[MESA_SHADER_FRAGMENT].fs.is_int10 = key->is_int10;
	keys[MESA_SHADER_FRAGMENT].fs.log2_ps_iter_samples = key->log2_ps_iter_samples;
	keys[MESA_SHADER_FRAGMENT].fs.num_samples = key->num_samples;

	if (nir[MESA_SHADER_COMPUTE]) {
		keys[MESA_SHADER_COMPUTE].cs.subgroup_size = key->compute_subgroup_size;
	}
}

static uint8_t
radv_get_wave_size(struct radv_device *device,
		   const VkPipelineShaderStageCreateInfo *pStage,
		   gl_shader_stage stage,
		   const struct radv_shader_variant_key *key)
{
	if (stage == MESA_SHADER_GEOMETRY && !key->vs_common_out.as_ngg)
		return 64;
	else if (stage == MESA_SHADER_COMPUTE) {
		if (key->cs.subgroup_size) {
			/* Return the required subgroup size if specified. */
			return key->cs.subgroup_size;
		}
		return device->physical_device->cs_wave_size;
	}
	else if (stage == MESA_SHADER_FRAGMENT)
		return device->physical_device->ps_wave_size;
	else
		return device->physical_device->ge_wave_size;
}

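/*
 * Added commentary (not part of the original source): only compute shaders
 * can require an exact subgroup size here; graphics stages fall back to the
 * per-device cs/ps/ge wave size defaults, and legacy (non-NGG) geometry
 * shaders are pinned to wave64.
 */
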
static void
radv_fill_shader_info(struct radv_pipeline *pipeline,
		      const VkPipelineShaderStageCreateInfo **pStages,
		      struct radv_shader_variant_key *keys,
		      struct radv_shader_info *infos,
		      nir_shader **nir)
{
	unsigned active_stages = 0;
	unsigned filled_stages = 0;

	for (int i = 0; i < MESA_SHADER_STAGES; i++) {
		if (nir[i])
			active_stages |= (1 << i);
	}

	if (nir[MESA_SHADER_FRAGMENT]) {
		radv_nir_shader_info_init(&infos[MESA_SHADER_FRAGMENT]);
		radv_nir_shader_info_pass(nir[MESA_SHADER_FRAGMENT],
					  pipeline->layout,
					  &keys[MESA_SHADER_FRAGMENT],
					  &infos[MESA_SHADER_FRAGMENT]);

		/* TODO: These are no longer used as keys we should refactor this */
		keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id =
			infos[MESA_SHADER_FRAGMENT].ps.prim_id_input;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_layer_id =
			infos[MESA_SHADER_FRAGMENT].ps.layer_input;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_clip_dists =
			!!infos[MESA_SHADER_FRAGMENT].ps.num_input_clips_culls;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_prim_id =
			infos[MESA_SHADER_FRAGMENT].ps.prim_id_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_layer_id =
			infos[MESA_SHADER_FRAGMENT].ps.layer_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_clip_dists =
			!!infos[MESA_SHADER_FRAGMENT].ps.num_input_clips_culls;

		/* NGG passthrough mode can't be enabled for vertex shaders
		 * that export the primitive ID.
		 *
		 * TODO: I should really refactor the keys logic.
		 */
		if (nir[MESA_SHADER_VERTEX] &&
		    keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id) {
			keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg_passthrough = false;
		}

		filled_stages |= (1 << MESA_SHADER_FRAGMENT);
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9 &&
	    nir[MESA_SHADER_TESS_CTRL]) {
		struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
		struct radv_shader_variant_key key = keys[MESA_SHADER_TESS_CTRL];
		key.tcs.vs_key = keys[MESA_SHADER_VERTEX].vs;

		radv_nir_shader_info_init(&infos[MESA_SHADER_TESS_CTRL]);

		for (int i = 0; i < 2; i++) {
			radv_nir_shader_info_pass(combined_nir[i],
						  pipeline->layout, &key,
						  &infos[MESA_SHADER_TESS_CTRL]);
		}

		keys[MESA_SHADER_TESS_EVAL].tes.num_patches =
			infos[MESA_SHADER_TESS_CTRL].tcs.num_patches;
		keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs =
			util_last_bit64(infos[MESA_SHADER_TESS_CTRL].tcs.outputs_written);

		filled_stages |= (1 << MESA_SHADER_VERTEX);
		filled_stages |= (1 << MESA_SHADER_TESS_CTRL);
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9 &&
	    nir[MESA_SHADER_GEOMETRY]) {
		gl_shader_stage pre_stage = nir[MESA_SHADER_TESS_EVAL] ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
		struct nir_shader *combined_nir[] = {nir[pre_stage], nir[MESA_SHADER_GEOMETRY]};

		radv_nir_shader_info_init(&infos[MESA_SHADER_GEOMETRY]);

		for (int i = 0; i < 2; i++) {
			radv_nir_shader_info_pass(combined_nir[i],
						  pipeline->layout,
						  &keys[pre_stage],
						  &infos[MESA_SHADER_GEOMETRY]);
		}

		filled_stages |= (1 << pre_stage);
		filled_stages |= (1 << MESA_SHADER_GEOMETRY);
	}

	active_stages ^= filled_stages;
	while (active_stages) {
		int i = u_bit_scan(&active_stages);

		if (i == MESA_SHADER_TESS_CTRL) {
			keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs =
				util_last_bit64(infos[MESA_SHADER_VERTEX].vs.ls_outputs_written);
		}

		if (i == MESA_SHADER_TESS_EVAL) {
			keys[MESA_SHADER_TESS_EVAL].tes.num_patches =
				infos[MESA_SHADER_TESS_CTRL].tcs.num_patches;
			keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs =
				util_last_bit64(infos[MESA_SHADER_TESS_CTRL].tcs.outputs_written);
		}

		radv_nir_shader_info_init(&infos[i]);
		radv_nir_shader_info_pass(nir[i], pipeline->layout,
					  &keys[i], &infos[i]);
	}

	for (int i = 0; i < MESA_SHADER_STAGES; i++) {
		if (nir[i])
			infos[i].wave_size =
				radv_get_wave_size(pipeline->device, pStages[i],
						   i, &keys[i]);
	}
}

static void
merge_tess_info(struct shader_info *tes_info,
		const struct shader_info *tcs_info)
{
	/* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
	 *
	 *    "PointMode. Controls generation of points rather than triangles
	 *     or lines. This functionality defaults to disabled, and is
	 *     enabled if either shader stage includes the execution mode.
	 *
	 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
	 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
	 * and OutputVertices, it says:
	 *
	 *    "One mode must be set in at least one of the tessellation
	 *     shader stages."
	 *
	 * So, the fields can be set in either the TCS or TES, but they must
	 * agree if set in both. Our backend looks at TES, so bitwise-or in
	 * the values from the TCS.
	 */
	assert(tcs_info->tess.tcs_vertices_out == 0 ||
	       tes_info->tess.tcs_vertices_out == 0 ||
	       tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
	tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;

	assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
	       tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
	       tcs_info->tess.spacing == tes_info->tess.spacing);
	tes_info->tess.spacing |= tcs_info->tess.spacing;

	assert(tcs_info->tess.primitive_mode == 0 ||
	       tes_info->tess.primitive_mode == 0 ||
	       tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
	tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
	tes_info->tess.ccw |= tcs_info->tess.ccw;
	tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}

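/*
 * Worked example (added commentary, not part of the original source): if only
 * the TES declares primitive_mode = GL_TRIANGLES and only the TCS declares
 * OutputVertices = 3, the bitwise-or merge leaves the TES info with both
 * primitive_mode = GL_TRIANGLES and tcs_vertices_out = 3, which is what the
 * backend reads.
 */
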
static
void radv_init_feedback(const VkPipelineCreationFeedbackCreateInfoEXT *ext)
{
	if (!ext)
		return;

	if (ext->pPipelineCreationFeedback) {
		ext->pPipelineCreationFeedback->flags = 0;
		ext->pPipelineCreationFeedback->duration = 0;
	}

	for (unsigned i = 0; i < ext->pipelineStageCreationFeedbackCount; ++i) {
		ext->pPipelineStageCreationFeedbacks[i].flags = 0;
		ext->pPipelineStageCreationFeedbacks[i].duration = 0;
	}
}

static
void radv_start_feedback(VkPipelineCreationFeedbackEXT *feedback)
{
	if (!feedback)
		return;

	feedback->duration -= radv_get_current_time();
	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
}

static
void radv_stop_feedback(VkPipelineCreationFeedbackEXT *feedback, bool cache_hit)
{
	if (!feedback)
		return;

	feedback->duration += radv_get_current_time();
	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT |
		(cache_hit ? VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT : 0);
}

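/*
 * Usage sketch (added commentary, not part of the original source): callers
 * bracket each compile step with these helpers, e.g.
 *
 *    radv_start_feedback(stage_feedbacks[i]);
 *    ...compile the stage...
 *    radv_stop_feedback(stage_feedbacks[i], false);
 *
 * so duration accumulates wall-clock time and the cache-hit bit is only set
 * when the variant came out of a pipeline cache.
 */
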
2729 void radv_create_shaders(struct radv_pipeline
*pipeline
,
2730 struct radv_device
*device
,
2731 struct radv_pipeline_cache
*cache
,
2732 const struct radv_pipeline_key
*key
,
2733 const VkPipelineShaderStageCreateInfo
**pStages
,
2734 const VkPipelineCreateFlags flags
,
2735 VkPipelineCreationFeedbackEXT
*pipeline_feedback
,
2736 VkPipelineCreationFeedbackEXT
**stage_feedbacks
)
2738 struct radv_shader_module fs_m
= {0};
2739 struct radv_shader_module
*modules
[MESA_SHADER_STAGES
] = { 0, };
2740 nir_shader
*nir
[MESA_SHADER_STAGES
] = {0};
2741 struct radv_shader_binary
*binaries
[MESA_SHADER_STAGES
] = {NULL
};
2742 struct radv_shader_variant_key keys
[MESA_SHADER_STAGES
] = {{{{{0}}}}};
2743 struct radv_shader_info infos
[MESA_SHADER_STAGES
] = {0};
2744 unsigned char hash
[20], gs_copy_hash
[20];
2745 bool keep_executable_info
= (flags
& VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR
) || device
->keep_shader_info
;
2747 radv_start_feedback(pipeline_feedback
);
2749 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2751 modules
[i
] = radv_shader_module_from_handle(pStages
[i
]->module
);
2752 if (modules
[i
]->nir
)
2753 _mesa_sha1_compute(modules
[i
]->nir
->info
.name
,
2754 strlen(modules
[i
]->nir
->info
.name
),
2757 pipeline
->active_stages
|= mesa_to_vk_shader_stage(i
);
2761 radv_hash_shaders(hash
, pStages
, pipeline
->layout
, key
, get_hash_flags(device
));
2762 memcpy(gs_copy_hash
, hash
, 20);
2763 gs_copy_hash
[0] ^= 1;
2765 bool found_in_application_cache
= true;
2766 if (modules
[MESA_SHADER_GEOMETRY
] && !keep_executable_info
) {
2767 struct radv_shader_variant
*variants
[MESA_SHADER_STAGES
] = {0};
2768 radv_create_shader_variants_from_pipeline_cache(device
, cache
, gs_copy_hash
, variants
,
2769 &found_in_application_cache
);
2770 pipeline
->gs_copy_shader
= variants
[MESA_SHADER_GEOMETRY
];
2773 if (!keep_executable_info
&&
2774 radv_create_shader_variants_from_pipeline_cache(device
, cache
, hash
, pipeline
->shaders
,
2775 &found_in_application_cache
) &&
2776 (!modules
[MESA_SHADER_GEOMETRY
] || pipeline
->gs_copy_shader
)) {
2777 radv_stop_feedback(pipeline_feedback
, found_in_application_cache
);
2781 if (!modules
[MESA_SHADER_FRAGMENT
] && !modules
[MESA_SHADER_COMPUTE
]) {
2783 nir_builder_init_simple_shader(&fs_b
, NULL
, MESA_SHADER_FRAGMENT
, NULL
);
2784 fs_b
.shader
->info
.name
= ralloc_strdup(fs_b
.shader
, "noop_fs");
2785 fs_m
.nir
= fs_b
.shader
;
2786 modules
[MESA_SHADER_FRAGMENT
] = &fs_m
;
2789 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2790 const VkPipelineShaderStageCreateInfo
*stage
= pStages
[i
];
2791 unsigned subgroup_size
= 64;
2796 radv_start_feedback(stage_feedbacks
[i
]);
2798 if (key
->compute_subgroup_size
) {
2799 /* Only GFX10+ and compute shaders currently support
2800 * requiring a specific subgroup size.
2802 assert(device
->physical_device
->rad_info
.chip_class
>= GFX10
&&
2803 i
== MESA_SHADER_COMPUTE
);
2804 subgroup_size
= key
->compute_subgroup_size
;
2807 nir
[i
] = radv_shader_compile_to_nir(device
, modules
[i
],
2808 stage
? stage
->pName
: "main", i
,
2809 stage
? stage
->pSpecializationInfo
: NULL
,
2810 flags
, pipeline
->layout
,
2813 /* We don't want to alter meta shaders IR directly so clone it
2816 if (nir
[i
]->info
.name
) {
2817 nir
[i
] = nir_shader_clone(NULL
, nir
[i
]);
2820 radv_stop_feedback(stage_feedbacks
[i
], false);
2823 if (nir
[MESA_SHADER_TESS_CTRL
]) {
2824 nir_lower_patch_vertices(nir
[MESA_SHADER_TESS_EVAL
], nir
[MESA_SHADER_TESS_CTRL
]->info
.tess
.tcs_vertices_out
, NULL
);
2825 merge_tess_info(&nir
[MESA_SHADER_TESS_EVAL
]->info
, &nir
[MESA_SHADER_TESS_CTRL
]->info
);
2828 if (!(flags
& VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
))
2829 radv_link_shaders(pipeline
, nir
);
2831 for (int i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2833 if (device
->physical_device
->use_aco
) {
2834 NIR_PASS_V(nir
[i
], nir_lower_non_uniform_access
,
2835 nir_lower_non_uniform_ubo_access
|
2836 nir_lower_non_uniform_ssbo_access
|
2837 nir_lower_non_uniform_texture_access
|
2838 nir_lower_non_uniform_image_access
);
2840 NIR_PASS_V(nir
[i
], nir_lower_bool_to_int32
);
2844 if (nir
[MESA_SHADER_FRAGMENT
])
2845 radv_lower_fs_io(nir
[MESA_SHADER_FRAGMENT
]);
2847 for (int i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2848 if (radv_can_dump_shader(device
, modules
[i
], false))
2849 nir_print_shader(nir
[i
], stderr
);
2852 radv_fill_shader_keys(device
, keys
, key
, nir
);
2854 radv_fill_shader_info(pipeline
, pStages
, keys
, infos
, nir
);
2856 if ((nir
[MESA_SHADER_VERTEX
] &&
2857 keys
[MESA_SHADER_VERTEX
].vs_common_out
.as_ngg
) ||
2858 (nir
[MESA_SHADER_TESS_EVAL
] &&
2859 keys
[MESA_SHADER_TESS_EVAL
].vs_common_out
.as_ngg
)) {
2860 struct gfx10_ngg_info
*ngg_info
;
2862 if (nir
[MESA_SHADER_GEOMETRY
])
2863 ngg_info
= &infos
[MESA_SHADER_GEOMETRY
].ngg_info
;
2864 else if (nir
[MESA_SHADER_TESS_CTRL
])
2865 ngg_info
= &infos
[MESA_SHADER_TESS_EVAL
].ngg_info
;
2867 ngg_info
= &infos
[MESA_SHADER_VERTEX
].ngg_info
;
2869 gfx10_get_ngg_info(key
, pipeline
, nir
, infos
, ngg_info
);
2870 } else if (nir
[MESA_SHADER_GEOMETRY
]) {
2871 struct gfx9_gs_info
*gs_info
=
2872 &infos
[MESA_SHADER_GEOMETRY
].gs_ring_info
;
2874 gfx9_get_gs_info(key
, pipeline
, nir
, infos
, gs_info
);
2877 if(modules
[MESA_SHADER_GEOMETRY
]) {
2878 struct radv_shader_binary
*gs_copy_binary
= NULL
;
2879 if (!pipeline
->gs_copy_shader
&&
2880 !radv_pipeline_has_ngg(pipeline
)) {
2881 struct radv_shader_info info
= {};
2882 struct radv_shader_variant_key key
= {};
2884 key
.has_multiview_view_index
=
2885 keys
[MESA_SHADER_GEOMETRY
].has_multiview_view_index
;
2887 radv_nir_shader_info_pass(nir
[MESA_SHADER_GEOMETRY
],
2888 pipeline
->layout
, &key
,
2890 info
.wave_size
= 64; /* Wave32 not supported. */
2892 pipeline
->gs_copy_shader
= radv_create_gs_copy_shader(
2893 device
, nir
[MESA_SHADER_GEOMETRY
], &info
,
2894 &gs_copy_binary
, keep_executable_info
,
2895 keys
[MESA_SHADER_GEOMETRY
].has_multiview_view_index
);
2898 if (!keep_executable_info
&& pipeline
->gs_copy_shader
) {
2899 struct radv_shader_binary
*binaries
[MESA_SHADER_STAGES
] = {NULL
};
2900 struct radv_shader_variant
*variants
[MESA_SHADER_STAGES
] = {0};
2902 binaries
[MESA_SHADER_GEOMETRY
] = gs_copy_binary
;
2903 variants
[MESA_SHADER_GEOMETRY
] = pipeline
->gs_copy_shader
;
2905 radv_pipeline_cache_insert_shaders(device
, cache
,
2910 free(gs_copy_binary
);
2913 if (nir
[MESA_SHADER_FRAGMENT
]) {
2914 if (!pipeline
->shaders
[MESA_SHADER_FRAGMENT
]) {
2915 radv_start_feedback(stage_feedbacks
[MESA_SHADER_FRAGMENT
]);
2917 pipeline
->shaders
[MESA_SHADER_FRAGMENT
] =
2918 radv_shader_variant_compile(device
, modules
[MESA_SHADER_FRAGMENT
], &nir
[MESA_SHADER_FRAGMENT
], 1,
2919 pipeline
->layout
, keys
+ MESA_SHADER_FRAGMENT
,
2920 infos
+ MESA_SHADER_FRAGMENT
,
2921 keep_executable_info
,
2922 &binaries
[MESA_SHADER_FRAGMENT
]);
2924 radv_stop_feedback(stage_feedbacks
[MESA_SHADER_FRAGMENT
], false);
2928 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
&& modules
[MESA_SHADER_TESS_CTRL
]) {
2929 if (!pipeline
->shaders
[MESA_SHADER_TESS_CTRL
]) {
2930 struct nir_shader
*combined_nir
[] = {nir
[MESA_SHADER_VERTEX
], nir
[MESA_SHADER_TESS_CTRL
]};
2931 struct radv_shader_variant_key key
= keys
[MESA_SHADER_TESS_CTRL
];
2932 key
.tcs
.vs_key
= keys
[MESA_SHADER_VERTEX
].vs
;
2934 radv_start_feedback(stage_feedbacks
[MESA_SHADER_TESS_CTRL
]);
2936 pipeline
->shaders
[MESA_SHADER_TESS_CTRL
] = radv_shader_variant_compile(device
, modules
[MESA_SHADER_TESS_CTRL
], combined_nir
, 2,
2938 &key
, &infos
[MESA_SHADER_TESS_CTRL
], keep_executable_info
,
2939 &binaries
[MESA_SHADER_TESS_CTRL
]);
2941 radv_stop_feedback(stage_feedbacks
[MESA_SHADER_TESS_CTRL
], false);
2943 modules
[MESA_SHADER_VERTEX
] = NULL
;
2944 keys
[MESA_SHADER_TESS_EVAL
].tes
.num_patches
= pipeline
->shaders
[MESA_SHADER_TESS_CTRL
]->info
.tcs
.num_patches
;
2945 keys
[MESA_SHADER_TESS_EVAL
].tes
.tcs_num_outputs
= util_last_bit64(pipeline
->shaders
[MESA_SHADER_TESS_CTRL
]->info
.tcs
.outputs_written
);
2948 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
&& modules
[MESA_SHADER_GEOMETRY
]) {
2949 gl_shader_stage pre_stage
= modules
[MESA_SHADER_TESS_EVAL
] ? MESA_SHADER_TESS_EVAL
: MESA_SHADER_VERTEX
;
2950 if (!pipeline
->shaders
[MESA_SHADER_GEOMETRY
]) {
2951 struct nir_shader
*combined_nir
[] = {nir
[pre_stage
], nir
[MESA_SHADER_GEOMETRY
]};
2953 radv_start_feedback(stage_feedbacks
[MESA_SHADER_GEOMETRY
]);
2955 pipeline
->shaders
[MESA_SHADER_GEOMETRY
] = radv_shader_variant_compile(device
, modules
[MESA_SHADER_GEOMETRY
], combined_nir
, 2,
2957 &keys
[pre_stage
], &infos
[MESA_SHADER_GEOMETRY
], keep_executable_info
,
2958 &binaries
[MESA_SHADER_GEOMETRY
]);
2960 radv_stop_feedback(stage_feedbacks
[MESA_SHADER_GEOMETRY
], false);
2962 modules
[pre_stage
] = NULL
;
2965 for (int i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2966 if(modules
[i
] && !pipeline
->shaders
[i
]) {
2967 if (i
== MESA_SHADER_TESS_CTRL
) {
2968 keys
[MESA_SHADER_TESS_CTRL
].tcs
.num_inputs
= util_last_bit64(pipeline
->shaders
[MESA_SHADER_VERTEX
]->info
.vs
.ls_outputs_written
);
2970 if (i
== MESA_SHADER_TESS_EVAL
) {
2971 keys
[MESA_SHADER_TESS_EVAL
].tes
.num_patches
= pipeline
->shaders
[MESA_SHADER_TESS_CTRL
]->info
.tcs
.num_patches
;
2972 keys
[MESA_SHADER_TESS_EVAL
].tes
.tcs_num_outputs
= util_last_bit64(pipeline
->shaders
[MESA_SHADER_TESS_CTRL
]->info
.tcs
.outputs_written
);
2975 radv_start_feedback(stage_feedbacks
[i
]);
2977 pipeline
->shaders
[i
] = radv_shader_variant_compile(device
, modules
[i
], &nir
[i
], 1,
2979 keys
+ i
, infos
+ i
,keep_executable_info
,
2982 radv_stop_feedback(stage_feedbacks
[i
], false);
2986 if (!keep_executable_info
) {
2987 radv_pipeline_cache_insert_shaders(device
, cache
, hash
, pipeline
->shaders
,
2991 for (int i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2994 ralloc_free(nir
[i
]);
2996 if (radv_can_dump_shader_stats(device
, modules
[i
]))
2997 radv_shader_dump_stats(device
,
2998 pipeline
->shaders
[i
],
3004 ralloc_free(fs_m
.nir
);
3006 radv_stop_feedback(pipeline_feedback
, false);
static uint32_t
radv_pipeline_stage_to_user_data_0(struct radv_pipeline *pipeline,
				   gl_shader_stage stage, enum chip_class chip_class)
{
	bool has_gs = radv_pipeline_has_gs(pipeline);
	bool has_tess = radv_pipeline_has_tess(pipeline);
	bool has_ngg = radv_pipeline_has_ngg(pipeline);

	switch (stage) {
	case MESA_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	case MESA_SHADER_VERTEX:
		if (has_tess) {
			if (chip_class >= GFX10) {
				return R_00B430_SPI_SHADER_USER_DATA_HS_0;
			} else if (chip_class == GFX9) {
				return R_00B430_SPI_SHADER_USER_DATA_LS_0;
			} else {
				return R_00B530_SPI_SHADER_USER_DATA_LS_0;
			}
		}

		if (has_gs) {
			if (chip_class >= GFX10) {
				return R_00B230_SPI_SHADER_USER_DATA_GS_0;
			} else {
				return R_00B330_SPI_SHADER_USER_DATA_ES_0;
			}
		}

		if (has_ngg)
			return R_00B230_SPI_SHADER_USER_DATA_GS_0;

		return R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case MESA_SHADER_GEOMETRY:
		return chip_class == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
		                            R_00B230_SPI_SHADER_USER_DATA_GS_0;
	case MESA_SHADER_COMPUTE:
		return R_00B900_COMPUTE_USER_DATA_0;
	case MESA_SHADER_TESS_CTRL:
		return chip_class == GFX9 ? R_00B430_SPI_SHADER_USER_DATA_LS_0 :
		                            R_00B430_SPI_SHADER_USER_DATA_HS_0;
	case MESA_SHADER_TESS_EVAL:
		if (has_gs) {
			return chip_class >= GFX10 ? R_00B230_SPI_SHADER_USER_DATA_GS_0 :
			                             R_00B330_SPI_SHADER_USER_DATA_ES_0;
		} else if (has_ngg) {
			return R_00B230_SPI_SHADER_USER_DATA_GS_0;
		} else {
			return R_00B130_SPI_SHADER_USER_DATA_VS_0;
		}
	default:
		unreachable("unknown shader");
	}
}

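/*
 * Example (added commentary, not part of the original source): on GFX9 with
 * tessellation enabled, MESA_SHADER_VERTEX runs merged into the hardware
 * LS/HS stage, so its user SGPRs start at R_00B430_SPI_SHADER_USER_DATA_LS_0
 * rather than in the VS register block.
 */
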
3066 struct radv_bin_size_entry
{
3072 radv_gfx9_compute_bin_size(struct radv_pipeline
*pipeline
, const VkGraphicsPipelineCreateInfo
*pCreateInfo
)
3074 static const struct radv_bin_size_entry color_size_table
[][3][9] = {
3078 /* One shader engine */
3084 { UINT_MAX
, { 0, 0}},
3087 /* Two shader engines */
3093 { UINT_MAX
, { 0, 0}},
3096 /* Four shader engines */
3101 { UINT_MAX
, { 0, 0}},
3107 /* One shader engine */
3113 { UINT_MAX
, { 0, 0}},
3116 /* Two shader engines */
3122 { UINT_MAX
, { 0, 0}},
3125 /* Four shader engines */
3132 { UINT_MAX
, { 0, 0}},
3138 /* One shader engine */
3145 { UINT_MAX
, { 0, 0}},
3148 /* Two shader engines */
3156 { UINT_MAX
, { 0, 0}},
3159 /* Four shader engines */
3167 { UINT_MAX
, { 0, 0}},
3171 static const struct radv_bin_size_entry ds_size_table
[][3][9] = {
3175 // One shader engine
3182 { UINT_MAX
, { 0, 0}},
3185 // Two shader engines
3193 { UINT_MAX
, { 0, 0}},
3196 // Four shader engines
3204 { UINT_MAX
, { 0, 0}},
3210 // One shader engine
3218 { UINT_MAX
, { 0, 0}},
3221 // Two shader engines
3230 { UINT_MAX
, { 0, 0}},
3233 // Four shader engines
3242 { UINT_MAX
, { 0, 0}},
3248 // One shader engine
3256 { UINT_MAX
, { 0, 0}},
3259 // Two shader engines
3268 { UINT_MAX
, { 0, 0}},
3271 // Four shader engines
3279 { UINT_MAX
, { 0, 0}},
3284 RADV_FROM_HANDLE(radv_render_pass
, pass
, pCreateInfo
->renderPass
);
3285 struct radv_subpass
*subpass
= pass
->subpasses
+ pCreateInfo
->subpass
;
3286 VkExtent2D extent
= {512, 512};
3288 unsigned log_num_rb_per_se
=
3289 util_logbase2_ceil(pipeline
->device
->physical_device
->rad_info
.num_render_backends
/
3290 pipeline
->device
->physical_device
->rad_info
.max_se
);
3291 unsigned log_num_se
= util_logbase2_ceil(pipeline
->device
->physical_device
->rad_info
.max_se
);
3293 unsigned total_samples
= 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline
->graphics
.ms
.pa_sc_aa_config
);
3294 unsigned ps_iter_samples
= 1u << G_028804_PS_ITER_SAMPLES(pipeline
->graphics
.ms
.db_eqaa
);
3295 unsigned effective_samples
= total_samples
;
3296 unsigned color_bytes_per_pixel
= 0;
3298 const VkPipelineColorBlendStateCreateInfo
*vkblend
=
3299 radv_pipeline_get_color_blend_state(pCreateInfo
);
3301 for (unsigned i
= 0; i
< subpass
->color_count
; i
++) {
3302 if (!vkblend
->pAttachments
[i
].colorWriteMask
)
3305 if (subpass
->color_attachments
[i
].attachment
== VK_ATTACHMENT_UNUSED
)
3308 VkFormat format
= pass
->attachments
[subpass
->color_attachments
[i
].attachment
].format
;
3309 color_bytes_per_pixel
+= vk_format_get_blocksize(format
);
3312 /* MSAA images typically don't use all samples all the time. */
3313 if (effective_samples
>= 2 && ps_iter_samples
<= 1)
3314 effective_samples
= 2;
3315 color_bytes_per_pixel
*= effective_samples
;
3318 const struct radv_bin_size_entry
*color_entry
= color_size_table
[log_num_rb_per_se
][log_num_se
];
3319 while(color_entry
[1].bpp
<= color_bytes_per_pixel
)
3322 extent
= color_entry
->extent
;
3324 if (subpass
->depth_stencil_attachment
) {
3325 struct radv_render_pass_attachment
*attachment
= pass
->attachments
+ subpass
->depth_stencil_attachment
->attachment
;
3327 /* Coefficients taken from AMDVLK */
3328 unsigned depth_coeff
= vk_format_is_depth(attachment
->format
) ? 5 : 0;
3329 unsigned stencil_coeff
= vk_format_is_stencil(attachment
->format
) ? 1 : 0;
3330 unsigned ds_bytes_per_pixel
= 4 * (depth_coeff
+ stencil_coeff
) * total_samples
;
3332 const struct radv_bin_size_entry
*ds_entry
= ds_size_table
[log_num_rb_per_se
][log_num_se
];
3333 while(ds_entry
[1].bpp
<= ds_bytes_per_pixel
)
3336 if (ds_entry
->extent
.width
* ds_entry
->extent
.height
< extent
.width
* extent
.height
)
3337 extent
= ds_entry
->extent
;
static VkExtent2D
radv_gfx10_compute_bin_size(struct radv_pipeline *pipeline,
			    const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	VkExtent2D extent = {512, 512};

	const unsigned db_tag_size = 64;
	const unsigned db_tag_count = 312;
	const unsigned color_tag_size = 1024;
	const unsigned color_tag_count = 31;
	const unsigned fmask_tag_size = 256;
	const unsigned fmask_tag_count = 44;

	const unsigned rb_count = pipeline->device->physical_device->rad_info.num_render_backends;
	const unsigned pipe_count = MAX2(rb_count, pipeline->device->physical_device->rad_info.num_sdp_interfaces);

	const unsigned db_tag_part = (db_tag_count * rb_count / pipe_count) * db_tag_size * pipe_count;
	const unsigned color_tag_part = (color_tag_count * rb_count / pipe_count) * color_tag_size * pipe_count;
	const unsigned fmask_tag_part = (fmask_tag_count * rb_count / pipe_count) * fmask_tag_size * pipe_count;

	const unsigned total_samples = 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline->graphics.ms.pa_sc_aa_config);
	const unsigned samples_log = util_logbase2_ceil(total_samples);

	unsigned color_bytes_per_pixel = 0;
	unsigned fmask_bytes_per_pixel = 0;

	const VkPipelineColorBlendStateCreateInfo *vkblend =
		radv_pipeline_get_color_blend_state(pCreateInfo);

	for (unsigned i = 0; i < subpass->color_count; i++) {
		if (!vkblend->pAttachments[i].colorWriteMask)
			continue;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
			continue;

		VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
		color_bytes_per_pixel += vk_format_get_blocksize(format);

		if (total_samples > 1) {
			assert(samples_log <= 3);
			const unsigned fmask_array[] = {0, 1, 1, 4};
			fmask_bytes_per_pixel += fmask_array[samples_log];
		}
	}

	color_bytes_per_pixel *= total_samples;

	color_bytes_per_pixel = MAX2(color_bytes_per_pixel, 1);

	const unsigned color_pixel_count_log = util_logbase2(color_tag_part / color_bytes_per_pixel);
	extent.width = 1ull << ((color_pixel_count_log + 1) / 2);
	extent.height = 1ull << (color_pixel_count_log / 2);

	if (fmask_bytes_per_pixel) {
		const unsigned fmask_pixel_count_log = util_logbase2(fmask_tag_part / fmask_bytes_per_pixel);

		const VkExtent2D fmask_extent = (VkExtent2D){
			.width = 1ull << ((fmask_pixel_count_log + 1) / 2),
			.height = 1ull << (color_pixel_count_log / 2)
		};

		if (fmask_extent.width * fmask_extent.height < extent.width * extent.height)
			extent = fmask_extent;
	}

	if (subpass->depth_stencil_attachment) {
		struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

		/* Coefficients taken from AMDVLK */
		unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
		unsigned stencil_coeff = vk_format_is_stencil(attachment->format) ? 1 : 0;
		unsigned db_bytes_per_pixel = (depth_coeff + stencil_coeff) * total_samples;

		const unsigned db_pixel_count_log = util_logbase2(db_tag_part / db_bytes_per_pixel);

		const VkExtent2D db_extent = (VkExtent2D){
			.width = 1ull << ((db_pixel_count_log + 1) / 2),
			.height = 1ull << (color_pixel_count_log / 2)
		};

		if (db_extent.width * db_extent.height < extent.width * extent.height)
			extent = db_extent;
	}

	extent.width = MAX2(extent.width, 128);
	extent.height = MAX2(extent.width, 64);

	return extent;
}

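/* Illustrative example (assumed numbers, not taken from any particular ASIC):
 * with rb_count = 16 and num_sdp_interfaces = 16 (so pipe_count = 16), a
 * single RGBA8 attachment and no MSAA, the color-tag budget alone gives:
 *
 *   color_tag_part        = (31 * 16 / 16) * 1024 * 16 = 507904 bytes
 *   color_bytes_per_pixel = 4
 *   color_pixel_count_log = util_logbase2(507904 / 4) = 16
 *   extent.width          = 1 << ((16 + 1) / 2) = 256
 *   extent.height         = 1 << (16 / 2)       = 256
 *
 * i.e. a 256x256 bin; the fmask and depth/stencil budgets can only shrink
 * that, and the final clamp keeps the bin at least 128 wide and 64 tall.
 */
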
static void
radv_pipeline_generate_disabled_binning_state(struct radeon_cmdbuf *ctx_cs,
					      struct radv_pipeline *pipeline,
					      const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t pa_sc_binner_cntl_0 =
		S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
		S_028C44_DISABLE_START_OF_PRIM(1);
	uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
		RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
		struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
		const VkPipelineColorBlendStateCreateInfo *vkblend =
			radv_pipeline_get_color_blend_state(pCreateInfo);
		unsigned min_bytes_per_pixel = 0;

		for (unsigned i = 0; i < subpass->color_count; i++) {
			if (!vkblend->pAttachments[i].colorWriteMask)
				continue;

			if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
				continue;

			VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
			unsigned bytes = vk_format_get_blocksize(format);
			if (!min_bytes_per_pixel || bytes < min_bytes_per_pixel)
				min_bytes_per_pixel = bytes;
		}

		pa_sc_binner_cntl_0 =
			S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_NEW_SC) |
			S_028C44_BIN_SIZE_X(0) |
			S_028C44_BIN_SIZE_Y(0) |
			S_028C44_BIN_SIZE_X_EXTEND(2) | /* 128 */
			S_028C44_BIN_SIZE_Y_EXTEND(min_bytes_per_pixel <= 4 ? 2 : 1) | /* 128 or 64 */
			S_028C44_DISABLE_START_OF_PRIM(1);
	}

	pipeline->graphics.binning.pa_sc_binner_cntl_0 = pa_sc_binner_cntl_0;
	pipeline->graphics.binning.db_dfsm_control = db_dfsm_control;
}

struct radv_binning_settings
radv_get_binning_settings(const struct radv_physical_device *pdev)
{
	struct radv_binning_settings settings;
	if (pdev->rad_info.has_dedicated_vram) {
		if (pdev->rad_info.num_render_backends > 4) {
			settings.context_states_per_bin = 1;
			settings.persistent_states_per_bin = 1;
		} else {
			settings.context_states_per_bin = 3;
			settings.persistent_states_per_bin = 8;
		}
		settings.fpovs_per_batch = 63;
	} else {
		/* The context states are affected by the scissor bug. */
		settings.context_states_per_bin = 6;
		/* 32 causes hangs for RAVEN. */
		settings.persistent_states_per_bin = 16;
		settings.fpovs_per_batch = 63;
	}

	if (pdev->rad_info.has_gfx9_scissor_bug)
		settings.context_states_per_bin = 1;

	return settings;
}

static void
radv_pipeline_generate_binning_state(struct radeon_cmdbuf *ctx_cs,
				     struct radv_pipeline *pipeline,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo,
				     const struct radv_blend_state *blend)
{
	if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
		return;

	VkExtent2D bin_size;
	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
		bin_size = radv_gfx10_compute_bin_size(pipeline, pCreateInfo);
	} else if (pipeline->device->physical_device->rad_info.chip_class == GFX9) {
		bin_size = radv_gfx9_compute_bin_size(pipeline, pCreateInfo);
	} else
		unreachable("Unhandled generation for binning bin size calculation");

	if (pipeline->device->pbb_allowed && bin_size.width && bin_size.height) {
		struct radv_binning_settings settings =
			radv_get_binning_settings(pipeline->device->physical_device);

		bool disable_start_of_prim = true;
		uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);

		const struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];

		if (pipeline->device->dfsm_allowed && ps &&
		    !ps->info.ps.can_discard &&
		    !ps->info.ps.writes_memory &&
		    blend->cb_target_enabled_4bit) {
			db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_AUTO);
			disable_start_of_prim = (blend->blend_enable_4bit & blend->cb_target_enabled_4bit) != 0;
		}

		const uint32_t pa_sc_binner_cntl_0 =
			S_028C44_BINNING_MODE(V_028C44_BINNING_ALLOWED) |
			S_028C44_BIN_SIZE_X(bin_size.width == 16) |
			S_028C44_BIN_SIZE_Y(bin_size.height == 16) |
			S_028C44_BIN_SIZE_X_EXTEND(util_logbase2(MAX2(bin_size.width, 32)) - 5) |
			S_028C44_BIN_SIZE_Y_EXTEND(util_logbase2(MAX2(bin_size.height, 32)) - 5) |
			S_028C44_CONTEXT_STATES_PER_BIN(settings.context_states_per_bin - 1) |
			S_028C44_PERSISTENT_STATES_PER_BIN(settings.persistent_states_per_bin - 1) |
			S_028C44_DISABLE_START_OF_PRIM(disable_start_of_prim) |
			S_028C44_FPOVS_PER_BATCH(settings.fpovs_per_batch) |
			S_028C44_OPTIMAL_BIN_SELECTION(1);

		pipeline->graphics.binning.pa_sc_binner_cntl_0 = pa_sc_binner_cntl_0;
		pipeline->graphics.binning.db_dfsm_control = db_dfsm_control;
	} else
		radv_pipeline_generate_disabled_binning_state(ctx_cs, pipeline, pCreateInfo);
}

static void
radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *ctx_cs,
					   struct radv_pipeline *pipeline,
					   const VkGraphicsPipelineCreateInfo *pCreateInfo,
					   const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineDepthStencilStateCreateInfo *vkds = radv_pipeline_get_depth_stencil_state(pCreateInfo);
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	struct radv_render_pass_attachment *attachment = NULL;
	uint32_t db_depth_control = 0, db_stencil_control = 0;
	uint32_t db_render_control = 0, db_render_override2 = 0;
	uint32_t db_render_override = 0;

	if (subpass->depth_stencil_attachment)
		attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

	bool has_depth_attachment = attachment && vk_format_is_depth(attachment->format);
	bool has_stencil_attachment = attachment && vk_format_is_stencil(attachment->format);

	if (vkds && has_depth_attachment) {
		db_depth_control = S_028800_Z_ENABLE(vkds->depthTestEnable ? 1 : 0) |
				   S_028800_Z_WRITE_ENABLE(vkds->depthWriteEnable ? 1 : 0) |
				   S_028800_ZFUNC(vkds->depthCompareOp) |
				   S_028800_DEPTH_BOUNDS_ENABLE(vkds->depthBoundsTestEnable ? 1 : 0);

		/* from amdvlk: For 4xAA and 8xAA need to decompress on flush for better performance */
		db_render_override2 |= S_028010_DECOMPRESS_Z_ON_FLUSH(attachment->samples > 2);
	}

	if (has_stencil_attachment && vkds && vkds->stencilTestEnable) {
		db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
		db_depth_control |= S_028800_STENCILFUNC(vkds->front.compareOp);
		db_stencil_control |= S_02842C_STENCILFAIL(si_translate_stencil_op(vkds->front.failOp));
		db_stencil_control |= S_02842C_STENCILZPASS(si_translate_stencil_op(vkds->front.passOp));
		db_stencil_control |= S_02842C_STENCILZFAIL(si_translate_stencil_op(vkds->front.depthFailOp));

		db_depth_control |= S_028800_STENCILFUNC_BF(vkds->back.compareOp);
		db_stencil_control |= S_02842C_STENCILFAIL_BF(si_translate_stencil_op(vkds->back.failOp));
		db_stencil_control |= S_02842C_STENCILZPASS_BF(si_translate_stencil_op(vkds->back.passOp));
		db_stencil_control |= S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(vkds->back.depthFailOp));
	}

	if (attachment && extra) {
		db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(extra->db_depth_clear);
		db_render_control |= S_028000_STENCIL_CLEAR_ENABLE(extra->db_stencil_clear);

		db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->db_resummarize);
		db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->db_flush_depth_inplace);
		db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->db_flush_stencil_inplace);
		db_render_override2 |= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra->db_depth_disable_expclear);
		db_render_override2 |= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra->db_stencil_disable_expclear);
	}

	db_render_override |= S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
			      S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE);

	if (!pCreateInfo->pRasterizationState->depthClampEnable &&
	    ps->info.ps.writes_z) {
		/* From VK_EXT_depth_range_unrestricted spec:
		 *
		 * "The behavior described in Primitive Clipping still applies.
		 *  If depth clamping is disabled the depth values are still
		 *  clipped to 0 ≤ zc ≤ wc before the viewport transform. If
		 *  depth clamping is enabled the above equation is ignored and
		 *  the depth values are instead clamped to the VkViewport
		 *  minDepth and maxDepth values, which in the case of this
		 *  extension can be outside of the 0.0 to 1.0 range."
		 */
		db_render_override |= S_02800C_DISABLE_VIEWPORT_CLAMP(1);
	}

	radeon_set_context_reg(ctx_cs, R_028800_DB_DEPTH_CONTROL, db_depth_control);
	radeon_set_context_reg(ctx_cs, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);

	radeon_set_context_reg(ctx_cs, R_028000_DB_RENDER_CONTROL, db_render_control);
	radeon_set_context_reg(ctx_cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
	radeon_set_context_reg(ctx_cs, R_028010_DB_RENDER_OVERRIDE2, db_render_override2);
}

static void
radv_pipeline_generate_blend_state(struct radeon_cmdbuf *ctx_cs,
				   struct radv_pipeline *pipeline,
				   const struct radv_blend_state *blend)
{
	radeon_set_context_reg_seq(ctx_cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(ctx_cs, blend->cb_blend_control, 8);
	radeon_set_context_reg(ctx_cs, R_028808_CB_COLOR_CONTROL, blend->cb_color_control);
	radeon_set_context_reg(ctx_cs, R_028B70_DB_ALPHA_TO_MASK, blend->db_alpha_to_mask);

	if (pipeline->device->physical_device->rad_info.has_rbplus) {
		radeon_set_context_reg_seq(ctx_cs, R_028760_SX_MRT0_BLEND_OPT, 8);
		radeon_emit_array(ctx_cs, blend->sx_mrt_blend_opt, 8);
	}

	radeon_set_context_reg(ctx_cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(ctx_cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(ctx_cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	pipeline->graphics.col_format = blend->spi_shader_col_format;
	pipeline->graphics.cb_target_mask = blend->cb_target_mask;
}

static const VkConservativeRasterizationModeEXT
radv_get_conservative_raster_mode(const VkPipelineRasterizationStateCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationConservativeStateCreateInfoEXT *conservative_raster =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT);

	if (!conservative_raster)
		return VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT;
	return conservative_raster->conservativeRasterizationMode;
}

static void
radv_pipeline_generate_raster_state(struct radeon_cmdbuf *ctx_cs,
				    struct radv_pipeline *pipeline,
				    const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationStateCreateInfo *vkraster = pCreateInfo->pRasterizationState;
	const VkConservativeRasterizationModeEXT mode =
		radv_get_conservative_raster_mode(vkraster);
	uint32_t pa_sc_conservative_rast = S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1);
	bool depth_clip_disable = vkraster->depthClampEnable;

	const VkPipelineRasterizationDepthClipStateCreateInfoEXT *depth_clip_state =
		vk_find_struct_const(vkraster->pNext, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
	if (depth_clip_state) {
		depth_clip_disable = !depth_clip_state->depthClipEnable;
	}

	radeon_set_context_reg(ctx_cs, R_028810_PA_CL_CLIP_CNTL,
			       S_028810_DX_CLIP_SPACE_DEF(1) | // vulkan uses DX conventions.
			       S_028810_ZCLIP_NEAR_DISABLE(depth_clip_disable ? 1 : 0) |
			       S_028810_ZCLIP_FAR_DISABLE(depth_clip_disable ? 1 : 0) |
			       S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
			       S_028810_DX_LINEAR_ATTR_CLIP_ENA(1));

	radeon_set_context_reg(ctx_cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       S_0286D4_FLAT_SHADE_ENA(1) |
			       S_0286D4_PNT_SPRITE_ENA(1) |
			       S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
			       S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
			       S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
			       S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
			       S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */

	radeon_set_context_reg(ctx_cs, R_028BE4_PA_SU_VTX_CNTL,
			       S_028BE4_PIX_CENTER(1) | // TODO verify
			       S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
			       S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));

	radeon_set_context_reg(ctx_cs, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_FACE(vkraster->frontFace) |
			       S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
			       S_028814_CULL_BACK(!!(vkraster->cullMode & VK_CULL_MODE_BACK_BIT)) |
			       S_028814_POLY_MODE(vkraster->polygonMode != VK_POLYGON_MODE_FILL) |
			       S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(vkraster->polygonMode)) |
			       S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(vkraster->polygonMode)) |
			       S_028814_POLY_OFFSET_FRONT_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
			       S_028814_POLY_OFFSET_BACK_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
			       S_028814_POLY_OFFSET_PARA_ENABLE(vkraster->depthBiasEnable ? 1 : 0));

	/* Conservative rasterization. */
	if (mode != VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT) {
		struct radv_multisample_state *ms = &pipeline->graphics.ms;

		ms->pa_sc_aa_config |= S_028BE0_AA_MASK_CENTROID_DTMN(1);
		ms->db_eqaa |= S_028804_ENABLE_POSTZ_OVERRASTERIZATION(1) |
			       S_028804_OVERRASTERIZATION_AMOUNT(4);

		pa_sc_conservative_rast = S_028C4C_PREZ_AA_MASK_ENABLE(1) |
					  S_028C4C_POSTZ_AA_MASK_ENABLE(1) |
					  S_028C4C_CENTROID_SAMPLE_OVERRIDE(1);

		if (mode == VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT) {
			pa_sc_conservative_rast |=
				S_028C4C_OVER_RAST_ENABLE(1) |
				S_028C4C_OVER_RAST_SAMPLE_SELECT(0) |
				S_028C4C_UNDER_RAST_ENABLE(0) |
				S_028C4C_UNDER_RAST_SAMPLE_SELECT(1) |
				S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(1);
		} else {
			assert(mode == VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT);
			pa_sc_conservative_rast |=
				S_028C4C_OVER_RAST_ENABLE(0) |
				S_028C4C_OVER_RAST_SAMPLE_SELECT(1) |
				S_028C4C_UNDER_RAST_ENABLE(1) |
				S_028C4C_UNDER_RAST_SAMPLE_SELECT(0) |
				S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(0);
		}
	}

	radeon_set_context_reg(ctx_cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
			       pa_sc_conservative_rast);
}

static void
radv_pipeline_generate_multisample_state(struct radeon_cmdbuf *ctx_cs,
					 struct radv_pipeline *pipeline)
{
	struct radv_multisample_state *ms = &pipeline->graphics.ms;

	radeon_set_context_reg_seq(ctx_cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(ctx_cs, R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(ctx_cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
	radeon_set_context_reg(ctx_cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
	radeon_set_context_reg(ctx_cs, R_028BDC_PA_SC_LINE_CNTL, ms->pa_sc_line_cntl);
	radeon_set_context_reg(ctx_cs, R_028BE0_PA_SC_AA_CONFIG, ms->pa_sc_aa_config);

	/* The exclusion bits can be set to improve rasterization efficiency
	 * if no sample lies on the pixel boundary (-8 sample offset). It's
	 * currently always TRUE because the driver doesn't support 16 samples.
	 */
	bool exclusion = pipeline->device->physical_device->rad_info.chip_class >= GFX7;
	radeon_set_context_reg(ctx_cs, R_02882C_PA_SU_PRIM_FILTER_CNTL,
			       S_02882C_XMAX_RIGHT_EXCLUSION(exclusion) |
			       S_02882C_YMAX_BOTTOM_EXCLUSION(exclusion));

	/* GFX9: Flush DFSM when the AA mode changes. */
	if (pipeline->device->dfsm_allowed) {
		radeon_emit(ctx_cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(ctx_cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}
}

static void
radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
				   struct radv_pipeline *pipeline)
{
	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	const struct radv_shader_variant *vs =
		pipeline->shaders[MESA_SHADER_TESS_EVAL] ?
		pipeline->shaders[MESA_SHADER_TESS_EVAL] :
		pipeline->shaders[MESA_SHADER_VERTEX];
	unsigned vgt_primitiveid_en = 0;
	uint32_t vgt_gs_mode = 0;

	if (radv_pipeline_has_ngg(pipeline))
		return;

	if (radv_pipeline_has_gs(pipeline)) {
		const struct radv_shader_variant *gs =
			pipeline->shaders[MESA_SHADER_GEOMETRY];

		vgt_gs_mode = ac_vgt_gs_mode(gs->info.gs.vertices_out,
					     pipeline->device->physical_device->rad_info.chip_class);
	} else if (outinfo->export_prim_id || vs->info.uses_prim_id) {
		vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
		vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
	}

	radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
	radeon_set_context_reg(ctx_cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
}

static void
radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *ctx_cs,
			     struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B124_MEM_BASE(va >> 40));
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);

	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	unsigned clip_dist_mask, cull_dist_mask, total_mask;
	clip_dist_mask = outinfo->clip_dist_mask;
	cull_dist_mask = outinfo->cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;
	bool misc_vec_ena = outinfo->writes_pointsize ||
			    outinfo->writes_layer ||
			    outinfo->writes_viewport_index;
	unsigned spi_vs_out_config, nparams;

	/* VS is required to export at least one param. */
	nparams = MAX2(outinfo->param_exports, 1);
	spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
		spi_vs_out_config |= S_0286C4_NO_PC_EXPORT(outinfo->param_exports == 0);
	}

	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG, spi_vs_out_config);

	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
			       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
			       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       cull_dist_mask << 8 |
			       clip_dist_mask);

	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
		radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF,
				       outinfo->writes_viewport_index);
}

static void
radv_pipeline_generate_hw_es(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);
}

static void
radv_pipeline_generate_hw_ls(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader,
			     const struct radv_tessellation_state *tess)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	uint32_t rsrc2 = shader->config.rsrc2;

	radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));

	rsrc2 |= S_00B52C_LDS_SIZE(tess->lds_size);
	if (pipeline->device->physical_device->rad_info.chip_class == GFX7 &&
	    pipeline->device->physical_device->rad_info.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);

	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, rsrc2);
}

static void
radv_pipeline_generate_hw_ngg(struct radeon_cmdbuf *ctx_cs,
			      struct radeon_cmdbuf *cs,
			      struct radv_pipeline *pipeline,
			      struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	gl_shader_stage es_type =
		radv_pipeline_has_tess(pipeline) ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
	struct radv_shader_variant *es =
		es_type == MESA_SHADER_TESS_EVAL ? pipeline->shaders[MESA_SHADER_TESS_EVAL] : pipeline->shaders[MESA_SHADER_VERTEX];
	const struct gfx10_ngg_info *ngg_state = &shader->info.ngg_info;

	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
	radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);

	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	unsigned clip_dist_mask, cull_dist_mask, total_mask;
	clip_dist_mask = outinfo->clip_dist_mask;
	cull_dist_mask = outinfo->cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;
	bool misc_vec_ena = outinfo->writes_pointsize ||
			    outinfo->writes_layer ||
			    outinfo->writes_viewport_index;
	bool es_enable_prim_id = outinfo->export_prim_id ||
				 (es && es->info.uses_prim_id);
	bool break_wave_at_eoi = false;
	unsigned ge_cntl;
	unsigned nparams;

	if (es_type == MESA_SHADER_TESS_EVAL) {
		struct radv_shader_variant *gs =
			pipeline->shaders[MESA_SHADER_GEOMETRY];

		if (es_enable_prim_id || (gs && gs->info.uses_prim_id))
			break_wave_at_eoi = true;
	}

	nparams = MAX2(outinfo->param_exports, 1);
	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(nparams - 1) |
			       S_0286C4_NO_PC_EXPORT(outinfo->param_exports == 0));

	radeon_set_context_reg(ctx_cs, R_028708_SPI_SHADER_IDX_FORMAT,
			       S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP));
	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
			       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
			       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       cull_dist_mask << 8 |
			       clip_dist_mask);

	radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN,
			       S_028A84_PRIMITIVEID_EN(es_enable_prim_id) |
			       S_028A84_NGG_DISABLE_PROVOK_REUSE(outinfo->export_prim_id));

	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       ngg_state->vgt_esgs_ring_itemsize);

	/* NGG specific registers. */
	struct radv_shader_variant *gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	uint32_t gs_num_invocations = gs ? gs->info.gs.invocations : 1;

	radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
			       S_028A44_ES_VERTS_PER_SUBGRP(ngg_state->hw_max_esverts) |
			       S_028A44_GS_PRIMS_PER_SUBGRP(ngg_state->max_gsprims) |
			       S_028A44_GS_INST_PRIMS_IN_SUBGRP(ngg_state->max_gsprims * gs_num_invocations));
	radeon_set_context_reg(ctx_cs, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
			       S_0287FC_MAX_VERTS_PER_SUBGROUP(ngg_state->max_out_verts));
	radeon_set_context_reg(ctx_cs, R_028B4C_GE_NGG_SUBGRP_CNTL,
			       S_028B4C_PRIM_AMP_FACTOR(ngg_state->prim_amp_factor) |
			       S_028B4C_THDS_PER_SUBGRP(0)); /* for fast launch */
	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(gs_num_invocations) |
			       S_028B90_ENABLE(gs_num_invocations > 1) |
			       S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(ngg_state->max_vert_out_per_gs_instance));

	/* User edge flags are set by the pos exports. If user edge flags are
	 * not used, we must use hw-generated edge flags and pass them via
	 * the prim export to prevent drawing lines on internal edges of
	 * decomposed primitives (such as quads) with polygon mode = lines.
	 *
	 * TODO: We should combine hw-generated edge flags with user edge
	 *       flags in the shader.
	 */
	radeon_set_context_reg(ctx_cs, R_028838_PA_CL_NGG_CNTL,
			       S_028838_INDEX_BUF_EDGE_FLAG_ENA(!radv_pipeline_has_tess(pipeline) &&
								!radv_pipeline_has_gs(pipeline)));

	ge_cntl = S_03096C_PRIM_GRP_SIZE(ngg_state->max_gsprims) |
		  S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
		  S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);

	/* Bug workaround for a possible hang with non-tessellation cases.
	 * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0
	 *
	 * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
	 */
	if ((pipeline->device->physical_device->rad_info.family == CHIP_NAVI10 ||
	     pipeline->device->physical_device->rad_info.family == CHIP_NAVI12 ||
	     pipeline->device->physical_device->rad_info.family == CHIP_NAVI14) &&
	    !radv_pipeline_has_tess(pipeline) &&
	    ngg_state->hw_max_esverts != 256) {
		ge_cntl &= C_03096C_VERT_GRP_SIZE;

		if (ngg_state->hw_max_esverts > 5) {
			ge_cntl |= S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts - 5);
		}
	}

	radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL, ge_cntl);
}

static void
radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader,
			     const struct radv_tessellation_state *tess)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
		unsigned hs_rsrc2 = shader->config.rsrc2;

		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(tess->lds_size);
		} else {
			hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(tess->lds_size);
		}

		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));
		} else {
			radeon_set_sh_reg_seq(cs, R_00B410_SPI_SHADER_PGM_LO_LS, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B414_MEM_BASE(va >> 40));
		}

		radeon_set_sh_reg_seq(cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
		radeon_emit(cs, shader->config.rsrc1);
		radeon_emit(cs, hs_rsrc2);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
		radeon_emit(cs, va >> 8);
		radeon_emit(cs, S_00B424_MEM_BASE(va >> 40));
		radeon_emit(cs, shader->config.rsrc1);
		radeon_emit(cs, shader->config.rsrc2);
	}
}

static void
radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
				     struct radeon_cmdbuf *cs,
				     struct radv_pipeline *pipeline,
				     const struct radv_tessellation_state *tess)
{
	struct radv_shader_variant *vs;

	/* Skip shaders merged into HS/GS */
	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	if (!vs)
		return;

	if (vs->info.vs.as_ls)
		radv_pipeline_generate_hw_ls(cs, pipeline, vs, tess);
	else if (vs->info.vs.as_es)
		radv_pipeline_generate_hw_es(cs, pipeline, vs);
	else if (vs->info.is_ngg)
		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, vs);
	else
		radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, vs);
}

static void
radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *ctx_cs,
				    struct radeon_cmdbuf *cs,
				    struct radv_pipeline *pipeline,
				    const struct radv_tessellation_state *tess)
{
	if (!radv_pipeline_has_tess(pipeline))
		return;

	struct radv_shader_variant *tes, *tcs;

	tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
	tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];

	if (tes->info.is_ngg) {
		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, tes);
	} else if (tes->info.tes.as_es)
		radv_pipeline_generate_hw_es(cs, pipeline, tes);
	else
		radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, tes);

	radv_pipeline_generate_hw_hs(cs, pipeline, tcs, tess);

	radeon_set_context_reg(ctx_cs, R_028B6C_VGT_TF_PARAM,
			       tess->tf_param);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7)
		radeon_set_context_reg_idx(ctx_cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   tess->ls_hs_config);
	else
		radeon_set_context_reg(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
				       tess->ls_hs_config);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 &&
	    !radv_pipeline_has_gs(pipeline) && !radv_pipeline_has_ngg(pipeline)) {
		radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
				       S_028A44_ES_VERTS_PER_SUBGRP(250) |
				       S_028A44_GS_PRIMS_PER_SUBGRP(126) |
				       S_028A44_GS_INST_PRIMS_IN_SUBGRP(126));
	}
}

static void
radv_pipeline_generate_hw_gs(struct radeon_cmdbuf *ctx_cs,
			     struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *gs)
{
	const struct gfx9_gs_info *gs_state = &gs->info.gs_ring_info;
	unsigned gs_max_out_vertices;
	uint8_t *num_components;
	uint8_t max_stream;
	unsigned offset;
	uint64_t va;

	gs_max_out_vertices = gs->info.gs.vertices_out;
	max_stream = gs->info.gs.max_stream;
	num_components = gs->info.gs.num_stream_output_components;

	offset = num_components[0] * gs_max_out_vertices;

	radeon_set_context_reg_seq(ctx_cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 1)
		offset += num_components[1] * gs_max_out_vertices;
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 2)
		offset += num_components[2] * gs_max_out_vertices;
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 3)
		offset += num_components[3] * gs_max_out_vertices;
	radeon_set_context_reg(ctx_cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);

	radeon_set_context_reg_seq(ctx_cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
	radeon_emit(ctx_cs, num_components[0]);
	radeon_emit(ctx_cs, (max_stream >= 1) ? num_components[1] : 0);
	radeon_emit(ctx_cs, (max_stream >= 2) ? num_components[2] : 0);
	radeon_emit(ctx_cs, (max_stream >= 3) ? num_components[3] : 0);

	uint32_t gs_num_invocations = gs->info.gs.invocations;
	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
			       S_028B90_ENABLE(gs_num_invocations > 0));

	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       gs_state->vgt_esgs_ring_itemsize);

	va = radv_buffer_get_va(gs->bo) + gs->bo_offset;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
		} else {
			radeon_set_sh_reg_seq(cs, R_00B210_SPI_SHADER_PGM_LO_ES, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B214_MEM_BASE(va >> 40));
		}

		radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
		radeon_emit(cs, gs->config.rsrc1);
		radeon_emit(cs, gs->config.rsrc2 | S_00B22C_LDS_SIZE(gs_state->lds_size));

		radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL, gs_state->vgt_gs_onchip_cntl);
		radeon_set_context_reg(ctx_cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, gs_state->vgt_gs_max_prims_per_subgroup);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
		radeon_emit(cs, va >> 8);
		radeon_emit(cs, S_00B224_MEM_BASE(va >> 40));
		radeon_emit(cs, gs->config.rsrc1);
		radeon_emit(cs, gs->config.rsrc2);
	}

	radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, pipeline->gs_copy_shader);
}

static void
radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *ctx_cs,
				       struct radeon_cmdbuf *cs,
				       struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *gs;

	gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	if (!gs)
		return;

	if (gs->info.is_ngg)
		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, gs);
	else
		radv_pipeline_generate_hw_gs(ctx_cs, cs, pipeline, gs);

	radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT,
			       gs->info.gs.vertices_out);
}

static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade,
				   bool explicit, bool float16)
{
	uint32_t ps_input_cntl;
	if (offset <= AC_EXP_PARAM_OFFSET_31) {
		ps_input_cntl = S_028644_OFFSET(offset);
		if (flat_shade || explicit)
			ps_input_cntl |= S_028644_FLAT_SHADE(1);
		if (explicit) {
			/* Force parameter cache to be read in passthrough
			 * mode.
			 */
			ps_input_cntl |= S_028644_OFFSET(1 << 5);
		}
		if (float16) {
			ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
					 S_028644_ATTR0_VALID(1);
		}
	} else {
		/* The input is a DEFAULT_VAL constant. */
		assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
		       offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
		offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
		ps_input_cntl = S_028644_OFFSET(0x20) |
			S_028644_DEFAULT_VAL(offset);
	}
	return ps_input_cntl;
}

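/* Illustrative example (not part of the original comments): a parameter
 * exported at offset 3 with flat shading yields S_028644_OFFSET(3) |
 * S_028644_FLAT_SHADE(1); a DEFAULT_VAL input yields S_028644_OFFSET(0x20) |
 * S_028644_DEFAULT_VAL(n), i.e. no parameter-cache slot is read and the
 * hardware substitutes a constant instead.
 */
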
static void
radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
				 struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	uint32_t ps_input_cntl[32];

	unsigned ps_offset = 0;

	if (ps->info.ps.prim_id_input) {
		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false, false);
			++ps_offset;
		}
	}

	if (ps->info.ps.layer_input ||
	    ps->info.needs_multiview_view_index) {
		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_LAYER];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED)
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false, false);
		else
			ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true, false, false);
		++ps_offset;
	}

	if (ps->info.ps.has_pcoord) {
		unsigned val;
		val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
		ps_input_cntl[ps_offset] = val;
		ps_offset++;
	}

	if (ps->info.ps.num_input_clips_culls) {
		unsigned vs_offset;

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false, false);
			++ps_offset;
		}

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED &&
		    ps->info.ps.num_input_clips_culls > 4) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false, false);
			++ps_offset;
		}
	}

	for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.ps.input_mask; ++i) {
		unsigned vs_offset;
		bool flat_shade;
		bool explicit;
		bool float16;
		if (!(ps->info.ps.input_mask & (1u << i)))
			continue;

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VAR0 + i];
		if (vs_offset == AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = S_028644_OFFSET(0x20);
			++ps_offset;
			continue;
		}

		flat_shade = !!(ps->info.ps.flat_shaded_mask & (1u << ps_offset));
		explicit = !!(ps->info.ps.explicit_shaded_mask & (1u << ps_offset));
		float16 = !!(ps->info.ps.float16_shaded_mask & (1u << ps_offset));

		ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, flat_shade, explicit, float16);
		++ps_offset;
	}

	radeon_set_context_reg_seq(ctx_cs, R_028644_SPI_PS_INPUT_CNTL_0, ps_offset);
	for (unsigned i = 0; i < ps_offset; i++) {
		radeon_emit(ctx_cs, ps_input_cntl[i]);
	}
}

static uint32_t
radv_compute_db_shader_control(const struct radv_device *device,
			       const struct radv_pipeline *pipeline,
			       const struct radv_shader_variant *ps)
{
	unsigned z_order;
	if (ps->info.ps.early_fragment_test || !ps->info.ps.writes_memory)
		z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
	else
		z_order = V_02880C_LATE_Z;

	bool disable_rbplus = device->physical_device->rad_info.has_rbplus &&
			      !device->physical_device->rad_info.rbplus_allowed;

	/* It shouldn't be needed to export gl_SampleMask when MSAA is disabled
	 * but this appears to break Project Cars (DXVK). See
	 * https://bugs.freedesktop.org/show_bug.cgi?id=109401
	 */
	bool mask_export_enable = ps->info.ps.writes_sample_mask;

	return  S_02880C_Z_EXPORT_ENABLE(ps->info.ps.writes_z) |
		S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.ps.writes_stencil) |
		S_02880C_KILL_ENABLE(!!ps->info.ps.can_discard) |
		S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
		S_02880C_Z_ORDER(z_order) |
		S_02880C_DEPTH_BEFORE_SHADER(ps->info.ps.early_fragment_test) |
		S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(ps->info.ps.post_depth_coverage) |
		S_02880C_EXEC_ON_HIER_FAIL(ps->info.ps.writes_memory) |
		S_02880C_EXEC_ON_NOOP(ps->info.ps.writes_memory) |
		S_02880C_DUAL_QUAD_DISABLE(disable_rbplus);
}

static void
radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *ctx_cs,
				       struct radeon_cmdbuf *cs,
				       struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *ps;
	uint64_t va;
	assert (pipeline->shaders[MESA_SHADER_FRAGMENT]);

	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	va = radv_buffer_get_va(ps->bo) + ps->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B024_MEM_BASE(va >> 40));
	radeon_emit(cs, ps->config.rsrc1);
	radeon_emit(cs, ps->config.rsrc2);

	radeon_set_context_reg(ctx_cs, R_02880C_DB_SHADER_CONTROL,
			       radv_compute_db_shader_control(pipeline->device,
							      pipeline, ps));

	radeon_set_context_reg(ctx_cs, R_0286CC_SPI_PS_INPUT_ENA,
			       ps->config.spi_ps_input_ena);

	radeon_set_context_reg(ctx_cs, R_0286D0_SPI_PS_INPUT_ADDR,
			       ps->config.spi_ps_input_addr);

	radeon_set_context_reg(ctx_cs, R_0286D8_SPI_PS_IN_CONTROL,
			       S_0286D8_NUM_INTERP(ps->info.ps.num_interp) |
			       S_0286D8_PS_W32_EN(ps->info.wave_size == 32));

	radeon_set_context_reg(ctx_cs, R_0286E0_SPI_BARYC_CNTL, pipeline->graphics.spi_baryc_cntl);

	radeon_set_context_reg(ctx_cs, R_028710_SPI_SHADER_Z_FORMAT,
			       ac_get_spi_shader_z_format(ps->info.ps.writes_z,
							  ps->info.ps.writes_stencil,
							  ps->info.ps.writes_sample_mask));

	if (pipeline->device->dfsm_allowed) {
		/* optimise this? */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}
}

static void
radv_pipeline_generate_vgt_vertex_reuse(struct radeon_cmdbuf *ctx_cs,
					struct radv_pipeline *pipeline)
{
	if (pipeline->device->physical_device->rad_info.family < CHIP_POLARIS10 ||
	    pipeline->device->physical_device->rad_info.chip_class >= GFX10)
		return;

	unsigned vtx_reuse_depth = 30;
	if (radv_pipeline_has_tess(pipeline) &&
	    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD) {
		vtx_reuse_depth = 14;
	}
	radeon_set_context_reg(ctx_cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
			       S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
}

static uint32_t
radv_compute_vgt_shader_stages_en(const struct radv_pipeline *pipeline)
{
	uint32_t stages = 0;
	if (radv_pipeline_has_tess(pipeline)) {
		stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
			  S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);

		if (radv_pipeline_has_gs(pipeline))
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
				  S_028B54_GS_EN(1);
		else if (radv_pipeline_has_ngg(pipeline))
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
		else
			stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
	} else if (radv_pipeline_has_gs(pipeline)) {
		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
			  S_028B54_GS_EN(1);
	} else if (radv_pipeline_has_ngg(pipeline)) {
		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
	}

	if (radv_pipeline_has_ngg(pipeline)) {
		stages |= S_028B54_PRIMGEN_EN(1);
		if (pipeline->streamout_shader)
			stages |= S_028B54_NGG_WAVE_ID_EN(1);
		if (radv_pipeline_has_ngg_passthrough(pipeline))
			stages |= S_028B54_PRIMGEN_PASSTHRU_EN(1);
	} else if (radv_pipeline_has_gs(pipeline)) {
		stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
		uint8_t hs_size = 64, gs_size = 64, vs_size = 64;

		if (radv_pipeline_has_tess(pipeline))
			hs_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.wave_size;

		if (pipeline->shaders[MESA_SHADER_GEOMETRY]) {
			vs_size = gs_size = pipeline->shaders[MESA_SHADER_GEOMETRY]->info.wave_size;
			if (pipeline->gs_copy_shader)
				vs_size = pipeline->gs_copy_shader->info.wave_size;
		} else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
			vs_size = pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.wave_size;
		else if (pipeline->shaders[MESA_SHADER_VERTEX])
			vs_size = pipeline->shaders[MESA_SHADER_VERTEX]->info.wave_size;

		if (radv_pipeline_has_ngg(pipeline))
			gs_size = vs_size;

		/* legacy GS only supports Wave64 */
		stages |= S_028B54_HS_W32_EN(hs_size == 32 ? 1 : 0) |
			  S_028B54_GS_W32_EN(gs_size == 32 ? 1 : 0) |
			  S_028B54_VS_W32_EN(vs_size == 32 ? 1 : 0);
	}

	return stages;
}

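/* Informal summary (added note, not from the original source): on GFX9+ the
 * API stages are merged before they reach the hardware, so the bits above
 * describe which HW stages actually run, e.g. VS+TCS as LS/HS, (TES or VS)+GS
 * as ES/GS, and either a HW VS (legacy path / GS copy shader) or the NGG
 * primitive-generator path feeding the rasterizer.
 */
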
static uint32_t
radv_compute_cliprect_rule(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);

	if (!discard_rectangle_info)
		return 0xffff;

	unsigned mask = 0;

	for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
		/* Interpret i as a bitmask, and then set the bit in the mask if
		 * that combination of rectangles in which the pixel is contained
		 * should pass the cliprect test. */
		unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);

		if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
		    !relevant_subset)
			continue;

		if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
		    relevant_subset)
			continue;

		mask |= 1u << i;
	}

	return mask;
}

static void
gfx10_pipeline_generate_ge_cntl(struct radeon_cmdbuf *ctx_cs,
				struct radv_pipeline *pipeline,
				const struct radv_tessellation_state *tess)
{
	bool break_wave_at_eoi = false;
	unsigned primgroup_size;
	unsigned vertgroup_size = 256; /* 256 = disable vertex grouping */

	if (radv_pipeline_has_tess(pipeline)) {
		primgroup_size = tess->num_patches; /* must be a multiple of NUM_PATCHES */
	} else if (radv_pipeline_has_gs(pipeline)) {
		const struct gfx9_gs_info *gs_state =
			&pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs_ring_info;
		unsigned vgt_gs_onchip_cntl = gs_state->vgt_gs_onchip_cntl;
		primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
	} else {
		primgroup_size = 128; /* recommended without a GS and tess */
	}

	if (radv_pipeline_has_tess(pipeline)) {
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.uses_prim_id ||
		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.uses_prim_id)
			break_wave_at_eoi = true;
	}

	radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL,
			       S_03096C_PRIM_GRP_SIZE(primgroup_size) |
			       S_03096C_VERT_GRP_SIZE(vertgroup_size) |
			       S_03096C_PACKET_TO_ONE_PA(0) /* line stipple */ |
			       S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi));
}

static void
radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
			   const VkGraphicsPipelineCreateInfo *pCreateInfo,
			   const struct radv_graphics_pipeline_create_info *extra,
			   const struct radv_blend_state *blend,
			   const struct radv_tessellation_state *tess,
			   unsigned prim, unsigned gs_out)
{
	struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
	struct radeon_cmdbuf *cs = &pipeline->cs;

	cs->max_dw = 64;
	ctx_cs->max_dw = 256;
	cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw));
	ctx_cs->buf = cs->buf + cs->max_dw;

	radv_pipeline_generate_depth_stencil_state(ctx_cs, pipeline, pCreateInfo, extra);
	radv_pipeline_generate_blend_state(ctx_cs, pipeline, blend);
	radv_pipeline_generate_raster_state(ctx_cs, pipeline, pCreateInfo);
	radv_pipeline_generate_multisample_state(ctx_cs, pipeline);
	radv_pipeline_generate_vgt_gs_mode(ctx_cs, pipeline);
	radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline, tess);
	radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline, tess);
	radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline);
	radv_pipeline_generate_fragment_shader(ctx_cs, cs, pipeline);
	radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
	radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
	radv_pipeline_generate_binning_state(ctx_cs, pipeline, pCreateInfo, blend);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 && !radv_pipeline_has_ngg(pipeline))
		gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline, tess);

	radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
		radeon_set_uconfig_reg_idx(pipeline->device->physical_device,
					   cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
	} else {
		radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
	}
	radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);

	radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, radv_compute_cliprect_rule(pCreateInfo));

	pipeline->ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);

	assert(ctx_cs->cdw <= ctx_cs->max_dw);
	assert(cs->cdw <= cs->max_dw);
}

static struct radv_ia_multi_vgt_param_helpers
radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
					const struct radv_tessellation_state *tess,
					uint32_t prim)
{
	struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param = {0};
	const struct radv_device *device = pipeline->device;

	if (radv_pipeline_has_tess(pipeline))
		ia_multi_vgt_param.primgroup_size = tess->num_patches;
	else if (radv_pipeline_has_gs(pipeline))
		ia_multi_vgt_param.primgroup_size = 64;
	else
		ia_multi_vgt_param.primgroup_size = 128; /* recommended without a GS */

	/* GS requirement. */
	ia_multi_vgt_param.partial_es_wave = false;
	if (radv_pipeline_has_gs(pipeline) && device->physical_device->rad_info.chip_class <= GFX8)
		if (SI_GS_PER_ES / ia_multi_vgt_param.primgroup_size >= pipeline->device->gs_table_depth - 3)
			ia_multi_vgt_param.partial_es_wave = true;

	ia_multi_vgt_param.wd_switch_on_eop = false;
	if (device->physical_device->rad_info.chip_class >= GFX7) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (device->physical_device->rad_info.max_se < 4 ||
		    prim == V_008958_DI_PT_POLYGON ||
		    prim == V_008958_DI_PT_LINELOOP ||
		    prim == V_008958_DI_PT_TRIFAN ||
		    prim == V_008958_DI_PT_TRISTRIP_ADJ ||
		    (pipeline->graphics.prim_restart_enable &&
		     (device->physical_device->rad_info.family < CHIP_POLARIS10 ||
		      (prim != V_008958_DI_PT_POINTLIST &&
		       prim != V_008958_DI_PT_LINESTRIP))))
			ia_multi_vgt_param.wd_switch_on_eop = true;
	}

	ia_multi_vgt_param.ia_switch_on_eoi = false;
	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.prim_id_input)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_gs(pipeline) &&
	    pipeline->shaders[MESA_SHADER_GEOMETRY]->info.uses_prim_id)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_tess(pipeline)) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.uses_prim_id ||
		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.uses_prim_id)
			ia_multi_vgt_param.ia_switch_on_eoi = true;
	}

	ia_multi_vgt_param.partial_vs_wave = false;
	if (radv_pipeline_has_tess(pipeline)) {
		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((device->physical_device->rad_info.family == CHIP_TAHITI ||
		     device->physical_device->rad_info.family == CHIP_PITCAIRN ||
		     device->physical_device->rad_info.family == CHIP_BONAIRE) &&
		    radv_pipeline_has_gs(pipeline))
			ia_multi_vgt_param.partial_vs_wave = true;
		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (device->physical_device->rad_info.has_distributed_tess) {
			if (radv_pipeline_has_gs(pipeline)) {
				if (device->physical_device->rad_info.chip_class <= GFX8)
					ia_multi_vgt_param.partial_es_wave = true;
			} else {
				ia_multi_vgt_param.partial_vs_wave = true;
			}
		}
	}

	/* Workaround for a VGT hang when strip primitive types are used with
	 * primitive restart.
	 */
	if (pipeline->graphics.prim_restart_enable &&
	    (prim == V_008958_DI_PT_LINESTRIP ||
	     prim == V_008958_DI_PT_TRISTRIP ||
	     prim == V_008958_DI_PT_LINESTRIP_ADJ ||
	     prim == V_008958_DI_PT_TRISTRIP_ADJ)) {
		ia_multi_vgt_param.partial_vs_wave = true;
	}

	if (radv_pipeline_has_gs(pipeline)) {
		/* On these chips there is the possibility of a hang if the
		 * pipeline uses a GS and partial_vs_wave is not set.
		 *
		 * This mostly does not hit 4-SE chips, as those typically set
		 * ia_switch_on_eoi and then partial_vs_wave is set for pipelines
		 * with GS due to another workaround.
		 *
		 * Reproducer: https://bugs.freedesktop.org/show_bug.cgi?id=109242
		 */
		if (device->physical_device->rad_info.family == CHIP_TONGA ||
		    device->physical_device->rad_info.family == CHIP_FIJI ||
		    device->physical_device->rad_info.family == CHIP_POLARIS10 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS11 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS12 ||
		    device->physical_device->rad_info.family == CHIP_VEGAM) {
			ia_multi_vgt_param.partial_vs_wave = true;
		}
	}

	ia_multi_vgt_param.base =
		S_028AA8_PRIMGROUP_SIZE(ia_multi_vgt_param.primgroup_size - 1) |
		/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
		S_028AA8_MAX_PRIMGRP_IN_WAVE(device->physical_device->rad_info.chip_class == GFX8 ? 2 : 0) |
		S_030960_EN_INST_OPT_BASIC(device->physical_device->rad_info.chip_class >= GFX9) |
		S_030960_EN_INST_OPT_ADV(device->physical_device->rad_info.chip_class >= GFX9);

	return ia_multi_vgt_param;
}

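/* Informal note (added, hedged): these helpers only collect the
 * draw-independent part of IA_MULTI_VGT_PARAM plus the workaround booleans;
 * the remaining EOP/EOI switch decisions depend on draw-time state (indexed
 * vs. non-indexed, instancing), so the register value is finalized when the
 * draw is emitted rather than here.
 */
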
static void
radv_compute_vertex_input_state(struct radv_pipeline *pipeline,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineVertexInputStateCreateInfo *vi_info =
		pCreateInfo->pVertexInputState;
	struct radv_vertex_elements_info *velems = &pipeline->vertex_elements;

	for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
		const VkVertexInputAttributeDescription *desc =
			&vi_info->pVertexAttributeDescriptions[i];
		unsigned loc = desc->location;
		const struct vk_format_description *format_desc;

		format_desc = vk_format_description(desc->format);

		velems->format_size[loc] = format_desc->block.bits / 8;
	}

	for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *desc =
			&vi_info->pVertexBindingDescriptions[i];

		pipeline->binding_stride[desc->binding] = desc->stride;
		pipeline->num_vertex_bindings =
			MAX2(pipeline->num_vertex_bindings, desc->binding + 1);
	}
}

static struct radv_shader_variant *
radv_pipeline_get_streamout_shader(struct radv_pipeline *pipeline)
{
	int i;

	for (i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
		struct radv_shader_variant *shader =
			radv_get_shader(pipeline, i);

		if (shader && shader->info.so.num_outputs > 0)
			return shader;
	}

	return NULL;
}

static VkResult radv_secure_compile(struct radv_pipeline *pipeline,
				    struct radv_device *device,
				    const struct radv_pipeline_key *key,
				    const VkPipelineShaderStageCreateInfo **pStages,
				    const VkPipelineCreateFlags flags,
				    unsigned num_stages)
{
	uint8_t allowed_pipeline_hashes[2][20];
	radv_hash_shaders(allowed_pipeline_hashes[0], pStages,
	                  pipeline->layout, key, get_hash_flags(device));

	/* Generate the GS copy shader hash */
	memcpy(allowed_pipeline_hashes[1], allowed_pipeline_hashes[0], 20);
	allowed_pipeline_hashes[1][0] ^= 1;

	uint8_t allowed_hashes[2][20];
	for (unsigned i = 0; i < 2; ++i) {
		disk_cache_compute_key(device->physical_device->disk_cache,
		                       allowed_pipeline_hashes[i], 20,
		                       allowed_hashes[i]);
	}

	/* Do an early exit if all cache entries are already there. */
	bool may_need_copy_shader = pStages[MESA_SHADER_GEOMETRY];
	void *main_entry = disk_cache_get(device->physical_device->disk_cache, allowed_hashes[0], NULL);
	void *copy_entry = NULL;
	if (may_need_copy_shader)
		copy_entry = disk_cache_get(device->physical_device->disk_cache, allowed_hashes[1], NULL);

	bool has_all_cache_entries = main_entry && (!may_need_copy_shader || copy_entry);
	free(main_entry);
	free(copy_entry);

	if (has_all_cache_entries)
		return VK_SUCCESS;

	/* Claim a free secure compile process. */
	unsigned process = 0;
	uint8_t sc_threads = device->instance->num_sc_threads;
	while (true) {
		mtx_lock(&device->sc_state->secure_compile_mutex);
		if (device->sc_state->secure_compile_thread_counter < sc_threads) {
			device->sc_state->secure_compile_thread_counter++;
			for (unsigned i = 0; i < sc_threads; i++) {
				if (!device->sc_state->secure_compile_processes[i].in_use) {
					device->sc_state->secure_compile_processes[i].in_use = true;
					process = i;
					break;
				}
			}
			mtx_unlock(&device->sc_state->secure_compile_mutex);
			break;
		}
		mtx_unlock(&device->sc_state->secure_compile_mutex);
	}

	int fd_secure_input = device->sc_state->secure_compile_processes[process].fd_secure_input;
	int fd_secure_output = device->sc_state->secure_compile_processes[process].fd_secure_output;

	/* Fork a copy of the slim untainted secure compile process */
	enum radv_secure_compile_type sc_type = RADV_SC_TYPE_FORK_DEVICE;
	write(fd_secure_input, &sc_type, sizeof(sc_type));

	if (!radv_sc_read(fd_secure_output, &sc_type, sizeof(sc_type), true) ||
	    sc_type != RADV_SC_TYPE_INIT_SUCCESS)
		return VK_ERROR_DEVICE_LOST;

	fd_secure_input = device->sc_state->secure_compile_processes[process].fd_server;
	fd_secure_output = device->sc_state->secure_compile_processes[process].fd_client;

	/* Write pipeline / shader module out to secure process via pipe */
	sc_type = RADV_SC_TYPE_COMPILE_PIPELINE;
	write(fd_secure_input, &sc_type, sizeof(sc_type));

	/* Write pipeline layout out to secure process */
	struct radv_pipeline_layout *layout = pipeline->layout;
	write(fd_secure_input, layout, sizeof(struct radv_pipeline_layout));
	write(fd_secure_input, &layout->num_sets, sizeof(uint32_t));
	for (uint32_t set = 0; set < layout->num_sets; set++) {
		write(fd_secure_input, &layout->set[set].layout->layout_size, sizeof(uint32_t));
		write(fd_secure_input, layout->set[set].layout, layout->set[set].layout->layout_size);
	}

	/* Write pipeline key out to secure process */
	write(fd_secure_input, key, sizeof(struct radv_pipeline_key));

	/* Write pipeline create flags out to secure process */
	write(fd_secure_input, &flags, sizeof(VkPipelineCreateFlags));

	/* Write stage and shader information out to secure process */
	write(fd_secure_input, &num_stages, sizeof(uint32_t));
	for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
		if (!pStages[i])
			continue;

		/* Write stage out to secure process */
		gl_shader_stage stage = ffs(pStages[i]->stage) - 1;
		write(fd_secure_input, &stage, sizeof(gl_shader_stage));

		/* Write entry point name out to secure process */
		size_t name_size = strlen(pStages[i]->pName) + 1;
		write(fd_secure_input, &name_size, sizeof(size_t));
		write(fd_secure_input, pStages[i]->pName, name_size);

		/* Write shader module out to secure process */
		struct radv_shader_module *module = radv_shader_module_from_handle(pStages[i]->module);
		assert(!module->nir);
		size_t module_size = sizeof(struct radv_shader_module) + module->size;
		write(fd_secure_input, &module_size, sizeof(size_t));
		write(fd_secure_input, module, module_size);

		/* Write specialization info out to secure process */
		const VkSpecializationInfo *specInfo = pStages[i]->pSpecializationInfo;
		bool has_spec_info = specInfo ? true : false;
		write(fd_secure_input, &has_spec_info, sizeof(bool));
		if (specInfo) {
			write(fd_secure_input, &specInfo->dataSize, sizeof(size_t));
			write(fd_secure_input, specInfo->pData, specInfo->dataSize);

			write(fd_secure_input, &specInfo->mapEntryCount, sizeof(uint32_t));
			for (uint32_t j = 0; j < specInfo->mapEntryCount; j++)
				write(fd_secure_input, &specInfo->pMapEntries[j], sizeof(VkSpecializationMapEntry));
		}
	}

	/* Read the data returned from the secure process */
	while (sc_type != RADV_SC_TYPE_COMPILE_PIPELINE_FINISHED) {
		if (!radv_sc_read(fd_secure_output, &sc_type, sizeof(sc_type), true))
			return VK_ERROR_DEVICE_LOST;

		if (sc_type == RADV_SC_TYPE_WRITE_DISK_CACHE) {
			assert(device->physical_device->disk_cache);

			uint8_t disk_sha1[20];
			if (!radv_sc_read(fd_secure_output, disk_sha1, sizeof(uint8_t) * 20, true))
				return VK_ERROR_DEVICE_LOST;

			if (memcmp(disk_sha1, allowed_hashes[0], 20) &&
			    memcmp(disk_sha1, allowed_hashes[1], 20))
				return VK_ERROR_DEVICE_LOST;

			uint32_t entry_size;
			if (!radv_sc_read(fd_secure_output, &entry_size, sizeof(uint32_t), true))
				return VK_ERROR_DEVICE_LOST;

			struct cache_entry *entry = malloc(entry_size);
			if (!radv_sc_read(fd_secure_output, entry, entry_size, true))
				return VK_ERROR_DEVICE_LOST;

			disk_cache_put(device->physical_device->disk_cache,
				       disk_sha1, entry, entry_size, NULL);

			free(entry);
		} else if (sc_type == RADV_SC_TYPE_READ_DISK_CACHE) {
			uint8_t disk_sha1[20];
			if (!radv_sc_read(fd_secure_output, disk_sha1, sizeof(uint8_t) * 20, true))
				return VK_ERROR_DEVICE_LOST;

			if (memcmp(disk_sha1, allowed_hashes[0], 20) &&
			    memcmp(disk_sha1, allowed_hashes[1], 20))
				return VK_ERROR_DEVICE_LOST;

			size_t size;
			struct cache_entry *entry = (struct cache_entry *)
				disk_cache_get(device->physical_device->disk_cache,
					       disk_sha1, &size);

			uint8_t found = entry ? 1 : 0;
			write(fd_secure_input, &found, sizeof(uint8_t));

			if (found) {
				write(fd_secure_input, &size, sizeof(size_t));
				write(fd_secure_input, entry, size);
			}

			free(entry);
		}
	}

	/* Tear down the forked device copy and release the compile process. */
	sc_type = RADV_SC_TYPE_DESTROY_DEVICE;
	write(fd_secure_input, &sc_type, sizeof(sc_type));

	mtx_lock(&device->sc_state->secure_compile_mutex);
	device->sc_state->secure_compile_thread_counter--;
	device->sc_state->secure_compile_processes[process].in_use = false;
	mtx_unlock(&device->sc_state->secure_compile_mutex);

	return VK_SUCCESS;
}
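
/* Graphics pipeline initialization: compile the shaders (directly or via the
 * secure compile path), then derive the fixed-function state (blend,
 * multisample, dynamic state, IA_MULTI_VGT_PARAM, vertex input, user SGPR
 * locations) and finally emit the pipeline's PM4 state packets.
 */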
static VkResult
radv_pipeline_init(struct radv_pipeline *pipeline,
		   struct radv_device *device,
		   struct radv_pipeline_cache *cache,
		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   const struct radv_graphics_pipeline_create_info *extra)
{
	VkResult result;
	bool has_view_index = false;

	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	if (subpass->view_mask)
		has_view_index = true;

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	struct radv_blend_state blend = radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;

	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
		pStages[stage] = &pCreateInfo->pStages[i];
		if (creation_feedback)
			stage_feedbacks[stage] = &creation_feedback->pPipelineStageCreationFeedbacks[i];
	}

	struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend, has_view_index);
	if (radv_device_use_secure_compile(device->instance)) {
		return radv_secure_compile(pipeline, device, &key, pStages, pCreateInfo->flags, pCreateInfo->stageCount);
	} else {
		radv_create_shaders(pipeline, device, cache, &key, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);
	}

	pipeline->graphics.spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	radv_pipeline_init_multisample_state(pipeline, &blend, pCreateInfo);
	uint32_t gs_out;
	uint32_t prim = si_translate_prim(pCreateInfo->pInputAssemblyState->topology);

	pipeline->graphics.topology = pCreateInfo->pInputAssemblyState->topology;
	pipeline->graphics.can_use_guardband = radv_prim_can_use_guardband(pCreateInfo->pInputAssemblyState->topology);

	if (radv_pipeline_has_gs(pipeline)) {
		gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
		pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	} else if (radv_pipeline_has_tess(pipeline)) {
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.point_mode)
			gs_out = V_028A6C_OUTPRIM_TYPE_POINTLIST;
		else
			gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.primitive_mode);
		pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	} else {
		gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
	}

	if (extra && extra->use_rectlist) {
		prim = V_008958_DI_PT_RECTLIST;
		gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
		pipeline->graphics.can_use_guardband = true;
		if (radv_pipeline_has_ngg(pipeline))
			gs_out = V_028A6C_VGT_OUT_RECT_V0;
	}

	pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;
	/* prim vertex count will need TESS changes */
	pipeline->graphics.prim_vertex_count = prim_size_table[prim];

	radv_pipeline_init_dynamic_state(pipeline, pCreateInfo);

	/* Ensure that some export memory is always allocated, for two reasons:
	 *
	 * 1) Correctness: The hardware ignores the EXEC mask if no export
	 *    memory is allocated, so KILL and alpha test do not work correctly
	 *    without this.
	 * 2) Performance: Every shader needs at least a NULL export, even when
	 *    it writes no color/depth output. The NULL export instruction
	 *    stalls without this setting.
	 *
	 * Don't add this to CB_SHADER_MASK.
	 *
	 * GFX10 supports pixel shaders without exports by setting both the
	 * color and Z formats to SPI_SHADER_ZERO. The hw will skip export
	 * instructions if any are present.
	 */
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	if ((pipeline->device->physical_device->rad_info.chip_class <= GFX9 ||
	     ps->info.ps.can_discard) &&
	    !blend.spi_shader_col_format) {
		if (!ps->info.ps.writes_z &&
		    !ps->info.ps.writes_stencil &&
		    !ps->info.ps.writes_sample_mask)
			blend.spi_shader_col_format = V_028714_SPI_SHADER_32_R;
	}

	for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
		if (pipeline->shaders[i]) {
			pipeline->need_indirect_descriptor_sets |= pipeline->shaders[i]->info.need_indirect_descriptor_sets;
		}
	}

	if (radv_pipeline_has_gs(pipeline) && !radv_pipeline_has_ngg(pipeline)) {
		struct radv_shader_variant *gs =
			pipeline->shaders[MESA_SHADER_GEOMETRY];

		calculate_gs_ring_sizes(pipeline, &gs->info.gs_ring_info);
	}

	struct radv_tessellation_state tess = {0};
	if (radv_pipeline_has_tess(pipeline)) {
		if (prim == V_008958_DI_PT_PATCH) {
			pipeline->graphics.prim_vertex_count.min = pCreateInfo->pTessellationState->patchControlPoints;
			pipeline->graphics.prim_vertex_count.incr = 1;
		}
		tess = calculate_tess_state(pipeline, pCreateInfo);
	}

	pipeline->graphics.ia_multi_vgt_param = radv_compute_ia_multi_vgt_param_helpers(pipeline, &tess, prim);

	radv_compute_vertex_input_state(pipeline, pCreateInfo);

	for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
		pipeline->user_data_0[i] = radv_pipeline_stage_to_user_data_0(pipeline, i, device->physical_device->rad_info.chip_class);

	struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX,
							       AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	if (loc->sgpr_idx != -1) {
		pipeline->graphics.vtx_base_sgpr = pipeline->user_data_0[MESA_SHADER_VERTEX];
		pipeline->graphics.vtx_base_sgpr += loc->sgpr_idx * 4;
		if (radv_get_shader(pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id)
			pipeline->graphics.vtx_emit_num = 3;
		else
			pipeline->graphics.vtx_emit_num = 2;
	}

	/* Find the last vertex shader stage that eventually uses streamout. */
	pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);

	result = radv_pipeline_scratch_init(device, pipeline);
	radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, prim, gs_out);

	return result;
}
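
/* Allocate and initialize a radv_pipeline object; on failure the partially
 * built pipeline is destroyed before the error is returned.
 */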
VkResult
radv_graphics_pipeline_create(
	VkDevice _device,
	VkPipelineCache _cache,
	const VkGraphicsPipelineCreateInfo *pCreateInfo,
	const struct radv_graphics_pipeline_create_info *extra,
	const VkAllocationCallbacks *pAllocator,
	VkPipeline *pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	result = radv_pipeline_init(pipeline, device, cache,
				    pCreateInfo, extra);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}
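
/* vkCreateGraphicsPipelines entry point. A pipeline that fails to build is
 * returned as VK_NULL_HANDLE and the loop continues, so one bad create info
 * does not prevent the remaining pipelines from being created.
 */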
VkResult radv_CreateGraphicsPipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkGraphicsPipelineCreateInfo*         pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_graphics_pipeline_create(_device,
						  pipelineCache,
						  &pCreateInfos[i],
						  NULL, pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}
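
/* Emit the PM4 packets that bind a compute pipeline: shader program address,
 * RSRC1/RSRC2 (plus RSRC3 on GFX10), the compute resource limits and the
 * threadgroup dimensions.
 */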
static void
radv_compute_generate_pm4(struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *compute_shader;
	struct radv_device *device = pipeline->device;
	unsigned threads_per_threadgroup;
	unsigned threadgroups_per_cu = 1;
	unsigned waves_per_threadgroup;
	unsigned max_waves_per_sh = 0;
	uint64_t va;

	pipeline->cs.max_dw = device->physical_device->rad_info.chip_class >= GFX10 ? 22 : 20;
	pipeline->cs.buf = malloc(pipeline->cs.max_dw * 4);

	compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(&pipeline->cs, va >> 8);
	radeon_emit(&pipeline->cs, S_00B834_DATA(va >> 40));

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(&pipeline->cs, compute_shader->config.rsrc1);
	radeon_emit(&pipeline->cs, compute_shader->config.rsrc2);
	if (device->physical_device->rad_info.chip_class >= GFX10) {
		radeon_set_sh_reg(&pipeline->cs, R_00B8A0_COMPUTE_PGM_RSRC3, compute_shader->config.rsrc3);
	}

	/* Calculate best compute resource limits. */
	threads_per_threadgroup = compute_shader->info.cs.block_size[0] *
				  compute_shader->info.cs.block_size[1] *
				  compute_shader->info.cs.block_size[2];
	waves_per_threadgroup = DIV_ROUND_UP(threads_per_threadgroup,
					     compute_shader->info.wave_size);

	if (device->physical_device->rad_info.chip_class >= GFX10 &&
	    waves_per_threadgroup == 1)
		threadgroups_per_cu = 2;

	radeon_set_sh_reg(&pipeline->cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  ac_get_compute_resource_limits(&device->physical_device->rad_info,
							 waves_per_threadgroup,
							 max_waves_per_sh,
							 threadgroups_per_cu));

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));

	assert(pipeline->cs.cdw <= pipeline->cs.max_dw);
}
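
/* Build the compiler key for a compute pipeline: the optimization opt-out
 * flag and, when VK_EXT_subgroup_size_control supplies one, the required
 * subgroup size (Wave32 or Wave64).
 */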
static struct radv_pipeline_key
radv_generate_compute_pipeline_key(struct radv_pipeline *pipeline,
				   const VkComputePipelineCreateInfo *pCreateInfo)
{
	const VkPipelineShaderStageCreateInfo *stage = &pCreateInfo->stage;
	struct radv_pipeline_key key;
	memset(&key, 0, sizeof(key));

	if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
		key.optimisations_disabled = 1;

	const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *subgroup_size =
		vk_find_struct_const(stage->pNext,
				     PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);

	if (subgroup_size) {
		assert(subgroup_size->requiredSubgroupSize == 32 ||
		       subgroup_size->requiredSubgroupSize == 64);
		key.compute_subgroup_size = subgroup_size->requiredSubgroupSize;
	}

	return key;
}
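
/* Compute pipeline creation: allocate the pipeline, compile the single
 * compute stage (optionally through the secure compile path), set up scratch
 * and emit the compute PM4 state.
 */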
static VkResult radv_compute_pipeline_create(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkComputePipelineCreateInfo*          pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;
	if (creation_feedback)
		stage_feedbacks[MESA_SHADER_COMPUTE] = &creation_feedback->pPipelineStageCreationFeedbacks[0];

	pStages[MESA_SHADER_COMPUTE] = &pCreateInfo->stage;

	struct radv_pipeline_key key =
		radv_generate_compute_pipeline_key(pipeline, pCreateInfo);

	if (radv_device_use_secure_compile(device->instance)) {
		result = radv_secure_compile(pipeline, device, &key, pStages, pCreateInfo->flags, 1);
		*pPipeline = radv_pipeline_to_handle(pipeline);

		return result;
	} else {
		radv_create_shaders(pipeline, device, cache, &key, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);
	}

	pipeline->user_data_0[MESA_SHADER_COMPUTE] = radv_pipeline_stage_to_user_data_0(pipeline, MESA_SHADER_COMPUTE, device->physical_device->rad_info.chip_class);
	pipeline->need_indirect_descriptor_sets |= pipeline->shaders[MESA_SHADER_COMPUTE]->info.need_indirect_descriptor_sets;
	result = radv_pipeline_scratch_init(device, pipeline);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	radv_compute_generate_pm4(pipeline);

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}
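
/* vkCreateComputePipelines entry point; mirrors radv_CreateGraphicsPipelines,
 * returning VK_NULL_HANDLE for any pipeline that failed to build.
 */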
VkResult radv_CreateComputePipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkComputePipelineCreateInfo*          pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;

	unsigned i = 0;
	for (; i < count; i++) {
		VkResult r;
		r = radv_compute_pipeline_create(_device, pipelineCache,
						 &pCreateInfos[i],
						 pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}
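
/* Helpers for VK_KHR_pipeline_executable_properties. A geometry shader that
 * does not run as NGG also has a separate GS copy shader, so it counts as two
 * executables, and the index-to-shader mapping below applies the same rule.
 */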
static uint32_t radv_get_executable_count(const struct radv_pipeline *pipeline)
{
	uint32_t ret = 0;
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!pipeline->shaders[i])
			continue;

		if (i == MESA_SHADER_GEOMETRY &&
		    !radv_pipeline_has_ngg(pipeline)) {
			ret += 2u;
		} else {
			ret += 1u;
		}
	}
	return ret;
}

static struct radv_shader_variant *
radv_get_shader_from_executable_index(const struct radv_pipeline *pipeline, int index, gl_shader_stage *stage)
{
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!pipeline->shaders[i])
			continue;
		if (!index) {
			*stage = i;
			return pipeline->shaders[i];
		}

		--index;

		if (i == MESA_SHADER_GEOMETRY &&
		    !radv_pipeline_has_ngg(pipeline)) {
			if (!index) {
				*stage = i;
				return pipeline->gs_copy_shader;
			}
			--index;
		}
	}

	*stage = MESA_SHADER_COMPUTE;
	return NULL;
}

/* Basically strlcpy (which does not exist on linux) specialized for
 * descriptions. */
static void desc_copy(char *desc, const char *src) {
	int len = strlen(src);
	assert(len < VK_MAX_DESCRIPTION_SIZE);
	memcpy(desc, src, len);
	memset(desc + len, 0, VK_MAX_DESCRIPTION_SIZE - len);
}
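
/* Report one pipeline executable per compiled shader variant. When hardware
 * stages are merged (e.g. the vertex shader compiled into the TCS or GS),
 * a single executable is reported covering both Vulkan stages, and the
 * non-NGG GS copy shader is reported as an extra executable of its own.
 */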
VkResult radv_GetPipelineExecutablePropertiesKHR(
	VkDevice                                    _device,
	const VkPipelineInfoKHR*                    pPipelineInfo,
	uint32_t*                                   pExecutableCount,
	VkPipelineExecutablePropertiesKHR*          pProperties)
{
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelineInfo->pipeline);
	const uint32_t total_count = radv_get_executable_count(pipeline);

	if (!pProperties) {
		*pExecutableCount = total_count;
		return VK_SUCCESS;
	}

	const uint32_t count = MIN2(total_count, *pExecutableCount);
	for (unsigned i = 0, executable_idx = 0;
	     i < MESA_SHADER_STAGES && executable_idx < count; ++i) {
		if (!pipeline->shaders[i])
			continue;

		pProperties[executable_idx].stages = mesa_to_vk_shader_stage(i);
		const char *name = NULL;
		const char *description = NULL;
		switch (i) {
		case MESA_SHADER_VERTEX:
			name = "Vertex Shader";
			description = "Vulkan Vertex Shader";
			break;
		case MESA_SHADER_TESS_CTRL:
			if (!pipeline->shaders[MESA_SHADER_VERTEX]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_VERTEX_BIT;
				name = "Vertex + Tessellation Control Shaders";
				description = "Combined Vulkan Vertex and Tessellation Control Shaders";
			} else {
				name = "Tessellation Control Shader";
				description = "Vulkan Tessellation Control Shader";
			}
			break;
		case MESA_SHADER_TESS_EVAL:
			name = "Tessellation Evaluation Shader";
			description = "Vulkan Tessellation Evaluation Shader";
			break;
		case MESA_SHADER_GEOMETRY:
			if (radv_pipeline_has_tess(pipeline) && !pipeline->shaders[MESA_SHADER_TESS_EVAL]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
				name = "Tessellation Evaluation + Geometry Shaders";
				description = "Combined Vulkan Tessellation Evaluation and Geometry Shaders";
			} else if (!radv_pipeline_has_tess(pipeline) && !pipeline->shaders[MESA_SHADER_VERTEX]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_VERTEX_BIT;
				name = "Vertex + Geometry Shader";
				description = "Combined Vulkan Vertex and Geometry Shaders";
			} else {
				name = "Geometry Shader";
				description = "Vulkan Geometry Shader";
			}
			break;
		case MESA_SHADER_FRAGMENT:
			name = "Fragment Shader";
			description = "Vulkan Fragment Shader";
			break;
		case MESA_SHADER_COMPUTE:
			name = "Compute Shader";
			description = "Vulkan Compute Shader";
			break;
		}

		pProperties[executable_idx].subgroupSize = pipeline->shaders[i]->info.wave_size;
		desc_copy(pProperties[executable_idx].name, name);
		desc_copy(pProperties[executable_idx].description, description);

		++executable_idx;
		if (i == MESA_SHADER_GEOMETRY &&
		    !radv_pipeline_has_ngg(pipeline)) {
			assert(pipeline->gs_copy_shader);
			if (executable_idx >= count)
				break;

			pProperties[executable_idx].stages = VK_SHADER_STAGE_GEOMETRY_BIT;
			pProperties[executable_idx].subgroupSize = 64;
			desc_copy(pProperties[executable_idx].name, "GS Copy Shader");
			desc_copy(pProperties[executable_idx].description,
				  "Extra shader stage that loads the GS output ringbuffer into the rasterizer");

			++executable_idx;
		}
	}

	VkResult result = *pExecutableCount < total_count ? VK_INCOMPLETE : VK_SUCCESS;
	*pExecutableCount = count;
	return result;
}
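
/* Fill in the per-executable statistics (register counts, spills, code size,
 * LDS and scratch usage, max subgroups per SIMD). Each entry is only written
 * when it fits in the caller's array, but the cursor always advances so the
 * final count and the VK_INCOMPLETE result fall out of the pointer
 * arithmetic at the end.
 */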
VkResult radv_GetPipelineExecutableStatisticsKHR(
	VkDevice                                    _device,
	const VkPipelineExecutableInfoKHR*          pExecutableInfo,
	uint32_t*                                   pStatisticCount,
	VkPipelineExecutableStatisticKHR*           pStatistics)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
	gl_shader_stage stage;
	struct radv_shader_variant *shader = radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);

	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
	unsigned max_waves = radv_get_max_waves(device, shader, stage);

	VkPipelineExecutableStatisticKHR *s = pStatistics;
	VkPipelineExecutableStatisticKHR *end = s + (pStatistics ? *pStatisticCount : 0);
	VkResult result = VK_SUCCESS;

	if (s < end) {
		desc_copy(s->name, "SGPRs");
		desc_copy(s->description, "Number of SGPR registers allocated per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.num_sgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "VGPRs");
		desc_copy(s->description, "Number of VGPR registers allocated per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.num_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Spilled SGPRs");
		desc_copy(s->description, "Number of SGPR registers spilled per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.spilled_sgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Spilled VGPRs");
		desc_copy(s->description, "Number of VGPR registers spilled per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.spilled_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "PrivMem VGPRs");
		desc_copy(s->description, "Number of VGPRs stored in private memory per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->info.private_mem_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Code size");
		desc_copy(s->description, "Code size in bytes");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->exec_size;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "LDS size");
		desc_copy(s->description, "LDS size in bytes per workgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.lds_size * lds_increment;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Scratch size");
		desc_copy(s->description, "Private memory in bytes per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.scratch_bytes_per_wave;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Subgroups per SIMD");
		desc_copy(s->description, "The maximum number of subgroups in flight on a SIMD unit");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = max_waves;
	}
	++s;

	if (!pStatistics)
		*pStatisticCount = s - pStatistics;
	else if (s > end) {
		*pStatisticCount = end - pStatistics;
		result = VK_INCOMPLETE;
	} else {
		*pStatisticCount = s - pStatistics;
	}

	return result;
}
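
/* Copy a NUL-terminated string into a caller-provided buffer following the
 * usual Vulkan two-call idiom: with a NULL buffer only the required size is
 * returned; otherwise the string is truncated to fit and VK_INCOMPLETE is
 * returned when it did not fit completely.
 */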
static VkResult radv_copy_representation(void *data, size_t *data_size, const char *src)
{
	size_t total_size = strlen(src) + 1;

	if (!data) {
		*data_size = total_size;
		return VK_SUCCESS;
	}

	size_t size = MIN2(total_size, *data_size);

	memcpy(data, src, size);
	if (size)
		*((char*)data + size - 1) = 0;
	return size < total_size ? VK_INCOMPLETE : VK_SUCCESS;
}
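
/* Expose the intermediate representations kept on the shader variant: the
 * optimized NIR, the backend IR (ACO or LLVM, depending on which compiler
 * was used) and the final disassembly.
 */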
VkResult radv_GetPipelineExecutableInternalRepresentationsKHR(
	VkDevice                                    _device,
	const VkPipelineExecutableInfoKHR*          pExecutableInfo,
	uint32_t*                                   pInternalRepresentationCount,
	VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
{
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
	gl_shader_stage stage;
	struct radv_shader_variant *shader = radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);

	VkPipelineExecutableInternalRepresentationKHR *p = pInternalRepresentations;
	VkPipelineExecutableInternalRepresentationKHR *end = p + (pInternalRepresentations ? *pInternalRepresentationCount : 0);
	VkResult result = VK_SUCCESS;

	/* optimized NIR */
	if (p < end) {
		p->isText = true;
		desc_copy(p->name, "NIR Shader(s)");
		desc_copy(p->description, "The optimized NIR shader(s)");
		if (radv_copy_representation(p->pData, &p->dataSize, shader->nir_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	/* backend IR */
	if (p < end) {
		p->isText = true;
		if (pipeline->device->physical_device->use_aco) {
			desc_copy(p->name, "ACO IR");
			desc_copy(p->description, "The ACO IR after some optimizations");
		} else {
			desc_copy(p->name, "LLVM IR");
			desc_copy(p->description, "The LLVM IR after some optimizations");
		}
		if (radv_copy_representation(p->pData, &p->dataSize, shader->ir_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	/* Disassembler */
	if (p < end) {
		p->isText = true;
		desc_copy(p->name, "Assembly");
		desc_copy(p->description, "Final Assembly");
		if (radv_copy_representation(p->pData, &p->dataSize, shader->disasm_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	if (!pInternalRepresentations)
		*pInternalRepresentationCount = p - pInternalRepresentations;
	else if (p > end) {
		result = VK_INCOMPLETE;
		*pInternalRepresentationCount = end - pInternalRepresentations;
	} else {
		*pInternalRepresentationCount = p - pInternalRepresentations;
	}

	return result;
}