 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"

#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"
#include "vk_util.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"
#include "main/menums.h"
struct radv_blend_state {
	uint32_t blend_enable_4bit;
	uint32_t need_src_alpha;

	uint32_t cb_color_control;
	uint32_t cb_target_mask;
	uint32_t cb_target_enabled_4bit;
	uint32_t sx_mrt_blend_opt[8];
	uint32_t cb_blend_control[8];

	uint32_t spi_shader_col_format;
	uint32_t cb_shader_mask;
	uint32_t db_alpha_to_mask;

	uint32_t commutative_4bit;

	bool single_cb_enable;
	bool mrt0_is_dual_src;
};
struct radv_dsa_order_invariance {
	/* Whether the final result in Z/S buffers is guaranteed to be
	 * invariant under changes to the order in which fragments arrive.
	 */
	bool zs;

	/* Whether the set of fragments that pass the combined Z/S test is
	 * guaranteed to be invariant under changes to the order in which
	 * fragments arrive.
	 */
	bool pass_set;
};
struct radv_tessellation_state {
	uint32_t ls_hs_config;
};
struct radv_gs_state {
	uint32_t vgt_gs_onchip_cntl;
	uint32_t vgt_gs_max_prims_per_subgroup;
	uint32_t vgt_esgs_ring_itemsize;
	uint32_t lds_size;
};
struct radv_ngg_state {
	uint16_t ngg_emit_size; /* in dwords */
	uint32_t hw_max_esverts;
	uint32_t max_gsprims;
	uint32_t max_out_verts;
	uint32_t prim_amp_factor;
	uint32_t vgt_esgs_ring_itemsize;
	bool max_vert_out_per_gs_instance;
};
bool radv_pipeline_has_ngg(const struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *variant = NULL;
	if (pipeline->shaders[MESA_SHADER_GEOMETRY])
		variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
	else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
		variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
	else if (pipeline->shaders[MESA_SHADER_VERTEX])
		variant = pipeline->shaders[MESA_SHADER_VERTEX];
	else
		return false;

	return variant->info.is_ngg;
}
static void
radv_pipeline_destroy(struct radv_device *device,
		      struct radv_pipeline *pipeline,
		      const VkAllocationCallbacks *allocator)
{
	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i)
		if (pipeline->shaders[i])
			radv_shader_variant_destroy(device, pipeline->shaders[i]);

	if (pipeline->gs_copy_shader)
		radv_shader_variant_destroy(device, pipeline->gs_copy_shader);

	free(pipeline->cs.buf);
	vk_free2(&device->alloc, allocator, pipeline);
}
void radv_DestroyPipeline(
	VkDevice                                    _device,
	VkPipeline                                  _pipeline,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	if (!pipeline)
		return;

	radv_pipeline_destroy(device, pipeline, pAllocator);
}
static uint32_t get_hash_flags(struct radv_device *device)
{
	uint32_t hash_flags = 0;

	if (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH)
		hash_flags |= RADV_HASH_SHADER_UNSAFE_MATH;
	if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
		hash_flags |= RADV_HASH_SHADER_SISCHED;

	return hash_flags;
}
static VkResult
radv_pipeline_scratch_init(struct radv_device *device,
			   struct radv_pipeline *pipeline)
{
	unsigned scratch_bytes_per_wave = 0;
	unsigned max_waves = 0;
	unsigned min_waves = 1;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i]) {
			unsigned max_stage_waves = device->scratch_waves;

			scratch_bytes_per_wave = MAX2(scratch_bytes_per_wave,
						      pipeline->shaders[i]->config.scratch_bytes_per_wave);

			max_stage_waves = MIN2(max_stage_waves,
					       4 * device->physical_device->rad_info.num_good_compute_units *
					       (256 / pipeline->shaders[i]->config.num_vgprs));
			max_waves = MAX2(max_waves, max_stage_waves);
		}
	}

	if (pipeline->shaders[MESA_SHADER_COMPUTE]) {
		unsigned group_size = pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[0] *
				      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[1] *
				      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[2];
		min_waves = MAX2(min_waves, round_up_u32(group_size, 64));
	}

	if (scratch_bytes_per_wave)
		max_waves = MIN2(max_waves, 0xffffffffu / scratch_bytes_per_wave);

	if (scratch_bytes_per_wave && max_waves < min_waves) {
		/* Not really true at this moment, but will be true on first
		 * execution. Avoid having hanging shaders. */
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}
	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
	pipeline->max_waves = max_waves;
	return VK_SUCCESS;
}
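
/* Illustrative walk-through of the limits above (hypothetical numbers, not
 * tied to any particular ASIC): with 60 compute units and a stage that uses
 * 32 VGPRs, that stage allows at most 4 * 60 * (256 / 32) = 1920 waves, and
 * the smallest such per-stage value becomes max_waves. A compute shader with
 * an 8x8x1 workgroup needs round_up_u32(64, 64) = 1 wave as min_waves.
 */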
static uint32_t si_translate_blend_logic_op(VkLogicOp op)
{
	switch (op) {
	case VK_LOGIC_OP_CLEAR:
		return V_028808_ROP3_CLEAR;
	case VK_LOGIC_OP_AND:
		return V_028808_ROP3_AND;
	case VK_LOGIC_OP_AND_REVERSE:
		return V_028808_ROP3_AND_REVERSE;
	case VK_LOGIC_OP_COPY:
		return V_028808_ROP3_COPY;
	case VK_LOGIC_OP_AND_INVERTED:
		return V_028808_ROP3_AND_INVERTED;
	case VK_LOGIC_OP_NO_OP:
		return V_028808_ROP3_NO_OP;
	case VK_LOGIC_OP_XOR:
		return V_028808_ROP3_XOR;
	case VK_LOGIC_OP_OR:
		return V_028808_ROP3_OR;
	case VK_LOGIC_OP_NOR:
		return V_028808_ROP3_NOR;
	case VK_LOGIC_OP_EQUIVALENT:
		return V_028808_ROP3_EQUIVALENT;
	case VK_LOGIC_OP_INVERT:
		return V_028808_ROP3_INVERT;
	case VK_LOGIC_OP_OR_REVERSE:
		return V_028808_ROP3_OR_REVERSE;
	case VK_LOGIC_OP_COPY_INVERTED:
		return V_028808_ROP3_COPY_INVERTED;
	case VK_LOGIC_OP_OR_INVERTED:
		return V_028808_ROP3_OR_INVERTED;
	case VK_LOGIC_OP_NAND:
		return V_028808_ROP3_NAND;
	case VK_LOGIC_OP_SET:
		return V_028808_ROP3_SET;
	default:
		unreachable("Unhandled logic op");
	}
}
static uint32_t si_translate_blend_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case VK_BLEND_OP_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case VK_BLEND_OP_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case VK_BLEND_OP_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		return 0;
	}
}
static uint32_t si_translate_blend_factor(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case VK_BLEND_FACTOR_ONE:
		return V_028780_BLEND_ONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case VK_BLEND_FACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case VK_BLEND_FACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case VK_BLEND_FACTOR_CONSTANT_COLOR:
		return V_028780_BLEND_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
		return V_028780_BLEND_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case VK_BLEND_FACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case VK_BLEND_FACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		return 0;
	}
}
static uint32_t si_translate_blend_opt_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028760_OPT_COMB_ADD;
	case VK_BLEND_OP_SUBTRACT:
		return V_028760_OPT_COMB_SUBTRACT;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028760_OPT_COMB_REVSUBTRACT;
	case VK_BLEND_OP_MIN:
		return V_028760_OPT_COMB_MIN;
	case VK_BLEND_OP_MAX:
		return V_028760_OPT_COMB_MAX;
	default:
		return V_028760_OPT_COMB_BLEND_DISABLED;
	}
}
static uint32_t si_translate_blend_opt_factor(VkBlendFactor factor, bool is_alpha)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_ALL;
	case VK_BLEND_FACTOR_ONE:
		return V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0
				: V_028760_BLEND_OPT_PRESERVE_C1_IGNORE_C0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1
				: V_028760_BLEND_OPT_PRESERVE_C0_IGNORE_C1;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE
				: V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;
	default:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
	}
}
/**
 * Get rid of DST in the blend factors by commuting the operands:
 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
 */
static void si_blend_remove_dst(unsigned *func, unsigned *src_factor,
				unsigned *dst_factor, unsigned expected_dst,
				unsigned replacement_src)
{
	if (*src_factor == expected_dst &&
	    *dst_factor == VK_BLEND_FACTOR_ZERO) {
		*src_factor = VK_BLEND_FACTOR_ZERO;
		*dst_factor = replacement_src;

		/* Commuting the operands requires reversing subtractions. */
		if (*func == VK_BLEND_OP_SUBTRACT)
			*func = VK_BLEND_OP_REVERSE_SUBTRACT;
		else if (*func == VK_BLEND_OP_REVERSE_SUBTRACT)
			*func = VK_BLEND_OP_SUBTRACT;
	}
}
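
/* Concrete instance of the transform above: (SUBTRACT, src * DST_COLOR,
 * dst * ZERO) becomes (REVERSE_SUBTRACT, src * ZERO, dst * SRC_COLOR); both
 * evaluate to src * dst, so the result is unchanged while DST disappears
 * from the blend factors.
 */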
static bool si_blend_factor_uses_dst(unsigned factor)
{
	return factor == VK_BLEND_FACTOR_DST_COLOR ||
		factor == VK_BLEND_FACTOR_DST_ALPHA ||
		factor == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		factor == VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA ||
		factor == VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
}
static bool is_dual_src(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_SRC1_COLOR:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
	case VK_BLEND_FACTOR_SRC1_ALPHA:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return true;
	default:
		return false;
	}
}
static unsigned si_choose_spi_color_format(VkFormat vk_format,
					   bool blend_enable,
					   bool blend_need_alpha)
{
	const struct vk_format_description *desc = vk_format_description(vk_format);
	unsigned format, ntype, swap;

	/* Alpha is needed for alpha-to-coverage.
	 * Blending may be with or without alpha.
	 */
	unsigned normal = 0; /* most optimal, may not support blending or export alpha */
	unsigned alpha = 0; /* exports alpha, but may not support blending */
	unsigned blend = 0; /* supports blending, but may not export alpha */
	unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

	format = radv_translate_colorformat(vk_format);
	ntype = radv_translate_color_numformat(vk_format, desc,
					       vk_format_get_first_non_void_channel(vk_format));
	swap = radv_translate_colorswap(vk_format, false);

	/* Choose the SPI color formats. These are required values for Stoney/RB+.
	 * Other chips have multiple choices, though they are not necessarily better.
	 */
	switch (format) {
	case V_028C70_COLOR_5_6_5:
	case V_028C70_COLOR_1_5_5_5:
	case V_028C70_COLOR_5_5_5_1:
	case V_028C70_COLOR_4_4_4_4:
	case V_028C70_COLOR_10_11_11:
	case V_028C70_COLOR_11_11_10:
	case V_028C70_COLOR_8:
	case V_028C70_COLOR_8_8:
	case V_028C70_COLOR_8_8_8_8:
	case V_028C70_COLOR_10_10_10_2:
	case V_028C70_COLOR_2_10_10_10:
		if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_16:
	case V_028C70_COLOR_16_16:
	case V_028C70_COLOR_16_16_16_16:
		if (ntype == V_028C70_NUMBER_UNORM ||
		    ntype == V_028C70_NUMBER_SNORM) {
			/* UNORM16 and SNORM16 don't support blending */
			if (ntype == V_028C70_NUMBER_UNORM)
				normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
			else
				normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

			/* Use 32 bits per channel for blending. */
			if (format == V_028C70_COLOR_16) {
				if (swap == V_028C70_SWAP_STD) { /* R */
					blend = V_028714_SPI_SHADER_32_R;
					blend_alpha = V_028714_SPI_SHADER_32_AR;
				} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
			} else if (format == V_028C70_COLOR_16_16) {
				if (swap == V_028C70_SWAP_STD) { /* RG */
					blend = V_028714_SPI_SHADER_32_GR;
					blend_alpha = V_028714_SPI_SHADER_32_ABGR;
				} else if (swap == V_028C70_SWAP_ALT) /* RA */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
			} else /* 16_16_16_16 */
				blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_FLOAT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_32:
		if (swap == V_028C70_SWAP_STD) { /* R */
			blend = normal = V_028714_SPI_SHADER_32_R;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
		} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		break;

	case V_028C70_COLOR_32_32:
		if (swap == V_028C70_SWAP_STD) { /* RG */
			blend = normal = V_028714_SPI_SHADER_32_GR;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (swap == V_028C70_SWAP_ALT) /* RA */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		break;

	case V_028C70_COLOR_32_32_32_32:
	case V_028C70_COLOR_8_24:
	case V_028C70_COLOR_24_8:
	case V_028C70_COLOR_X24_8_32_FLOAT:
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
		break;

	default:
		unreachable("unhandled blend format");
	}

	if (blend_enable && blend_need_alpha)
		return blend_alpha;
	else if(blend_need_alpha)
		return alpha;
	else if(blend_enable)
		return blend;
	else
		return normal;
}
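
/* Example of the trade-off above (informal): for an R16G16B16A16_UNORM
 * target, "normal"/"alpha" stay at UNORM16_ABGR while "blend"/"blend_alpha"
 * switch to 32-bit exports, because UNORM16 exports cannot be blended.
 */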
static void
radv_pipeline_compute_spi_color_formats(struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					struct radv_blend_state *blend)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned col_format = 0;
	unsigned num_targets;

	for (unsigned i = 0; i < (blend->single_cb_enable ? 1 : subpass->color_count); ++i) {
		unsigned cf;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
			cf = V_028714_SPI_SHADER_ZERO;
		} else {
			struct radv_render_pass_attachment *attachment = pass->attachments + subpass->color_attachments[i].attachment;
			bool blend_enable =
				blend->blend_enable_4bit & (0xfu << (i * 4));

			cf = si_choose_spi_color_format(attachment->format,
							blend_enable,
							blend->need_src_alpha & (1 << i));
		}

		col_format |= cf << (4 * i);
	}

	if (!(col_format & 0xf) && blend->need_src_alpha & (1 << 0)) {
		/* When a subpass doesn't have any color attachments, write the
		 * alpha channel of MRT0 when alpha coverage is enabled because
		 * the depth attachment needs it.
		 */
		col_format |= V_028714_SPI_SHADER_32_AR;
	}

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	num_targets = (util_last_bit(col_format) + 3) / 4;
	for (unsigned i = 0; i < num_targets; i++) {
		if (!(col_format & (0xf << (i * 4)))) {
			col_format |= V_028714_SPI_SHADER_32_R << (i * 4);
		}
	}

	/* The output for dual source blending should have the same format as
	 * the first output.
	 */
	if (blend->mrt0_is_dual_src)
		col_format |= (col_format & 0xf) << 4;

	blend->cb_shader_mask = ac_get_cb_shader_mask(col_format);
	blend->spi_shader_col_format = col_format;
}
static bool
format_is_int8(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);
	int channel = vk_format_get_first_non_void_channel(format);

	return channel >= 0 && desc->channel[channel].pure_integer &&
	       desc->channel[channel].size == 8;
}
static bool
format_is_int10(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);

	if (desc->nr_channels != 4)
		return false;
	for (unsigned i = 0; i < 4; i++) {
		if (desc->channel[i].pure_integer && desc->channel[i].size == 10)
			return true;
	}
	return false;
}
/*
 * Ordered so that for each i,
 * radv_format_meta_fs_key(radv_fs_key_format_exemplars[i]) == i.
 */
const VkFormat radv_fs_key_format_exemplars[NUM_META_FS_KEYS] = {
	VK_FORMAT_R32_SFLOAT,
	VK_FORMAT_R32G32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UNORM,
	VK_FORMAT_R16G16B16A16_UNORM,
	VK_FORMAT_R16G16B16A16_SNORM,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_A2R10G10B10_UINT_PACK32,
	VK_FORMAT_A2R10G10B10_SINT_PACK32,
};
unsigned radv_format_meta_fs_key(VkFormat format)
{
	unsigned col_format = si_choose_spi_color_format(format, false, false);

	assert(col_format != V_028714_SPI_SHADER_32_AR);
	if (col_format >= V_028714_SPI_SHADER_32_AR)
		--col_format; /* Skip V_028714_SPI_SHADER_32_AR since there is no such VkFormat */

	--col_format; /* Skip V_028714_SPI_SHADER_ZERO */
	bool is_int8 = format_is_int8(format);
	bool is_int10 = format_is_int10(format);

	return col_format + (is_int8 ? 3 : is_int10 ? 5 : 0);
}
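
/* Worked example of the key arithmetic above (informal): VK_FORMAT_R8G8B8A8_UINT
 * selects the UINT16_ABGR export (7); skipping 32_AR gives 6, skipping
 * SPI_SHADER_ZERO gives 5, and the int8 offset of 3 lands on index 8, the
 * VK_FORMAT_R8G8B8A8_UINT slot in radv_fs_key_format_exemplars.
 */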
static void
radv_pipeline_compute_get_int_clamp(const VkGraphicsPipelineCreateInfo *pCreateInfo,
				    unsigned *is_int8, unsigned *is_int10)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	*is_int8 = 0;
	*is_int10 = 0;

	for (unsigned i = 0; i < subpass->color_count; ++i) {
		struct radv_render_pass_attachment *attachment;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
			continue;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		if (format_is_int8(attachment->format))
			*is_int8 |= 1 << i;
		if (format_is_int10(attachment->format))
			*is_int10 |= 1 << i;
	}
}
static void
radv_blend_check_commutativity(struct radv_blend_state *blend,
			       VkBlendOp op, VkBlendFactor src,
			       VkBlendFactor dst, unsigned chanmask)
{
	/* Src factor is allowed when it does not depend on Dst. */
	static const uint32_t src_allowed =
		(1u << VK_BLEND_FACTOR_ONE) |
		(1u << VK_BLEND_FACTOR_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA_SATURATE) |
		(1u << VK_BLEND_FACTOR_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC1_ALPHA) |
		(1u << VK_BLEND_FACTOR_ZERO) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);

	if (dst == VK_BLEND_FACTOR_ONE &&
	    (src_allowed & (1u << src))) {
		/* Addition is commutative, but floating point addition isn't
		 * associative: subtle changes can be introduced via different
		 * rounding. Be conservative, only enable for min and max.
		 */
		if (op == VK_BLEND_OP_MAX || op == VK_BLEND_OP_MIN)
			blend->commutative_4bit |= chanmask;
	}
}
static struct radv_blend_state
radv_pipeline_init_blend_state(struct radv_pipeline *pipeline,
			       const VkGraphicsPipelineCreateInfo *pCreateInfo,
			       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_blend_state blend = {0};
	unsigned mode = V_028808_CB_NORMAL;
	int i;

	if (extra && extra->custom_blend_mode) {
		blend.single_cb_enable = true;
		mode = extra->custom_blend_mode;
	}
	blend.cb_color_control = 0;
	if (vkblend->logicOpEnable)
		blend.cb_color_control |= S_028808_ROP3(si_translate_blend_logic_op(vkblend->logicOp));
	else
		blend.cb_color_control |= S_028808_ROP3(V_028808_ROP3_COPY);

	blend.db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(3) |
		S_028B70_ALPHA_TO_MASK_OFFSET1(1) |
		S_028B70_ALPHA_TO_MASK_OFFSET2(0) |
		S_028B70_ALPHA_TO_MASK_OFFSET3(2) |
		S_028B70_OFFSET_ROUND(1);

	if (vkms && vkms->alphaToCoverageEnable) {
		blend.db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);
		blend.need_src_alpha |= 0x1;
	}

	blend.cb_target_mask = 0;
	for (i = 0; i < vkblend->attachmentCount; i++) {
		const VkPipelineColorBlendAttachmentState *att = &vkblend->pAttachments[i];
		unsigned blend_cntl = 0;
		unsigned srcRGB_opt, dstRGB_opt, srcA_opt, dstA_opt;
		VkBlendOp eqRGB = att->colorBlendOp;
		VkBlendFactor srcRGB = att->srcColorBlendFactor;
		VkBlendFactor dstRGB = att->dstColorBlendFactor;
		VkBlendOp eqA = att->alphaBlendOp;
		VkBlendFactor srcA = att->srcAlphaBlendFactor;
		VkBlendFactor dstA = att->dstAlphaBlendFactor;

		blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);

		if (!att->colorWriteMask)
			continue;

		blend.cb_target_mask |= (unsigned)att->colorWriteMask << (4 * i);
		blend.cb_target_enabled_4bit |= 0xf << (4 * i);
		if (!att->blendEnable) {
			blend.cb_blend_control[i] = blend_cntl;
			continue;
		}

		if (is_dual_src(srcRGB) || is_dual_src(dstRGB) || is_dual_src(srcA) || is_dual_src(dstA))
			if (i == 0)
				blend.mrt0_is_dual_src = true;

		if (eqRGB == VK_BLEND_OP_MIN || eqRGB == VK_BLEND_OP_MAX) {
			srcRGB = VK_BLEND_FACTOR_ONE;
			dstRGB = VK_BLEND_FACTOR_ONE;
		}
		if (eqA == VK_BLEND_OP_MIN || eqA == VK_BLEND_OP_MAX) {
			srcA = VK_BLEND_FACTOR_ONE;
			dstA = VK_BLEND_FACTOR_ONE;
		}

		radv_blend_check_commutativity(&blend, eqRGB, srcRGB, dstRGB,
					       0x7 << (4 * i));
		radv_blend_check_commutativity(&blend, eqA, srcA, dstA,
					       0x8 << (4 * i));

		/* Blending optimizations for RB+.
		 * These transformations don't change the behavior.
		 *
		 * First, get rid of DST in the blend factors:
		 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
		 */
		si_blend_remove_dst(&eqRGB, &srcRGB, &dstRGB,
				    VK_BLEND_FACTOR_DST_COLOR,
				    VK_BLEND_FACTOR_SRC_COLOR);

		si_blend_remove_dst(&eqA, &srcA, &dstA,
				    VK_BLEND_FACTOR_DST_COLOR,
				    VK_BLEND_FACTOR_SRC_COLOR);

		si_blend_remove_dst(&eqA, &srcA, &dstA,
				    VK_BLEND_FACTOR_DST_ALPHA,
				    VK_BLEND_FACTOR_SRC_ALPHA);

		/* Look up the ideal settings from tables. */
		srcRGB_opt = si_translate_blend_opt_factor(srcRGB, false);
		dstRGB_opt = si_translate_blend_opt_factor(dstRGB, false);
		srcA_opt = si_translate_blend_opt_factor(srcA, true);
		dstA_opt = si_translate_blend_opt_factor(dstA, true);

		/* Handle interdependencies. */
		if (si_blend_factor_uses_dst(srcRGB))
			dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
		if (si_blend_factor_uses_dst(srcA))
			dstA_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE &&
		    (dstRGB == VK_BLEND_FACTOR_ZERO ||
		     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE))
			dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;

		/* Set the final value. */
		blend.sx_mrt_blend_opt[i] =
			S_028760_COLOR_SRC_OPT(srcRGB_opt) |
			S_028760_COLOR_DST_OPT(dstRGB_opt) |
			S_028760_COLOR_COMB_FCN(si_translate_blend_opt_function(eqRGB)) |
			S_028760_ALPHA_SRC_OPT(srcA_opt) |
			S_028760_ALPHA_DST_OPT(dstA_opt) |
			S_028760_ALPHA_COMB_FCN(si_translate_blend_opt_function(eqA));
		blend_cntl |= S_028780_ENABLE(1);

		blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
		blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(srcRGB));
		blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(dstRGB));
		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
			blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
			blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
			blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(srcA));
			blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(dstA));
		}
		blend.cb_blend_control[i] = blend_cntl;

		blend.blend_enable_4bit |= 0xfu << (i * 4);

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    srcRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
			blend.need_src_alpha |= 1 << i;
	}
	for (i = vkblend->attachmentCount; i < 8; i++) {
		blend.cb_blend_control[i] = 0;
		blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);
	}

	if (pipeline->device->physical_device->has_rbplus) {
		/* Disable RB+ blend optimizations for dual source blending. */
		if (blend.mrt0_is_dual_src) {
			for (i = 0; i < 8; i++) {
				blend.sx_mrt_blend_opt[i] =
					S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_NONE) |
					S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_NONE);
			}
		}

		/* RB+ doesn't work with dual source blending, logic op and
		 * RESOLVE.
		 */
		if (blend.mrt0_is_dual_src || vkblend->logicOpEnable ||
		    mode == V_028808_CB_RESOLVE)
			blend.cb_color_control |= S_028808_DISABLE_DUAL_QUAD(1);
	}

	if (blend.cb_target_mask)
		blend.cb_color_control |= S_028808_MODE(mode);
	else
		blend.cb_color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	radv_pipeline_compute_spi_color_formats(pipeline, pCreateInfo, &blend);
	return blend;
}
static uint32_t si_translate_stencil_op(enum VkStencilOp op)
{
	switch (op) {
	case VK_STENCIL_OP_KEEP:
		return V_02842C_STENCIL_KEEP;
	case VK_STENCIL_OP_ZERO:
		return V_02842C_STENCIL_ZERO;
	case VK_STENCIL_OP_REPLACE:
		return V_02842C_STENCIL_REPLACE_TEST;
	case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
		return V_02842C_STENCIL_ADD_CLAMP;
	case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
		return V_02842C_STENCIL_SUB_CLAMP;
	case VK_STENCIL_OP_INVERT:
		return V_02842C_STENCIL_INVERT;
	case VK_STENCIL_OP_INCREMENT_AND_WRAP:
		return V_02842C_STENCIL_ADD_WRAP;
	case VK_STENCIL_OP_DECREMENT_AND_WRAP:
		return V_02842C_STENCIL_SUB_WRAP;
	default:
		return 0;
	}
}
static uint32_t si_translate_fill(VkPolygonMode func)
{
	switch(func) {
	case VK_POLYGON_MODE_FILL:
		return V_028814_X_DRAW_TRIANGLES;
	case VK_POLYGON_MODE_LINE:
		return V_028814_X_DRAW_LINES;
	case VK_POLYGON_MODE_POINT:
		return V_028814_X_DRAW_POINTS;
	default:
		assert(0);
		return V_028814_X_DRAW_POINTS;
	}
}
static uint8_t radv_pipeline_get_ps_iter_samples(const VkPipelineMultisampleStateCreateInfo *vkms)
{
	uint32_t num_samples = vkms->rasterizationSamples;
	uint32_t ps_iter_samples = 1;

	if (vkms->sampleShadingEnable) {
		ps_iter_samples = ceil(vkms->minSampleShading * num_samples);
		ps_iter_samples = util_next_power_of_two(ps_iter_samples);
	}
	return ps_iter_samples;
}
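
/* Illustrative numbers for the computation above: minSampleShading = 0.3
 * with 8 rasterization samples gives ceil(2.4) = 3, rounded up to the next
 * power of two, so the fragment shader runs 4 times per pixel.
 */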
static bool
radv_is_depth_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->depthTestEnable &&
	       pCreateInfo->depthWriteEnable &&
	       pCreateInfo->depthCompareOp != VK_COMPARE_OP_NEVER;
}
static bool
radv_writes_stencil(const VkStencilOpState *state)
{
	return state->writeMask &&
	       (state->failOp != VK_STENCIL_OP_KEEP ||
		state->passOp != VK_STENCIL_OP_KEEP ||
		state->depthFailOp != VK_STENCIL_OP_KEEP);
}
static bool
radv_is_stencil_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->stencilTestEnable &&
	       (radv_writes_stencil(&pCreateInfo->front) ||
		radv_writes_stencil(&pCreateInfo->back));
}
static bool
radv_is_ds_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return radv_is_depth_write_enabled(pCreateInfo) ||
	       radv_is_stencil_write_enabled(pCreateInfo);
}
static bool
radv_order_invariant_stencil_op(VkStencilOp op)
{
	/* REPLACE is normally order invariant, except when the stencil
	 * reference value is written by the fragment shader. Tracking this
	 * interaction does not seem worth the effort, so be conservative.
	 */
	return op != VK_STENCIL_OP_INCREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_DECREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_REPLACE;
}
static bool
radv_order_invariant_stencil_state(const VkStencilOpState *state)
{
	/* Compute whether, assuming Z writes are disabled, this stencil state
	 * is order invariant in the sense that the set of passing fragments as
	 * well as the final stencil buffer result does not depend on the order
	 * of fragments.
	 */
	return !state->writeMask ||
	       /* The following assumes that Z writes are disabled. */
	       (state->compareOp == VK_COMPARE_OP_ALWAYS &&
		radv_order_invariant_stencil_op(state->passOp) &&
		radv_order_invariant_stencil_op(state->depthFailOp)) ||
	       (state->compareOp == VK_COMPARE_OP_NEVER &&
		radv_order_invariant_stencil_op(state->failOp));
}
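
/* Example against the predicate above: writeMask = 0xff with compareOp =
 * ALWAYS, passOp = INCREMENT_AND_WRAP and depthFailOp = KEEP is order
 * invariant (wrapping increments commute), while the same state with
 * passOp = REPLACE is not, since the last fragment wins.
 */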
static bool
radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
				struct radv_blend_state *blend,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned colormask = blend->cb_target_enabled_4bit;

	if (!pipeline->device->physical_device->out_of_order_rast_allowed)
		return false;

	/* Be conservative if a logic operation is enabled with color buffers. */
	if (colormask && pCreateInfo->pColorBlendState->logicOpEnable)
		return false;

	/* Default depth/stencil invariance when no attachment is bound. */
	struct radv_dsa_order_invariance dsa_order_invariant = {
		.zs = true, .pass_set = true
	};

	if (pCreateInfo->pDepthStencilState &&
	    subpass->depth_stencil_attachment) {
		const VkPipelineDepthStencilStateCreateInfo *vkds =
			pCreateInfo->pDepthStencilState;
		struct radv_render_pass_attachment *attachment =
			pass->attachments + subpass->depth_stencil_attachment->attachment;
		bool has_stencil = vk_format_is_stencil(attachment->format);
		struct radv_dsa_order_invariance order_invariance[2];
		struct radv_shader_variant *ps =
			pipeline->shaders[MESA_SHADER_FRAGMENT];

		/* Compute depth/stencil order invariance in order to know if
		 * it's safe to enable out-of-order.
		 */
		bool zfunc_is_ordered =
			vkds->depthCompareOp == VK_COMPARE_OP_NEVER ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS_OR_EQUAL ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER_OR_EQUAL;

		bool nozwrite_and_order_invariant_stencil =
			!radv_is_ds_write_enabled(vkds) ||
			(!radv_is_depth_write_enabled(vkds) &&
			 radv_order_invariant_stencil_state(&vkds->front) &&
			 radv_order_invariant_stencil_state(&vkds->back));

		order_invariance[1].zs =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 zfunc_is_ordered);
		order_invariance[0].zs =
			!radv_is_depth_write_enabled(vkds) || zfunc_is_ordered;

		order_invariance[1].pass_set =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 (vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			  vkds->depthCompareOp == VK_COMPARE_OP_NEVER));
		order_invariance[0].pass_set =
			!radv_is_depth_write_enabled(vkds) ||
			(vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			 vkds->depthCompareOp == VK_COMPARE_OP_NEVER);

		dsa_order_invariant = order_invariance[has_stencil];
		if (!dsa_order_invariant.zs)
			return false;

		/* The set of PS invocations is always order invariant,
		 * except when early Z/S tests are requested.
		 */
		if (ps &&
		    ps->info.info.ps.writes_memory &&
		    ps->info.fs.early_fragment_test &&
		    !dsa_order_invariant.pass_set)
			return false;

		/* Determine if out-of-order rasterization should be disabled
		 * when occlusion queries are used.
		 */
		pipeline->graphics.disable_out_of_order_rast_for_occlusion =
			!dsa_order_invariant.pass_set;
	}

	/* No color buffers are enabled for writing. */
	if (!colormask)
		return true;

	unsigned blendmask = colormask & blend->blend_enable_4bit;

	if (blendmask) {
		/* Only commutative blending. */
		if (blendmask & ~blend->commutative_4bit)
			return false;

		if (!dsa_order_invariant.pass_set)
			return false;
	}

	if (colormask & ~blendmask)
		return false;

	return true;
}
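
/* Illustrative outcome of the checks above: a pipeline using MIN/MAX
 * blending with dstFactor ONE on every written target, a LESS depth test
 * with depth and stencil writes disabled, and no logic op passes every
 * check, so out-of-order rasterization can be enabled; turning on a logic
 * op for the same pipeline disables it again.
 */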
static void
radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
				     struct radv_blend_state *blend,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	unsigned num_tile_pipes = pipeline->device->physical_device->rad_info.num_tile_pipes;
	bool out_of_order_rast = false;
	int ps_iter_samples = 1;
	uint32_t mask = 0xffff;

	if (vkms)
		ms->num_samples = vkms->rasterizationSamples;
	else
		ms->num_samples = 1;

	if (vkms)
		ps_iter_samples = radv_pipeline_get_ps_iter_samples(vkms);
	if (vkms && !vkms->sampleShadingEnable && pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.force_persample) {
		ps_iter_samples = ms->num_samples;
	}

	const struct VkPipelineRasterizationStateRasterizationOrderAMD *raster_order =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext, PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD);
	if (raster_order && raster_order->rasterizationOrder == VK_RASTERIZATION_ORDER_RELAXED_AMD) {
		/* Out-of-order rasterization is explicitly enabled by the
		 * application.
		 */
		out_of_order_rast = true;
	} else {
		/* Determine if the driver can enable out-of-order
		 * rasterization internally.
		 */
		out_of_order_rast =
			radv_pipeline_out_of_order_rast(pipeline, blend, pCreateInfo);
	}

	ms->pa_sc_line_cntl = S_028BDC_DX10_DIAMOND_TEST_ENA(1);
	ms->pa_sc_aa_config = 0;
	ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
		S_028804_INCOHERENT_EQAA_READS(1) |
		S_028804_INTERPOLATE_COMP_Z(1) |
		S_028804_STATIC_ANCHOR_ASSOCIATIONS(1);
	ms->pa_sc_mode_cntl_1 =
		S_028A4C_WALK_FENCE_ENABLE(1) | //TODO linear dst fixes
		S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
		S_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(out_of_order_rast) |
		S_028A4C_OUT_OF_ORDER_WATER_MARK(0x7) |
		S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1) |
		S_028A4C_SUPERTILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_TILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE(1) |
		S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		S_028A4C_FORCE_EOV_REZ_ENABLE(1);
	ms->pa_sc_mode_cntl_0 = S_028A48_ALTERNATE_RBS_PER_TILE(pipeline->device->physical_device->rad_info.chip_class >= GFX9) |
		S_028A48_VPORT_SCISSOR_ENABLE(1);

	if (ms->num_samples > 1) {
		unsigned log_samples = util_logbase2(ms->num_samples);
		unsigned log_ps_iter_samples = util_logbase2(ps_iter_samples);
		ms->pa_sc_mode_cntl_0 |= S_028A48_MSAA_ENABLE(1);
		ms->pa_sc_line_cntl |= S_028BDC_EXPAND_LINE_WIDTH(1); /* CM_R_028BDC_PA_SC_LINE_CNTL */
		ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_samples) |
			S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
			S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
			S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
		ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
			S_028BE0_MAX_SAMPLE_DIST(radv_get_default_max_sample_dist(log_samples)) |
			S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples); /* CM_R_028BE0_PA_SC_AA_CONFIG */
		ms->pa_sc_mode_cntl_1 |= S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
		if (ps_iter_samples > 1)
			pipeline->graphics.spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
	}

	if (vkms && vkms->pSampleMask) {
		mask = vkms->pSampleMask[0] & 0xffff;
	}

	ms->pa_sc_aa_mask[0] = mask | (mask << 16);
	ms->pa_sc_aa_mask[1] = mask | (mask << 16);
}
static bool
radv_prim_can_use_guardband(enum VkPrimitiveTopology topology)
{
	switch(topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return false;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return true;
	default:
		unreachable("unhandled primitive type");
	}
}
static unsigned
si_translate_prim(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
		return V_008958_DI_PT_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
		return V_008958_DI_PT_LINELIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
		return V_008958_DI_PT_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
		return V_008958_DI_PT_TRILIST;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
		return V_008958_DI_PT_TRISTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
		return V_008958_DI_PT_TRIFAN;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_LINELIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_LINESTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_TRILIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_TRISTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_008958_DI_PT_PATCH;
	default:
		assert(0);
		return 0;
	}
}
static unsigned
si_conv_gl_prim_to_gs_out(unsigned gl_prim)
{
	switch (gl_prim) {
	case 0: /* GL_POINTS */
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case 1: /* GL_LINES */
	case 3: /* GL_LINE_STRIP */
	case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
	case 0x8E7A: /* GL_ISOLINES */
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;

	case 4: /* GL_TRIANGLES */
	case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
	case 5: /* GL_TRIANGLE_STRIP */
	case 7: /* GL_QUADS */
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}
static unsigned
si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}
static unsigned radv_dynamic_state_mask(VkDynamicState state)
{
	switch(state) {
	case VK_DYNAMIC_STATE_VIEWPORT:
		return RADV_DYNAMIC_VIEWPORT;
	case VK_DYNAMIC_STATE_SCISSOR:
		return RADV_DYNAMIC_SCISSOR;
	case VK_DYNAMIC_STATE_LINE_WIDTH:
		return RADV_DYNAMIC_LINE_WIDTH;
	case VK_DYNAMIC_STATE_DEPTH_BIAS:
		return RADV_DYNAMIC_DEPTH_BIAS;
	case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
		return RADV_DYNAMIC_BLEND_CONSTANTS;
	case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
		return RADV_DYNAMIC_DEPTH_BOUNDS;
	case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
		return RADV_DYNAMIC_STENCIL_COMPARE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
		return RADV_DYNAMIC_STENCIL_WRITE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
		return RADV_DYNAMIC_STENCIL_REFERENCE;
	case VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT:
		return RADV_DYNAMIC_DISCARD_RECTANGLE;
	case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
		return RADV_DYNAMIC_SAMPLE_LOCATIONS;
	default:
		unreachable("Unhandled dynamic state");
	}
}
static uint32_t radv_pipeline_needed_dynamic_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t states = RADV_DYNAMIC_ALL;

	/* If rasterization is disabled we do not care about any of the dynamic states,
	 * since they are all rasterization related only. */
	if (pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
		return 0;

	if (!pCreateInfo->pRasterizationState->depthBiasEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BIAS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->depthBoundsTestEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BOUNDS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->stencilTestEnable)
		states &= ~(RADV_DYNAMIC_STENCIL_COMPARE_MASK |
			    RADV_DYNAMIC_STENCIL_WRITE_MASK |
			    RADV_DYNAMIC_STENCIL_REFERENCE);

	if (!vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_DISCARD_RECTANGLE;

	if (!pCreateInfo->pMultisampleState ||
	    !vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
				  PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_SAMPLE_LOCATIONS;

	/* TODO: blend constants & line width. */

	return states;
}
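
/* Consequence of the early-out above: a pipeline created with
 * rasterizerDiscardEnable reports that it needs no dynamic state at all, so
 * radv_pipeline_init_dynamic_state below copies nothing from pCreateInfo and
 * leaves the defaults in place.
 */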
static void
radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
				 const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t needed_states = radv_pipeline_needed_dynamic_state(pCreateInfo);
	uint32_t states = needed_states;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

	pipeline->dynamic_state = default_dynamic_state;
	pipeline->graphics.needed_dynamic_state = needed_states;

	if (pCreateInfo->pDynamicState) {
		/* Remove all of the states that are marked as dynamic */
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t s = 0; s < count; s++)
			states &= ~radv_dynamic_state_mask(pCreateInfo->pDynamicState->pDynamicStates[s]);
	}

	struct radv_dynamic_state *dynamic = &pipeline->dynamic_state;

	if (needed_states & RADV_DYNAMIC_VIEWPORT) {
		assert(pCreateInfo->pViewportState);

		dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
		if (states & RADV_DYNAMIC_VIEWPORT) {
			typed_memcpy(dynamic->viewport.viewports,
				     pCreateInfo->pViewportState->pViewports,
				     pCreateInfo->pViewportState->viewportCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SCISSOR) {
		dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
		if (states & RADV_DYNAMIC_SCISSOR) {
			typed_memcpy(dynamic->scissor.scissors,
				     pCreateInfo->pViewportState->pScissors,
				     pCreateInfo->pViewportState->scissorCount);
		}
	}

	if (states & RADV_DYNAMIC_LINE_WIDTH) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
	}

	if (states & RADV_DYNAMIC_DEPTH_BIAS) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->depth_bias.bias =
			pCreateInfo->pRasterizationState->depthBiasConstantFactor;
		dynamic->depth_bias.clamp =
			pCreateInfo->pRasterizationState->depthBiasClamp;
		dynamic->depth_bias.slope =
			pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
	}

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pColorBlendState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is
	 *    created against does not use any color attachments.
	 */
	if (subpass->has_color_att && states & RADV_DYNAMIC_BLEND_CONSTANTS) {
		assert(pCreateInfo->pColorBlendState);
		typed_memcpy(dynamic->blend_constants,
			     pCreateInfo->pColorBlendState->blendConstants, 4);
	}

	/* If there is no depthstencil attachment, then don't read
	 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
	 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
	 * no need to override the depthstencil defaults in
	 * radv_pipeline::dynamic_state when there is no depthstencil attachment.
	 *
	 * Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pDepthStencilState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is created
	 *    against does not use a depth/stencil attachment.
	 */
	if (needed_states && subpass->depth_stencil_attachment) {
		assert(pCreateInfo->pDepthStencilState);

		if (states & RADV_DYNAMIC_DEPTH_BOUNDS) {
			dynamic->depth_bounds.min =
				pCreateInfo->pDepthStencilState->minDepthBounds;
			dynamic->depth_bounds.max =
				pCreateInfo->pDepthStencilState->maxDepthBounds;
		}

		if (states & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
			dynamic->stencil_compare_mask.front =
				pCreateInfo->pDepthStencilState->front.compareMask;
			dynamic->stencil_compare_mask.back =
				pCreateInfo->pDepthStencilState->back.compareMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
			dynamic->stencil_write_mask.front =
				pCreateInfo->pDepthStencilState->front.writeMask;
			dynamic->stencil_write_mask.back =
				pCreateInfo->pDepthStencilState->back.writeMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_REFERENCE) {
			dynamic->stencil_reference.front =
				pCreateInfo->pDepthStencilState->front.reference;
			dynamic->stencil_reference.back =
				pCreateInfo->pDepthStencilState->back.reference;
		}
	}

	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
	if (needed_states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
		dynamic->discard_rectangle.count = discard_rectangle_info->discardRectangleCount;
		if (states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
			typed_memcpy(dynamic->discard_rectangle.rectangles,
				     discard_rectangle_info->pDiscardRectangles,
				     discard_rectangle_info->discardRectangleCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
		const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_info =
			vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
					     PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
		/* If sampleLocationsEnable is VK_FALSE, the default sample
		 * locations are used and the values specified in
		 * sampleLocationsInfo are ignored.
		 */
		if (sample_location_info->sampleLocationsEnable) {
			const VkSampleLocationsInfoEXT *pSampleLocationsInfo =
				&sample_location_info->sampleLocationsInfo;

			assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);

			dynamic->sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
			dynamic->sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
			dynamic->sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
			typed_memcpy(&dynamic->sample_location.locations[0],
				     pSampleLocationsInfo->pSampleLocations,
				     pSampleLocationsInfo->sampleLocationsCount);
		}
	}

	pipeline->dynamic_state.mask = states;
}
static struct radv_gs_state
calculate_gs_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
		  const struct radv_pipeline *pipeline)
{
	struct radv_gs_state gs = {0};
	struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
	struct radv_es_output_info *es_info;
	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		es_info = radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	else
		es_info = radv_pipeline_has_tess(pipeline) ?
			&pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.es_info :
			&pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.es_info;

	unsigned gs_num_invocations = MAX2(gs_info->gs.invocations, 1);
	bool uses_adjacency;
	switch(pCreateInfo->pInputAssemblyState->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space. */
	const unsigned max_lds_size = 8 * 1024;
	const unsigned esgs_itemsize = es_info->esgs_itemsize / 4;
	unsigned esgs_lds_size;

	/* All these are per subgroup: */
	const unsigned max_out_prims = 32 * 1024;
	const unsigned max_es_verts = 255;
	const unsigned ideal_gs_prims = 64;
	unsigned max_gs_prims, gs_prims;
	unsigned min_es_verts, es_verts, worst_case_es_verts;

	if (uses_adjacency || gs_num_invocations > 1)
		max_gs_prims = 127 / gs_num_invocations;
	else
		max_gs_prims = 255;

	/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
	 * Make sure we don't go over the maximum value.
	 */
	if (gs_info->gs.vertices_out > 0) {
		max_gs_prims = MIN2(max_gs_prims,
				    max_out_prims /
				    (gs_info->gs.vertices_out * gs_num_invocations));
	}
	assert(max_gs_prims > 0);

	/* If the primitive has adjacency, halve the number of vertices
	 * that will be reused in multiple primitives.
	 */
	min_es_verts = gs_info->gs.vertices_in / (uses_adjacency ? 2 : 1);

	gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
	worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

	/* Compute ESGS LDS size based on the worst case number of ES vertices
	 * needed to create the target number of GS prims per subgroup.
	 */
	esgs_lds_size = esgs_itemsize * worst_case_es_verts;

	/* If total LDS usage is too big, refactor partitions based on ratio
	 * of ESGS item sizes.
	 */
	if (esgs_lds_size > max_lds_size) {
		/* Our target GS Prims Per Subgroup was too large. Calculate
		 * the maximum number of GS Prims Per Subgroup that will fit
		 * into LDS, capped by the maximum that the hardware can support.
		 */
		gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
				max_gs_prims);
		assert(gs_prims > 0);
		worst_case_es_verts = MIN2(min_es_verts * gs_prims,
					   max_es_verts);

		esgs_lds_size = esgs_itemsize * worst_case_es_verts;
		assert(esgs_lds_size <= max_lds_size);
	}

	/* Now calculate remaining ESGS information. */
	if (esgs_lds_size)
		es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
	else
		es_verts = max_es_verts;

	/* Vertices for adjacency primitives are not always reused, so restore
	 * it for ES_VERTS_PER_SUBGRP.
	 */
	min_es_verts = gs_info->gs.vertices_in;

	/* For normal primitives, the VGT only checks if they are past the ES
	 * verts per subgroup after allocating a full GS primitive and if they
	 * are, kick off a new subgroup. But if those additional ES verts are
	 * unique (e.g. not reused) we need to make sure there is enough LDS
	 * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
	 */
	es_verts -= min_es_verts - 1;

	uint32_t es_verts_per_subgroup = es_verts;
	uint32_t gs_prims_per_subgroup = gs_prims;
	uint32_t gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
	uint32_t max_prims_per_subgroup = gs_inst_prims_in_subgroup * gs_info->gs.vertices_out;
	gs.lds_size = align(esgs_lds_size, 128) / 128;
	gs.vgt_gs_onchip_cntl = S_028A44_ES_VERTS_PER_SUBGRP(es_verts_per_subgroup) |
				S_028A44_GS_PRIMS_PER_SUBGRP(gs_prims_per_subgroup) |
				S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_inst_prims_in_subgroup);
	gs.vgt_gs_max_prims_per_subgroup = S_028A94_MAX_PRIMS_PER_SUBGROUP(max_prims_per_subgroup);
	gs.vgt_esgs_ring_itemsize = esgs_itemsize;
	assert(max_prims_per_subgroup <= max_out_prims);

	return gs;
}
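
/* Worked example of the sizing above (illustrative values): with an ESGS
 * itemsize of 16 bytes (4 dwords), triangles (vertices_in = 3, no adjacency)
 * and the ideal 64 GS prims per subgroup, worst_case_es_verts =
 * MIN2(3 * 64, 255) = 192 and esgs_lds_size = 4 * 192 = 768 dwords, well
 * under the 8K dword budget, so no repartitioning is needed.
 */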
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
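
/* Example for the clamp above: max_esverts = 192 with triangles
 * (min_verts_per_prim = 3) allows at most 1 + (192 - 3) = 190 GS prims;
 * with adjacency the reuse term is halved first.
 */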
static struct radv_ngg_state
calculate_ngg_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   struct radv_pipeline *pipeline)
{
	struct radv_ngg_state ngg = {0};
	struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
	struct radv_es_output_info *es_info =
		radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	unsigned gs_type = MESA_SHADER_VERTEX;
	unsigned max_verts_per_prim = 3; // triangles
	unsigned min_verts_per_prim =
		gs_type == MESA_SHADER_GEOMETRY ? max_verts_per_prim : 1;
	unsigned gs_num_invocations = 1;//MAX2(gs_info->gs.invocations, 1);
	bool uses_adjacency;
	switch(pCreateInfo->pInputAssemblyState->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * Streamout can increase the ESGS buffer size later on, so be more
	 * conservative with streamout and use 4K dwords. This may be suboptimal.
	 *
	 * Otherwise, use the limit of 7K dwords. The reason is that we need
	 * to leave some headroom for the max_esverts increase at the end.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 * account. The linker will fail if the size is greater than
	 * what the hardware supports.
	 */
	const unsigned max_lds_size = (0 /*gs_info->info.so.num_outputs*/ ? 4 : 7) * 1024 - 128;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 256;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
	 *  - at most 252 for any line input primitive type
	 *  - at most 251 for any quad input primitive type
	 *  - at most 251 for triangle strips with adjacency (this happens to
	 *    be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == MESA_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_info->gs.vertices_out * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_info->gs.vertices_out;
		}

		esvert_lds_size = es_info->esgs_itemsize / 4;
		gsprim_lds_size = (gs_info->gs.gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* TODO: This needs to be adjusted once LDS use for compaction
		 * after culling is implemented. */
		if (es_info->info.so.num_outputs)
			esvert_lds_size = 4 * es_info->info.so.num_outputs + 1;
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, uses_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = 64;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, uses_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_info->gs.vertices_out :
		gs_type == MESA_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_info->gs.vertices_out :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == MESA_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_info->gs.vertices_out;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	ngg.max_gsprims = max_gsprims;
	ngg.max_out_verts = max_out_vertices;
	ngg.prim_amp_factor = prim_amp_factor;
	ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
	ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;
	ngg.vgt_esgs_ring_itemsize = 1;

	pipeline->graphics.esgs_ring_size = 4 * max_esverts * esvert_lds_size;

	assert(ngg.hw_max_esverts >= 24); /* HW limitation */

	return ngg;
}
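
/* Size the legacy (non-NGG) ESGS and GSVS rings from the recommended
 * per-wave throughput, clamped to the per-SE maximum.
 */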
static void
calculate_gs_ring_sizes(struct radv_pipeline *pipeline, const struct radv_gs_state *gs)
{
	struct radv_device *device = pipeline->device;
	unsigned num_se = device->physical_device->rad_info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	/* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
	 * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
	 */
	unsigned gs_vertex_reuse =
		(device->physical_device->rad_info.chip_class >= GFX8 ? 32 : 16) * num_se;
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
	struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;

	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(gs->vgt_esgs_ring_itemsize * 4 * gs_vertex_reuse *
					    wave_size, alignment);
	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
		gs->vgt_esgs_ring_itemsize * 4 * gs_info->gs.vertices_in;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
		gs_info->gs.max_gsvs_emit_size;

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
		pipeline->graphics.esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);

	pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}
static void si_multiwave_lds_size_workaround(struct radv_device *device,
					     unsigned *lds_size)
{
	/* If tessellation is all offchip and on-chip GS isn't used, this
	 * workaround is not needed.
	 */
	return;

	/* SPI barrier management bug:
	 *   Make sure we have at least 4k of LDS in use to avoid the bug.
	 *   It applies to workgroup sizes of more than one wavefront.
	 */
	if (device->physical_device->rad_info.family == CHIP_BONAIRE ||
	    device->physical_device->rad_info.family == CHIP_KABINI)
		*lds_size = MAX2(*lds_size, 8);
}
struct radv_shader_variant *
radv_get_shader(struct radv_pipeline *pipeline,
		gl_shader_stage stage)
{
	if (stage == MESA_SHADER_VERTEX) {
		if (pipeline->shaders[MESA_SHADER_VERTEX])
			return pipeline->shaders[MESA_SHADER_VERTEX];
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
			return pipeline->shaders[MESA_SHADER_TESS_CTRL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	} else if (stage == MESA_SHADER_TESS_EVAL) {
		if (!radv_pipeline_has_tess(pipeline))
			return NULL;
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
			return pipeline->shaders[MESA_SHADER_TESS_EVAL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	}
	return pipeline->shaders[stage];
}
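
/* Derive the fixed-function tessellation state: LS_HS_CONFIG (patch and
 * control point counts), the on-chip LDS allocation, and TF_PARAM (domain
 * type, spacing, output topology and distribution mode).
 */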
static struct radv_tessellation_state
calculate_tess_state(struct radv_pipeline *pipeline,
		     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	unsigned num_tcs_input_cp;
	unsigned num_tcs_output_cp;
	unsigned lds_size;
	unsigned num_patches;
	struct radv_tessellation_state tess = {0};

	num_tcs_input_cp = pCreateInfo->pTessellationState->patchControlPoints;
	num_tcs_output_cp = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.tcs_vertices_out; //TCS VERTICES OUT
	num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;

	lds_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.lds_size;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
		assert(lds_size <= 65536);
		lds_size = align(lds_size, 512) / 512;
	} else {
		assert(lds_size <= 32768);
		lds_size = align(lds_size, 256) / 256;
	}
	si_multiwave_lds_size_workaround(pipeline->device, &lds_size);

	tess.lds_size = lds_size;

	tess.ls_hs_config = S_028B58_NUM_PATCHES(num_patches) |
		S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
		S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
	tess.num_patches = num_patches;

	struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);
	unsigned type = 0, partitioning = 0, topology = 0, distribution_mode = 0;

	switch (tes->info.tes.primitive_mode) {
	case GL_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case GL_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	case GL_ISOLINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	}

	switch (tes->info.tes.spacing) {
	case TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	case TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	default:
		break;
	}

	bool ccw = tes->info.tes.ccw;
	const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
		vk_find_struct_const(pCreateInfo->pTessellationState,
				     PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);

	if (domain_origin_state && domain_origin_state->domainOrigin != VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
		ccw = !ccw;

	if (tes->info.tes.point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes->info.tes.primitive_mode == GL_ISOLINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (ccw)
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	if (pipeline->device->has_distributed_tess) {
		if (pipeline->device->physical_device->rad_info.family == CHIP_FIJI ||
		    pipeline->device->physical_device->rad_info.family >= CHIP_POLARIS10)
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
		else
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
	} else
		distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;

	tess.tf_param = S_028B6C_TYPE(type) |
		S_028B6C_PARTITIONING(partitioning) |
		S_028B6C_TOPOLOGY(topology) |
		S_028B6C_DISTRIBUTION_MODE(distribution_mode);

	return tess;
}
static const struct radv_prim_vertex_count prim_size_table[] = {
	[V_008958_DI_PT_NONE] = {0, 0},
	[V_008958_DI_PT_POINTLIST] = {1, 1},
	[V_008958_DI_PT_LINELIST] = {2, 2},
	[V_008958_DI_PT_LINESTRIP] = {2, 1},
	[V_008958_DI_PT_TRILIST] = {3, 3},
	[V_008958_DI_PT_TRIFAN] = {3, 1},
	[V_008958_DI_PT_TRISTRIP] = {3, 1},
	[V_008958_DI_PT_LINELIST_ADJ] = {4, 4},
	[V_008958_DI_PT_LINESTRIP_ADJ] = {4, 1},
	[V_008958_DI_PT_TRILIST_ADJ] = {6, 6},
	[V_008958_DI_PT_TRISTRIP_ADJ] = {6, 2},
	[V_008958_DI_PT_RECTLIST] = {3, 3},
	[V_008958_DI_PT_LINELOOP] = {2, 1},
	[V_008958_DI_PT_POLYGON] = {3, 1},
	[V_008958_DI_PT_2D_TRI_STRIP] = {0, 0},
};
static const struct radv_vs_output_info *get_vs_output_info(const struct radv_pipeline *pipeline)
{
	if (radv_pipeline_has_gs(pipeline))
		return &pipeline->gs_copy_shader->info.vs.outinfo;
	else if (radv_pipeline_has_tess(pipeline))
		return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.outinfo;
	else
		return &pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.outinfo;
}
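
/* Link the NIR shaders of consecutive stages: propagate transform feedback
 * varyings, scalarize and optimize the I/O, then remove and compact unused
 * varyings between each producer/consumer pair.
 */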
static void
radv_link_shaders(struct radv_pipeline *pipeline, nir_shader **shaders)
{
	nir_shader* ordered_shaders[MESA_SHADER_STAGES];
	int shader_count = 0;

	if(shaders[MESA_SHADER_FRAGMENT]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_FRAGMENT];
	}
	if(shaders[MESA_SHADER_GEOMETRY]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_GEOMETRY];
	}
	if(shaders[MESA_SHADER_TESS_EVAL]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_EVAL];
	}
	if(shaders[MESA_SHADER_TESS_CTRL]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_CTRL];
	}
	if(shaders[MESA_SHADER_VERTEX]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_VERTEX];
	}

	if (shader_count > 1) {
		unsigned first = ordered_shaders[shader_count - 1]->info.stage;
		unsigned last = ordered_shaders[0]->info.stage;

		if (ordered_shaders[0]->info.stage == MESA_SHADER_FRAGMENT &&
		    ordered_shaders[1]->info.has_transform_feedback_varyings)
			nir_link_xfb_varyings(ordered_shaders[1], ordered_shaders[0]);

		for (int i = 0; i < shader_count; ++i) {
			nir_variable_mode mask = 0;

			if (ordered_shaders[i]->info.stage != first)
				mask = mask | nir_var_shader_in;

			if (ordered_shaders[i]->info.stage != last)
				mask = mask | nir_var_shader_out;

			nir_lower_io_to_scalar_early(ordered_shaders[i], mask);
			radv_optimize_nir(ordered_shaders[i], false, false);
		}
	}

	for (int i = 1; i < shader_count; ++i) {
		nir_lower_io_arrays_to_elements(ordered_shaders[i],
						ordered_shaders[i - 1]);

		if (nir_link_opt_varyings(ordered_shaders[i],
					  ordered_shaders[i - 1]))
			radv_optimize_nir(ordered_shaders[i - 1], false, false);

		nir_remove_dead_variables(ordered_shaders[i],
					  nir_var_shader_out);
		nir_remove_dead_variables(ordered_shaders[i - 1],
					  nir_var_shader_in);

		bool progress = nir_remove_unused_varyings(ordered_shaders[i],
							   ordered_shaders[i - 1]);

		nir_compact_varyings(ordered_shaders[i],
				     ordered_shaders[i - 1], true);

		if (progress) {
			if (nir_lower_global_vars_to_local(ordered_shaders[i])) {
				ac_lower_indirect_derefs(ordered_shaders[i],
							 pipeline->device->physical_device->rad_info.chip_class);
			}
			radv_optimize_nir(ordered_shaders[i], false, false);

			if (nir_lower_global_vars_to_local(ordered_shaders[i - 1])) {
				ac_lower_indirect_derefs(ordered_shaders[i - 1],
							 pipeline->device->physical_device->rad_info.chip_class);
			}
			radv_optimize_nir(ordered_shaders[i - 1], false, false);
		}
	}
}
static uint32_t
radv_get_attrib_stride(const VkPipelineVertexInputStateCreateInfo *input_state,
		       uint32_t attrib_binding)
{
	for (uint32_t i = 0; i < input_state->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *input_binding =
			&input_state->pVertexBindingDescriptions[i];

		if (input_binding->binding == attrib_binding)
			return input_binding->stride;
	}

	return 0;
}
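
/* Build the pipeline key from the create info: vertex fetch layout (formats,
 * bindings, offsets, strides, instance-rate divisors), the alpha-adjust
 * workaround for older chips, tessellation input vertices, sample counts and
 * the color export format.
 */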
static struct radv_pipeline_key
radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
				    const VkGraphicsPipelineCreateInfo *pCreateInfo,
				    const struct radv_blend_state *blend,
				    bool has_view_index)
{
	const VkPipelineVertexInputStateCreateInfo *input_state =
		pCreateInfo->pVertexInputState;
	const VkPipelineVertexInputDivisorStateCreateInfoEXT *divisor_state =
		vk_find_struct_const(input_state->pNext, PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);

	struct radv_pipeline_key key;
	memset(&key, 0, sizeof(key));

	if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
		key.optimisations_disabled = 1;

	key.has_multiview_view_index = has_view_index;

	uint32_t binding_input_rate = 0;
	uint32_t instance_rate_divisors[MAX_VERTEX_ATTRIBS];
	for (unsigned i = 0; i < input_state->vertexBindingDescriptionCount; ++i) {
		if (input_state->pVertexBindingDescriptions[i].inputRate) {
			unsigned binding = input_state->pVertexBindingDescriptions[i].binding;
			binding_input_rate |= 1u << binding;
			instance_rate_divisors[binding] = 1;
		}
	}
	if (divisor_state) {
		for (unsigned i = 0; i < divisor_state->vertexBindingDivisorCount; ++i) {
			instance_rate_divisors[divisor_state->pVertexBindingDivisors[i].binding] =
				divisor_state->pVertexBindingDivisors[i].divisor;
		}
	}

	for (unsigned i = 0; i < input_state->vertexAttributeDescriptionCount; ++i) {
		const VkVertexInputAttributeDescription *desc =
			&input_state->pVertexAttributeDescriptions[i];
		const struct vk_format_description *format_desc;
		unsigned location = desc->location;
		unsigned binding = desc->binding;
		unsigned num_format, data_format;
		int first_non_void;

		if (binding_input_rate & (1u << binding)) {
			key.instance_rate_inputs |= 1u << location;
			key.instance_rate_divisors[location] = instance_rate_divisors[binding];
		}

		format_desc = vk_format_description(desc->format);
		first_non_void = vk_format_get_first_non_void_channel(desc->format);

		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);

		key.vertex_attribute_formats[location] = data_format | (num_format << 4);
		key.vertex_attribute_bindings[location] = desc->binding;
		key.vertex_attribute_offsets[location] = desc->offset;
		key.vertex_attribute_strides[location] = radv_get_attrib_stride(input_state, desc->binding);

		if (pipeline->device->physical_device->rad_info.chip_class <= GFX8 &&
		    pipeline->device->physical_device->rad_info.family != CHIP_STONEY) {
			VkFormat format = input_state->pVertexAttributeDescriptions[i].format;
			uint64_t adjust;
			switch(format) {
			case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
			case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
				adjust = RADV_ALPHA_ADJUST_SNORM;
				break;
			case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
			case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
				adjust = RADV_ALPHA_ADJUST_SSCALED;
				break;
			case VK_FORMAT_A2R10G10B10_SINT_PACK32:
			case VK_FORMAT_A2B10G10R10_SINT_PACK32:
				adjust = RADV_ALPHA_ADJUST_SINT;
				break;
			default:
				adjust = 0;
				break;
			}
			key.vertex_alpha_adjust |= adjust << (2 * location);
		}

		switch (desc->format) {
		case VK_FORMAT_B8G8R8A8_UNORM:
		case VK_FORMAT_B8G8R8A8_SNORM:
		case VK_FORMAT_B8G8R8A8_USCALED:
		case VK_FORMAT_B8G8R8A8_SSCALED:
		case VK_FORMAT_B8G8R8A8_UINT:
		case VK_FORMAT_B8G8R8A8_SINT:
		case VK_FORMAT_B8G8R8A8_SRGB:
		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
			key.vertex_post_shuffle |= 1 << location;
			break;
		default:
			break;
		}
	}

	if (pCreateInfo->pTessellationState)
		key.tess_input_vertices = pCreateInfo->pTessellationState->patchControlPoints;

	if (pCreateInfo->pMultisampleState &&
	    pCreateInfo->pMultisampleState->rasterizationSamples > 1) {
		uint32_t num_samples = pCreateInfo->pMultisampleState->rasterizationSamples;
		uint32_t ps_iter_samples = radv_pipeline_get_ps_iter_samples(pCreateInfo->pMultisampleState);
		key.num_samples = num_samples;
		key.log2_ps_iter_samples = util_logbase2(ps_iter_samples);
	}

	key.col_format = blend->spi_shader_col_format;
	if (pipeline->device->physical_device->rad_info.chip_class < GFX8)
		radv_pipeline_compute_get_int_clamp(pCreateInfo, &key.is_int8, &key.is_int10);

	return key;
}
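
/* Translate the pipeline key into per-stage shader variant keys and set up
 * the stage linkage bits (VS as LS/ES, TES as ES, NGG on GFX10).
 */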
static void
radv_fill_shader_keys(struct radv_device *device,
		      struct radv_shader_variant_key *keys,
		      const struct radv_pipeline_key *key,
		      nir_shader **nir)
{
	keys[MESA_SHADER_VERTEX].vs.instance_rate_inputs = key->instance_rate_inputs;
	keys[MESA_SHADER_VERTEX].vs.alpha_adjust = key->vertex_alpha_adjust;
	keys[MESA_SHADER_VERTEX].vs.post_shuffle = key->vertex_post_shuffle;
	for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; ++i) {
		keys[MESA_SHADER_VERTEX].vs.instance_rate_divisors[i] = key->instance_rate_divisors[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_formats[i] = key->vertex_attribute_formats[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_bindings[i] = key->vertex_attribute_bindings[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_offsets[i] = key->vertex_attribute_offsets[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_strides[i] = key->vertex_attribute_strides[i];
	}

	if (nir[MESA_SHADER_TESS_CTRL]) {
		keys[MESA_SHADER_VERTEX].vs.out.as_ls = true;
		keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = 0;
		keys[MESA_SHADER_TESS_CTRL].tcs.input_vertices = key->tess_input_vertices;
		keys[MESA_SHADER_TESS_CTRL].tcs.primitive_mode = nir[MESA_SHADER_TESS_EVAL]->info.tess.primitive_mode;

		keys[MESA_SHADER_TESS_CTRL].tcs.tes_reads_tess_factors = !!(nir[MESA_SHADER_TESS_EVAL]->info.inputs_read & (VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER));
	}

	if (nir[MESA_SHADER_GEOMETRY]) {
		if (nir[MESA_SHADER_TESS_CTRL])
			keys[MESA_SHADER_TESS_EVAL].tes.out.as_es = true;
		else
			keys[MESA_SHADER_VERTEX].vs.out.as_es = true;
	}

	if (device->physical_device->rad_info.chip_class >= GFX10) {
		if (nir[MESA_SHADER_TESS_CTRL]) {
			keys[MESA_SHADER_TESS_EVAL].tes.out.as_ngg = true;
		} else {
			keys[MESA_SHADER_VERTEX].vs.out.as_ngg = true;
		}
	}

	for(int i = 0; i < MESA_SHADER_STAGES; ++i)
		keys[i].has_multiview_view_index = key->has_multiview_view_index;

	keys[MESA_SHADER_FRAGMENT].fs.col_format = key->col_format;
	keys[MESA_SHADER_FRAGMENT].fs.is_int8 = key->is_int8;
	keys[MESA_SHADER_FRAGMENT].fs.is_int10 = key->is_int10;
	keys[MESA_SHADER_FRAGMENT].fs.log2_ps_iter_samples = key->log2_ps_iter_samples;
	keys[MESA_SHADER_FRAGMENT].fs.num_samples = key->num_samples;
}
static void
merge_tess_info(struct shader_info *tes_info,
		const struct shader_info *tcs_info)
{
	/* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
	 *
	 *    "PointMode. Controls generation of points rather than triangles
	 *     or lines. This functionality defaults to disabled, and is
	 *     enabled if either shader stage includes the execution mode.
	 *
	 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
	 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
	 * and OutputVertices, it says:
	 *
	 *    "One mode must be set in at least one of the tessellation
	 *     shader stages."
	 *
	 * So, the fields can be set in either the TCS or TES, but they must
	 * agree if set in both. Our backend looks at TES, so bitwise-or in
	 * the values from the TCS.
	 */
	assert(tcs_info->tess.tcs_vertices_out == 0 ||
	       tes_info->tess.tcs_vertices_out == 0 ||
	       tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
	tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;

	assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
	       tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
	       tcs_info->tess.spacing == tes_info->tess.spacing);
	tes_info->tess.spacing |= tcs_info->tess.spacing;

	assert(tcs_info->tess.primitive_mode == 0 ||
	       tes_info->tess.primitive_mode == 0 ||
	       tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
	tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
	tes_info->tess.ccw |= tcs_info->tess.ccw;
	tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}
static void
radv_init_feedback(const VkPipelineCreationFeedbackCreateInfoEXT *ext)
{
	if (!ext)
		return;

	if (ext->pPipelineCreationFeedback) {
		ext->pPipelineCreationFeedback->flags = 0;
		ext->pPipelineCreationFeedback->duration = 0;
	}

	for (unsigned i = 0; i < ext->pipelineStageCreationFeedbackCount; ++i) {
		ext->pPipelineStageCreationFeedbacks[i].flags = 0;
		ext->pPipelineStageCreationFeedbacks[i].duration = 0;
	}
}
static void
radv_start_feedback(VkPipelineCreationFeedbackEXT *feedback)
{
	if (!feedback)
		return;

	feedback->duration -= radv_get_current_time();
	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
}
static void
radv_stop_feedback(VkPipelineCreationFeedbackEXT *feedback, bool cache_hit)
{
	if (!feedback)
		return;

	feedback->duration += radv_get_current_time();
	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT |
		(cache_hit ? VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT : 0);
}
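
/* Compile all shader variants for the pipeline, or fetch them from the
 * pipeline cache. On GFX9+ the VS is compiled together with the TCS and the
 * pre-GS stage together with the GS; a separate GS copy shader is created
 * when a geometry shader is present.
 */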
void radv_create_shaders(struct radv_pipeline *pipeline,
			 struct radv_device *device,
			 struct radv_pipeline_cache *cache,
			 const struct radv_pipeline_key *key,
			 const VkPipelineShaderStageCreateInfo **pStages,
			 const VkPipelineCreateFlags flags,
			 VkPipelineCreationFeedbackEXT *pipeline_feedback,
			 VkPipelineCreationFeedbackEXT **stage_feedbacks)
{
	struct radv_shader_module fs_m = {0};
	struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
	nir_shader *nir[MESA_SHADER_STAGES] = {0};
	struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
	struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{{0}}}}};
	unsigned char hash[20], gs_copy_hash[20];

	radv_start_feedback(pipeline_feedback);

	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pStages[i]) {
			modules[i] = radv_shader_module_from_handle(pStages[i]->module);
			if (modules[i]->nir)
				_mesa_sha1_compute(modules[i]->nir->info.name,
						   strlen(modules[i]->nir->info.name),
						   modules[i]->sha1);

			pipeline->active_stages |= mesa_to_vk_shader_stage(i);
		}
	}

	radv_hash_shaders(hash, pStages, pipeline->layout, key, get_hash_flags(device));
	memcpy(gs_copy_hash, hash, 20);
	gs_copy_hash[0] ^= 1;

	bool found_in_application_cache = true;
	if (modules[MESA_SHADER_GEOMETRY]) {
		struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
		radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants,
								&found_in_application_cache);
		pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
	}

	if (radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
							    &found_in_application_cache) &&
	    (!modules[MESA_SHADER_GEOMETRY] || pipeline->gs_copy_shader)) {
		radv_stop_feedback(pipeline_feedback, found_in_application_cache);
		return;
	}

	if (!modules[MESA_SHADER_FRAGMENT] && !modules[MESA_SHADER_COMPUTE]) {
		nir_builder fs_b;
		nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
		fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "noop_fs");
		fs_m.nir = fs_b.shader;
		modules[MESA_SHADER_FRAGMENT] = &fs_m;
	}

	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
		const VkPipelineShaderStageCreateInfo *stage = pStages[i];

		if (!modules[i])
			continue;

		radv_start_feedback(stage_feedbacks[i]);

		nir[i] = radv_shader_compile_to_nir(device, modules[i],
						    stage ? stage->pName : "main", i,
						    stage ? stage->pSpecializationInfo : NULL,
						    flags, pipeline->layout);

		/* We don't want to alter meta shaders IR directly so clone it
		 * first.
		 */
		if (nir[i]->info.name) {
			nir[i] = nir_shader_clone(NULL, nir[i]);
		}

		radv_stop_feedback(stage_feedbacks[i], false);
	}

	if (nir[MESA_SHADER_TESS_CTRL]) {
		nir_lower_patch_vertices(nir[MESA_SHADER_TESS_EVAL], nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
		merge_tess_info(&nir[MESA_SHADER_TESS_EVAL]->info, &nir[MESA_SHADER_TESS_CTRL]->info);
	}

	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_link_shaders(pipeline, nir);

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (nir[i]) {
			NIR_PASS_V(nir[i], nir_lower_bool_to_int32);
			NIR_PASS_V(nir[i], nir_lower_non_uniform_access,
				   nir_lower_non_uniform_ubo_access |
				   nir_lower_non_uniform_ssbo_access |
				   nir_lower_non_uniform_texture_access |
				   nir_lower_non_uniform_image_access);
		}

		if (radv_can_dump_shader(device, modules[i], false))
			nir_print_shader(nir[i], stderr);
	}

	radv_fill_shader_keys(device, keys, key, nir);

	if (nir[MESA_SHADER_FRAGMENT]) {
		if (!pipeline->shaders[MESA_SHADER_FRAGMENT]) {
			radv_start_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT]);

			pipeline->shaders[MESA_SHADER_FRAGMENT] =
				radv_shader_variant_compile(device, modules[MESA_SHADER_FRAGMENT], &nir[MESA_SHADER_FRAGMENT], 1,
							    pipeline->layout, keys + MESA_SHADER_FRAGMENT,
							    &binaries[MESA_SHADER_FRAGMENT]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT], false);
		}

		/* TODO: These are no longer used as keys we should refactor this */
		keys[MESA_SHADER_VERTEX].vs.out.export_prim_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input;
		keys[MESA_SHADER_VERTEX].vs.out.export_layer_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.layer_input;
		keys[MESA_SHADER_VERTEX].vs.out.export_clip_dists =
			!!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.num_input_clips_culls;
		keys[MESA_SHADER_TESS_EVAL].tes.out.export_prim_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input;
		keys[MESA_SHADER_TESS_EVAL].tes.out.export_layer_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.layer_input;
		keys[MESA_SHADER_TESS_EVAL].tes.out.export_clip_dists =
			!!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.num_input_clips_culls;
	}

	if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_TESS_CTRL]) {
		if (!pipeline->shaders[MESA_SHADER_TESS_CTRL]) {
			struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
			struct radv_shader_variant_key key = keys[MESA_SHADER_TESS_CTRL];
			key.tcs.vs_key = keys[MESA_SHADER_VERTEX].vs;

			radv_start_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL]);

			pipeline->shaders[MESA_SHADER_TESS_CTRL] = radv_shader_variant_compile(device, modules[MESA_SHADER_TESS_CTRL], combined_nir, 2,
											       pipeline->layout,
											       &key, &binaries[MESA_SHADER_TESS_CTRL]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL], false);
		}
		modules[MESA_SHADER_VERTEX] = NULL;
		keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
		keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.tcs.outputs_written);
	}

	if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_GEOMETRY]) {
		gl_shader_stage pre_stage = modules[MESA_SHADER_TESS_EVAL] ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
		if (!pipeline->shaders[MESA_SHADER_GEOMETRY]) {
			struct nir_shader *combined_nir[] = {nir[pre_stage], nir[MESA_SHADER_GEOMETRY]};

			radv_start_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY]);

			pipeline->shaders[MESA_SHADER_GEOMETRY] = radv_shader_variant_compile(device, modules[MESA_SHADER_GEOMETRY], combined_nir, 2,
											      pipeline->layout,
											      &keys[pre_stage], &binaries[MESA_SHADER_GEOMETRY]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY], false);
		}
		modules[pre_stage] = NULL;
	}

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if(modules[i] && !pipeline->shaders[i]) {
			if (i == MESA_SHADER_TESS_CTRL) {
				keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = util_last_bit64(pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.ls_outputs_written);
			}
			if (i == MESA_SHADER_TESS_EVAL) {
				keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
				keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.tcs.outputs_written);
			}

			radv_start_feedback(stage_feedbacks[i]);

			pipeline->shaders[i] = radv_shader_variant_compile(device, modules[i], &nir[i], 1,
									   pipeline->layout,
									   keys + i, &binaries[i]);

			radv_stop_feedback(stage_feedbacks[i], false);
		}
	}

	if(modules[MESA_SHADER_GEOMETRY]) {
		struct radv_shader_binary *gs_copy_binary = NULL;
		if (!pipeline->gs_copy_shader) {
			pipeline->gs_copy_shader = radv_create_gs_copy_shader(
					device, nir[MESA_SHADER_GEOMETRY], &gs_copy_binary,
					keys[MESA_SHADER_GEOMETRY].has_multiview_view_index);
		}

		if (pipeline->gs_copy_shader) {
			struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
			struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};

			binaries[MESA_SHADER_GEOMETRY] = gs_copy_binary;
			variants[MESA_SHADER_GEOMETRY] = pipeline->gs_copy_shader;

			radv_pipeline_cache_insert_shaders(device, cache,
							   gs_copy_hash, variants,
							   binaries);
		}
		free(gs_copy_binary);
	}

	radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
					   binaries);

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		free(binaries[i]);
		if (nir[i]) {
			if (!pipeline->device->keep_shader_info)
				ralloc_free(nir[i]);

			if (radv_can_dump_shader_stats(device, modules[i]))
				radv_shader_dump_stats(device,
						       pipeline->shaders[i],
						       i, stderr);
		}
	}

	if (fs_m.nir)
		ralloc_free(fs_m.nir);

	radv_stop_feedback(pipeline_feedback, false);
}
static uint32_t
radv_pipeline_stage_to_user_data_0(struct radv_pipeline *pipeline,
				   gl_shader_stage stage, enum chip_class chip_class)
{
	bool has_gs = radv_pipeline_has_gs(pipeline);
	bool has_tess = radv_pipeline_has_tess(pipeline);
	bool has_ngg = radv_pipeline_has_ngg(pipeline);

	switch (stage) {
	case MESA_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	case MESA_SHADER_VERTEX:
		if (has_tess) {
			if (chip_class >= GFX10) {
				return R_00B430_SPI_SHADER_USER_DATA_HS_0;
			} else if (chip_class == GFX9) {
				return R_00B430_SPI_SHADER_USER_DATA_LS_0;
			} else {
				return R_00B530_SPI_SHADER_USER_DATA_LS_0;
			}
		}

		if (has_gs) {
			if (chip_class >= GFX10) {
				return R_00B230_SPI_SHADER_USER_DATA_GS_0;
			} else {
				return R_00B330_SPI_SHADER_USER_DATA_ES_0;
			}
		}

		if (has_ngg)
			return R_00B230_SPI_SHADER_USER_DATA_GS_0;

		return R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case MESA_SHADER_GEOMETRY:
		return chip_class == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
					    R_00B230_SPI_SHADER_USER_DATA_GS_0;
	case MESA_SHADER_COMPUTE:
		return R_00B900_COMPUTE_USER_DATA_0;
	case MESA_SHADER_TESS_CTRL:
		return chip_class == GFX9 ? R_00B430_SPI_SHADER_USER_DATA_LS_0 :
					    R_00B430_SPI_SHADER_USER_DATA_HS_0;
	case MESA_SHADER_TESS_EVAL:
		if (has_gs) {
			return chip_class >= GFX10 ? R_00B230_SPI_SHADER_USER_DATA_GS_0 :
						     R_00B330_SPI_SHADER_USER_DATA_ES_0;
		} else if (has_ngg) {
			return R_00B230_SPI_SHADER_USER_DATA_GS_0;
		} else {
			return R_00B130_SPI_SHADER_USER_DATA_VS_0;
		}
	default:
		unreachable("unknown shader");
	}
}
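
/* Primitive binning: pick a bin size from per-SE lookup tables keyed by the
 * color and depth/stencil bytes per pixel of the current subpass.
 */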
struct radv_bin_size_entry {
	unsigned bpp;
	VkExtent2D extent;
};

static VkExtent2D
radv_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	static const struct radv_bin_size_entry color_size_table[][3][9] = {
		{
			{
				/* One shader engine */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Two shader engines */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Four shader engines */
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			{
				/* One shader engine */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Two shader engines */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Four shader engines */
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			{
				/* One shader engine */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Two shader engines */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Four shader engines */
				{ UINT_MAX, { 0, 0}},
			},
		},
	};
	static const struct radv_bin_size_entry ds_size_table[][3][9] = {
		{
			{
				// One shader engine
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Two shader engines
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Four shader engines
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			{
				// One shader engine
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Two shader engines
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Four shader engines
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			{
				// One shader engine
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Two shader engines
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Four shader engines
				{ UINT_MAX, { 0, 0}},
			},
		},
	};

	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	VkExtent2D extent = {512, 512};

	unsigned log_num_rb_per_se =
		util_logbase2_ceil(pipeline->device->physical_device->rad_info.num_render_backends /
				   pipeline->device->physical_device->rad_info.max_se);
	unsigned log_num_se = util_logbase2_ceil(pipeline->device->physical_device->rad_info.max_se);

	unsigned total_samples = 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline->graphics.ms.pa_sc_aa_config);
	unsigned ps_iter_samples = 1u << G_028804_PS_ITER_SAMPLES(pipeline->graphics.ms.db_eqaa);
	unsigned effective_samples = total_samples;
	unsigned color_bytes_per_pixel = 0;

	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	if (vkblend) {
		for (unsigned i = 0; i < subpass->color_count; i++) {
			if (!vkblend->pAttachments[i].colorWriteMask)
				continue;

			if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
				continue;

			VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
			color_bytes_per_pixel += vk_format_get_blocksize(format);
		}

		/* MSAA images typically don't use all samples all the time. */
		if (effective_samples >= 2 && ps_iter_samples <= 1)
			effective_samples = 2;
		color_bytes_per_pixel *= effective_samples;
	}

	const struct radv_bin_size_entry *color_entry = color_size_table[log_num_rb_per_se][log_num_se];
	while(color_entry[1].bpp <= color_bytes_per_pixel)
		++color_entry;

	extent = color_entry->extent;

	if (subpass->depth_stencil_attachment) {
		struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

		/* Coefficients taken from AMDVLK */
		unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
		unsigned stencil_coeff = vk_format_is_stencil(attachment->format) ? 1 : 0;
		unsigned ds_bytes_per_pixel = 4 * (depth_coeff + stencil_coeff) * total_samples;

		const struct radv_bin_size_entry *ds_entry = ds_size_table[log_num_rb_per_se][log_num_se];
		while(ds_entry[1].bpp <= ds_bytes_per_pixel)
			++ds_entry;

		extent.width = MIN2(extent.width, ds_entry->extent.width);
		extent.height = MIN2(extent.height, ds_entry->extent.height);
	}

	return extent;
}
static void
radv_pipeline_generate_binning_state(struct radeon_cmdbuf *ctx_cs,
				     struct radv_pipeline *pipeline,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
		return;

	uint32_t pa_sc_binner_cntl_0 =
		S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
		S_028C44_DISABLE_START_OF_PRIM(1);
	uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);

	VkExtent2D bin_size = radv_compute_bin_size(pipeline, pCreateInfo);

	if (pipeline->device->pbb_allowed && bin_size.width && bin_size.height) {
		unsigned context_states_per_bin; /* allowed range: [1, 6] */
		unsigned persistent_states_per_bin; /* allowed range: [1, 32] */
		unsigned fpovs_per_batch; /* allowed range: [0, 255], 0 = unlimited */

		switch (pipeline->device->physical_device->rad_info.family) {
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
			context_states_per_bin = 1;
			persistent_states_per_bin = 1;
			fpovs_per_batch = 63;
			break;
		case CHIP_RAVEN:
		case CHIP_RAVEN2:
			context_states_per_bin = 6;
			persistent_states_per_bin = 32;
			fpovs_per_batch = 63;
			break;
		default:
			unreachable("unhandled family while determining binning state.");
		}

		pa_sc_binner_cntl_0 =
			S_028C44_BINNING_MODE(V_028C44_BINNING_ALLOWED) |
			S_028C44_BIN_SIZE_X(bin_size.width == 16) |
			S_028C44_BIN_SIZE_Y(bin_size.height == 16) |
			S_028C44_BIN_SIZE_X_EXTEND(util_logbase2(MAX2(bin_size.width, 32)) - 5) |
			S_028C44_BIN_SIZE_Y_EXTEND(util_logbase2(MAX2(bin_size.height, 32)) - 5) |
			S_028C44_CONTEXT_STATES_PER_BIN(context_states_per_bin - 1) |
			S_028C44_PERSISTENT_STATES_PER_BIN(persistent_states_per_bin - 1) |
			S_028C44_DISABLE_START_OF_PRIM(1) |
			S_028C44_FPOVS_PER_BATCH(fpovs_per_batch) |
			S_028C44_OPTIMAL_BIN_SELECTION(1);
	}

	radeon_set_context_reg(ctx_cs, R_028C44_PA_SC_BINNER_CNTL_0,
			       pa_sc_binner_cntl_0);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
		radeon_set_context_reg(ctx_cs, R_028038_DB_DFSM_CONTROL,
				       db_dfsm_control);
	} else {
		radeon_set_context_reg(ctx_cs, R_028060_DB_DFSM_CONTROL,
				       db_dfsm_control);
	}
}
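
/* Emit DB_DEPTH_CONTROL/DB_STENCIL_CONTROL plus the render control and
 * override registers from the pipeline depth-stencil state and any
 * internal create-info extras.
 */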
static void
radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *ctx_cs,
					   struct radv_pipeline *pipeline,
					   const VkGraphicsPipelineCreateInfo *pCreateInfo,
					   const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineDepthStencilStateCreateInfo *vkds = pCreateInfo->pDepthStencilState;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	struct radv_render_pass_attachment *attachment = NULL;
	uint32_t db_depth_control = 0, db_stencil_control = 0;
	uint32_t db_render_control = 0, db_render_override2 = 0;
	uint32_t db_render_override = 0;

	if (subpass->depth_stencil_attachment)
		attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

	bool has_depth_attachment = attachment && vk_format_is_depth(attachment->format);
	bool has_stencil_attachment = attachment && vk_format_is_stencil(attachment->format);

	if (vkds && has_depth_attachment) {
		db_depth_control = S_028800_Z_ENABLE(vkds->depthTestEnable ? 1 : 0) |
				   S_028800_Z_WRITE_ENABLE(vkds->depthWriteEnable ? 1 : 0) |
				   S_028800_ZFUNC(vkds->depthCompareOp) |
				   S_028800_DEPTH_BOUNDS_ENABLE(vkds->depthBoundsTestEnable ? 1 : 0);

		/* from amdvlk: For 4xAA and 8xAA need to decompress on flush for better performance */
		db_render_override2 |= S_028010_DECOMPRESS_Z_ON_FLUSH(attachment->samples > 2);
	}

	if (has_stencil_attachment && vkds && vkds->stencilTestEnable) {
		db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
		db_depth_control |= S_028800_STENCILFUNC(vkds->front.compareOp);
		db_stencil_control |= S_02842C_STENCILFAIL(si_translate_stencil_op(vkds->front.failOp));
		db_stencil_control |= S_02842C_STENCILZPASS(si_translate_stencil_op(vkds->front.passOp));
		db_stencil_control |= S_02842C_STENCILZFAIL(si_translate_stencil_op(vkds->front.depthFailOp));

		db_depth_control |= S_028800_STENCILFUNC_BF(vkds->back.compareOp);
		db_stencil_control |= S_02842C_STENCILFAIL_BF(si_translate_stencil_op(vkds->back.failOp));
		db_stencil_control |= S_02842C_STENCILZPASS_BF(si_translate_stencil_op(vkds->back.passOp));
		db_stencil_control |= S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(vkds->back.depthFailOp));
	}

	if (attachment && extra) {
		db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(extra->db_depth_clear);
		db_render_control |= S_028000_STENCIL_CLEAR_ENABLE(extra->db_stencil_clear);

		db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->db_resummarize);
		db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->db_flush_depth_inplace);
		db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->db_flush_stencil_inplace);
		db_render_override2 |= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra->db_depth_disable_expclear);
		db_render_override2 |= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra->db_stencil_disable_expclear);
	}

	db_render_override |= S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
			      S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE);

	if (!pCreateInfo->pRasterizationState->depthClampEnable) {
		/* From VK_EXT_depth_range_unrestricted spec:
		 *
		 * "The behavior described in Primitive Clipping still applies.
		 *  If depth clamping is disabled the depth values are still
		 *  clipped to 0 ≤ zc ≤ wc before the viewport transform. If
		 *  depth clamping is enabled the above equation is ignored and
		 *  the depth values are instead clamped to the VkViewport
		 *  minDepth and maxDepth values, which in the case of this
		 *  extension can be outside of the 0.0 to 1.0 range."
		 */
		db_render_override |= S_02800C_DISABLE_VIEWPORT_CLAMP(1);
	}

	radeon_set_context_reg(ctx_cs, R_028800_DB_DEPTH_CONTROL, db_depth_control);
	radeon_set_context_reg(ctx_cs, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);

	radeon_set_context_reg(ctx_cs, R_028000_DB_RENDER_CONTROL, db_render_control);
	radeon_set_context_reg(ctx_cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
	radeon_set_context_reg(ctx_cs, R_028010_DB_RENDER_OVERRIDE2, db_render_override2);
}
static void
radv_pipeline_generate_blend_state(struct radeon_cmdbuf *ctx_cs,
				   struct radv_pipeline *pipeline,
				   const struct radv_blend_state *blend)
{
	radeon_set_context_reg_seq(ctx_cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(ctx_cs, blend->cb_blend_control, 8);
	radeon_set_context_reg(ctx_cs, R_028808_CB_COLOR_CONTROL, blend->cb_color_control);
	radeon_set_context_reg(ctx_cs, R_028B70_DB_ALPHA_TO_MASK, blend->db_alpha_to_mask);

	if (pipeline->device->physical_device->has_rbplus) {
		radeon_set_context_reg_seq(ctx_cs, R_028760_SX_MRT0_BLEND_OPT, 8);
		radeon_emit_array(ctx_cs, blend->sx_mrt_blend_opt, 8);
	}

	radeon_set_context_reg(ctx_cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(ctx_cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(ctx_cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	pipeline->graphics.col_format = blend->spi_shader_col_format;
	pipeline->graphics.cb_target_mask = blend->cb_target_mask;
}
static const VkConservativeRasterizationModeEXT
radv_get_conservative_raster_mode(const VkPipelineRasterizationStateCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationConservativeStateCreateInfoEXT *conservative_raster =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT);

	if (!conservative_raster)
		return VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT;
	return conservative_raster->conservativeRasterizationMode;
}
static void
radv_pipeline_generate_raster_state(struct radeon_cmdbuf *ctx_cs,
				    struct radv_pipeline *pipeline,
				    const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationStateCreateInfo *vkraster = pCreateInfo->pRasterizationState;
	const VkConservativeRasterizationModeEXT mode =
		radv_get_conservative_raster_mode(vkraster);
	uint32_t pa_sc_conservative_rast = S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1);
	bool depth_clip_disable = vkraster->depthClampEnable;

	const VkPipelineRasterizationDepthClipStateCreateInfoEXT *depth_clip_state =
		vk_find_struct_const(vkraster->pNext, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
	if (depth_clip_state) {
		depth_clip_disable = !depth_clip_state->depthClipEnable;
	}

	radeon_set_context_reg(ctx_cs, R_028810_PA_CL_CLIP_CNTL,
			       S_028810_DX_CLIP_SPACE_DEF(1) | // vulkan uses DX conventions.
			       S_028810_ZCLIP_NEAR_DISABLE(depth_clip_disable ? 1 : 0) |
			       S_028810_ZCLIP_FAR_DISABLE(depth_clip_disable ? 1 : 0) |
			       S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
			       S_028810_DX_LINEAR_ATTR_CLIP_ENA(1));

	radeon_set_context_reg(ctx_cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       S_0286D4_FLAT_SHADE_ENA(1) |
			       S_0286D4_PNT_SPRITE_ENA(1) |
			       S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
			       S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
			       S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
			       S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
			       S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */

	radeon_set_context_reg(ctx_cs, R_028BE4_PA_SU_VTX_CNTL,
			       S_028BE4_PIX_CENTER(1) | // TODO verify
			       S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
			       S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));

	radeon_set_context_reg(ctx_cs, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_FACE(vkraster->frontFace) |
			       S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
			       S_028814_CULL_BACK(!!(vkraster->cullMode & VK_CULL_MODE_BACK_BIT)) |
			       S_028814_POLY_MODE(vkraster->polygonMode != VK_POLYGON_MODE_FILL) |
			       S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(vkraster->polygonMode)) |
			       S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(vkraster->polygonMode)) |
			       S_028814_POLY_OFFSET_FRONT_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
			       S_028814_POLY_OFFSET_BACK_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
			       S_028814_POLY_OFFSET_PARA_ENABLE(vkraster->depthBiasEnable ? 1 : 0));

	/* Conservative rasterization. */
	if (mode != VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT) {
		struct radv_multisample_state *ms = &pipeline->graphics.ms;

		ms->pa_sc_aa_config |= S_028BE0_AA_MASK_CENTROID_DTMN(1);
		ms->db_eqaa |= S_028804_ENABLE_POSTZ_OVERRASTERIZATION(1) |
			       S_028804_OVERRASTERIZATION_AMOUNT(4);

		pa_sc_conservative_rast = S_028C4C_PREZ_AA_MASK_ENABLE(1) |
					  S_028C4C_POSTZ_AA_MASK_ENABLE(1) |
					  S_028C4C_CENTROID_SAMPLE_OVERRIDE(1);

		if (mode == VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT) {
			pa_sc_conservative_rast |=
				S_028C4C_OVER_RAST_ENABLE(1) |
				S_028C4C_OVER_RAST_SAMPLE_SELECT(0) |
				S_028C4C_UNDER_RAST_ENABLE(0) |
				S_028C4C_UNDER_RAST_SAMPLE_SELECT(1) |
				S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(1);
		} else {
			assert(mode == VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT);
			pa_sc_conservative_rast |=
				S_028C4C_OVER_RAST_ENABLE(0) |
				S_028C4C_OVER_RAST_SAMPLE_SELECT(1) |
				S_028C4C_UNDER_RAST_ENABLE(1) |
				S_028C4C_UNDER_RAST_SAMPLE_SELECT(0) |
				S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(0);
		}
	}

	radeon_set_context_reg(ctx_cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
			       pa_sc_conservative_rast);
}
static void
radv_pipeline_generate_multisample_state(struct radeon_cmdbuf *ctx_cs,
					 struct radv_pipeline *pipeline)
{
	struct radv_multisample_state *ms = &pipeline->graphics.ms;

	radeon_set_context_reg_seq(ctx_cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(ctx_cs, R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(ctx_cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

	/* The exclusion bits can be set to improve rasterization efficiency
	 * if no sample lies on the pixel boundary (-8 sample offset). It's
	 * currently always TRUE because the driver doesn't support 16 samples.
	 */
	bool exclusion = pipeline->device->physical_device->rad_info.chip_class >= GFX7;
	radeon_set_context_reg(ctx_cs, R_02882C_PA_SU_PRIM_FILTER_CNTL,
			       S_02882C_XMAX_RIGHT_EXCLUSION(exclusion) |
			       S_02882C_YMAX_BOTTOM_EXCLUSION(exclusion));
}
static void
radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
				   struct radv_pipeline *pipeline)
{
	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	unsigned vgt_primitiveid_en = 0;
	uint32_t vgt_gs_mode = 0;

	if (radv_pipeline_has_gs(pipeline)) {
		const struct radv_shader_variant *gs =
			pipeline->shaders[MESA_SHADER_GEOMETRY];

		vgt_gs_mode = ac_vgt_gs_mode(gs->info.gs.vertices_out,
					     pipeline->device->physical_device->rad_info.chip_class);
	} else if (radv_pipeline_has_ngg(pipeline)) {
		const struct radv_shader_variant *vs =
			pipeline->shaders[MESA_SHADER_TESS_EVAL] ?
			pipeline->shaders[MESA_SHADER_TESS_EVAL] :
			pipeline->shaders[MESA_SHADER_VERTEX];
		bool enable_prim_id =
			outinfo->export_prim_id || vs->info.info.uses_prim_id;

		vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(enable_prim_id) |
				      S_028A84_NGG_DISABLE_PROVOK_REUSE(enable_prim_id);
	} else if (outinfo->export_prim_id) {
		vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
		vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
	}

	radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
	radeon_set_context_reg(ctx_cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
}
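
/* Program the hardware VS stage: shader address, RSRC registers and the
 * position/parameter export configuration.
 */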
static void
radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *ctx_cs,
			     struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B124_MEM_BASE(va >> 40));
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);

	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	unsigned clip_dist_mask, cull_dist_mask, total_mask;
	clip_dist_mask = outinfo->clip_dist_mask;
	cull_dist_mask = outinfo->cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;
	bool misc_vec_ena = outinfo->writes_pointsize ||
		outinfo->writes_layer ||
		outinfo->writes_viewport_index;

	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(MAX2(1, outinfo->param_exports) - 1));

	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
			       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
			       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       cull_dist_mask << 8 |
			       clip_dist_mask);

	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
		radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF,
				       outinfo->writes_viewport_index);
}
static void
radv_pipeline_generate_hw_es(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);
}
static void
radv_pipeline_generate_hw_ls(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader,
			     const struct radv_tessellation_state *tess)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	uint32_t rsrc2 = shader->config.rsrc2;

	radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));

	rsrc2 |= S_00B52C_LDS_SIZE(tess->lds_size);
	if (pipeline->device->physical_device->rad_info.chip_class == GFX7 &&
	    pipeline->device->physical_device->rad_info.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);

	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, rsrc2);
}

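/* Emit the register state for a shader variant running on the hardware NGG
 * (primitive shader) path. The same helper is used whether the stage feeding
 * it is a vertex or a tessellation evaluation shader; the caller passes the
 * merged ES/GS variant together with the precomputed radv_ngg_state. */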
static void
radv_pipeline_generate_hw_ngg(struct radeon_cmdbuf *ctx_cs,
			      struct radeon_cmdbuf *cs,
			      struct radv_pipeline *pipeline,
			      struct radv_shader_variant *shader,
			      const struct radv_ngg_state *ngg_state)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	gl_shader_stage es_type =
		radv_pipeline_has_tess(pipeline) ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;

	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, va >> 40);
	radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);

	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	unsigned clip_dist_mask, cull_dist_mask, total_mask;
	clip_dist_mask = outinfo->clip_dist_mask;
	cull_dist_mask = outinfo->cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;
	bool misc_vec_ena = outinfo->writes_pointsize ||
		outinfo->writes_layer ||
		outinfo->writes_viewport_index;
	bool break_wave_at_eoi = false;

	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(MAX2(1, outinfo->param_exports) - 1));
	radeon_set_context_reg(ctx_cs, R_028708_SPI_SHADER_IDX_FORMAT,
			       S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP));
	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
			       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
			       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       cull_dist_mask << 8 |
			       clip_dist_mask);

	bool vgt_reuse_off = pipeline->device->physical_device->rad_info.family == CHIP_NAVI10 &&
			     pipeline->device->physical_device->rad_info.chip_external_rev == 0x1 &&
			     es_type == MESA_SHADER_TESS_EVAL;

	radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF,
			       S_028AB4_REUSE_OFF(vgt_reuse_off));
	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       ngg_state->vgt_esgs_ring_itemsize);

	/* NGG specific registers. */
	struct radv_shader_variant *gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	uint32_t gs_num_invocations = gs ? gs->info.gs.invocations : 1;

	radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
			       S_028A44_ES_VERTS_PER_SUBGRP(ngg_state->hw_max_esverts) |
			       S_028A44_GS_PRIMS_PER_SUBGRP(ngg_state->max_gsprims) |
			       S_028A44_GS_INST_PRIMS_IN_SUBGRP(ngg_state->max_gsprims * gs_num_invocations));
	radeon_set_context_reg(ctx_cs, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
			       S_0287FC_MAX_VERTS_PER_SUBGROUP(ngg_state->max_out_verts));
	radeon_set_context_reg(ctx_cs, R_028B4C_GE_NGG_SUBGRP_CNTL,
			       S_028B4C_PRIM_AMP_FACTOR(ngg_state->prim_amp_factor) |
			       S_028B4C_THDS_PER_SUBGRP(0)); /* for fast launch */
	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(gs_num_invocations) |
			       S_028B90_ENABLE(gs_num_invocations > 1) |
			       S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(ngg_state->max_vert_out_per_gs_instance));

	/* User edge flags are set by the pos exports. If user edge flags are
	 * not used, we must use hw-generated edge flags and pass them via
	 * the prim export to prevent drawing lines on internal edges of
	 * decomposed primitives (such as quads) with polygon mode = lines.
	 *
	 * TODO: We should combine hw-generated edge flags with user edge
	 *       flags in the shader.
	 */
	radeon_set_context_reg(ctx_cs, R_028838_PA_CL_NGG_CNTL,
			       S_028838_INDEX_BUF_EDGE_FLAG_ENA(!radv_pipeline_has_tess(pipeline) &&
								!radv_pipeline_has_gs(pipeline)));

	radeon_set_context_reg(ctx_cs, R_03096C_GE_CNTL,
			       S_03096C_PRIM_GRP_SIZE(ngg_state->max_gsprims) |
			       S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts) |
			       S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi));
}

static void
radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader,
			     const struct radv_tessellation_state *tess)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
		unsigned hs_rsrc2 = shader->config.rsrc2;

		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(tess->lds_size);
		} else {
			hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(tess->lds_size);
		}

		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));
		} else {
			radeon_set_sh_reg_seq(cs, R_00B410_SPI_SHADER_PGM_LO_LS, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B414_MEM_BASE(va >> 40));
		}

		radeon_set_sh_reg_seq(cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
		radeon_emit(cs, shader->config.rsrc1);
		radeon_emit(cs, hs_rsrc2);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
		radeon_emit(cs, va >> 8);
		radeon_emit(cs, S_00B424_MEM_BASE(va >> 40));
		radeon_emit(cs, shader->config.rsrc1);
		radeon_emit(cs, shader->config.rsrc2);
	}
}

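/* Select the hardware stage for the API vertex shader: LS when it feeds
 * tessellation, ES when it feeds a geometry shader, the NGG path when the
 * variant was compiled as NGG, and the legacy hardware VS otherwise. */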
static void
radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
				     struct radeon_cmdbuf *cs,
				     struct radv_pipeline *pipeline,
				     const struct radv_tessellation_state *tess,
				     const struct radv_ngg_state *ngg)
{
	struct radv_shader_variant *vs;

	/* Skip shaders merged into HS/GS */
	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	if (!vs)
		return;

	if (vs->info.vs.as_ls)
		radv_pipeline_generate_hw_ls(cs, pipeline, vs, tess);
	else if (vs->info.vs.as_es)
		radv_pipeline_generate_hw_es(cs, pipeline, vs);
	else if (vs->info.is_ngg)
		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, vs, ngg);
	else
		radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, vs);
}

static void
radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *ctx_cs,
				    struct radeon_cmdbuf *cs,
				    struct radv_pipeline *pipeline,
				    const struct radv_tessellation_state *tess,
				    const struct radv_ngg_state *ngg)
{
	if (!radv_pipeline_has_tess(pipeline))
		return;

	struct radv_shader_variant *tes, *tcs;

	tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
	tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];

	if (tes->info.is_ngg) {
		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, tes, ngg);
	} else if (tes->info.tes.as_es)
		radv_pipeline_generate_hw_es(cs, pipeline, tes);
	else
		radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, tes);

	radv_pipeline_generate_hw_hs(cs, pipeline, tcs, tess);

	radeon_set_context_reg(ctx_cs, R_028B6C_VGT_TF_PARAM,
			       tess->tf_param);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7)
		radeon_set_context_reg_idx(ctx_cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   tess->ls_hs_config);
	else
		radeon_set_context_reg(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
				       tess->ls_hs_config);
}

static void
radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *ctx_cs,
				       struct radeon_cmdbuf *cs,
				       struct radv_pipeline *pipeline,
				       const struct radv_gs_state *gs_state)
{
	struct radv_shader_variant *gs;
	unsigned gs_max_out_vertices;
	uint8_t *num_components;
	uint8_t max_stream;
	unsigned offset;
	uint64_t va;

	gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	if (!gs)
		return;

	gs_max_out_vertices = gs->info.gs.vertices_out;
	max_stream = gs->info.info.gs.max_stream;
	num_components = gs->info.info.gs.num_stream_output_components;

	offset = num_components[0] * gs_max_out_vertices;

	radeon_set_context_reg_seq(ctx_cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 1)
		offset += num_components[1] * gs_max_out_vertices;
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 2)
		offset += num_components[2] * gs_max_out_vertices;
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 3)
		offset += num_components[3] * gs_max_out_vertices;
	radeon_set_context_reg(ctx_cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);

	radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT, gs->info.gs.vertices_out);

	radeon_set_context_reg_seq(ctx_cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
	radeon_emit(ctx_cs, num_components[0]);
	radeon_emit(ctx_cs, (max_stream >= 1) ? num_components[1] : 0);
	radeon_emit(ctx_cs, (max_stream >= 2) ? num_components[2] : 0);
	radeon_emit(ctx_cs, (max_stream >= 3) ? num_components[3] : 0);

	uint32_t gs_num_invocations = gs->info.gs.invocations;
	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
			       S_028B90_ENABLE(gs_num_invocations > 0));

	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       gs_state->vgt_esgs_ring_itemsize);

	va = radv_buffer_get_va(gs->bo) + gs->bo_offset;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
		} else {
			radeon_set_sh_reg_seq(cs, R_00B210_SPI_SHADER_PGM_LO_ES, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B214_MEM_BASE(va >> 40));
		}

		radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
		radeon_emit(cs, gs->config.rsrc1);
		radeon_emit(cs, gs->config.rsrc2 | S_00B22C_LDS_SIZE(gs_state->lds_size));

		radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL, gs_state->vgt_gs_onchip_cntl);
		radeon_set_context_reg(ctx_cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, gs_state->vgt_gs_max_prims_per_subgroup);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
		radeon_emit(cs, va >> 8);
		radeon_emit(cs, S_00B224_MEM_BASE(va >> 40));
		radeon_emit(cs, gs->config.rsrc1);
		radeon_emit(cs, gs->config.rsrc2);
	}

	radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, pipeline->gs_copy_shader);
}

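/* Translate a VS export parameter offset into an SPI_PS_INPUT_CNTL_* value.
 * Offsets above AC_EXP_PARAM_OFFSET_31 do not refer to real exports but
 * encode DEFAULT_VAL constants for the interpolator. */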
static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade, bool float16)
{
	uint32_t ps_input_cntl;
	if (offset <= AC_EXP_PARAM_OFFSET_31) {
		ps_input_cntl = S_028644_OFFSET(offset);
		if (flat_shade)
			ps_input_cntl |= S_028644_FLAT_SHADE(1);
		if (float16) {
			ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
					 S_028644_ATTR0_VALID(1);
		}
	} else {
		/* The input is a DEFAULT_VAL constant. */
		assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
		       offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
		offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
		ps_input_cntl = S_028644_OFFSET(0x20) |
				S_028644_DEFAULT_VAL(offset);
	}
	return ps_input_cntl;
}

static void
radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
				 struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	uint32_t ps_input_cntl[32];

	unsigned ps_offset = 0;

	if (ps->info.info.ps.prim_id_input) {
		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false);
			++ps_offset;
		}
	}

	if (ps->info.info.ps.layer_input ||
	    ps->info.info.needs_multiview_view_index) {
		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_LAYER];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED)
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false);
		else
			ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true, false);
		++ps_offset;
	}

	if (ps->info.info.ps.has_pcoord) {
		unsigned val;
		val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
		ps_input_cntl[ps_offset] = val;
		ps_offset++;
	}

	if (ps->info.info.ps.num_input_clips_culls) {
		unsigned vs_offset;

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false);
			++ps_offset;
		}

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED &&
		    ps->info.info.ps.num_input_clips_culls > 4) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false);
			++ps_offset;
		}
	}

	for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.fs.input_mask; ++i) {
		unsigned vs_offset;
		bool flat_shade;
		bool float16;
		if (!(ps->info.fs.input_mask & (1u << i)))
			continue;

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VAR0 + i];
		if (vs_offset == AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = S_028644_OFFSET(0x20);
			++ps_offset;
			continue;
		}

		flat_shade = !!(ps->info.fs.flat_shaded_mask & (1u << ps_offset));
		float16 = !!(ps->info.fs.float16_shaded_mask & (1u << ps_offset));

		ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, flat_shade, float16);
		++ps_offset;
	}

	if (ps_offset) {
		radeon_set_context_reg_seq(ctx_cs, R_028644_SPI_PS_INPUT_CNTL_0, ps_offset);
		for (unsigned i = 0; i < ps_offset; i++) {
			radeon_emit(ctx_cs, ps_input_cntl[i]);
		}
	}
}

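/* Build the DB_SHADER_CONTROL value from the fragment shader info: Z, stencil
 * and sample-mask exports, kill/discard, the Z-order selection, and the RB+
 * workaround (DUAL_QUAD_DISABLE). */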
static uint32_t
radv_compute_db_shader_control(const struct radv_device *device,
			       const struct radv_pipeline *pipeline,
			       const struct radv_shader_variant *ps)
{
	unsigned z_order;
	if (ps->info.fs.early_fragment_test || !ps->info.info.ps.writes_memory)
		z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
	else
		z_order = V_02880C_LATE_Z;

	bool disable_rbplus = device->physical_device->has_rbplus &&
			      !device->physical_device->rbplus_allowed;

	/* It shouldn't be needed to export gl_SampleMask when MSAA is disabled
	 * but this appears to break Project Cars (DXVK). See
	 * https://bugs.freedesktop.org/show_bug.cgi?id=109401
	 */
	bool mask_export_enable = ps->info.info.ps.writes_sample_mask;

	return  S_02880C_Z_EXPORT_ENABLE(ps->info.info.ps.writes_z) |
		S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.info.ps.writes_stencil) |
		S_02880C_KILL_ENABLE(!!ps->info.fs.can_discard) |
		S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
		S_02880C_Z_ORDER(z_order) |
		S_02880C_DEPTH_BEFORE_SHADER(ps->info.fs.early_fragment_test) |
		S_02880C_EXEC_ON_HIER_FAIL(ps->info.info.ps.writes_memory) |
		S_02880C_EXEC_ON_NOOP(ps->info.info.ps.writes_memory) |
		S_02880C_DUAL_QUAD_DISABLE(disable_rbplus);
}

static void
radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *ctx_cs,
				       struct radeon_cmdbuf *cs,
				       struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *ps;
	uint64_t va;
	assert (pipeline->shaders[MESA_SHADER_FRAGMENT]);

	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	va = radv_buffer_get_va(ps->bo) + ps->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B024_MEM_BASE(va >> 40));
	radeon_emit(cs, ps->config.rsrc1);
	radeon_emit(cs, ps->config.rsrc2);

	radeon_set_context_reg(ctx_cs, R_02880C_DB_SHADER_CONTROL,
			       radv_compute_db_shader_control(pipeline->device,
							      pipeline, ps));

	radeon_set_context_reg(ctx_cs, R_0286CC_SPI_PS_INPUT_ENA,
			       ps->config.spi_ps_input_ena);

	radeon_set_context_reg(ctx_cs, R_0286D0_SPI_PS_INPUT_ADDR,
			       ps->config.spi_ps_input_addr);

	radeon_set_context_reg(ctx_cs, R_0286D8_SPI_PS_IN_CONTROL,
			       S_0286D8_NUM_INTERP(ps->info.fs.num_interp));

	radeon_set_context_reg(ctx_cs, R_0286E0_SPI_BARYC_CNTL, pipeline->graphics.spi_baryc_cntl);

	radeon_set_context_reg(ctx_cs, R_028710_SPI_SHADER_Z_FORMAT,
			       ac_get_spi_shader_z_format(ps->info.info.ps.writes_z,
							  ps->info.info.ps.writes_stencil,
							  ps->info.info.ps.writes_sample_mask));

	if (pipeline->device->dfsm_allowed) {
		/* optimise this? */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}
}

static void
radv_pipeline_generate_vgt_vertex_reuse(struct radeon_cmdbuf *ctx_cs,
					struct radv_pipeline *pipeline)
{
	if (pipeline->device->physical_device->rad_info.family < CHIP_POLARIS10 ||
	    pipeline->device->physical_device->rad_info.chip_class >= GFX10)
		return;

	unsigned vtx_reuse_depth = 30;
	if (radv_pipeline_has_tess(pipeline) &&
	    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD) {
		vtx_reuse_depth = 14;
	}
	radeon_set_context_reg(ctx_cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
			       S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
}

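/* Map the enabled API stages onto the hardware stage enables in
 * VGT_SHADER_STAGES_EN (LS/HS/ES/GS/VS selection, plus PRIMGEN for the NGG
 * path). */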
static uint32_t
radv_compute_vgt_shader_stages_en(const struct radv_pipeline *pipeline)
{
	uint32_t stages = 0;
	if (radv_pipeline_has_tess(pipeline)) {
		stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
			  S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);

		if (radv_pipeline_has_gs(pipeline))
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
				  S_028B54_GS_EN(1) |
				  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
		else if (radv_pipeline_has_ngg(pipeline))
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
				  S_028B54_PRIMGEN_EN(1);
		else
			stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);

	} else if (radv_pipeline_has_gs(pipeline)) {
		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
			  S_028B54_GS_EN(1) |
			  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
	} else if (radv_pipeline_has_ngg(pipeline)) {
		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
			  S_028B54_PRIMGEN_EN(1);
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);

	return stages;
}

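/* Build PA_SC_CLIPRECT_RULE for VK_EXT_discard_rectangles. Each bit i of the
 * rule corresponds to one combination of "pixel is inside rectangle n"
 * predicates and is set when that combination should pass. For example, with
 * two INCLUSIVE rectangles only combinations where at least one of the two
 * low bits is set survive, while EXCLUSIVE keeps only the combinations where
 * both are clear. Without the extension every combination passes (0xffff). */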
static unsigned
radv_compute_cliprect_rule(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const  VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
			vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);

	if (!discard_rectangle_info)
		return 0xffff;

	unsigned mask = 0;

	for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
		/* Interpret i as a bitmask, and then set the bit in the mask if
		 * that combination of rectangles in which the pixel is contained
		 * should pass the cliprect test. */
		unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);

		if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
		    !relevant_subset)
			continue;

		if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
		    relevant_subset)
			continue;

		mask |= 1u << i;
	}

	return mask;
}

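/* GFX10 only: program GE_CNTL with the primitive/vertex group sizes derived
 * from the tessellation or legacy GS state. radv_pipeline_generate_pm4() only
 * calls this on the non-NGG path; the NGG path writes GE_CNTL itself in
 * radv_pipeline_generate_hw_ngg(). */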
static void
gfx10_pipeline_generate_ge_cntl(struct radeon_cmdbuf *ctx_cs,
				struct radv_pipeline *pipeline,
				const struct radv_tessellation_state *tess,
				const struct radv_gs_state *gs_state)
{
	bool break_wave_at_eoi = false;
	unsigned primgroup_size;
	unsigned vertgroup_size;

	if (radv_pipeline_has_tess(pipeline)) {
		primgroup_size = tess->num_patches; /* must be a multiple of NUM_PATCHES */
		vertgroup_size = 0;
	} else if (radv_pipeline_has_gs(pipeline)) {
		unsigned vgt_gs_onchip_cntl = gs_state->vgt_gs_onchip_cntl;
		primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
		vertgroup_size = G_028A44_ES_VERTS_PER_SUBGRP(vgt_gs_onchip_cntl);
	} else {
		primgroup_size = 128; /* recommended without a GS and tess */
		vertgroup_size = 0;
	}

	if (radv_pipeline_has_tess(pipeline)) {
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.uses_prim_id ||
		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.info.uses_prim_id)
			break_wave_at_eoi = true;
	}

	radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL,
			       S_03096C_PRIM_GRP_SIZE(primgroup_size) |
			       S_03096C_VERT_GRP_SIZE(vertgroup_size) |
			       S_03096C_PACKET_TO_ONE_PA(0) /* line stipple */ |
			       S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi));
}

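/* Emit all static pipeline state into the two command streams: ctx_cs holds
 * the context registers (hashed into ctx_cs_hash so identical context state
 * can be detected), while cs holds the SH register writes for the shader
 * programs. */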
static void
radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
			   const VkGraphicsPipelineCreateInfo *pCreateInfo,
			   const struct radv_graphics_pipeline_create_info *extra,
			   const struct radv_blend_state *blend,
			   const struct radv_tessellation_state *tess,
			   const struct radv_gs_state *gs,
			   const struct radv_ngg_state *ngg,
			   unsigned prim, unsigned gs_out)
{
	struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
	struct radeon_cmdbuf *cs = &pipeline->cs;

	cs->max_dw = 64;
	ctx_cs->max_dw = 256;
	cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw));
	ctx_cs->buf = cs->buf + cs->max_dw;

	radv_pipeline_generate_depth_stencil_state(ctx_cs, pipeline, pCreateInfo, extra);
	radv_pipeline_generate_blend_state(ctx_cs, pipeline, blend);
	radv_pipeline_generate_raster_state(ctx_cs, pipeline, pCreateInfo);
	radv_pipeline_generate_multisample_state(ctx_cs, pipeline);
	radv_pipeline_generate_vgt_gs_mode(ctx_cs, pipeline);
	radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline, tess, ngg);
	radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline, tess, ngg);
	radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline, gs);
	radv_pipeline_generate_fragment_shader(ctx_cs, cs, pipeline);
	radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
	radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
	radv_pipeline_generate_binning_state(ctx_cs, pipeline, pCreateInfo);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 && !radv_pipeline_has_ngg(pipeline))
		gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline, tess, gs);

	radeon_set_context_reg(ctx_cs, R_0286E8_SPI_TMPRING_SIZE,
			       S_0286E8_WAVES(pipeline->max_waves) |
			       S_0286E8_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
		radeon_set_uconfig_reg_idx(pipeline->device->physical_device,
					   cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
	} else {
		radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
	}
	radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);

	radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, radv_compute_cliprect_rule(pCreateInfo));

	pipeline->ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);

	assert(ctx_cs->cdw <= ctx_cs->max_dw);
	assert(cs->cdw <= cs->max_dw);
}

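/* Precompute the IA_MULTI_VGT_PARAM fields (primgroup size, partial wave and
 * switch-on-EOI/EOP flags) that depend only on the pipeline, including the
 * various hardware workarounds for GFX6-GFX8 parts. */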
static struct radv_ia_multi_vgt_param_helpers
radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
					const struct radv_tessellation_state *tess,
					uint32_t prim)
{
	struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param = {0};
	const struct radv_device *device = pipeline->device;

	if (radv_pipeline_has_tess(pipeline))
		ia_multi_vgt_param.primgroup_size = tess->num_patches;
	else if (radv_pipeline_has_gs(pipeline))
		ia_multi_vgt_param.primgroup_size = 64;
	else
		ia_multi_vgt_param.primgroup_size = 128; /* recommended without a GS */

	/* GS requirement. */
	ia_multi_vgt_param.partial_es_wave = false;
	if (radv_pipeline_has_gs(pipeline) && device->physical_device->rad_info.chip_class <= GFX8)
		if (SI_GS_PER_ES / ia_multi_vgt_param.primgroup_size >= pipeline->device->gs_table_depth - 3)
			ia_multi_vgt_param.partial_es_wave = true;

	ia_multi_vgt_param.wd_switch_on_eop = false;
	if (device->physical_device->rad_info.chip_class >= GFX7) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (device->physical_device->rad_info.max_se < 4 ||
		    prim == V_008958_DI_PT_POLYGON ||
		    prim == V_008958_DI_PT_LINELOOP ||
		    prim == V_008958_DI_PT_TRIFAN ||
		    prim == V_008958_DI_PT_TRISTRIP_ADJ ||
		    (pipeline->graphics.prim_restart_enable &&
		     (device->physical_device->rad_info.family < CHIP_POLARIS10 ||
		      (prim != V_008958_DI_PT_POINTLIST &&
		       prim != V_008958_DI_PT_LINESTRIP))))
			ia_multi_vgt_param.wd_switch_on_eop = true;
	}

	ia_multi_vgt_param.ia_switch_on_eoi = false;
	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_gs(pipeline) &&
	    pipeline->shaders[MESA_SHADER_GEOMETRY]->info.info.uses_prim_id)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_tess(pipeline)) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.uses_prim_id ||
		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.info.uses_prim_id)
			ia_multi_vgt_param.ia_switch_on_eoi = true;
	}

	ia_multi_vgt_param.partial_vs_wave = false;
	if (radv_pipeline_has_tess(pipeline)) {
		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((device->physical_device->rad_info.family == CHIP_TAHITI ||
		     device->physical_device->rad_info.family == CHIP_PITCAIRN ||
		     device->physical_device->rad_info.family == CHIP_BONAIRE) &&
		    radv_pipeline_has_gs(pipeline))
			ia_multi_vgt_param.partial_vs_wave = true;
		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (device->has_distributed_tess) {
			if (radv_pipeline_has_gs(pipeline)) {
				if (device->physical_device->rad_info.chip_class <= GFX8)
					ia_multi_vgt_param.partial_es_wave = true;
			} else {
				ia_multi_vgt_param.partial_vs_wave = true;
			}
		}
	}

	/* Workaround for a VGT hang when strip primitive types are used with
	 * primitive restart.
	 */
	if (pipeline->graphics.prim_restart_enable &&
	    (prim == V_008958_DI_PT_LINESTRIP ||
	     prim == V_008958_DI_PT_TRISTRIP ||
	     prim == V_008958_DI_PT_LINESTRIP_ADJ ||
	     prim == V_008958_DI_PT_TRISTRIP_ADJ)) {
		ia_multi_vgt_param.partial_vs_wave = true;
	}

	if (radv_pipeline_has_gs(pipeline)) {
		/* On these chips there is the possibility of a hang if the
		 * pipeline uses a GS and partial_vs_wave is not set.
		 *
		 * This mostly does not hit 4-SE chips, as those typically set
		 * ia_switch_on_eoi and then partial_vs_wave is set for pipelines
		 * with GS due to another workaround.
		 *
		 * Reproducer: https://bugs.freedesktop.org/show_bug.cgi?id=109242
		 */
		if (device->physical_device->rad_info.family == CHIP_TONGA ||
		    device->physical_device->rad_info.family == CHIP_FIJI ||
		    device->physical_device->rad_info.family == CHIP_POLARIS10 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS11 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS12 ||
		    device->physical_device->rad_info.family == CHIP_VEGAM) {
			ia_multi_vgt_param.partial_vs_wave = true;
		}
	}

	ia_multi_vgt_param.base =
		S_028AA8_PRIMGROUP_SIZE(ia_multi_vgt_param.primgroup_size - 1) |
		/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
		S_028AA8_MAX_PRIMGRP_IN_WAVE(device->physical_device->rad_info.chip_class == GFX8 ? 2 : 0) |
		S_030960_EN_INST_OPT_BASIC(device->physical_device->rad_info.chip_class >= GFX9) |
		S_030960_EN_INST_OPT_ADV(device->physical_device->rad_info.chip_class >= GFX9);

	return ia_multi_vgt_param;
}

static void
radv_compute_vertex_input_state(struct radv_pipeline *pipeline,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineVertexInputStateCreateInfo *vi_info =
		pCreateInfo->pVertexInputState;
	struct radv_vertex_elements_info *velems = &pipeline->vertex_elements;

	for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
		const VkVertexInputAttributeDescription *desc =
			&vi_info->pVertexAttributeDescriptions[i];
		unsigned loc = desc->location;
		const struct vk_format_description *format_desc;

		format_desc = vk_format_description(desc->format);

		velems->format_size[loc] = format_desc->block.bits / 8;
	}

	for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *desc =
			&vi_info->pVertexBindingDescriptions[i];

		pipeline->binding_stride[desc->binding] = desc->stride;
		pipeline->num_vertex_bindings =
			MAX2(pipeline->num_vertex_bindings, desc->binding + 1);
	}
}

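/* Return the last pre-rasterization stage (checked from geometry down to
 * vertex) that writes streamout outputs, or NULL when streamout is unused. */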
static struct radv_shader_variant *
radv_pipeline_get_streamout_shader(struct radv_pipeline *pipeline)
{
	int i;

	for (i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
		struct radv_shader_variant *shader =
			radv_get_shader(pipeline, i);

		if (shader && shader->info.info.so.num_outputs > 0)
			return shader;
	}

	return NULL;
}

static VkResult
radv_pipeline_init(struct radv_pipeline *pipeline,
		   struct radv_device *device,
		   struct radv_pipeline_cache *cache,
		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   const struct radv_graphics_pipeline_create_info *extra)
{
	VkResult result;
	bool has_view_index = false;

	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	if (subpass->view_mask)
		has_view_index = true;

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	struct radv_blend_state blend = radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;

	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
		pStages[stage] = &pCreateInfo->pStages[i];
		if(creation_feedback)
			stage_feedbacks[stage] = &creation_feedback->pPipelineStageCreationFeedbacks[i];
	}

	struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend, has_view_index);
	radv_create_shaders(pipeline, device, cache, &key, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);

	pipeline->graphics.spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	radv_pipeline_init_multisample_state(pipeline, &blend, pCreateInfo);
	uint32_t gs_out;
	uint32_t prim = si_translate_prim(pCreateInfo->pInputAssemblyState->topology);

	pipeline->graphics.can_use_guardband = radv_prim_can_use_guardband(pCreateInfo->pInputAssemblyState->topology);

	if (radv_pipeline_has_gs(pipeline)) {
		gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
		pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	} else if (radv_pipeline_has_tess(pipeline)) {
		gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.primitive_mode);
		pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	} else {
		gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
	}
	if (extra && extra->use_rectlist) {
		prim = V_008958_DI_PT_RECTLIST;
		gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
		pipeline->graphics.can_use_guardband = true;
		if (radv_pipeline_has_ngg(pipeline))
			gs_out = V_028A6C_VGT_OUT_RECT_V0;
	}
	pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;
	/* prim vertex count will need TESS changes */
	pipeline->graphics.prim_vertex_count = prim_size_table[prim];

	radv_pipeline_init_dynamic_state(pipeline, pCreateInfo);

	/* Ensure that some export memory is always allocated, for two reasons:
	 *
	 * 1) Correctness: The hardware ignores the EXEC mask if no export
	 *    memory is allocated, so KILL and alpha test do not work correctly
	 *    without this.
	 *
	 * 2) Performance: Every shader needs at least a NULL export, even when
	 *    it writes no color/depth output. The NULL export instruction
	 *    stalls without this setting.
	 *
	 * Don't add this to CB_SHADER_MASK.
	 */
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	if (!blend.spi_shader_col_format) {
		if (!ps->info.info.ps.writes_z &&
		    !ps->info.info.ps.writes_stencil &&
		    !ps->info.info.ps.writes_sample_mask)
			blend.spi_shader_col_format = V_028714_SPI_SHADER_32_R;
	}

	for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
		if (pipeline->shaders[i]) {
			pipeline->need_indirect_descriptor_sets |= pipeline->shaders[i]->info.need_indirect_descriptor_sets;
		}
	}

	struct radv_ngg_state ngg = {0};
	struct radv_gs_state gs = {0};

	if (radv_pipeline_has_ngg(pipeline)) {
		ngg = calculate_ngg_info(pCreateInfo, pipeline);
	} else if (radv_pipeline_has_gs(pipeline)) {
		gs = calculate_gs_info(pCreateInfo, pipeline);
		calculate_gs_ring_sizes(pipeline, &gs);
	}

	struct radv_tessellation_state tess = {0};
	if (radv_pipeline_has_tess(pipeline)) {
		if (prim == V_008958_DI_PT_PATCH) {
			pipeline->graphics.prim_vertex_count.min = pCreateInfo->pTessellationState->patchControlPoints;
			pipeline->graphics.prim_vertex_count.incr = 1;
		}
		tess = calculate_tess_state(pipeline, pCreateInfo);
	}

	pipeline->graphics.ia_multi_vgt_param = radv_compute_ia_multi_vgt_param_helpers(pipeline, &tess, prim);

	radv_compute_vertex_input_state(pipeline, pCreateInfo);

	for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
		pipeline->user_data_0[i] = radv_pipeline_stage_to_user_data_0(pipeline, i, device->physical_device->rad_info.chip_class);

	struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX,
							       AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	if (loc->sgpr_idx != -1) {
		pipeline->graphics.vtx_base_sgpr = pipeline->user_data_0[MESA_SHADER_VERTEX];
		pipeline->graphics.vtx_base_sgpr += loc->sgpr_idx * 4;
		if (radv_get_shader(pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id)
			pipeline->graphics.vtx_emit_num = 3;
		else
			pipeline->graphics.vtx_emit_num = 2;
	}

	/* Find the last vertex shader stage that eventually uses streamout. */
	pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);

	result = radv_pipeline_scratch_init(device, pipeline);
	radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, &gs, &ngg, prim, gs_out);

	return result;
}

VkResult
radv_graphics_pipeline_create(
	VkDevice _device,
	VkPipelineCache _cache,
	const VkGraphicsPipelineCreateInfo *pCreateInfo,
	const struct radv_graphics_pipeline_create_info *extra,
	const VkAllocationCallbacks *pAllocator,
	VkPipeline *pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	result = radv_pipeline_init(pipeline, device, cache,
				    pCreateInfo, extra);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}

VkResult radv_CreateGraphicsPipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkGraphicsPipelineCreateInfo*         pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_graphics_pipeline_create(_device,
						  pipelineCache,
						  &pCreateInfos[i],
						  NULL, pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}

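/* Emit the PM4 state for a compute pipeline: program address, RSRC1/RSRC2,
 * scratch (TMPRING) size, the compute resource limits and the threadgroup
 * dimensions. */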
static void
radv_compute_generate_pm4(struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *compute_shader;
	struct radv_device *device = pipeline->device;
	unsigned compute_resource_limits;
	unsigned waves_per_threadgroup;
	uint64_t va;

	pipeline->cs.buf = malloc(20 * 4);
	pipeline->cs.max_dw = 20;

	compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(&pipeline->cs, va >> 8);
	radeon_emit(&pipeline->cs, S_00B834_DATA(va >> 40));

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(&pipeline->cs, compute_shader->config.rsrc1);
	radeon_emit(&pipeline->cs, compute_shader->config.rsrc2);

	radeon_set_sh_reg(&pipeline->cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(pipeline->max_waves) |
			  S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	/* Calculate best compute resource limits. */
	waves_per_threadgroup =
		DIV_ROUND_UP(compute_shader->info.cs.block_size[0] *
			     compute_shader->info.cs.block_size[1] *
			     compute_shader->info.cs.block_size[2], 64);
	compute_resource_limits =
		S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);

	if (device->physical_device->rad_info.chip_class >= GFX7) {
		unsigned num_cu_per_se =
			device->physical_device->rad_info.num_good_compute_units /
			device->physical_device->rad_info.max_se;

		/* Force even distribution on all SIMDs in CU if the workgroup
		 * size is 64. This has shown some good improvements if # of
		 * CUs per SE is not a multiple of 4.
		 */
		if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
			compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
	}

	radeon_set_sh_reg(&pipeline->cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  compute_resource_limits);

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));

	assert(pipeline->cs.cdw <= pipeline->cs.max_dw);
}

static VkResult radv_compute_pipeline_create(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkComputePipelineCreateInfo*          pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;
	if (creation_feedback)
		stage_feedbacks[MESA_SHADER_COMPUTE] = &creation_feedback->pPipelineStageCreationFeedbacks[0];

	pStages[MESA_SHADER_COMPUTE] = &pCreateInfo->stage;
	radv_create_shaders(pipeline, device, cache, &(struct radv_pipeline_key) {0}, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);

	pipeline->user_data_0[MESA_SHADER_COMPUTE] = radv_pipeline_stage_to_user_data_0(pipeline, MESA_SHADER_COMPUTE, device->physical_device->rad_info.chip_class);
	pipeline->need_indirect_descriptor_sets |= pipeline->shaders[MESA_SHADER_COMPUTE]->info.need_indirect_descriptor_sets;
	result = radv_pipeline_scratch_init(device, pipeline);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	radv_compute_generate_pm4(pipeline);

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}

VkResult radv_CreateComputePipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkComputePipelineCreateInfo*          pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;

	unsigned i = 0;
	for (; i < count; i++) {
		VkResult r;
		r = radv_compute_pipeline_create(_device, pipelineCache,
						 &pCreateInfos[i],
						 pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;