/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"

#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"
#include "main/menums.h"

struct radv_blend_state {
	uint32_t blend_enable_4bit;
	uint32_t need_src_alpha;

	uint32_t cb_color_control;
	uint32_t cb_target_mask;
	uint32_t cb_target_enabled_4bit;
	uint32_t sx_mrt_blend_opt[8];
	uint32_t cb_blend_control[8];

	uint32_t spi_shader_col_format;
	uint32_t cb_shader_mask;
	uint32_t db_alpha_to_mask;

	uint32_t commutative_4bit;

	bool single_cb_enable;
	bool mrt0_is_dual_src;
};

struct radv_dsa_order_invariance {
	/* Whether the final result in Z/S buffers is guaranteed to be
	 * invariant under changes to the order in which fragments arrive.
	 */
	bool zs;

	/* Whether the set of fragments that pass the combined Z/S test is
	 * guaranteed to be invariant under changes to the order in which
	 * fragments arrive.
	 */
	bool pass_set;
};

struct radv_tessellation_state {
	uint32_t ls_hs_config;
	unsigned num_patches;
	unsigned lds_size;
	uint32_t tf_param;
};

struct radv_gs_state {
	uint32_t vgt_gs_onchip_cntl;
	uint32_t vgt_gs_max_prims_per_subgroup;
	uint32_t vgt_esgs_ring_itemsize;
	uint32_t lds_size;
};

struct radv_ngg_state {
	uint16_t ngg_emit_size; /* in dwords */
	uint32_t hw_max_esverts;
	uint32_t max_gsprims;
	uint32_t max_out_verts;
	uint32_t prim_amp_factor;
	uint32_t vgt_esgs_ring_itemsize;
	bool max_vert_out_per_gs_instance;
};

bool radv_pipeline_has_ngg(const struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *variant = NULL;
	if (pipeline->shaders[MESA_SHADER_GEOMETRY])
		variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
	else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
		variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
	else if (pipeline->shaders[MESA_SHADER_VERTEX])
		variant = pipeline->shaders[MESA_SHADER_VERTEX];
	else
		return false;
	return variant->info.is_ngg;
}

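/* Note: the lookup above walks GS -> TES -> VS because NGG is a property of
 * the last active geometry stage; whichever of those stages is present is
 * the one compiled as the HW NGG (or legacy) shader. */
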
static void
radv_pipeline_destroy(struct radv_device *device,
                      struct radv_pipeline *pipeline,
                      const VkAllocationCallbacks *allocator)
{
	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i)
		if (pipeline->shaders[i])
			radv_shader_variant_destroy(device, pipeline->shaders[i]);

	if (pipeline->gs_copy_shader)
		radv_shader_variant_destroy(device, pipeline->gs_copy_shader);

	free(pipeline->cs.buf);
	vk_free2(&device->alloc, allocator, pipeline);
}

void radv_DestroyPipeline(
	VkDevice                                    _device,
	VkPipeline                                  _pipeline,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	if (!pipeline)
		return;

	radv_pipeline_destroy(device, pipeline, pAllocator);
}

static uint32_t get_hash_flags(struct radv_device *device)
{
	uint32_t hash_flags = 0;

	if (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH)
		hash_flags |= RADV_HASH_SHADER_UNSAFE_MATH;
	if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
		hash_flags |= RADV_HASH_SHADER_SISCHED;
	return hash_flags;
}

static VkResult
radv_pipeline_scratch_init(struct radv_device *device,
                           struct radv_pipeline *pipeline)
{
	unsigned scratch_bytes_per_wave = 0;
	unsigned max_waves = 0;
	unsigned min_waves = 1;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i]) {
			unsigned max_stage_waves = device->scratch_waves;

			scratch_bytes_per_wave = MAX2(scratch_bytes_per_wave,
			                              pipeline->shaders[i]->config.scratch_bytes_per_wave);

			max_stage_waves = MIN2(max_stage_waves,
			          4 * device->physical_device->rad_info.num_good_compute_units *
			          (256 / pipeline->shaders[i]->config.num_vgprs));
			max_waves = MAX2(max_waves, max_stage_waves);
		}
	}

	if (pipeline->shaders[MESA_SHADER_COMPUTE]) {
		unsigned group_size = pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[0] *
		                      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[1] *
		                      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[2];
		min_waves = MAX2(min_waves, round_up_u32(group_size, 64));
	}

	if (scratch_bytes_per_wave)
		max_waves = MIN2(max_waves, 0xffffffffu / scratch_bytes_per_wave);

	if (scratch_bytes_per_wave && max_waves < min_waves) {
		/* Not really true at this moment, but will be true on first
		 * execution. Avoid having hanging shaders. */
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}
	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
	pipeline->max_waves = max_waves;
	return VK_SUCCESS;
}

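/* For illustration: on a GPU with 36 CUs, a stage using 64 VGPRs yields
 * max_stage_waves = MIN2(scratch_waves, 4 * 36 * (256 / 64)) =
 * MIN2(scratch_waves, 576), i.e. the wave count is bounded by how many
 * waves can hold their VGPR allocation concurrently. */
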
static uint32_t si_translate_blend_logic_op(VkLogicOp op)
{
	switch (op) {
	case VK_LOGIC_OP_CLEAR:
		return V_028808_ROP3_CLEAR;
	case VK_LOGIC_OP_AND:
		return V_028808_ROP3_AND;
	case VK_LOGIC_OP_AND_REVERSE:
		return V_028808_ROP3_AND_REVERSE;
	case VK_LOGIC_OP_COPY:
		return V_028808_ROP3_COPY;
	case VK_LOGIC_OP_AND_INVERTED:
		return V_028808_ROP3_AND_INVERTED;
	case VK_LOGIC_OP_NO_OP:
		return V_028808_ROP3_NO_OP;
	case VK_LOGIC_OP_XOR:
		return V_028808_ROP3_XOR;
	case VK_LOGIC_OP_OR:
		return V_028808_ROP3_OR;
	case VK_LOGIC_OP_NOR:
		return V_028808_ROP3_NOR;
	case VK_LOGIC_OP_EQUIVALENT:
		return V_028808_ROP3_EQUIVALENT;
	case VK_LOGIC_OP_INVERT:
		return V_028808_ROP3_INVERT;
	case VK_LOGIC_OP_OR_REVERSE:
		return V_028808_ROP3_OR_REVERSE;
	case VK_LOGIC_OP_COPY_INVERTED:
		return V_028808_ROP3_COPY_INVERTED;
	case VK_LOGIC_OP_OR_INVERTED:
		return V_028808_ROP3_OR_INVERTED;
	case VK_LOGIC_OP_NAND:
		return V_028808_ROP3_NAND;
	case VK_LOGIC_OP_SET:
		return V_028808_ROP3_SET;
	default:
		unreachable("Unhandled logic op");
	}
}

static uint32_t si_translate_blend_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case VK_BLEND_OP_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case VK_BLEND_OP_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case VK_BLEND_OP_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		return 0;
	}
}

static uint32_t si_translate_blend_factor(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case VK_BLEND_FACTOR_ONE:
		return V_028780_BLEND_ONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case VK_BLEND_FACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case VK_BLEND_FACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case VK_BLEND_FACTOR_CONSTANT_COLOR:
		return V_028780_BLEND_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
		return V_028780_BLEND_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case VK_BLEND_FACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case VK_BLEND_FACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		return 0;
	}
}

static uint32_t si_translate_blend_opt_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028760_OPT_COMB_ADD;
	case VK_BLEND_OP_SUBTRACT:
		return V_028760_OPT_COMB_SUBTRACT;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028760_OPT_COMB_REVSUBTRACT;
	case VK_BLEND_OP_MIN:
		return V_028760_OPT_COMB_MIN;
	case VK_BLEND_OP_MAX:
		return V_028760_OPT_COMB_MAX;
	default:
		return V_028760_OPT_COMB_BLEND_DISABLED;
	}
}

static uint32_t si_translate_blend_opt_factor(VkBlendFactor factor, bool is_alpha)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_ALL;
	case VK_BLEND_FACTOR_ONE:
		return V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0
				: V_028760_BLEND_OPT_PRESERVE_C1_IGNORE_C0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1
				: V_028760_BLEND_OPT_PRESERVE_C0_IGNORE_C1;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE
				: V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;
	default:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
	}
}

/**
 * Get rid of DST in the blend factors by commuting the operands:
 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
 */
static void si_blend_remove_dst(unsigned *func, unsigned *src_factor,
				unsigned *dst_factor, unsigned expected_dst,
				unsigned replacement_src)
{
	if (*src_factor == expected_dst &&
	    *dst_factor == VK_BLEND_FACTOR_ZERO) {
		*src_factor = VK_BLEND_FACTOR_ZERO;
		*dst_factor = replacement_src;

		/* Commuting the operands requires reversing subtractions. */
		if (*func == VK_BLEND_OP_SUBTRACT)
			*func = VK_BLEND_OP_REVERSE_SUBTRACT;
		else if (*func == VK_BLEND_OP_REVERSE_SUBTRACT)
			*func = VK_BLEND_OP_SUBTRACT;
	}
}

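/* Worked example for si_blend_remove_dst() above:
 * (src * DST_COLOR) SUBTRACT (dst * ZERO) is rewritten into
 * (src * ZERO) REVERSE_SUBTRACT (dst * SRC_COLOR); both evaluate to
 * src * dst, but the second form no longer reads DST in a blend factor. */
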
static bool si_blend_factor_uses_dst(unsigned factor)
{
	return factor == VK_BLEND_FACTOR_DST_COLOR ||
		factor == VK_BLEND_FACTOR_DST_ALPHA ||
		factor == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		factor == VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA ||
		factor == VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
}

static bool is_dual_src(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_SRC1_COLOR:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
	case VK_BLEND_FACTOR_SRC1_ALPHA:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return true;
	default:
		return false;
	}
}

static unsigned si_choose_spi_color_format(VkFormat vk_format,
					    bool blend_enable,
					    bool blend_need_alpha)
{
	const struct vk_format_description *desc = vk_format_description(vk_format);
	unsigned format, ntype, swap;

	/* Alpha is needed for alpha-to-coverage.
	 * Blending may be with or without alpha.
	 */
	unsigned normal = 0; /* most optimal, may not support blending or export alpha */
	unsigned alpha = 0; /* exports alpha, but may not support blending */
	unsigned blend = 0; /* supports blending, but may not export alpha */
	unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

	format = radv_translate_colorformat(vk_format);
	ntype = radv_translate_color_numformat(vk_format, desc,
					       vk_format_get_first_non_void_channel(vk_format));
	swap = radv_translate_colorswap(vk_format, false);

	/* Choose the SPI color formats. These are required values for Stoney/RB+.
	 * Other chips have multiple choices, though they are not necessarily better.
	 */
	switch (format) {
	case V_028C70_COLOR_5_6_5:
	case V_028C70_COLOR_1_5_5_5:
	case V_028C70_COLOR_5_5_5_1:
	case V_028C70_COLOR_4_4_4_4:
	case V_028C70_COLOR_10_11_11:
	case V_028C70_COLOR_11_11_10:
	case V_028C70_COLOR_8:
	case V_028C70_COLOR_8_8:
	case V_028C70_COLOR_8_8_8_8:
	case V_028C70_COLOR_10_10_10_2:
	case V_028C70_COLOR_2_10_10_10:
		if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_16:
	case V_028C70_COLOR_16_16:
	case V_028C70_COLOR_16_16_16_16:
		if (ntype == V_028C70_NUMBER_UNORM ||
		    ntype == V_028C70_NUMBER_SNORM) {
			/* UNORM16 and SNORM16 don't support blending */
			if (ntype == V_028C70_NUMBER_UNORM)
				normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
			else
				normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

			/* Use 32 bits per channel for blending. */
			if (format == V_028C70_COLOR_16) {
				if (swap == V_028C70_SWAP_STD) { /* R */
					blend = V_028714_SPI_SHADER_32_R;
					blend_alpha = V_028714_SPI_SHADER_32_AR;
				} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else if (format == V_028C70_COLOR_16_16) {
				if (swap == V_028C70_SWAP_STD) { /* RG */
					blend = V_028714_SPI_SHADER_32_GR;
					blend_alpha = V_028714_SPI_SHADER_32_ABGR;
				} else if (swap == V_028C70_SWAP_ALT) /* RA */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else /* 16_16_16_16 */
				blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_FLOAT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32:
		if (swap == V_028C70_SWAP_STD) { /* R */
			blend = normal = V_028714_SPI_SHADER_32_R;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
		} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32:
		if (swap == V_028C70_SWAP_STD) { /* RG */
			blend = normal = V_028714_SPI_SHADER_32_GR;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (swap == V_028C70_SWAP_ALT) /* RA */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32_32_32:
	case V_028C70_COLOR_8_24:
	case V_028C70_COLOR_24_8:
	case V_028C70_COLOR_X24_8_32_FLOAT:
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
		break;

	default:
		unreachable("unhandled blend format");
	}

	if (blend_enable && blend_need_alpha)
		return blend_alpha;
	else if(blend_need_alpha)
		return alpha;
	else if(blend_enable)
		return blend;
	else
		return normal;
}

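/* For instance, a VK_FORMAT_R16G16B16A16_UNORM target with blending enabled
 * resolves to V_028714_SPI_SHADER_32_ABGR here, because UNORM16 exports
 * cannot be blended and the 32-bit export is the blendable fallback. */
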
static void
radv_pipeline_compute_spi_color_formats(struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					struct radv_blend_state *blend)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned col_format = 0;
	unsigned num_targets;

	for (unsigned i = 0; i < (blend->single_cb_enable ? 1 : subpass->color_count); ++i) {
		unsigned cf;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
			cf = V_028714_SPI_SHADER_ZERO;
		} else {
			struct radv_render_pass_attachment *attachment = pass->attachments + subpass->color_attachments[i].attachment;
			bool blend_enable =
				blend->blend_enable_4bit & (0xfu << (i * 4));

			cf = si_choose_spi_color_format(attachment->format,
							blend_enable,
							blend->need_src_alpha & (1 << i));
		}

		col_format |= cf << (4 * i);
	}

	if (!(col_format & 0xf) && blend->need_src_alpha & (1 << 0)) {
		/* When a subpass doesn't have any color attachments, write the
		 * alpha channel of MRT0 when alpha coverage is enabled because
		 * the depth attachment needs it.
		 */
		col_format |= V_028714_SPI_SHADER_32_AR;
	}

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	num_targets = (util_last_bit(col_format) + 3) / 4;
	for (unsigned i = 0; i < num_targets; i++) {
		if (!(col_format & (0xf << (i * 4)))) {
			col_format |= V_028714_SPI_SHADER_32_R << (i * 4);
		}
	}

	/* The output for dual source blending should have the same format as
	 * the first output.
	 */
	if (blend->mrt0_is_dual_src)
		col_format |= (col_format & 0xf) << 4;

	blend->cb_shader_mask = ac_get_cb_shader_mask(col_format);
	blend->spi_shader_col_format = col_format;
}

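/* Note that col_format packs one SPI_SHADER_* export format per MRT into a
 * 4-bit nibble, mirroring the layout of the SPI_SHADER_COL_FORMAT register,
 * which is why targets are tested and filled with 4-bit shifts above. */
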
static bool
format_is_int8(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);
	int channel = vk_format_get_first_non_void_channel(format);

	return channel >= 0 && desc->channel[channel].pure_integer &&
	       desc->channel[channel].size == 8;
}

static bool
format_is_int10(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);

	if (desc->nr_channels != 4)
		return false;
	for (unsigned i = 0; i < 4; i++) {
		if (desc->channel[i].pure_integer && desc->channel[i].size == 10)
			return true;
	}
	return false;
}

/*
 * Ordered so that for each i,
 * radv_format_meta_fs_key(radv_fs_key_format_exemplars[i]) == i.
 */
const VkFormat radv_fs_key_format_exemplars[NUM_META_FS_KEYS] = {
	VK_FORMAT_R32_SFLOAT,
	VK_FORMAT_R32G32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UNORM,
	VK_FORMAT_R16G16B16A16_UNORM,
	VK_FORMAT_R16G16B16A16_SNORM,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_A2R10G10B10_UINT_PACK32,
	VK_FORMAT_A2R10G10B10_SINT_PACK32,
};

unsigned radv_format_meta_fs_key(VkFormat format)
{
	unsigned col_format = si_choose_spi_color_format(format, false, false);

	assert(col_format != V_028714_SPI_SHADER_32_AR);
	if (col_format >= V_028714_SPI_SHADER_32_AR)
		--col_format; /* Skip V_028714_SPI_SHADER_32_AR since there is no such VkFormat */

	--col_format; /* Skip V_028714_SPI_SHADER_ZERO */
	bool is_int8 = format_is_int8(format);
	bool is_int10 = format_is_int10(format);

	return col_format + (is_int8 ? 3 : is_int10 ? 5 : 0);
}

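/* The +3/+5 offsets match the ordering of radv_fs_key_format_exemplars:
 * e.g. VK_FORMAT_R8G8B8A8_UINT sits 3 entries after VK_FORMAT_R16G16B16A16_UINT,
 * and VK_FORMAT_A2R10G10B10_UINT_PACK32 sits 5 entries after it. */
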
static void
radv_pipeline_compute_get_int_clamp(const VkGraphicsPipelineCreateInfo *pCreateInfo,
				    unsigned *is_int8, unsigned *is_int10)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	*is_int8 = 0;
	*is_int10 = 0;

	for (unsigned i = 0; i < subpass->color_count; ++i) {
		struct radv_render_pass_attachment *attachment;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
			continue;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		if (format_is_int8(attachment->format))
			*is_int8 |= 1 << i;
		if (format_is_int10(attachment->format))
			*is_int10 |= 1 << i;
	}
}

static void
radv_blend_check_commutativity(struct radv_blend_state *blend,
			       VkBlendOp op, VkBlendFactor src,
			       VkBlendFactor dst, unsigned chanmask)
{
	/* Src factor is allowed when it does not depend on Dst. */
	static const uint32_t src_allowed =
		(1u << VK_BLEND_FACTOR_ONE) |
		(1u << VK_BLEND_FACTOR_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA_SATURATE) |
		(1u << VK_BLEND_FACTOR_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC1_ALPHA) |
		(1u << VK_BLEND_FACTOR_ZERO) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);

	if (dst == VK_BLEND_FACTOR_ONE &&
	    (src_allowed & (1u << src))) {
		/* Addition is commutative, but floating point addition isn't
		 * associative: subtle changes can be introduced via different
		 * rounding. Be conservative, only enable for min and max.
		 */
		if (op == VK_BLEND_OP_MAX || op == VK_BLEND_OP_MIN)
			blend->commutative_4bit |= chanmask;
	}
}

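/* Example: dstBlendFactor == ONE with op MAX gives max(src * f, dst), which
 * produces the same final value no matter in which order overlapping
 * fragments are blended, so those channels are marked commutative here. */
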
static struct radv_blend_state
radv_pipeline_init_blend_state(struct radv_pipeline *pipeline,
			       const VkGraphicsPipelineCreateInfo *pCreateInfo,
			       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_blend_state blend = {0};
	unsigned mode = V_028808_CB_NORMAL;
	int i;

	if (extra && extra->custom_blend_mode) {
		blend.single_cb_enable = true;
		mode = extra->custom_blend_mode;
	}
	blend.cb_color_control = 0;
	if (vkblend->logicOpEnable)
		blend.cb_color_control |= S_028808_ROP3(si_translate_blend_logic_op(vkblend->logicOp));
	else
		blend.cb_color_control |= S_028808_ROP3(V_028808_ROP3_COPY);

	blend.db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(3) |
		S_028B70_ALPHA_TO_MASK_OFFSET1(1) |
		S_028B70_ALPHA_TO_MASK_OFFSET2(0) |
		S_028B70_ALPHA_TO_MASK_OFFSET3(2) |
		S_028B70_OFFSET_ROUND(1);

	if (vkms && vkms->alphaToCoverageEnable) {
		blend.db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);
		blend.need_src_alpha |= 0x1;
	}

	blend.cb_target_mask = 0;
	for (i = 0; i < vkblend->attachmentCount; i++) {
		const VkPipelineColorBlendAttachmentState *att = &vkblend->pAttachments[i];
		unsigned blend_cntl = 0;
		unsigned srcRGB_opt, dstRGB_opt, srcA_opt, dstA_opt;
		VkBlendOp eqRGB = att->colorBlendOp;
		VkBlendFactor srcRGB = att->srcColorBlendFactor;
		VkBlendFactor dstRGB = att->dstColorBlendFactor;
		VkBlendOp eqA = att->alphaBlendOp;
		VkBlendFactor srcA = att->srcAlphaBlendFactor;
		VkBlendFactor dstA = att->dstAlphaBlendFactor;

		blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);

		if (!att->colorWriteMask)
			continue;

		blend.cb_target_mask |= (unsigned)att->colorWriteMask << (4 * i);
		blend.cb_target_enabled_4bit |= 0xf << (4 * i);
		if (!att->blendEnable) {
			blend.cb_blend_control[i] = blend_cntl;
			continue;
		}

		if (is_dual_src(srcRGB) || is_dual_src(dstRGB) || is_dual_src(srcA) || is_dual_src(dstA))
			if (i == 0)
				blend.mrt0_is_dual_src = true;

		if (eqRGB == VK_BLEND_OP_MIN || eqRGB == VK_BLEND_OP_MAX) {
			srcRGB = VK_BLEND_FACTOR_ONE;
			dstRGB = VK_BLEND_FACTOR_ONE;
		}
		if (eqA == VK_BLEND_OP_MIN || eqA == VK_BLEND_OP_MAX) {
			srcA = VK_BLEND_FACTOR_ONE;
			dstA = VK_BLEND_FACTOR_ONE;
		}

		radv_blend_check_commutativity(&blend, eqRGB, srcRGB, dstRGB,
					       0x7 << (4 * i));
		radv_blend_check_commutativity(&blend, eqA, srcA, dstA,
					       0x8 << (4 * i));

		/* Blending optimizations for RB+.
		 * These transformations don't change the behavior.
		 *
		 * First, get rid of DST in the blend factors:
		 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
		 */
		si_blend_remove_dst(&eqRGB, &srcRGB, &dstRGB,
				    VK_BLEND_FACTOR_DST_COLOR,
				    VK_BLEND_FACTOR_SRC_COLOR);

		si_blend_remove_dst(&eqA, &srcA, &dstA,
				    VK_BLEND_FACTOR_DST_COLOR,
				    VK_BLEND_FACTOR_SRC_COLOR);

		si_blend_remove_dst(&eqA, &srcA, &dstA,
				    VK_BLEND_FACTOR_DST_ALPHA,
				    VK_BLEND_FACTOR_SRC_ALPHA);

		/* Look up the ideal settings from tables. */
		srcRGB_opt = si_translate_blend_opt_factor(srcRGB, false);
		dstRGB_opt = si_translate_blend_opt_factor(dstRGB, false);
		srcA_opt = si_translate_blend_opt_factor(srcA, true);
		dstA_opt = si_translate_blend_opt_factor(dstA, true);

		/* Handle interdependencies. */
		if (si_blend_factor_uses_dst(srcRGB))
			dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
		if (si_blend_factor_uses_dst(srcA))
			dstA_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE &&
		    (dstRGB == VK_BLEND_FACTOR_ZERO ||
		     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE))
			dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;

		/* Set the final value. */
		blend.sx_mrt_blend_opt[i] =
			S_028760_COLOR_SRC_OPT(srcRGB_opt) |
			S_028760_COLOR_DST_OPT(dstRGB_opt) |
			S_028760_COLOR_COMB_FCN(si_translate_blend_opt_function(eqRGB)) |
			S_028760_ALPHA_SRC_OPT(srcA_opt) |
			S_028760_ALPHA_DST_OPT(dstA_opt) |
			S_028760_ALPHA_COMB_FCN(si_translate_blend_opt_function(eqA));
		blend_cntl |= S_028780_ENABLE(1);

		blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
		blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(srcRGB));
		blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(dstRGB));
		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
			blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
			blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
			blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(srcA));
			blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(dstA));
		}
		blend.cb_blend_control[i] = blend_cntl;

		blend.blend_enable_4bit |= 0xfu << (i * 4);

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    srcRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
			blend.need_src_alpha |= 1 << i;
	}
	for (i = vkblend->attachmentCount; i < 8; i++) {
		blend.cb_blend_control[i] = 0;
		blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);
	}

	if (pipeline->device->physical_device->has_rbplus) {
		/* Disable RB+ blend optimizations for dual source blending. */
		if (blend.mrt0_is_dual_src) {
			for (i = 0; i < 8; i++) {
				blend.sx_mrt_blend_opt[i] =
					S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_NONE) |
					S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_NONE);
			}
		}

		/* RB+ doesn't work with dual source blending, logic op and
		 * RESOLVE.
		 */
		if (blend.mrt0_is_dual_src || vkblend->logicOpEnable ||
		    mode == V_028808_CB_RESOLVE)
			blend.cb_color_control |= S_028808_DISABLE_DUAL_QUAD(1);
	}

	if (blend.cb_target_mask)
		blend.cb_color_control |= S_028808_MODE(mode);
	else
		blend.cb_color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	radv_pipeline_compute_spi_color_formats(pipeline, pCreateInfo, &blend);
	return blend;
}

static uint32_t si_translate_stencil_op(enum VkStencilOp op)
{
	switch (op) {
	case VK_STENCIL_OP_KEEP:
		return V_02842C_STENCIL_KEEP;
	case VK_STENCIL_OP_ZERO:
		return V_02842C_STENCIL_ZERO;
	case VK_STENCIL_OP_REPLACE:
		return V_02842C_STENCIL_REPLACE_TEST;
	case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
		return V_02842C_STENCIL_ADD_CLAMP;
	case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
		return V_02842C_STENCIL_SUB_CLAMP;
	case VK_STENCIL_OP_INVERT:
		return V_02842C_STENCIL_INVERT;
	case VK_STENCIL_OP_INCREMENT_AND_WRAP:
		return V_02842C_STENCIL_ADD_WRAP;
	case VK_STENCIL_OP_DECREMENT_AND_WRAP:
		return V_02842C_STENCIL_SUB_WRAP;
	default:
		return 0;
	}
}

static uint32_t si_translate_fill(VkPolygonMode func)
{
	switch(func) {
	case VK_POLYGON_MODE_FILL:
		return V_028814_X_DRAW_TRIANGLES;
	case VK_POLYGON_MODE_LINE:
		return V_028814_X_DRAW_LINES;
	case VK_POLYGON_MODE_POINT:
		return V_028814_X_DRAW_POINTS;
	default:
		assert(0);
		return V_028814_X_DRAW_POINTS;
	}
}

static uint8_t radv_pipeline_get_ps_iter_samples(const VkPipelineMultisampleStateCreateInfo *vkms)
{
	uint32_t num_samples = vkms->rasterizationSamples;
	uint32_t ps_iter_samples = 1;

	if (vkms->sampleShadingEnable) {
		ps_iter_samples = ceil(vkms->minSampleShading * num_samples);
		ps_iter_samples = util_next_power_of_two(ps_iter_samples);
	}
	return ps_iter_samples;
}

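/* E.g. minSampleShading = 0.5 with 8x MSAA requests ceil(0.5 * 8) = 4 shaded
 * samples, which is already a power of two, so ps_iter_samples = 4. */
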
static bool
radv_is_depth_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->depthTestEnable &&
	       pCreateInfo->depthWriteEnable &&
	       pCreateInfo->depthCompareOp != VK_COMPARE_OP_NEVER;
}

static bool
radv_writes_stencil(const VkStencilOpState *state)
{
	return state->writeMask &&
	       (state->failOp != VK_STENCIL_OP_KEEP ||
		state->passOp != VK_STENCIL_OP_KEEP ||
		state->depthFailOp != VK_STENCIL_OP_KEEP);
}

static bool
radv_is_stencil_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->stencilTestEnable &&
	       (radv_writes_stencil(&pCreateInfo->front) ||
		radv_writes_stencil(&pCreateInfo->back));
}

static bool
radv_is_ds_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return radv_is_depth_write_enabled(pCreateInfo) ||
	       radv_is_stencil_write_enabled(pCreateInfo);
}

static bool
radv_order_invariant_stencil_op(VkStencilOp op)
{
	/* REPLACE is normally order invariant, except when the stencil
	 * reference value is written by the fragment shader. Tracking this
	 * interaction does not seem worth the effort, so be conservative.
	 */
	return op != VK_STENCIL_OP_INCREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_DECREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_REPLACE;
}

static bool
radv_order_invariant_stencil_state(const VkStencilOpState *state)
{
	/* Compute whether, assuming Z writes are disabled, this stencil state
	 * is order invariant in the sense that the set of passing fragments as
	 * well as the final stencil buffer result does not depend on the order
	 * of fragments.
	 */
	return !state->writeMask ||
	       /* The following assumes that Z writes are disabled. */
	       (state->compareOp == VK_COMPARE_OP_ALWAYS &&
		radv_order_invariant_stencil_op(state->passOp) &&
		radv_order_invariant_stencil_op(state->depthFailOp)) ||
	       (state->compareOp == VK_COMPARE_OP_NEVER &&
		radv_order_invariant_stencil_op(state->failOp));
}

static bool
radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
				struct radv_blend_state *blend,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned colormask = blend->cb_target_enabled_4bit;

	if (!pipeline->device->physical_device->out_of_order_rast_allowed)
		return false;

	/* Be conservative if a logic operation is enabled with color buffers. */
	if (colormask && pCreateInfo->pColorBlendState->logicOpEnable)
		return false;

	/* Default depth/stencil invariance when no attachment is bound. */
	struct radv_dsa_order_invariance dsa_order_invariant = {
		.zs = true, .pass_set = true
	};

	if (pCreateInfo->pDepthStencilState &&
	    subpass->depth_stencil_attachment) {
		const VkPipelineDepthStencilStateCreateInfo *vkds =
			pCreateInfo->pDepthStencilState;
		struct radv_render_pass_attachment *attachment =
			pass->attachments + subpass->depth_stencil_attachment->attachment;
		bool has_stencil = vk_format_is_stencil(attachment->format);
		struct radv_dsa_order_invariance order_invariance[2];
		struct radv_shader_variant *ps =
			pipeline->shaders[MESA_SHADER_FRAGMENT];

		/* Compute depth/stencil order invariance in order to know if
		 * it's safe to enable out-of-order.
		 */
		bool zfunc_is_ordered =
			vkds->depthCompareOp == VK_COMPARE_OP_NEVER ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS_OR_EQUAL ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER_OR_EQUAL;

		bool nozwrite_and_order_invariant_stencil =
			!radv_is_ds_write_enabled(vkds) ||
			(!radv_is_depth_write_enabled(vkds) &&
			 radv_order_invariant_stencil_state(&vkds->front) &&
			 radv_order_invariant_stencil_state(&vkds->back));

		order_invariance[1].zs =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 zfunc_is_ordered);
		order_invariance[0].zs =
			!radv_is_depth_write_enabled(vkds) || zfunc_is_ordered;

		order_invariance[1].pass_set =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 (vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			  vkds->depthCompareOp == VK_COMPARE_OP_NEVER));
		order_invariance[0].pass_set =
			!radv_is_depth_write_enabled(vkds) ||
			(vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			 vkds->depthCompareOp == VK_COMPARE_OP_NEVER);

		dsa_order_invariant = order_invariance[has_stencil];
		if (!dsa_order_invariant.zs)
			return false;

		/* The set of PS invocations is always order invariant,
		 * except when early Z/S tests are requested.
		 */
		if (ps &&
		    ps->info.info.ps.writes_memory &&
		    ps->info.fs.early_fragment_test &&
		    !dsa_order_invariant.pass_set)
			return false;

		/* Determine if out-of-order rasterization should be disabled
		 * when occlusion queries are used.
		 */
		pipeline->graphics.disable_out_of_order_rast_for_occlusion =
			!dsa_order_invariant.pass_set;
	}

	/* No color buffers are enabled for writing. */
	if (!colormask)
		return true;

	unsigned blendmask = colormask & blend->blend_enable_4bit;

	if (blendmask) {
		/* Only commutative blending. */
		if (blendmask & ~blend->commutative_4bit)
			return false;

		if (!dsa_order_invariant.pass_set)
			return false;
	}

	if (colormask & ~blendmask)
		return false;

	return true;
}

static void
radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
				     struct radv_blend_state *blend,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	unsigned num_tile_pipes = pipeline->device->physical_device->rad_info.num_tile_pipes;
	bool out_of_order_rast = false;
	int ps_iter_samples = 1;
	uint32_t mask = 0xffff;

	if (vkms)
		ms->num_samples = vkms->rasterizationSamples;
	else
		ms->num_samples = 1;

	if (vkms)
		ps_iter_samples = radv_pipeline_get_ps_iter_samples(vkms);
	if (vkms && !vkms->sampleShadingEnable && pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.force_persample) {
		ps_iter_samples = ms->num_samples;
	}

	const struct VkPipelineRasterizationStateRasterizationOrderAMD *raster_order =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext, PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD);
	if (raster_order && raster_order->rasterizationOrder == VK_RASTERIZATION_ORDER_RELAXED_AMD) {
		/* Out-of-order rasterization is explicitly enabled by the
		 * application.
		 */
		out_of_order_rast = true;
	} else {
		/* Determine if the driver can enable out-of-order
		 * rasterization internally.
		 */
		out_of_order_rast =
			radv_pipeline_out_of_order_rast(pipeline, blend, pCreateInfo);
	}

	ms->pa_sc_line_cntl = S_028BDC_DX10_DIAMOND_TEST_ENA(1);
	ms->pa_sc_aa_config = 0;
	ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
		      S_028804_INCOHERENT_EQAA_READS(1) |
		      S_028804_INTERPOLATE_COMP_Z(1) |
		      S_028804_STATIC_ANCHOR_ASSOCIATIONS(1);
	ms->pa_sc_mode_cntl_1 =
		S_028A4C_WALK_FENCE_ENABLE(1) | //TODO linear dst fixes
		S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
		S_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(out_of_order_rast) |
		S_028A4C_OUT_OF_ORDER_WATER_MARK(0x7) |
		/* always 1: */
		S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1) |
		S_028A4C_SUPERTILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_TILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE(1) |
		S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		S_028A4C_FORCE_EOV_REZ_ENABLE(1);
	ms->pa_sc_mode_cntl_0 = S_028A48_ALTERNATE_RBS_PER_TILE(pipeline->device->physical_device->rad_info.chip_class >= GFX9) |
				S_028A48_VPORT_SCISSOR_ENABLE(1);

	if (ms->num_samples > 1) {
		unsigned log_samples = util_logbase2(ms->num_samples);
		unsigned log_ps_iter_samples = util_logbase2(ps_iter_samples);
		ms->pa_sc_mode_cntl_0 |= S_028A48_MSAA_ENABLE(1);
		ms->pa_sc_line_cntl |= S_028BDC_EXPAND_LINE_WIDTH(1); /* CM_R_028BDC_PA_SC_LINE_CNTL */
		ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_samples) |
			S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
			S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
			S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
		ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
			S_028BE0_MAX_SAMPLE_DIST(radv_get_default_max_sample_dist(log_samples)) |
			S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples); /* CM_R_028BE0_PA_SC_AA_CONFIG */
		ms->pa_sc_mode_cntl_1 |= S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
		if (ps_iter_samples > 1)
			pipeline->graphics.spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
	}

	if (vkms && vkms->pSampleMask) {
		mask = vkms->pSampleMask[0] & 0xffff;
	}

	ms->pa_sc_aa_mask[0] = mask | (mask << 16);
	ms->pa_sc_aa_mask[1] = mask | (mask << 16);
}

static bool
radv_prim_can_use_guardband(enum VkPrimitiveTopology topology)
{
	switch(topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return false;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return true;
	default:
		unreachable("unhandled primitive type");
	}
}

static uint32_t
si_translate_prim(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
		return V_008958_DI_PT_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
		return V_008958_DI_PT_LINELIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
		return V_008958_DI_PT_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
		return V_008958_DI_PT_TRILIST;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
		return V_008958_DI_PT_TRISTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
		return V_008958_DI_PT_TRIFAN;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_LINELIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_LINESTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_TRILIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_TRISTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_008958_DI_PT_PATCH;
	default:
		assert(0);
		return 0;
	}
}

static uint32_t
si_conv_gl_prim_to_gs_out(unsigned gl_prim)
{
	switch (gl_prim) {
	case 0: /* GL_POINTS */
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case 1: /* GL_LINES */
	case 3: /* GL_LINE_STRIP */
	case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
	case 0x8E7A: /* GL_ISOLINES */
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;

	case 4: /* GL_TRIANGLES */
	case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
	case 5: /* GL_TRIANGLE_STRIP */
	case 7: /* GL_QUADS */
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}

static uint32_t
si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}

static unsigned radv_dynamic_state_mask(VkDynamicState state)
{
	switch(state) {
	case VK_DYNAMIC_STATE_VIEWPORT:
		return RADV_DYNAMIC_VIEWPORT;
	case VK_DYNAMIC_STATE_SCISSOR:
		return RADV_DYNAMIC_SCISSOR;
	case VK_DYNAMIC_STATE_LINE_WIDTH:
		return RADV_DYNAMIC_LINE_WIDTH;
	case VK_DYNAMIC_STATE_DEPTH_BIAS:
		return RADV_DYNAMIC_DEPTH_BIAS;
	case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
		return RADV_DYNAMIC_BLEND_CONSTANTS;
	case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
		return RADV_DYNAMIC_DEPTH_BOUNDS;
	case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
		return RADV_DYNAMIC_STENCIL_COMPARE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
		return RADV_DYNAMIC_STENCIL_WRITE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
		return RADV_DYNAMIC_STENCIL_REFERENCE;
	case VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT:
		return RADV_DYNAMIC_DISCARD_RECTANGLE;
	case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
		return RADV_DYNAMIC_SAMPLE_LOCATIONS;
	default:
		unreachable("Unhandled dynamic state");
	}
}

static uint32_t radv_pipeline_needed_dynamic_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t states = RADV_DYNAMIC_ALL;

	/* If rasterization is disabled we do not care about any of the dynamic states,
	 * since they are all rasterization related only. */
	if (pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
		return 0;

	if (!pCreateInfo->pRasterizationState->depthBiasEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BIAS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->depthBoundsTestEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BOUNDS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->stencilTestEnable)
		states &= ~(RADV_DYNAMIC_STENCIL_COMPARE_MASK |
			    RADV_DYNAMIC_STENCIL_WRITE_MASK |
			    RADV_DYNAMIC_STENCIL_REFERENCE);

	if (!vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_DISCARD_RECTANGLE;

	if (!pCreateInfo->pMultisampleState ||
	    !vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
				  PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_SAMPLE_LOCATIONS;

	/* TODO: blend constants & line width. */

	return states;
}

static void
radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
				 const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t needed_states = radv_pipeline_needed_dynamic_state(pCreateInfo);
	uint32_t states = needed_states;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

	pipeline->dynamic_state = default_dynamic_state;
	pipeline->graphics.needed_dynamic_state = needed_states;

	if (pCreateInfo->pDynamicState) {
		/* Remove all of the states that are marked as dynamic */
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t s = 0; s < count; s++)
			states &= ~radv_dynamic_state_mask(pCreateInfo->pDynamicState->pDynamicStates[s]);
	}

	struct radv_dynamic_state *dynamic = &pipeline->dynamic_state;

	if (needed_states & RADV_DYNAMIC_VIEWPORT) {
		assert(pCreateInfo->pViewportState);

		dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
		if (states & RADV_DYNAMIC_VIEWPORT) {
			typed_memcpy(dynamic->viewport.viewports,
				     pCreateInfo->pViewportState->pViewports,
				     pCreateInfo->pViewportState->viewportCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SCISSOR) {
		dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
		if (states & RADV_DYNAMIC_SCISSOR) {
			typed_memcpy(dynamic->scissor.scissors,
				     pCreateInfo->pViewportState->pScissors,
				     pCreateInfo->pViewportState->scissorCount);
		}
	}

	if (states & RADV_DYNAMIC_LINE_WIDTH) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
	}

	if (states & RADV_DYNAMIC_DEPTH_BIAS) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->depth_bias.bias =
			pCreateInfo->pRasterizationState->depthBiasConstantFactor;
		dynamic->depth_bias.clamp =
			pCreateInfo->pRasterizationState->depthBiasClamp;
		dynamic->depth_bias.slope =
			pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
	}

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pColorBlendState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is
	 *    created against does not use any color attachments.
	 */
	if (subpass->has_color_att && states & RADV_DYNAMIC_BLEND_CONSTANTS) {
		assert(pCreateInfo->pColorBlendState);
		typed_memcpy(dynamic->blend_constants,
			     pCreateInfo->pColorBlendState->blendConstants, 4);
	}

	/* If there is no depthstencil attachment, then don't read
	 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
	 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
	 * no need to override the depthstencil defaults in
	 * radv_pipeline::dynamic_state when there is no depthstencil attachment.
	 *
	 * Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pDepthStencilState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is created
	 *    against does not use a depth/stencil attachment.
	 */
	if (needed_states && subpass->depth_stencil_attachment) {
		assert(pCreateInfo->pDepthStencilState);

		if (states & RADV_DYNAMIC_DEPTH_BOUNDS) {
			dynamic->depth_bounds.min =
				pCreateInfo->pDepthStencilState->minDepthBounds;
			dynamic->depth_bounds.max =
				pCreateInfo->pDepthStencilState->maxDepthBounds;
		}

		if (states & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
			dynamic->stencil_compare_mask.front =
				pCreateInfo->pDepthStencilState->front.compareMask;
			dynamic->stencil_compare_mask.back =
				pCreateInfo->pDepthStencilState->back.compareMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
			dynamic->stencil_write_mask.front =
				pCreateInfo->pDepthStencilState->front.writeMask;
			dynamic->stencil_write_mask.back =
				pCreateInfo->pDepthStencilState->back.writeMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_REFERENCE) {
			dynamic->stencil_reference.front =
				pCreateInfo->pDepthStencilState->front.reference;
			dynamic->stencil_reference.back =
				pCreateInfo->pDepthStencilState->back.reference;
		}
	}

	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
	if (needed_states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
		dynamic->discard_rectangle.count = discard_rectangle_info->discardRectangleCount;
		if (states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
			typed_memcpy(dynamic->discard_rectangle.rectangles,
				     discard_rectangle_info->pDiscardRectangles,
				     discard_rectangle_info->discardRectangleCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
		const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_info =
			vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
					     PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
		/* If sampleLocationsEnable is VK_FALSE, the default sample
		 * locations are used and the values specified in
		 * sampleLocationsInfo are ignored.
		 */
		if (sample_location_info->sampleLocationsEnable) {
			const VkSampleLocationsInfoEXT *pSampleLocationsInfo =
				&sample_location_info->sampleLocationsInfo;

			assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);

			dynamic->sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
			dynamic->sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
			dynamic->sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
			typed_memcpy(&dynamic->sample_location.locations[0],
				     pSampleLocationsInfo->pSampleLocations,
				     pSampleLocationsInfo->sampleLocationsCount);
		}
	}

	pipeline->dynamic_state.mask = states;
}

static struct radv_gs_state
calculate_gs_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
		  const struct radv_pipeline *pipeline)
{
	struct radv_gs_state gs = {0};
	struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
	struct radv_es_output_info *es_info;
	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		es_info = radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	else
		es_info = radv_pipeline_has_tess(pipeline) ?
			&pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.es_info :
			&pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.es_info;

	unsigned gs_num_invocations = MAX2(gs_info->gs.invocations, 1);
	bool uses_adjacency;
	switch(pCreateInfo->pInputAssemblyState->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space. */
	const unsigned max_lds_size = 8 * 1024;
	const unsigned esgs_itemsize = es_info->esgs_itemsize / 4;
	unsigned esgs_lds_size;

	/* All these are per subgroup: */
	const unsigned max_out_prims = 32 * 1024;
	const unsigned max_es_verts = 255;
	const unsigned ideal_gs_prims = 64;
	unsigned max_gs_prims, gs_prims;
	unsigned min_es_verts, es_verts, worst_case_es_verts;

	if (uses_adjacency || gs_num_invocations > 1)
		max_gs_prims = 127 / gs_num_invocations;
	else
		max_gs_prims = 255;

	/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
	 * Make sure we don't go over the maximum value.
	 */
	if (gs_info->gs.vertices_out > 0) {
		max_gs_prims = MIN2(max_gs_prims,
				    max_out_prims /
				    (gs_info->gs.vertices_out * gs_num_invocations));
	}
	assert(max_gs_prims > 0);

	/* If the primitive has adjacency, halve the number of vertices
	 * that will be reused in multiple primitives.
	 */
	min_es_verts = gs_info->gs.vertices_in / (uses_adjacency ? 2 : 1);

	gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
	worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

	/* Compute ESGS LDS size based on the worst case number of ES vertices
	 * needed to create the target number of GS prims per subgroup.
	 */
	esgs_lds_size = esgs_itemsize * worst_case_es_verts;

	/* If total LDS usage is too big, refactor partitions based on ratio
	 * of ESGS item sizes.
	 */
	if (esgs_lds_size > max_lds_size) {
		/* Our target GS Prims Per Subgroup was too large. Calculate
		 * the maximum number of GS Prims Per Subgroup that will fit
		 * into LDS, capped by the maximum that the hardware can support.
		 */
		gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
				max_gs_prims);
		assert(gs_prims > 0);
		worst_case_es_verts = MIN2(min_es_verts * gs_prims,
					   max_es_verts);

		esgs_lds_size = esgs_itemsize * worst_case_es_verts;
		assert(esgs_lds_size <= max_lds_size);
	}

	/* Now calculate remaining ESGS information. */
	if (esgs_lds_size)
		es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
	else
		es_verts = max_es_verts;

	/* Vertices for adjacency primitives are not always reused, so restore
	 * it for ES_VERTS_PER_SUBGRP.
	 */
	min_es_verts = gs_info->gs.vertices_in;

	/* For normal primitives, the VGT only checks if they are past the ES
	 * verts per subgroup after allocating a full GS primitive and if they
	 * are, kick off a new subgroup. But if those additional ES verts are
	 * unique (e.g. not reused) we need to make sure there is enough LDS
	 * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
	 */
	es_verts -= min_es_verts - 1;

	uint32_t es_verts_per_subgroup = es_verts;
	uint32_t gs_prims_per_subgroup = gs_prims;
	uint32_t gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
	uint32_t max_prims_per_subgroup = gs_inst_prims_in_subgroup * gs_info->gs.vertices_out;
	gs.lds_size = align(esgs_lds_size, 128) / 128;
	gs.vgt_gs_onchip_cntl = S_028A44_ES_VERTS_PER_SUBGRP(es_verts_per_subgroup) |
				S_028A44_GS_PRIMS_PER_SUBGRP(gs_prims_per_subgroup) |
				S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_inst_prims_in_subgroup);
	gs.vgt_gs_max_prims_per_subgroup = S_028A94_MAX_PRIMS_PER_SUBGROUP(max_prims_per_subgroup);
	gs.vgt_esgs_ring_itemsize = esgs_itemsize;
	assert(max_prims_per_subgroup <= max_out_prims);

	return gs;
}

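/* Worked example of the sizing above: with a 4-dword esgs_itemsize, 3 input
 * vertices per primitive and the ideal 64 GS prims, worst_case_es_verts =
 * MIN2(3 * 64, 255) = 192 and esgs_lds_size = 4 * 192 = 768 dwords, well
 * under the 8K-dword limit, so no repartitioning is needed. */
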
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}

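/* Rationale: the first primitive consumes min_verts_per_prim ES vertices and
 * each additional primitive needs at least one new vertex, so at most
 * 1 + (max_esverts - min_verts_per_prim) primitives fit in a subgroup. With
 * adjacency, only every other vertex is reusable, hence the halving. */
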
static unsigned
radv_get_num_input_vertices(struct radv_pipeline *pipeline)
{
	if (radv_pipeline_has_gs(pipeline)) {
		struct radv_shader_variant *gs =
			radv_get_shader(pipeline, MESA_SHADER_GEOMETRY);

		return gs->info.gs.vertices_in;
	}

	if (radv_pipeline_has_tess(pipeline)) {
		struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);

		if (tes->info.tes.point_mode)
			return 1;
		if (tes->info.tes.primitive_mode == GL_ISOLINES)
			return 2;
		return 3;
	}

	return 3;
}

static struct radv_ngg_state
calculate_ngg_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   struct radv_pipeline *pipeline)
{
	struct radv_ngg_state ngg = {0};
	struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
	struct radv_es_output_info *es_info =
		radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	unsigned gs_type = radv_pipeline_has_gs(pipeline) ? MESA_SHADER_GEOMETRY : MESA_SHADER_VERTEX;
	unsigned max_verts_per_prim = radv_get_num_input_vertices(pipeline);
	unsigned min_verts_per_prim =
		gs_type == MESA_SHADER_GEOMETRY ? max_verts_per_prim : 1;
	unsigned gs_num_invocations = radv_pipeline_has_gs(pipeline) ? MAX2(gs_info->gs.invocations, 1) : 1;
	bool uses_adjacency;
	switch(pCreateInfo->pInputAssemblyState->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * Streamout can increase the ESGS buffer size later on, so be more
	 * conservative with streamout and use 4K dwords. This may be suboptimal.
	 *
	 * Otherwise, use the limit of 7K dwords. The reason is that we need
	 * to leave some headroom for the max_esverts increase at the end.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       8K dwords.
	 */
	const unsigned max_lds_size = (0 /*gs_info->info.so.num_outputs*/ ? 4 : 7) * 1024 - 128;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 256;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on based on the primitive type of
	 * the draw:
	 *  - at most 252 for any line input primitive type
	 *  - at most 251 for any quad input primitive type
	 *  - at most 251 for triangle strips with adjacency (this happens to
	 *    be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == MESA_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_info->gs.vertices_out * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_info->gs.vertices_out;
		}

		esvert_lds_size = es_info->esgs_itemsize / 4;
		gsprim_lds_size = (gs_info->gs.gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* TODO: This needs to be adjusted once LDS use for compaction
		 * after culling is implemented. */
		if (es_info->info.so.num_outputs)
			esvert_lds_size = 4 * es_info->info.so.num_outputs + 1;
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, uses_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = 64;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, uses_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_info->gs.vertices_out :
		gs_type == MESA_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_info->gs.vertices_out :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == MESA_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_info->gs.vertices_out;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	ngg.max_gsprims = max_gsprims;
	ngg.max_out_verts = max_out_vertices;
	ngg.prim_amp_factor = prim_amp_factor;
	ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
	ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

	if (gs_type == MESA_SHADER_GEOMETRY) {
		ngg.vgt_esgs_ring_itemsize = es_info->esgs_itemsize / 4;
	} else {
		ngg.vgt_esgs_ring_itemsize = 1;
	}

	pipeline->graphics.esgs_ring_size = 4 * max_esverts * esvert_lds_size;

	assert(ngg.hw_max_esverts >= 24); /* HW limitation */

	return ngg;
}
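/* Worked example of the LDS-driven scaling above (illustrative numbers only):
 * suppose the first clamps leave max_esverts = 440 with esvert_lds_size = 16
 * and max_gsprims = 110 with gsprim_lds_size = 64. Then
 * lds_total = 440 * 16 + 110 * 64 = 14080 dwords, which exceeds
 * target_lds_size = 7040, so both counts are scaled by 7040 / 14080, i.e.
 * halved to 220 and 55, before being re-clamped and rounded up to wave
 * multiples.
 */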
static void
calculate_gs_ring_sizes(struct radv_pipeline *pipeline, const struct radv_gs_state *gs)
{
	struct radv_device *device = pipeline->device;
	unsigned num_se = device->physical_device->rad_info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	/* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
	 * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
	 */
	unsigned gs_vertex_reuse =
		(device->physical_device->rad_info.chip_class >= GFX8 ? 32 : 16) * num_se;
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
	struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;

	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(gs->vgt_esgs_ring_itemsize * 4 * gs_vertex_reuse *
					    wave_size, alignment);
	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
		gs->vgt_esgs_ring_itemsize * 4 * gs_info->gs.vertices_in;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
		gs_info->gs.max_gsvs_emit_size;

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
		pipeline->graphics.esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);

	pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}
static void si_multiwave_lds_size_workaround(struct radv_device *device,
					     unsigned *lds_size)
{
	/* If tessellation is all offchip and on-chip GS isn't used, this
	 * workaround is not needed.
	 */
	return;

	/* SPI barrier management bug:
	 *   Make sure we have at least 4k of LDS in use to avoid the bug.
	 *   It applies to workgroup sizes of more than one wavefront.
	 */
	if (device->physical_device->rad_info.family == CHIP_BONAIRE ||
	    device->physical_device->rad_info.family == CHIP_KABINI)
		*lds_size = MAX2(*lds_size, 8);
}
struct radv_shader_variant *
radv_get_shader(struct radv_pipeline *pipeline,
		gl_shader_stage stage)
{
	if (stage == MESA_SHADER_VERTEX) {
		if (pipeline->shaders[MESA_SHADER_VERTEX])
			return pipeline->shaders[MESA_SHADER_VERTEX];
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
			return pipeline->shaders[MESA_SHADER_TESS_CTRL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	} else if (stage == MESA_SHADER_TESS_EVAL) {
		if (!radv_pipeline_has_tess(pipeline))
			return NULL;
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
			return pipeline->shaders[MESA_SHADER_TESS_EVAL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	}
	return pipeline->shaders[stage];
}
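/* Merged-stage lookup, by example: on GFX9+ a vertex shader may have been
 * compiled into the TCS or GS binary, so for a VS+TCS+TES pipeline
 * radv_get_shader(pipeline, MESA_SHADER_VERTEX) returns the TESS_CTRL
 * variant that actually contains the vertex stage.
 */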
static struct radv_tessellation_state
calculate_tess_state(struct radv_pipeline *pipeline,
		     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	unsigned num_tcs_input_cp;
	unsigned num_tcs_output_cp;
	unsigned lds_size;
	unsigned num_patches;
	struct radv_tessellation_state tess = {0};

	num_tcs_input_cp = pCreateInfo->pTessellationState->patchControlPoints;
	num_tcs_output_cp = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.tcs_vertices_out; //TCS VERTICES OUT
	num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;

	lds_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.lds_size;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
		assert(lds_size <= 65536);
		lds_size = align(lds_size, 512) / 512;
	} else {
		assert(lds_size <= 32768);
		lds_size = align(lds_size, 256) / 256;
	}
	si_multiwave_lds_size_workaround(pipeline->device, &lds_size);

	tess.lds_size = lds_size;

	tess.ls_hs_config = S_028B58_NUM_PATCHES(num_patches) |
		S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
		S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
	tess.num_patches = num_patches;

	struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);
	unsigned type = 0, partitioning = 0, topology = 0, distribution_mode = 0;

	switch (tes->info.tes.primitive_mode) {
	case GL_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case GL_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	case GL_ISOLINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	}

	switch (tes->info.tes.spacing) {
	case TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	case TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	default:
		break;
	}

	bool ccw = tes->info.tes.ccw;
	const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
		vk_find_struct_const(pCreateInfo->pTessellationState,
				     PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);

	if (domain_origin_state && domain_origin_state->domainOrigin != VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
		ccw = !ccw;

	if (tes->info.tes.point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes->info.tes.primitive_mode == GL_ISOLINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (ccw)
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	if (pipeline->device->has_distributed_tess) {
		if (pipeline->device->physical_device->rad_info.family == CHIP_FIJI ||
		    pipeline->device->physical_device->rad_info.family >= CHIP_POLARIS10)
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
		else
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
	} else
		distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;

	tess.tf_param = S_028B6C_TYPE(type) |
		S_028B6C_PARTITIONING(partitioning) |
		S_028B6C_TOPOLOGY(topology) |
		S_028B6C_DISTRIBUTION_MODE(distribution_mode);

	return tess;
}
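/* Packing example for ls_hs_config (illustrative values): num_patches = 8
 * with 3 input and 4 output control points per patch yields
 * S_028B58_NUM_PATCHES(8) | S_028B58_HS_NUM_INPUT_CP(3) |
 * S_028B58_HS_NUM_OUTPUT_CP(4), i.e. each count lands in its own bitfield of
 * the LS_HS_CONFIG register.
 */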
static const struct radv_prim_vertex_count prim_size_table[] = {
	[V_008958_DI_PT_NONE] = {0, 0},
	[V_008958_DI_PT_POINTLIST] = {1, 1},
	[V_008958_DI_PT_LINELIST] = {2, 2},
	[V_008958_DI_PT_LINESTRIP] = {2, 1},
	[V_008958_DI_PT_TRILIST] = {3, 3},
	[V_008958_DI_PT_TRIFAN] = {3, 1},
	[V_008958_DI_PT_TRISTRIP] = {3, 1},
	[V_008958_DI_PT_LINELIST_ADJ] = {4, 4},
	[V_008958_DI_PT_LINESTRIP_ADJ] = {4, 1},
	[V_008958_DI_PT_TRILIST_ADJ] = {6, 6},
	[V_008958_DI_PT_TRISTRIP_ADJ] = {6, 2},
	[V_008958_DI_PT_RECTLIST] = {3, 3},
	[V_008958_DI_PT_LINELOOP] = {2, 1},
	[V_008958_DI_PT_POLYGON] = {3, 1},
	[V_008958_DI_PT_2D_TRI_STRIP] = {0, 0},
};
static const struct radv_vs_output_info *get_vs_output_info(const struct radv_pipeline *pipeline)
{
	if (radv_pipeline_has_gs(pipeline))
		if (radv_pipeline_has_ngg(pipeline))
			return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.vs.outinfo;
		else
			return &pipeline->gs_copy_shader->info.vs.outinfo;
	else if (radv_pipeline_has_tess(pipeline))
		return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.outinfo;
	else
		return &pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.outinfo;
}
static void
radv_link_shaders(struct radv_pipeline *pipeline, nir_shader **shaders)
{
	nir_shader* ordered_shaders[MESA_SHADER_STAGES];
	int shader_count = 0;

	if(shaders[MESA_SHADER_FRAGMENT]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_FRAGMENT];
	}
	if(shaders[MESA_SHADER_GEOMETRY]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_GEOMETRY];
	}
	if(shaders[MESA_SHADER_TESS_EVAL]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_EVAL];
	}
	if(shaders[MESA_SHADER_TESS_CTRL]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_CTRL];
	}
	if(shaders[MESA_SHADER_VERTEX]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_VERTEX];
	}

	if (shader_count > 1) {
		unsigned first = ordered_shaders[shader_count - 1]->info.stage;
		unsigned last = ordered_shaders[0]->info.stage;

		if (ordered_shaders[0]->info.stage == MESA_SHADER_FRAGMENT &&
		    ordered_shaders[1]->info.has_transform_feedback_varyings)
			nir_link_xfb_varyings(ordered_shaders[1], ordered_shaders[0]);

		for (int i = 0; i < shader_count; ++i) {
			nir_variable_mode mask = 0;

			if (ordered_shaders[i]->info.stage != first)
				mask = mask | nir_var_shader_in;

			if (ordered_shaders[i]->info.stage != last)
				mask = mask | nir_var_shader_out;

			nir_lower_io_to_scalar_early(ordered_shaders[i], mask);
			radv_optimize_nir(ordered_shaders[i], false, false);
		}
	}

	for (int i = 1; i < shader_count; ++i) {
		nir_lower_io_arrays_to_elements(ordered_shaders[i],
						ordered_shaders[i - 1]);

		if (nir_link_opt_varyings(ordered_shaders[i],
					  ordered_shaders[i - 1]))
			radv_optimize_nir(ordered_shaders[i - 1], false, false);

		nir_remove_dead_variables(ordered_shaders[i],
					  nir_var_shader_out);
		nir_remove_dead_variables(ordered_shaders[i - 1],
					  nir_var_shader_in);

		bool progress = nir_remove_unused_varyings(ordered_shaders[i],
							   ordered_shaders[i - 1]);

		nir_compact_varyings(ordered_shaders[i],
				     ordered_shaders[i - 1], true);

		if (progress) {
			if (nir_lower_global_vars_to_local(ordered_shaders[i])) {
				ac_lower_indirect_derefs(ordered_shaders[i],
							 pipeline->device->physical_device->rad_info.chip_class);
			}
			radv_optimize_nir(ordered_shaders[i], false, false);

			if (nir_lower_global_vars_to_local(ordered_shaders[i - 1])) {
				ac_lower_indirect_derefs(ordered_shaders[i - 1],
							 pipeline->device->physical_device->rad_info.chip_class);
			}
			radv_optimize_nir(ordered_shaders[i - 1], false, false);
		}
	}
}
static uint32_t
radv_get_attrib_stride(const VkPipelineVertexInputStateCreateInfo *input_state,
		       uint32_t attrib_binding)
{
	for (uint32_t i = 0; i < input_state->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *input_binding =
			&input_state->pVertexBindingDescriptions[i];

		if (input_binding->binding == attrib_binding)
			return input_binding->stride;
	}

	return 0;
}
static struct radv_pipeline_key
radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
				    const VkGraphicsPipelineCreateInfo *pCreateInfo,
				    const struct radv_blend_state *blend,
				    bool has_view_index)
{
	const VkPipelineVertexInputStateCreateInfo *input_state =
		pCreateInfo->pVertexInputState;
	const VkPipelineVertexInputDivisorStateCreateInfoEXT *divisor_state =
		vk_find_struct_const(input_state->pNext, PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);

	struct radv_pipeline_key key;
	memset(&key, 0, sizeof(key));

	if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
		key.optimisations_disabled = 1;

	key.has_multiview_view_index = has_view_index;

	uint32_t binding_input_rate = 0;
	uint32_t instance_rate_divisors[MAX_VERTEX_ATTRIBS];
	for (unsigned i = 0; i < input_state->vertexBindingDescriptionCount; ++i) {
		if (input_state->pVertexBindingDescriptions[i].inputRate) {
			unsigned binding = input_state->pVertexBindingDescriptions[i].binding;
			binding_input_rate |= 1u << binding;
			instance_rate_divisors[binding] = 1;
		}
	}
	if (divisor_state) {
		for (unsigned i = 0; i < divisor_state->vertexBindingDivisorCount; ++i) {
			instance_rate_divisors[divisor_state->pVertexBindingDivisors[i].binding] =
				divisor_state->pVertexBindingDivisors[i].divisor;
		}
	}

	for (unsigned i = 0; i < input_state->vertexAttributeDescriptionCount; ++i) {
		const VkVertexInputAttributeDescription *desc =
			&input_state->pVertexAttributeDescriptions[i];
		const struct vk_format_description *format_desc;
		unsigned location = desc->location;
		unsigned binding = desc->binding;
		unsigned num_format, data_format;
		int first_non_void;

		if (binding_input_rate & (1u << binding)) {
			key.instance_rate_inputs |= 1u << location;
			key.instance_rate_divisors[location] = instance_rate_divisors[binding];
		}

		format_desc = vk_format_description(desc->format);
		first_non_void = vk_format_get_first_non_void_channel(desc->format);

		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);

		key.vertex_attribute_formats[location] = data_format | (num_format << 4);
		key.vertex_attribute_bindings[location] = desc->binding;
		key.vertex_attribute_offsets[location] = desc->offset;
		key.vertex_attribute_strides[location] = radv_get_attrib_stride(input_state, desc->binding);

		if (pipeline->device->physical_device->rad_info.chip_class <= GFX8 &&
		    pipeline->device->physical_device->rad_info.family != CHIP_STONEY) {
			VkFormat format = input_state->pVertexAttributeDescriptions[i].format;
			uint64_t adjust;
			switch(format) {
			case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
			case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
				adjust = RADV_ALPHA_ADJUST_SNORM;
				break;
			case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
			case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
				adjust = RADV_ALPHA_ADJUST_SSCALED;
				break;
			case VK_FORMAT_A2R10G10B10_SINT_PACK32:
			case VK_FORMAT_A2B10G10R10_SINT_PACK32:
				adjust = RADV_ALPHA_ADJUST_SINT;
				break;
			default:
				adjust = 0;
				break;
			}
			key.vertex_alpha_adjust |= adjust << (2 * location);
		}

		switch (desc->format) {
		case VK_FORMAT_B8G8R8A8_UNORM:
		case VK_FORMAT_B8G8R8A8_SNORM:
		case VK_FORMAT_B8G8R8A8_USCALED:
		case VK_FORMAT_B8G8R8A8_SSCALED:
		case VK_FORMAT_B8G8R8A8_UINT:
		case VK_FORMAT_B8G8R8A8_SINT:
		case VK_FORMAT_B8G8R8A8_SRGB:
		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
			key.vertex_post_shuffle |= 1 << location;
			break;
		default:
			break;
		}
	}

	if (pCreateInfo->pTessellationState)
		key.tess_input_vertices = pCreateInfo->pTessellationState->patchControlPoints;

	if (pCreateInfo->pMultisampleState &&
	    pCreateInfo->pMultisampleState->rasterizationSamples > 1) {
		uint32_t num_samples = pCreateInfo->pMultisampleState->rasterizationSamples;
		uint32_t ps_iter_samples = radv_pipeline_get_ps_iter_samples(pCreateInfo->pMultisampleState);
		key.num_samples = num_samples;
		key.log2_ps_iter_samples = util_logbase2(ps_iter_samples);
	}

	key.col_format = blend->spi_shader_col_format;
	if (pipeline->device->physical_device->rad_info.chip_class < GFX8)
		radv_pipeline_compute_get_int_clamp(pCreateInfo, &key.is_int8, &key.is_int10);

	return key;
}
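/* Encoding note (worked example, values illustrative): each
 * key.vertex_attribute_formats entry packs the buffer data format into the
 * low four bits and the numeric format above it, so data_format = 13 with
 * num_format = 7 is stored as 13 | (7 << 4) = 125 and decoded later with
 * (x & 0xf) and (x >> 4).
 */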
static void
radv_fill_shader_keys(struct radv_device *device,
		      struct radv_shader_variant_key *keys,
		      const struct radv_pipeline_key *key,
		      nir_shader **nir)
{
	keys[MESA_SHADER_VERTEX].vs.instance_rate_inputs = key->instance_rate_inputs;
	keys[MESA_SHADER_VERTEX].vs.alpha_adjust = key->vertex_alpha_adjust;
	keys[MESA_SHADER_VERTEX].vs.post_shuffle = key->vertex_post_shuffle;
	for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; ++i) {
		keys[MESA_SHADER_VERTEX].vs.instance_rate_divisors[i] = key->instance_rate_divisors[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_formats[i] = key->vertex_attribute_formats[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_bindings[i] = key->vertex_attribute_bindings[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_offsets[i] = key->vertex_attribute_offsets[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_strides[i] = key->vertex_attribute_strides[i];
	}

	if (nir[MESA_SHADER_TESS_CTRL]) {
		keys[MESA_SHADER_VERTEX].vs_common_out.as_ls = true;
		keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = 0;
		keys[MESA_SHADER_TESS_CTRL].tcs.input_vertices = key->tess_input_vertices;
		keys[MESA_SHADER_TESS_CTRL].tcs.primitive_mode = nir[MESA_SHADER_TESS_EVAL]->info.tess.primitive_mode;

		keys[MESA_SHADER_TESS_CTRL].tcs.tes_reads_tess_factors = !!(nir[MESA_SHADER_TESS_EVAL]->info.inputs_read & (VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER));
	}

	if (nir[MESA_SHADER_GEOMETRY]) {
		if (nir[MESA_SHADER_TESS_CTRL])
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_es = true;
		else
			keys[MESA_SHADER_VERTEX].vs_common_out.as_es = true;
	}

	if (device->physical_device->rad_info.chip_class >= GFX10) {
		if (nir[MESA_SHADER_TESS_CTRL]) {
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = true;
		} else {
			keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = true;
		}
	}

	for(int i = 0; i < MESA_SHADER_STAGES; ++i)
		keys[i].has_multiview_view_index = key->has_multiview_view_index;

	keys[MESA_SHADER_FRAGMENT].fs.col_format = key->col_format;
	keys[MESA_SHADER_FRAGMENT].fs.is_int8 = key->is_int8;
	keys[MESA_SHADER_FRAGMENT].fs.is_int10 = key->is_int10;
	keys[MESA_SHADER_FRAGMENT].fs.log2_ps_iter_samples = key->log2_ps_iter_samples;
	keys[MESA_SHADER_FRAGMENT].fs.num_samples = key->num_samples;
}
static void
merge_tess_info(struct shader_info *tes_info,
		const struct shader_info *tcs_info)
{
	/* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
	 *
	 *    "PointMode. Controls generation of points rather than triangles
	 *     or lines. This functionality defaults to disabled, and is
	 *     enabled if either shader stage includes the execution mode.
	 *
	 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
	 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
	 * and OutputVertices, it says:
	 *
	 *    "One mode must be set in at least one of the tessellation
	 *     shader stages."
	 *
	 * So, the fields can be set in either the TCS or TES, but they must
	 * agree if set in both.  Our backend looks at TES, so bitwise-or in
	 * the values from the TCS.
	 */
	assert(tcs_info->tess.tcs_vertices_out == 0 ||
	       tes_info->tess.tcs_vertices_out == 0 ||
	       tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
	tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;

	assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
	       tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
	       tcs_info->tess.spacing == tes_info->tess.spacing);
	tes_info->tess.spacing |= tcs_info->tess.spacing;

	assert(tcs_info->tess.primitive_mode == 0 ||
	       tes_info->tess.primitive_mode == 0 ||
	       tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
	tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
	tes_info->tess.ccw |= tcs_info->tess.ccw;
	tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}
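/* Merge semantics, by example: if only the TES declares
 * primitive_mode = GL_TRIANGLES and the TCS leaves it zero, the bitwise OR
 * propagates triangles into tes_info; if both stages declare a mode, the
 * asserts require them to agree, matching the spec language quoted above.
 */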
static
void radv_init_feedback(const VkPipelineCreationFeedbackCreateInfoEXT *ext)
{
	if (!ext)
		return;

	if (ext->pPipelineCreationFeedback) {
		ext->pPipelineCreationFeedback->flags = 0;
		ext->pPipelineCreationFeedback->duration = 0;
	}

	for (unsigned i = 0; i < ext->pipelineStageCreationFeedbackCount; ++i) {
		ext->pPipelineStageCreationFeedbacks[i].flags = 0;
		ext->pPipelineStageCreationFeedbacks[i].duration = 0;
	}
}
static
void radv_start_feedback(VkPipelineCreationFeedbackEXT *feedback)
{
	if (!feedback)
		return;

	feedback->duration -= radv_get_current_time();
	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
}
static
void radv_stop_feedback(VkPipelineCreationFeedbackEXT *feedback, bool cache_hit)
{
	if (!feedback)
		return;

	feedback->duration += radv_get_current_time();
	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT |
	                  (cache_hit ? VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT : 0);
}
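/* Timing trick, by example: radv_start_feedback subtracts the current time
 * and radv_stop_feedback later adds it back, so after a start at t0 and a
 * stop at t1 the accumulated duration is exactly t1 - t0 without having to
 * store t0 anywhere.
 */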
void radv_create_shaders(struct radv_pipeline *pipeline,
			 struct radv_device *device,
			 struct radv_pipeline_cache *cache,
			 const struct radv_pipeline_key *key,
			 const VkPipelineShaderStageCreateInfo **pStages,
			 const VkPipelineCreateFlags flags,
			 VkPipelineCreationFeedbackEXT *pipeline_feedback,
			 VkPipelineCreationFeedbackEXT **stage_feedbacks)
{
	struct radv_shader_module fs_m = {0};
	struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
	nir_shader *nir[MESA_SHADER_STAGES] = {0};
	struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
	struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{{0}}}}};
	unsigned char hash[20], gs_copy_hash[20];
	bool use_ngg = device->physical_device->rad_info.chip_class >= GFX10;

	radv_start_feedback(pipeline_feedback);

	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pStages[i]) {
			modules[i] = radv_shader_module_from_handle(pStages[i]->module);
			if (modules[i]->nir)
				_mesa_sha1_compute(modules[i]->nir->info.name,
						   strlen(modules[i]->nir->info.name),
						   modules[i]->sha1);

			pipeline->active_stages |= mesa_to_vk_shader_stage(i);
		}
	}

	radv_hash_shaders(hash, pStages, pipeline->layout, key, get_hash_flags(device));
	memcpy(gs_copy_hash, hash, 20);
	gs_copy_hash[0] ^= 1;

	bool found_in_application_cache = true;
	if (modules[MESA_SHADER_GEOMETRY] && !use_ngg) {
		struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
		radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants,
								&found_in_application_cache);
		pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
	}

	if (radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
							    &found_in_application_cache) &&
	    (!modules[MESA_SHADER_GEOMETRY] || pipeline->gs_copy_shader)) {
		radv_stop_feedback(pipeline_feedback, found_in_application_cache);
		return;
	}

	if (!modules[MESA_SHADER_FRAGMENT] && !modules[MESA_SHADER_COMPUTE]) {
		nir_builder fs_b;
		nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
		fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "noop_fs");
		fs_m.nir = fs_b.shader;
		modules[MESA_SHADER_FRAGMENT] = &fs_m;
	}

	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
		const VkPipelineShaderStageCreateInfo *stage = pStages[i];

		if (!modules[i])
			continue;

		radv_start_feedback(stage_feedbacks[i]);

		nir[i] = radv_shader_compile_to_nir(device, modules[i],
						    stage ? stage->pName : "main", i,
						    stage ? stage->pSpecializationInfo : NULL,
						    flags, pipeline->layout);

		/* We don't want to alter meta shaders IR directly so clone it
		 * first.
		 */
		if (nir[i]->info.name) {
			nir[i] = nir_shader_clone(NULL, nir[i]);
		}

		radv_stop_feedback(stage_feedbacks[i], false);
	}

	if (nir[MESA_SHADER_TESS_CTRL]) {
		nir_lower_patch_vertices(nir[MESA_SHADER_TESS_EVAL], nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
		merge_tess_info(&nir[MESA_SHADER_TESS_EVAL]->info, &nir[MESA_SHADER_TESS_CTRL]->info);
	}

	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_link_shaders(pipeline, nir);

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (nir[i]) {
			NIR_PASS_V(nir[i], nir_lower_bool_to_int32);
			NIR_PASS_V(nir[i], nir_lower_non_uniform_access,
				   nir_lower_non_uniform_ubo_access |
				   nir_lower_non_uniform_ssbo_access |
				   nir_lower_non_uniform_texture_access |
				   nir_lower_non_uniform_image_access);
		}

		if (radv_can_dump_shader(device, modules[i], false))
			nir_print_shader(nir[i], stderr);
	}

	radv_fill_shader_keys(device, keys, key, nir);

	if (nir[MESA_SHADER_FRAGMENT]) {
		if (!pipeline->shaders[MESA_SHADER_FRAGMENT]) {
			radv_start_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT]);

			pipeline->shaders[MESA_SHADER_FRAGMENT] =
			       radv_shader_variant_compile(device, modules[MESA_SHADER_FRAGMENT], &nir[MESA_SHADER_FRAGMENT], 1,
							   pipeline->layout, keys + MESA_SHADER_FRAGMENT,
							   &binaries[MESA_SHADER_FRAGMENT]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT], false);
		}

		/* TODO: These are no longer used as keys we should refactor this */
		keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_layer_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.layer_input;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_clip_dists =
			!!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.num_input_clips_culls;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_prim_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_layer_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.layer_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_clip_dists =
			!!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.num_input_clips_culls;
	}

	if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_TESS_CTRL]) {
		if (!pipeline->shaders[MESA_SHADER_TESS_CTRL]) {
			struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
			struct radv_shader_variant_key key = keys[MESA_SHADER_TESS_CTRL];
			key.tcs.vs_key = keys[MESA_SHADER_VERTEX].vs;

			radv_start_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL]);

			pipeline->shaders[MESA_SHADER_TESS_CTRL] = radv_shader_variant_compile(device, modules[MESA_SHADER_TESS_CTRL], combined_nir, 2,
											       pipeline->layout,
											       &key, &binaries[MESA_SHADER_TESS_CTRL]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL], false);
		}
		modules[MESA_SHADER_VERTEX] = NULL;
		keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
		keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.tcs.outputs_written);
	}

	if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_GEOMETRY]) {
		gl_shader_stage pre_stage = modules[MESA_SHADER_TESS_EVAL] ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
		if (!pipeline->shaders[MESA_SHADER_GEOMETRY]) {
			struct nir_shader *combined_nir[] = {nir[pre_stage], nir[MESA_SHADER_GEOMETRY]};

			radv_start_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY]);

			pipeline->shaders[MESA_SHADER_GEOMETRY] = radv_shader_variant_compile(device, modules[MESA_SHADER_GEOMETRY], combined_nir, 2,
											      pipeline->layout,
											      &keys[pre_stage] , &binaries[MESA_SHADER_GEOMETRY]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY], false);
		}
		modules[pre_stage] = NULL;
	}

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if(modules[i] && !pipeline->shaders[i]) {
			if (i == MESA_SHADER_TESS_CTRL) {
				keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = util_last_bit64(pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.ls_outputs_written);
			}
			if (i == MESA_SHADER_TESS_EVAL) {
				keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
				keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.tcs.outputs_written);
			}

			radv_start_feedback(stage_feedbacks[i]);

			pipeline->shaders[i] = radv_shader_variant_compile(device, modules[i], &nir[i], 1,
									   pipeline->layout,
									   keys + i, &binaries[i]);

			radv_stop_feedback(stage_feedbacks[i], false);
		}
	}

	if(modules[MESA_SHADER_GEOMETRY] && !use_ngg) {
		struct radv_shader_binary *gs_copy_binary = NULL;
		if (!pipeline->gs_copy_shader) {
			pipeline->gs_copy_shader = radv_create_gs_copy_shader(
					device, nir[MESA_SHADER_GEOMETRY], &gs_copy_binary,
					keys[MESA_SHADER_GEOMETRY].has_multiview_view_index);
		}

		if (pipeline->gs_copy_shader) {
			struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
			struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};

			binaries[MESA_SHADER_GEOMETRY] = gs_copy_binary;
			variants[MESA_SHADER_GEOMETRY] = pipeline->gs_copy_shader;

			radv_pipeline_cache_insert_shaders(device, cache,
							   gs_copy_hash,
							   variants,
							   binaries);
		}
		free(gs_copy_binary);
	}

	radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
					   binaries);

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		free(binaries[i]);
		if (nir[i]) {
			if (!pipeline->device->keep_shader_info)
				ralloc_free(nir[i]);

			if (radv_can_dump_shader_stats(device, modules[i]))
				radv_shader_dump_stats(device,
						       pipeline->shaders[i],
						       i, stderr);
		}
	}

	if (fs_m.nir)
		ralloc_free(fs_m.nir);

	radv_stop_feedback(pipeline_feedback, false);
}
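/* Cache-key note, by example: the GS copy shader is cached under the same
 * 20-byte pipeline hash with its first byte XOR'd with 1 (see gs_copy_hash
 * above), so a single pipeline yields two independent cache entries that can
 * both be looked up on later runs.
 */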
static uint32_t
radv_pipeline_stage_to_user_data_0(struct radv_pipeline *pipeline,
				   gl_shader_stage stage, enum chip_class chip_class)
{
	bool has_gs = radv_pipeline_has_gs(pipeline);
	bool has_tess = radv_pipeline_has_tess(pipeline);
	bool has_ngg = radv_pipeline_has_ngg(pipeline);

	switch (stage) {
	case MESA_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	case MESA_SHADER_VERTEX:
		if (has_tess) {
			if (chip_class >= GFX10) {
				return R_00B430_SPI_SHADER_USER_DATA_HS_0;
			} else if (chip_class == GFX9) {
				return R_00B430_SPI_SHADER_USER_DATA_LS_0;
			} else {
				return R_00B530_SPI_SHADER_USER_DATA_LS_0;
			}
		}

		if (has_gs) {
			if (chip_class >= GFX10) {
				return R_00B230_SPI_SHADER_USER_DATA_GS_0;
			} else {
				return R_00B330_SPI_SHADER_USER_DATA_ES_0;
			}
		}

		if (has_ngg)
			return R_00B230_SPI_SHADER_USER_DATA_GS_0;

		return R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case MESA_SHADER_GEOMETRY:
		return chip_class == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
					    R_00B230_SPI_SHADER_USER_DATA_GS_0;
	case MESA_SHADER_COMPUTE:
		return R_00B900_COMPUTE_USER_DATA_0;
	case MESA_SHADER_TESS_CTRL:
		return chip_class == GFX9 ? R_00B430_SPI_SHADER_USER_DATA_LS_0 :
					    R_00B430_SPI_SHADER_USER_DATA_HS_0;
	case MESA_SHADER_TESS_EVAL:
		if (has_gs) {
			return chip_class >= GFX10 ? R_00B230_SPI_SHADER_USER_DATA_GS_0 :
						     R_00B330_SPI_SHADER_USER_DATA_ES_0;
		} else if (has_ngg) {
			return R_00B230_SPI_SHADER_USER_DATA_GS_0;
		} else {
			return R_00B130_SPI_SHADER_USER_DATA_VS_0;
		}
	default:
		unreachable("unknown shader");
	}
}
struct radv_bin_size_entry {
	unsigned bpp;
	VkExtent2D extent;
};

static VkExtent2D
radv_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	static const struct radv_bin_size_entry color_size_table[][3][9] = {
		{
			/* One RB / SE */
			{
				/* One shader engine */
				/* ... bpp-ordered entries elided ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Two shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Four shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			/* Two RB / SE */
			{
				/* One shader engine */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Two shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Four shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			/* Four RB / SE */
			{
				/* One shader engine */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Two shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Four shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
		},
	};
	static const struct radv_bin_size_entry ds_size_table[][3][9] = {
		{
			// One RB / SE
			{
				// One shader engine
				/* ... bpp-ordered entries elided ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Two shader engines
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Four shader engines
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			// Two RB / SE
			{
				// One shader engine
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Two shader engines
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Four shader engines
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			// Four RB / SE
			{
				// One shader engine
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Two shader engines
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Four shader engines
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
		},
	};
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	VkExtent2D extent = {512, 512};

	unsigned log_num_rb_per_se =
		util_logbase2_ceil(pipeline->device->physical_device->rad_info.num_render_backends /
				   pipeline->device->physical_device->rad_info.max_se);
	unsigned log_num_se = util_logbase2_ceil(pipeline->device->physical_device->rad_info.max_se);

	unsigned total_samples = 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline->graphics.ms.pa_sc_aa_config);
	unsigned ps_iter_samples = 1u << G_028804_PS_ITER_SAMPLES(pipeline->graphics.ms.db_eqaa);
	unsigned effective_samples = total_samples;
	unsigned color_bytes_per_pixel = 0;

	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	if (vkblend) {
		for (unsigned i = 0; i < subpass->color_count; i++) {
			if (!vkblend->pAttachments[i].colorWriteMask)
				continue;

			if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
				continue;

			VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
			color_bytes_per_pixel += vk_format_get_blocksize(format);
		}

		/* MSAA images typically don't use all samples all the time. */
		if (effective_samples >= 2 && ps_iter_samples <= 1)
			effective_samples = 2;
		color_bytes_per_pixel *= effective_samples;
	}

	const struct radv_bin_size_entry *color_entry = color_size_table[log_num_rb_per_se][log_num_se];
	while(color_entry[1].bpp <= color_bytes_per_pixel)
		++color_entry;

	extent = color_entry->extent;

	if (subpass->depth_stencil_attachment) {
		struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

		/* Coefficients taken from AMDVLK */
		unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
		unsigned stencil_coeff = vk_format_is_stencil(attachment->format) ? 1 : 0;
		unsigned ds_bytes_per_pixel = 4 * (depth_coeff + stencil_coeff) * total_samples;

		const struct radv_bin_size_entry *ds_entry = ds_size_table[log_num_rb_per_se][log_num_se];
		while(ds_entry[1].bpp <= ds_bytes_per_pixel)
			++ds_entry;

		extent.width = MIN2(extent.width, ds_entry->extent.width);
		extent.height = MIN2(extent.height, ds_entry->extent.height);
	}

	return extent;
}
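/* Lookup example (illustrative): two RGBA8 attachments at 4x MSAA with
 * ps_iter_samples <= 1 count as 2 effective samples, so
 * color_bytes_per_pixel = (4 + 4) * 2 = 16; the while loop then advances
 * through the bpp-sorted row until the next entry would be too large and
 * takes that entry's extent, finally MIN2'd against the depth/stencil result.
 */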
static void
radv_pipeline_generate_binning_state(struct radeon_cmdbuf *ctx_cs,
				     struct radv_pipeline *pipeline,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
		return;

	uint32_t pa_sc_binner_cntl_0 =
		S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
		S_028C44_DISABLE_START_OF_PRIM(1);
	uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);

	VkExtent2D bin_size = radv_compute_bin_size(pipeline, pCreateInfo);

	if (pipeline->device->pbb_allowed && bin_size.width && bin_size.height) {
		unsigned context_states_per_bin; /* allowed range: [1, 6] */
		unsigned persistent_states_per_bin; /* allowed range: [1, 32] */
		unsigned fpovs_per_batch; /* allowed range: [0, 255], 0 = unlimited */

		switch (pipeline->device->physical_device->rad_info.family) {
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
			context_states_per_bin = 1;
			persistent_states_per_bin = 1;
			fpovs_per_batch = 63;
			break;
		case CHIP_RAVEN:
		case CHIP_RAVEN2:
			context_states_per_bin = 6;
			persistent_states_per_bin = 32;
			fpovs_per_batch = 63;
			break;
		default:
			unreachable("unhandled family while determining binning state.");
		}

		pa_sc_binner_cntl_0 =
			S_028C44_BINNING_MODE(V_028C44_BINNING_ALLOWED) |
			S_028C44_BIN_SIZE_X(bin_size.width == 16) |
			S_028C44_BIN_SIZE_Y(bin_size.height == 16) |
			S_028C44_BIN_SIZE_X_EXTEND(util_logbase2(MAX2(bin_size.width, 32)) - 5) |
			S_028C44_BIN_SIZE_Y_EXTEND(util_logbase2(MAX2(bin_size.height, 32)) - 5) |
			S_028C44_CONTEXT_STATES_PER_BIN(context_states_per_bin - 1) |
			S_028C44_PERSISTENT_STATES_PER_BIN(persistent_states_per_bin - 1) |
			S_028C44_DISABLE_START_OF_PRIM(1) |
			S_028C44_FPOVS_PER_BATCH(fpovs_per_batch) |
			S_028C44_OPTIMAL_BIN_SELECTION(1);
	}

	radeon_set_context_reg(ctx_cs, R_028C44_PA_SC_BINNER_CNTL_0,
			       pa_sc_binner_cntl_0);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
		radeon_set_context_reg(ctx_cs, R_028038_DB_DFSM_CONTROL,
				       db_dfsm_control);
	} else {
		radeon_set_context_reg(ctx_cs, R_028060_DB_DFSM_CONTROL,
				       db_dfsm_control);
	}
}
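/* Register-encoding example (illustrative): a 64x32 bin programs
 * BIN_SIZE_X = 0 with BIN_SIZE_X_EXTEND = log2(max(64, 32)) - 5 = 1 and
 * BIN_SIZE_Y_EXTEND = log2(32) - 5 = 0; the dedicated BIN_SIZE_X/Y bits
 * cover only the special 16-pixel case, which the EXTEND encoding cannot
 * express.
 */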
static void
radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *ctx_cs,
					   struct radv_pipeline *pipeline,
					   const VkGraphicsPipelineCreateInfo *pCreateInfo,
					   const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineDepthStencilStateCreateInfo *vkds = pCreateInfo->pDepthStencilState;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	struct radv_render_pass_attachment *attachment = NULL;
	uint32_t db_depth_control = 0, db_stencil_control = 0;
	uint32_t db_render_control = 0, db_render_override2 = 0;
	uint32_t db_render_override = 0;

	if (subpass->depth_stencil_attachment)
		attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

	bool has_depth_attachment = attachment && vk_format_is_depth(attachment->format);
	bool has_stencil_attachment = attachment && vk_format_is_stencil(attachment->format);

	if (vkds && has_depth_attachment) {
		db_depth_control = S_028800_Z_ENABLE(vkds->depthTestEnable ? 1 : 0) |
				   S_028800_Z_WRITE_ENABLE(vkds->depthWriteEnable ? 1 : 0) |
				   S_028800_ZFUNC(vkds->depthCompareOp) |
				   S_028800_DEPTH_BOUNDS_ENABLE(vkds->depthBoundsTestEnable ? 1 : 0);

		/* from amdvlk: For 4xAA and 8xAA need to decompress on flush for better performance */
		db_render_override2 |= S_028010_DECOMPRESS_Z_ON_FLUSH(attachment->samples > 2);
	}

	if (has_stencil_attachment && vkds && vkds->stencilTestEnable) {
		db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
		db_depth_control |= S_028800_STENCILFUNC(vkds->front.compareOp);
		db_stencil_control |= S_02842C_STENCILFAIL(si_translate_stencil_op(vkds->front.failOp));
		db_stencil_control |= S_02842C_STENCILZPASS(si_translate_stencil_op(vkds->front.passOp));
		db_stencil_control |= S_02842C_STENCILZFAIL(si_translate_stencil_op(vkds->front.depthFailOp));

		db_depth_control |= S_028800_STENCILFUNC_BF(vkds->back.compareOp);
		db_stencil_control |= S_02842C_STENCILFAIL_BF(si_translate_stencil_op(vkds->back.failOp));
		db_stencil_control |= S_02842C_STENCILZPASS_BF(si_translate_stencil_op(vkds->back.passOp));
		db_stencil_control |= S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(vkds->back.depthFailOp));
	}

	if (attachment && extra) {
		db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(extra->db_depth_clear);
		db_render_control |= S_028000_STENCIL_CLEAR_ENABLE(extra->db_stencil_clear);

		db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->db_resummarize);
		db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->db_flush_depth_inplace);
		db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->db_flush_stencil_inplace);
		db_render_override2 |= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra->db_depth_disable_expclear);
		db_render_override2 |= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra->db_stencil_disable_expclear);
	}

	db_render_override |= S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
			      S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE);

	if (!pCreateInfo->pRasterizationState->depthClampEnable) {
		/* From VK_EXT_depth_range_unrestricted spec:
		 *
		 * "The behavior described in Primitive Clipping still applies.
		 *  If depth clamping is disabled the depth values are still
		 *  clipped to 0 ≤ zc ≤ wc before the viewport transform. If
		 *  depth clamping is enabled the above equation is ignored and
		 *  the depth values are instead clamped to the VkViewport
		 *  minDepth and maxDepth values, which in the case of this
		 *  extension can be outside of the 0.0 to 1.0 range."
		 */
		db_render_override |= S_02800C_DISABLE_VIEWPORT_CLAMP(1);
	}

	radeon_set_context_reg(ctx_cs, R_028800_DB_DEPTH_CONTROL, db_depth_control);
	radeon_set_context_reg(ctx_cs, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);

	radeon_set_context_reg(ctx_cs, R_028000_DB_RENDER_CONTROL, db_render_control);
	radeon_set_context_reg(ctx_cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
	radeon_set_context_reg(ctx_cs, R_028010_DB_RENDER_OVERRIDE2, db_render_override2);
}
static void
radv_pipeline_generate_blend_state(struct radeon_cmdbuf *ctx_cs,
				   struct radv_pipeline *pipeline,
				   const struct radv_blend_state *blend)
{
	radeon_set_context_reg_seq(ctx_cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(ctx_cs, blend->cb_blend_control,
			  8);
	radeon_set_context_reg(ctx_cs, R_028808_CB_COLOR_CONTROL, blend->cb_color_control);
	radeon_set_context_reg(ctx_cs, R_028B70_DB_ALPHA_TO_MASK, blend->db_alpha_to_mask);

	if (pipeline->device->physical_device->has_rbplus) {
		radeon_set_context_reg_seq(ctx_cs, R_028760_SX_MRT0_BLEND_OPT, 8);
		radeon_emit_array(ctx_cs, blend->sx_mrt_blend_opt, 8);
	}

	radeon_set_context_reg(ctx_cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(ctx_cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(ctx_cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	pipeline->graphics.col_format = blend->spi_shader_col_format;
	pipeline->graphics.cb_target_mask = blend->cb_target_mask;
}
static const VkConservativeRasterizationModeEXT
radv_get_conservative_raster_mode(const VkPipelineRasterizationStateCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationConservativeStateCreateInfoEXT *conservative_raster =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT);

	if (!conservative_raster)
		return VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT;
	return conservative_raster->conservativeRasterizationMode;
}
static void
radv_pipeline_generate_raster_state(struct radeon_cmdbuf *ctx_cs,
				    struct radv_pipeline *pipeline,
				    const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationStateCreateInfo *vkraster = pCreateInfo->pRasterizationState;
	const VkConservativeRasterizationModeEXT mode =
		radv_get_conservative_raster_mode(vkraster);
	uint32_t pa_sc_conservative_rast = S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1);
	bool depth_clip_disable = vkraster->depthClampEnable;

	const VkPipelineRasterizationDepthClipStateCreateInfoEXT *depth_clip_state =
		vk_find_struct_const(vkraster->pNext, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
	if (depth_clip_state) {
		depth_clip_disable = !depth_clip_state->depthClipEnable;
	}

	radeon_set_context_reg(ctx_cs, R_028810_PA_CL_CLIP_CNTL,
			       S_028810_DX_CLIP_SPACE_DEF(1) | // vulkan uses DX conventions.
			       S_028810_ZCLIP_NEAR_DISABLE(depth_clip_disable ? 1 : 0) |
			       S_028810_ZCLIP_FAR_DISABLE(depth_clip_disable ? 1 : 0) |
			       S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
			       S_028810_DX_LINEAR_ATTR_CLIP_ENA(1));

	radeon_set_context_reg(ctx_cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       S_0286D4_FLAT_SHADE_ENA(1) |
			       S_0286D4_PNT_SPRITE_ENA(1) |
			       S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
			       S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
			       S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
			       S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
			       S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */

	radeon_set_context_reg(ctx_cs, R_028BE4_PA_SU_VTX_CNTL,
			       S_028BE4_PIX_CENTER(1) | // TODO verify
			       S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
			       S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));

	radeon_set_context_reg(ctx_cs, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_FACE(vkraster->frontFace) |
			       S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
			       S_028814_CULL_BACK(!!(vkraster->cullMode & VK_CULL_MODE_BACK_BIT)) |
			       S_028814_POLY_MODE(vkraster->polygonMode != VK_POLYGON_MODE_FILL) |
			       S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(vkraster->polygonMode)) |
			       S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(vkraster->polygonMode)) |
			       S_028814_POLY_OFFSET_FRONT_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
			       S_028814_POLY_OFFSET_BACK_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
			       S_028814_POLY_OFFSET_PARA_ENABLE(vkraster->depthBiasEnable ? 1 : 0));

	/* Conservative rasterization. */
	if (mode != VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT) {
		struct radv_multisample_state *ms = &pipeline->graphics.ms;

		ms->pa_sc_aa_config |= S_028BE0_AA_MASK_CENTROID_DTMN(1);
		ms->db_eqaa |= S_028804_ENABLE_POSTZ_OVERRASTERIZATION(1) |
			       S_028804_OVERRASTERIZATION_AMOUNT(4);

		pa_sc_conservative_rast = S_028C4C_PREZ_AA_MASK_ENABLE(1) |
					  S_028C4C_POSTZ_AA_MASK_ENABLE(1) |
					  S_028C4C_CENTROID_SAMPLE_OVERRIDE(1);

		if (mode == VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT) {
			pa_sc_conservative_rast |=
				S_028C4C_OVER_RAST_ENABLE(1) |
				S_028C4C_OVER_RAST_SAMPLE_SELECT(0) |
				S_028C4C_UNDER_RAST_ENABLE(0) |
				S_028C4C_UNDER_RAST_SAMPLE_SELECT(1) |
				S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(1);
		} else {
			assert(mode == VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT);
			pa_sc_conservative_rast |=
				S_028C4C_OVER_RAST_ENABLE(0) |
				S_028C4C_OVER_RAST_SAMPLE_SELECT(1) |
				S_028C4C_UNDER_RAST_ENABLE(1) |
				S_028C4C_UNDER_RAST_SAMPLE_SELECT(0) |
				S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(0);
		}
	}

	radeon_set_context_reg(ctx_cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
			       pa_sc_conservative_rast);
}
static void
radv_pipeline_generate_multisample_state(struct radeon_cmdbuf *ctx_cs,
					 struct radv_pipeline *pipeline)
{
	struct radv_multisample_state *ms = &pipeline->graphics.ms;

	radeon_set_context_reg_seq(ctx_cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(ctx_cs, R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(ctx_cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

	/* The exclusion bits can be set to improve rasterization efficiency
	 * if no sample lies on the pixel boundary (-8 sample offset). It's
	 * currently always TRUE because the driver doesn't support 16 samples.
	 */
	bool exclusion = pipeline->device->physical_device->rad_info.chip_class >= GFX7;
	radeon_set_context_reg(ctx_cs, R_02882C_PA_SU_PRIM_FILTER_CNTL,
			       S_02882C_XMAX_RIGHT_EXCLUSION(exclusion) |
			       S_02882C_YMAX_BOTTOM_EXCLUSION(exclusion));
}
static void
radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
				   struct radv_pipeline *pipeline)
{
	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	unsigned vgt_primitiveid_en = 0;
	uint32_t vgt_gs_mode = 0;

	if (radv_pipeline_has_gs(pipeline)) {
		const struct radv_shader_variant *gs =
			pipeline->shaders[MESA_SHADER_GEOMETRY];

		vgt_gs_mode = ac_vgt_gs_mode(gs->info.gs.vertices_out,
					     pipeline->device->physical_device->rad_info.chip_class);
	} else if (radv_pipeline_has_ngg(pipeline)) {
		const struct radv_shader_variant *vs =
			pipeline->shaders[MESA_SHADER_TESS_EVAL] ?
			pipeline->shaders[MESA_SHADER_TESS_EVAL] :
			pipeline->shaders[MESA_SHADER_VERTEX];
		bool enable_prim_id =
			outinfo->export_prim_id || vs->info.info.uses_prim_id;

		vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(enable_prim_id) |
				      S_028A84_NGG_DISABLE_PROVOK_REUSE(enable_prim_id);
	} else if (outinfo->export_prim_id) {
		vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
		vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
	}

	radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
	radeon_set_context_reg(ctx_cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
}
static void
radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *ctx_cs,
			     struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B124_MEM_BASE(va >> 40));
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);

	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	unsigned clip_dist_mask, cull_dist_mask, total_mask;
	clip_dist_mask = outinfo->clip_dist_mask;
	cull_dist_mask = outinfo->cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;
	bool misc_vec_ena = outinfo->writes_pointsize ||
		outinfo->writes_layer ||
		outinfo->writes_viewport_index;

	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(MAX2(1, outinfo->param_exports) - 1));

	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
			       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
			       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       cull_dist_mask << 8 |
			       clip_dist_mask);

	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
		radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF,
				       outinfo->writes_viewport_index);
}
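/* Mask layout, by example (illustrative): 4 clip and 2 cull distances give
 * clip_dist_mask = 0x0f and cull_dist_mask = 0x30, so PA_CL_VS_OUT_CNTL
 * carries the clip mask in its low byte, the cull mask shifted into bits
 * 15:8, and both CCDIST vector enables set because total_mask = 0x3f touches
 * both nibbles.
 */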
static void
radv_pipeline_generate_hw_es(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);
}
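
/* LS setup for a VS feeding the tessellation control shader. The LDS size
 * is ORed into RSRC2; the extra standalone RSRC2 write below is a GFX7
 * requirement that Hawaii does not need.
 */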
static void
radv_pipeline_generate_hw_ls(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader,
			     const struct radv_tessellation_state *tess)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	uint32_t rsrc2 = shader->config.rsrc2;

	radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));

	rsrc2 |= S_00B52C_LDS_SIZE(tess->lds_size);
	if (pipeline->device->physical_device->rad_info.chip_class == GFX7 &&
	    pipeline->device->physical_device->rad_info.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);

	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, rsrc2);
}
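
/* NGG setup (GFX10): a single primitive shader replaces the legacy
 * ES/GS/VS chain, so the ES program address and the GS RSRC registers
 * both describe this one shader.
 */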
static void
radv_pipeline_generate_hw_ngg(struct radeon_cmdbuf *ctx_cs,
			      struct radeon_cmdbuf *cs,
			      struct radv_pipeline *pipeline,
			      struct radv_shader_variant *shader,
			      const struct radv_ngg_state *ngg_state)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	gl_shader_stage es_type =
		radv_pipeline_has_tess(pipeline) ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;

	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, va >> 40);
	radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);

	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	unsigned clip_dist_mask, cull_dist_mask, total_mask;
	clip_dist_mask = outinfo->clip_dist_mask;
	cull_dist_mask = outinfo->cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;
	bool misc_vec_ena = outinfo->writes_pointsize ||
		outinfo->writes_layer ||
		outinfo->writes_viewport_index;
	bool break_wave_at_eoi = false;

	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(MAX2(1, outinfo->param_exports) - 1));
	radeon_set_context_reg(ctx_cs, R_028708_SPI_SHADER_IDX_FORMAT,
			       S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP));
	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
			       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
			       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       cull_dist_mask << 8 |
			       clip_dist_mask);

	bool vgt_reuse_off = pipeline->device->physical_device->rad_info.family == CHIP_NAVI10 &&
			     pipeline->device->physical_device->rad_info.chip_external_rev == 0x1 &&
			     es_type == MESA_SHADER_TESS_EVAL;

	radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF,
			       S_028AB4_REUSE_OFF(vgt_reuse_off));
	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       ngg_state->vgt_esgs_ring_itemsize);

	/* NGG specific registers. */
	struct radv_shader_variant *gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	uint32_t gs_num_invocations = gs ? gs->info.gs.invocations : 1;

	radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
			       S_028A44_ES_VERTS_PER_SUBGRP(ngg_state->hw_max_esverts) |
			       S_028A44_GS_PRIMS_PER_SUBGRP(ngg_state->max_gsprims) |
			       S_028A44_GS_INST_PRIMS_IN_SUBGRP(ngg_state->max_gsprims * gs_num_invocations));
	radeon_set_context_reg(ctx_cs, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
			       S_0287FC_MAX_VERTS_PER_SUBGROUP(ngg_state->max_out_verts));
	radeon_set_context_reg(ctx_cs, R_028B4C_GE_NGG_SUBGRP_CNTL,
			       S_028B4C_PRIM_AMP_FACTOR(ngg_state->prim_amp_factor) |
			       S_028B4C_THDS_PER_SUBGRP(0)); /* for fast launch */
	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(gs_num_invocations) |
			       S_028B90_ENABLE(gs_num_invocations > 1) |
			       S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(ngg_state->max_vert_out_per_gs_instance));

	/* User edge flags are set by the pos exports. If user edge flags are
	 * not used, we must use hw-generated edge flags and pass them via
	 * the prim export to prevent drawing lines on internal edges of
	 * decomposed primitives (such as quads) with polygon mode = lines.
	 *
	 * TODO: We should combine hw-generated edge flags with user edge
	 *       flags in the shader.
	 */
	radeon_set_context_reg(ctx_cs, R_028838_PA_CL_NGG_CNTL,
			       S_028838_INDEX_BUF_EDGE_FLAG_ENA(!radv_pipeline_has_tess(pipeline) &&
								!radv_pipeline_has_gs(pipeline)));

	radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL,
			       S_03096C_PRIM_GRP_SIZE(ngg_state->max_gsprims) |
			       S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts) |
			       S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi));
}
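
/* HS setup. On GFX9+ the vertex shader is merged into the HS and the
 * program is addressed through the LS registers; older chips use the
 * dedicated HS register block.
 */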
static void
radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader,
			     const struct radv_tessellation_state *tess)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
		unsigned hs_rsrc2 = shader->config.rsrc2;

		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(tess->lds_size);
		} else {
			hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(tess->lds_size);
		}

		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));
		} else {
			radeon_set_sh_reg_seq(cs, R_00B410_SPI_SHADER_PGM_LO_LS, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B414_MEM_BASE(va >> 40));
		}

		radeon_set_sh_reg_seq(cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
		radeon_emit(cs, shader->config.rsrc1);
		radeon_emit(cs, hs_rsrc2);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
		radeon_emit(cs, va >> 8);
		radeon_emit(cs, S_00B424_MEM_BASE(va >> 40));
		radeon_emit(cs, shader->config.rsrc1);
		radeon_emit(cs, shader->config.rsrc2);
	}
}
static void
radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
				     struct radeon_cmdbuf *cs,
				     struct radv_pipeline *pipeline,
				     const struct radv_tessellation_state *tess,
				     const struct radv_ngg_state *ngg)
{
	struct radv_shader_variant *vs;

	/* Skip shaders merged into HS/GS */
	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	if (!vs)
		return;

	if (vs->info.vs.as_ls)
		radv_pipeline_generate_hw_ls(cs, pipeline, vs, tess);
	else if (vs->info.vs.as_es)
		radv_pipeline_generate_hw_es(cs, pipeline, vs);
	else if (vs->info.is_ngg)
		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, vs, ngg);
	else
		radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, vs);
}
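
/* Tessellation shaders: emit TCS/TES state plus VGT_TF_PARAM and
 * VGT_LS_HS_CONFIG.
 */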
static void
radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *ctx_cs,
				    struct radeon_cmdbuf *cs,
				    struct radv_pipeline *pipeline,
				    const struct radv_tessellation_state *tess,
				    const struct radv_ngg_state *ngg)
{
	if (!radv_pipeline_has_tess(pipeline))
		return;

	struct radv_shader_variant *tes, *tcs;

	tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
	tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];

	if (tes) {
		if (tes->info.is_ngg) {
			radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, tes, ngg);
		} else if (tes->info.tes.as_es)
			radv_pipeline_generate_hw_es(cs, pipeline, tes);
		else
			radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, tes);
	}

	radv_pipeline_generate_hw_hs(cs, pipeline, tcs, tess);

	radeon_set_context_reg(ctx_cs, R_028B6C_VGT_TF_PARAM,
			       tess->tf_param);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7)
		radeon_set_context_reg_idx(ctx_cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   tess->ls_hs_config);
	else
		radeon_set_context_reg(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
				       tess->ls_hs_config);
}
static void
radv_pipeline_generate_hw_gs(struct radeon_cmdbuf *ctx_cs,
			     struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *gs,
			     const struct radv_gs_state *gs_state)
{
	unsigned gs_max_out_vertices;
	uint8_t *num_components;
	uint8_t max_stream;
	unsigned offset;
	uint64_t va;

	gs_max_out_vertices = gs->info.gs.vertices_out;
	max_stream = gs->info.info.gs.max_stream;
	num_components = gs->info.info.gs.num_stream_output_components;

	offset = num_components[0] * gs_max_out_vertices;

	radeon_set_context_reg_seq(ctx_cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 1)
		offset += num_components[1] * gs_max_out_vertices;
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 2)
		offset += num_components[2] * gs_max_out_vertices;
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 3)
		offset += num_components[3] * gs_max_out_vertices;
	radeon_set_context_reg(ctx_cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);

	radeon_set_context_reg_seq(ctx_cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
	radeon_emit(ctx_cs, num_components[0]);
	radeon_emit(ctx_cs, (max_stream >= 1) ? num_components[1] : 0);
	radeon_emit(ctx_cs, (max_stream >= 2) ? num_components[2] : 0);
	radeon_emit(ctx_cs, (max_stream >= 3) ? num_components[3] : 0);

	uint32_t gs_num_invocations = gs->info.gs.invocations;
	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
			       S_028B90_ENABLE(gs_num_invocations > 0));

	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       gs_state->vgt_esgs_ring_itemsize);

	va = radv_buffer_get_va(gs->bo) + gs->bo_offset;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
		} else {
			radeon_set_sh_reg_seq(cs, R_00B210_SPI_SHADER_PGM_LO_ES, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B214_MEM_BASE(va >> 40));
		}

		radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
		radeon_emit(cs, gs->config.rsrc1);
		radeon_emit(cs, gs->config.rsrc2 | S_00B22C_LDS_SIZE(gs_state->lds_size));

		radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL, gs_state->vgt_gs_onchip_cntl);
		radeon_set_context_reg(ctx_cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, gs_state->vgt_gs_max_prims_per_subgroup);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
		radeon_emit(cs, va >> 8);
		radeon_emit(cs, S_00B224_MEM_BASE(va >> 40));
		radeon_emit(cs, gs->config.rsrc1);
		radeon_emit(cs, gs->config.rsrc2);
	}

	radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, pipeline->gs_copy_shader);
}
static void
radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *ctx_cs,
				       struct radeon_cmdbuf *cs,
				       struct radv_pipeline *pipeline,
				       const struct radv_gs_state *gs_state,
				       const struct radv_ngg_state *ngg_state)
{
	struct radv_shader_variant *gs;

	gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	if (!gs)
		return;

	if (gs->info.is_ngg)
		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, gs, ngg_state);
	else
		radv_pipeline_generate_hw_gs(ctx_cs, cs, pipeline, gs, gs_state);

	radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT,
			       gs->info.gs.vertices_out);
}
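
/* Encode one SPI_PS_INPUT_CNTL dword. Offsets up to 31 select a VS
 * parameter export slot; larger values encode a DEFAULT_VAL constant
 * (e.g. (0,0,0,0) or (1,1,1,1)), signalled by the out-of-range offset
 * 0x20.
 */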
static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade, bool float16)
{
	uint32_t ps_input_cntl;
	if (offset <= AC_EXP_PARAM_OFFSET_31) {
		ps_input_cntl = S_028644_OFFSET(offset);
		if (flat_shade)
			ps_input_cntl |= S_028644_FLAT_SHADE(1);
		if (float16) {
			ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
					 S_028644_ATTR0_VALID(1);
		}
	} else {
		/* The input is a DEFAULT_VAL constant. */
		assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
		       offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
		offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
		ps_input_cntl = S_028644_OFFSET(0x20) |
			S_028644_DEFAULT_VAL(offset);
	}
	return ps_input_cntl;
}
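
/* Map every fragment shader input (PrimID, layer, point coord, clip/cull
 * distances and generic varyings) to the corresponding VS parameter
 * export via SPI_PS_INPUT_CNTL_*.
 */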
static void
radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
				 struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	uint32_t ps_input_cntl[32];

	unsigned ps_offset = 0;

	if (ps->info.info.ps.prim_id_input) {
		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false);
			++ps_offset;
		}
	}

	if (ps->info.info.ps.layer_input ||
	    ps->info.info.needs_multiview_view_index) {
		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_LAYER];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED)
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false);
		else
			ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true, false);
		++ps_offset;
	}

	if (ps->info.info.ps.has_pcoord) {
		unsigned val;
		val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
		ps_input_cntl[ps_offset] = val;
		ps_offset++;
	}

	if (ps->info.info.ps.num_input_clips_culls) {
		unsigned vs_offset;

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false);
			++ps_offset;
		}

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED &&
		    ps->info.info.ps.num_input_clips_culls > 4) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false);
			++ps_offset;
		}
	}

	for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.fs.input_mask; ++i) {
		unsigned vs_offset;
		bool flat_shade;
		bool float16;
		if (!(ps->info.fs.input_mask & (1u << i)))
			continue;

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VAR0 + i];
		if (vs_offset == AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = S_028644_OFFSET(0x20);
			++ps_offset;
			continue;
		}

		flat_shade = !!(ps->info.fs.flat_shaded_mask & (1u << ps_offset));
		float16 = !!(ps->info.fs.float16_shaded_mask & (1u << ps_offset));

		ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, flat_shade, float16);
		++ps_offset;
	}

	if (ps_offset) {
		radeon_set_context_reg_seq(ctx_cs, R_028644_SPI_PS_INPUT_CNTL_0, ps_offset);
		for (unsigned i = 0; i < ps_offset; i++) {
			radeon_emit(ctx_cs, ps_input_cntl[i]);
		}
	}
}
static uint32_t
radv_compute_db_shader_control(const struct radv_device *device,
			       const struct radv_pipeline *pipeline,
			       const struct radv_shader_variant *ps)
{
	unsigned z_order;
	if (ps->info.fs.early_fragment_test || !ps->info.info.ps.writes_memory)
		z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
	else
		z_order = V_02880C_LATE_Z;

	bool disable_rbplus = device->physical_device->has_rbplus &&
			      !device->physical_device->rbplus_allowed;

	/* It shouldn't be needed to export gl_SampleMask when MSAA is disabled
	 * but this appears to break Project Cars (DXVK). See
	 * https://bugs.freedesktop.org/show_bug.cgi?id=109401
	 */
	bool mask_export_enable = ps->info.info.ps.writes_sample_mask;

	return  S_02880C_Z_EXPORT_ENABLE(ps->info.info.ps.writes_z) |
		S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.info.ps.writes_stencil) |
		S_02880C_KILL_ENABLE(!!ps->info.fs.can_discard) |
		S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
		S_02880C_Z_ORDER(z_order) |
		S_02880C_DEPTH_BEFORE_SHADER(ps->info.fs.early_fragment_test) |
		S_02880C_EXEC_ON_HIER_FAIL(ps->info.info.ps.writes_memory) |
		S_02880C_EXEC_ON_NOOP(ps->info.info.ps.writes_memory) |
		S_02880C_DUAL_QUAD_DISABLE(disable_rbplus);
}
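
/* Emit the PS program registers and the SPI input/export state derived
 * from the fragment shader.
 */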
static void
radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *ctx_cs,
				       struct radeon_cmdbuf *cs,
				       struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *ps;
	uint64_t va;
	assert (pipeline->shaders[MESA_SHADER_FRAGMENT]);

	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	va = radv_buffer_get_va(ps->bo) + ps->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B024_MEM_BASE(va >> 40));
	radeon_emit(cs, ps->config.rsrc1);
	radeon_emit(cs, ps->config.rsrc2);

	radeon_set_context_reg(ctx_cs, R_02880C_DB_SHADER_CONTROL,
			       radv_compute_db_shader_control(pipeline->device,
							      pipeline, ps));

	radeon_set_context_reg(ctx_cs, R_0286CC_SPI_PS_INPUT_ENA,
			       ps->config.spi_ps_input_ena);

	radeon_set_context_reg(ctx_cs, R_0286D0_SPI_PS_INPUT_ADDR,
			       ps->config.spi_ps_input_addr);

	radeon_set_context_reg(ctx_cs, R_0286D8_SPI_PS_IN_CONTROL,
			       S_0286D8_NUM_INTERP(ps->info.fs.num_interp));

	radeon_set_context_reg(ctx_cs, R_0286E0_SPI_BARYC_CNTL, pipeline->graphics.spi_baryc_cntl);

	radeon_set_context_reg(ctx_cs, R_028710_SPI_SHADER_Z_FORMAT,
			       ac_get_spi_shader_z_format(ps->info.info.ps.writes_z,
							  ps->info.info.ps.writes_stencil,
							  ps->info.info.ps.writes_sample_mask));

	if (pipeline->device->dfsm_allowed) {
		/* optimise this? */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}
}
static void
radv_pipeline_generate_vgt_vertex_reuse(struct radeon_cmdbuf *ctx_cs,
					struct radv_pipeline *pipeline)
{
	if (pipeline->device->physical_device->rad_info.family < CHIP_POLARIS10 ||
	    pipeline->device->physical_device->rad_info.chip_class >= GFX10)
		return;

	unsigned vtx_reuse_depth = 30;
	if (radv_pipeline_has_tess(pipeline) &&
	    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD) {
		vtx_reuse_depth = 14;
	}
	radeon_set_context_reg(ctx_cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
			       S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
}
static uint32_t
radv_compute_vgt_shader_stages_en(const struct radv_pipeline *pipeline)
{
	uint32_t stages = 0;
	if (radv_pipeline_has_tess(pipeline)) {
		stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
			  S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);

		if (radv_pipeline_has_gs(pipeline))
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
				  S_028B54_GS_EN(1);
		else if (radv_pipeline_has_ngg(pipeline))
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
		else
			stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
	} else if (radv_pipeline_has_gs(pipeline)) {
		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
			  S_028B54_GS_EN(1);
	} else if (radv_pipeline_has_ngg(pipeline)) {
		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
	}

	if (radv_pipeline_has_ngg(pipeline)) {
		stages |= S_028B54_PRIMGEN_EN(1);
	} else if (radv_pipeline_has_gs(pipeline)) {
		stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);

	return stages;
}
static unsigned
radv_compute_cliprect_rule(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);

	if (!discard_rectangle_info)
		return 0xffff;

	unsigned mask = 0;

	for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
		/* Interpret i as a bitmask, and then set the bit in the mask if
		 * that combination of rectangles in which the pixel is contained
		 * should pass the cliprect test. */
		unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);

		if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
		    !relevant_subset)
			continue;

		if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
		    relevant_subset)
			continue;

		mask |= 1u << i;
	}

	return mask;
}
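
/* GE_CNTL programming for legacy (non-NGG) GFX10 pipelines; the NGG path
 * programs GE_CNTL in radv_pipeline_generate_hw_ngg() instead.
 */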
static void
gfx10_pipeline_generate_ge_cntl(struct radeon_cmdbuf *ctx_cs,
				struct radv_pipeline *pipeline,
				const struct radv_tessellation_state *tess,
				const struct radv_gs_state *gs_state)
{
	bool break_wave_at_eoi = false;
	unsigned primgroup_size;
	unsigned vertgroup_size;

	if (radv_pipeline_has_tess(pipeline)) {
		primgroup_size = tess->num_patches; /* must be a multiple of NUM_PATCHES */
		vertgroup_size = 0;
	} else if (radv_pipeline_has_gs(pipeline)) {
		unsigned vgt_gs_onchip_cntl = gs_state->vgt_gs_onchip_cntl;
		primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
		vertgroup_size = G_028A44_ES_VERTS_PER_SUBGRP(vgt_gs_onchip_cntl);
	} else {
		primgroup_size = 128; /* recommended without a GS and tess */
		vertgroup_size = 0;
	}

	if (radv_pipeline_has_tess(pipeline)) {
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.uses_prim_id ||
		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.info.uses_prim_id)
			break_wave_at_eoi = true;
	}

	radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL,
			       S_03096C_PRIM_GRP_SIZE(primgroup_size) |
			       S_03096C_VERT_GRP_SIZE(vertgroup_size) |
			       S_03096C_PACKET_TO_ONE_PA(0) /* line stipple */ |
			       S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi));
}
static void
radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
			   const VkGraphicsPipelineCreateInfo *pCreateInfo,
			   const struct radv_graphics_pipeline_create_info *extra,
			   const struct radv_blend_state *blend,
			   const struct radv_tessellation_state *tess,
			   const struct radv_gs_state *gs,
			   const struct radv_ngg_state *ngg,
			   unsigned prim, unsigned gs_out)
{
	struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
	struct radeon_cmdbuf *cs = &pipeline->cs;

	cs->max_dw = 64;
	ctx_cs->max_dw = 256;
	cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw));
	ctx_cs->buf = cs->buf + cs->max_dw;

	radv_pipeline_generate_depth_stencil_state(ctx_cs, pipeline, pCreateInfo, extra);
	radv_pipeline_generate_blend_state(ctx_cs, pipeline, blend);
	radv_pipeline_generate_raster_state(ctx_cs, pipeline, pCreateInfo);
	radv_pipeline_generate_multisample_state(ctx_cs, pipeline);
	radv_pipeline_generate_vgt_gs_mode(ctx_cs, pipeline);
	radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline, tess, ngg);
	radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline, tess, ngg);
	radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline, gs, ngg);
	radv_pipeline_generate_fragment_shader(ctx_cs, cs, pipeline);
	radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
	radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
	radv_pipeline_generate_binning_state(ctx_cs, pipeline, pCreateInfo);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 && !radv_pipeline_has_ngg(pipeline))
		gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline, tess, gs);

	radeon_set_context_reg(ctx_cs, R_0286E8_SPI_TMPRING_SIZE,
			       S_0286E8_WAVES(pipeline->max_waves) |
			       S_0286E8_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
		radeon_set_uconfig_reg_idx(pipeline->device->physical_device,
					   cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
	} else {
		radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
	}
	radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);

	radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, radv_compute_cliprect_rule(pCreateInfo));

	pipeline->ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);

	assert(ctx_cs->cdw <= ctx_cs->max_dw);
	assert(cs->cdw <= cs->max_dw);
}
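
/* Precompute the IA_MULTI_VGT_PARAM fields that depend only on the
 * pipeline, including the per-chip partial-wave and switch-on-EOP/EOI
 * workarounds.
 */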
static struct radv_ia_multi_vgt_param_helpers
radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
					const struct radv_tessellation_state *tess,
					unsigned prim)
{
	struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param = {0};
	const struct radv_device *device = pipeline->device;

	if (radv_pipeline_has_tess(pipeline))
		ia_multi_vgt_param.primgroup_size = tess->num_patches;
	else if (radv_pipeline_has_gs(pipeline))
		ia_multi_vgt_param.primgroup_size = 64;
	else
		ia_multi_vgt_param.primgroup_size = 128; /* recommended without a GS */

	/* GS requirement. */
	ia_multi_vgt_param.partial_es_wave = false;
	if (radv_pipeline_has_gs(pipeline) && device->physical_device->rad_info.chip_class <= GFX8)
		if (SI_GS_PER_ES / ia_multi_vgt_param.primgroup_size >= pipeline->device->gs_table_depth - 3)
			ia_multi_vgt_param.partial_es_wave = true;

	ia_multi_vgt_param.wd_switch_on_eop = false;
	if (device->physical_device->rad_info.chip_class >= GFX7) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (device->physical_device->rad_info.max_se < 4 ||
		    prim == V_008958_DI_PT_POLYGON ||
		    prim == V_008958_DI_PT_LINELOOP ||
		    prim == V_008958_DI_PT_TRIFAN ||
		    prim == V_008958_DI_PT_TRISTRIP_ADJ ||
		    (pipeline->graphics.prim_restart_enable &&
		     (device->physical_device->rad_info.family < CHIP_POLARIS10 ||
		      (prim != V_008958_DI_PT_POINTLIST &&
		       prim != V_008958_DI_PT_LINESTRIP))))
			ia_multi_vgt_param.wd_switch_on_eop = true;
	}

	ia_multi_vgt_param.ia_switch_on_eoi = false;
	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_gs(pipeline) &&
	    pipeline->shaders[MESA_SHADER_GEOMETRY]->info.info.uses_prim_id)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_tess(pipeline)) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.uses_prim_id ||
		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.info.uses_prim_id)
			ia_multi_vgt_param.ia_switch_on_eoi = true;
	}

	ia_multi_vgt_param.partial_vs_wave = false;
	if (radv_pipeline_has_tess(pipeline)) {
		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((device->physical_device->rad_info.family == CHIP_TAHITI ||
		     device->physical_device->rad_info.family == CHIP_PITCAIRN ||
		     device->physical_device->rad_info.family == CHIP_BONAIRE) &&
		    radv_pipeline_has_gs(pipeline))
			ia_multi_vgt_param.partial_vs_wave = true;
		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (device->has_distributed_tess) {
			if (radv_pipeline_has_gs(pipeline)) {
				if (device->physical_device->rad_info.chip_class <= GFX8)
					ia_multi_vgt_param.partial_es_wave = true;
			} else {
				ia_multi_vgt_param.partial_vs_wave = true;
			}
		}
	}

	/* Workaround for a VGT hang when strip primitive types are used with
	 * primitive restart.
	 */
	if (pipeline->graphics.prim_restart_enable &&
	    (prim == V_008958_DI_PT_LINESTRIP ||
	     prim == V_008958_DI_PT_TRISTRIP ||
	     prim == V_008958_DI_PT_LINESTRIP_ADJ ||
	     prim == V_008958_DI_PT_TRISTRIP_ADJ)) {
		ia_multi_vgt_param.partial_vs_wave = true;
	}

	if (radv_pipeline_has_gs(pipeline)) {
		/* On these chips there is the possibility of a hang if the
		 * pipeline uses a GS and partial_vs_wave is not set.
		 *
		 * This mostly does not hit 4-SE chips, as those typically set
		 * ia_switch_on_eoi and then partial_vs_wave is set for pipelines
		 * with GS due to another workaround.
		 *
		 * Reproducer: https://bugs.freedesktop.org/show_bug.cgi?id=109242
		 */
		if (device->physical_device->rad_info.family == CHIP_TONGA ||
		    device->physical_device->rad_info.family == CHIP_FIJI ||
		    device->physical_device->rad_info.family == CHIP_POLARIS10 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS11 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS12 ||
		    device->physical_device->rad_info.family == CHIP_VEGAM) {
			ia_multi_vgt_param.partial_vs_wave = true;
		}
	}

	ia_multi_vgt_param.base =
		S_028AA8_PRIMGROUP_SIZE(ia_multi_vgt_param.primgroup_size - 1) |
		/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
		S_028AA8_MAX_PRIMGRP_IN_WAVE(device->physical_device->rad_info.chip_class == GFX8 ? 2 : 0) |
		S_030960_EN_INST_OPT_BASIC(device->physical_device->rad_info.chip_class >= GFX9) |
		S_030960_EN_INST_OPT_ADV(device->physical_device->rad_info.chip_class >= GFX9);

	return ia_multi_vgt_param;
}
static void
radv_compute_vertex_input_state(struct radv_pipeline *pipeline,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineVertexInputStateCreateInfo *vi_info =
		pCreateInfo->pVertexInputState;
	struct radv_vertex_elements_info *velems = &pipeline->vertex_elements;

	for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
		const VkVertexInputAttributeDescription *desc =
			&vi_info->pVertexAttributeDescriptions[i];
		unsigned loc = desc->location;
		const struct vk_format_description *format_desc;

		format_desc = vk_format_description(desc->format);

		velems->format_size[loc] = format_desc->block.bits / 8;
	}

	for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *desc =
			&vi_info->pVertexBindingDescriptions[i];

		pipeline->binding_stride[desc->binding] = desc->stride;
		pipeline->num_vertex_bindings =
			MAX2(pipeline->num_vertex_bindings, desc->binding + 1);
	}
}
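
/* Streamout state is owned by the last geometry stage that writes
 * transform feedback outputs, searched from GS down to VS.
 */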
static struct radv_shader_variant *
radv_pipeline_get_streamout_shader(struct radv_pipeline *pipeline)
{
	int i;

	for (i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
		struct radv_shader_variant *shader =
			radv_get_shader(pipeline, i);

		if (shader && shader->info.info.so.num_outputs > 0)
			return shader;
	}

	return NULL;
}
static VkResult
radv_pipeline_init(struct radv_pipeline *pipeline,
		   struct radv_device *device,
		   struct radv_pipeline_cache *cache,
		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   const struct radv_graphics_pipeline_create_info *extra)
{
	VkResult result;
	bool has_view_index = false;

	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	if (subpass->view_mask)
		has_view_index = true;

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	struct radv_blend_state blend = radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;

	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
		pStages[stage] = &pCreateInfo->pStages[i];
		if(creation_feedback)
			stage_feedbacks[stage] = &creation_feedback->pPipelineStageCreationFeedbacks[i];
	}

	struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend, has_view_index);
	radv_create_shaders(pipeline, device, cache, &key, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);

	pipeline->graphics.spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	radv_pipeline_init_multisample_state(pipeline, &blend, pCreateInfo);

	uint32_t gs_out;
	uint32_t prim = si_translate_prim(pCreateInfo->pInputAssemblyState->topology);

	pipeline->graphics.can_use_guardband = radv_prim_can_use_guardband(pCreateInfo->pInputAssemblyState->topology);

	if (radv_pipeline_has_gs(pipeline)) {
		gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
		pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	} else if (radv_pipeline_has_tess(pipeline)) {
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.point_mode)
			gs_out = V_028A6C_OUTPRIM_TYPE_POINTLIST;
		else
			gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.primitive_mode);
		pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	} else {
		gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
	}
	if (extra && extra->use_rectlist) {
		prim = V_008958_DI_PT_RECTLIST;
		gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
		pipeline->graphics.can_use_guardband = true;
		if (radv_pipeline_has_ngg(pipeline))
			gs_out = V_028A6C_VGT_OUT_RECT_V0;
	}
	pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;
	/* prim vertex count will need TESS changes */
	pipeline->graphics.prim_vertex_count = prim_size_table[prim];

	radv_pipeline_init_dynamic_state(pipeline, pCreateInfo);

	/* Ensure that some export memory is always allocated, for two reasons:
	 *
	 * 1) Correctness: The hardware ignores the EXEC mask if no export
	 *    memory is allocated, so KILL and alpha test do not work correctly
	 *    without this.
	 * 2) Performance: Every shader needs at least a NULL export, even when
	 *    it writes no color/depth output. The NULL export instruction
	 *    stalls without this setting.
	 *
	 * Don't add this to CB_SHADER_MASK.
	 */
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	if (!blend.spi_shader_col_format) {
		if (!ps->info.info.ps.writes_z &&
		    !ps->info.info.ps.writes_stencil &&
		    !ps->info.info.ps.writes_sample_mask)
			blend.spi_shader_col_format = V_028714_SPI_SHADER_32_R;
	}

	for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
		if (pipeline->shaders[i]) {
			pipeline->need_indirect_descriptor_sets |= pipeline->shaders[i]->info.need_indirect_descriptor_sets;
		}
	}

	struct radv_ngg_state ngg = {0};
	struct radv_gs_state gs = {0};

	if (radv_pipeline_has_ngg(pipeline)) {
		ngg = calculate_ngg_info(pCreateInfo, pipeline);
	} else if (radv_pipeline_has_gs(pipeline)) {
		gs = calculate_gs_info(pCreateInfo, pipeline);
		calculate_gs_ring_sizes(pipeline, &gs);
	}

	struct radv_tessellation_state tess = {0};
	if (radv_pipeline_has_tess(pipeline)) {
		if (prim == V_008958_DI_PT_PATCH) {
			pipeline->graphics.prim_vertex_count.min = pCreateInfo->pTessellationState->patchControlPoints;
			pipeline->graphics.prim_vertex_count.incr = 1;
		}
		tess = calculate_tess_state(pipeline, pCreateInfo);
	}

	pipeline->graphics.ia_multi_vgt_param = radv_compute_ia_multi_vgt_param_helpers(pipeline, &tess, prim);

	radv_compute_vertex_input_state(pipeline, pCreateInfo);

	for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
		pipeline->user_data_0[i] = radv_pipeline_stage_to_user_data_0(pipeline, i, device->physical_device->rad_info.chip_class);

	struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX,
							       AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	if (loc->sgpr_idx != -1) {
		pipeline->graphics.vtx_base_sgpr = pipeline->user_data_0[MESA_SHADER_VERTEX];
		pipeline->graphics.vtx_base_sgpr += loc->sgpr_idx * 4;
		if (radv_get_shader(pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id)
			pipeline->graphics.vtx_emit_num = 3;
		else
			pipeline->graphics.vtx_emit_num = 2;
	}

	/* Find the last vertex shader stage that eventually uses streamout. */
	pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);

	result = radv_pipeline_scratch_init(device, pipeline);
	radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, &gs, &ngg, prim, gs_out);

	return result;
}
static VkResult
radv_graphics_pipeline_create(
	VkDevice _device,
	VkPipelineCache _cache,
	const VkGraphicsPipelineCreateInfo *pCreateInfo,
	const struct radv_graphics_pipeline_create_info *extra,
	const VkAllocationCallbacks *pAllocator,
	VkPipeline *pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	result = radv_pipeline_init(pipeline, device, cache,
				    pCreateInfo, extra);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}
VkResult radv_CreateGraphicsPipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkGraphicsPipelineCreateInfo*         pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_graphics_pipeline_create(_device,
						  pipelineCache,
						  &pCreateInfos[i],
						  NULL, pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}
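
/* Compute pipelines: emit the COMPUTE_* SH registers and derive the
 * resource limits from the shader's workgroup size.
 */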
static void
radv_compute_generate_pm4(struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *compute_shader;
	struct radv_device *device = pipeline->device;
	unsigned threads_per_threadgroup;
	unsigned threadgroups_per_cu = 1;
	unsigned waves_per_threadgroup;
	unsigned max_waves_per_sh = 0;
	uint64_t va;

	pipeline->cs.buf = malloc(20 * 4);
	pipeline->cs.max_dw = 20;

	compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(&pipeline->cs, va >> 8);
	radeon_emit(&pipeline->cs, S_00B834_DATA(va >> 40));

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(&pipeline->cs, compute_shader->config.rsrc1);
	radeon_emit(&pipeline->cs, compute_shader->config.rsrc2);

	radeon_set_sh_reg(&pipeline->cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(pipeline->max_waves) |
			  S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	/* Calculate best compute resource limits. */
	threads_per_threadgroup = compute_shader->info.cs.block_size[0] *
				  compute_shader->info.cs.block_size[1] *
				  compute_shader->info.cs.block_size[2];
	waves_per_threadgroup = DIV_ROUND_UP(threads_per_threadgroup, 64);

	if (device->physical_device->rad_info.chip_class >= GFX10 &&
	    waves_per_threadgroup == 1)
		threadgroups_per_cu = 2;

	radeon_set_sh_reg(&pipeline->cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  ac_get_compute_resource_limits(&device->physical_device->rad_info,
							 waves_per_threadgroup,
							 max_waves_per_sh,
							 threadgroups_per_cu));

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));

	assert(pipeline->cs.cdw <= pipeline->cs.max_dw);
}
static VkResult radv_compute_pipeline_create(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkComputePipelineCreateInfo*          pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;
	if (creation_feedback)
		stage_feedbacks[MESA_SHADER_COMPUTE] = &creation_feedback->pPipelineStageCreationFeedbacks[0];

	pStages[MESA_SHADER_COMPUTE] = &pCreateInfo->stage;
	radv_create_shaders(pipeline, device, cache, &(struct radv_pipeline_key) {0}, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);

	pipeline->user_data_0[MESA_SHADER_COMPUTE] = radv_pipeline_stage_to_user_data_0(pipeline, MESA_SHADER_COMPUTE, device->physical_device->rad_info.chip_class);
	pipeline->need_indirect_descriptor_sets |= pipeline->shaders[MESA_SHADER_COMPUTE]->info.need_indirect_descriptor_sets;
	result = radv_pipeline_scratch_init(device, pipeline);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	radv_compute_generate_pm4(pipeline);

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}
VkResult radv_CreateComputePipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkComputePipelineCreateInfo*          pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_compute_pipeline_create(_device, pipelineCache,
						 &pCreateInfos[i],
						 pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}