/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"

#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"
#include "main/menums.h"

struct radv_blend_state {
	uint32_t blend_enable_4bit;
	uint32_t need_src_alpha;

	uint32_t cb_color_control;
	uint32_t cb_target_mask;
	uint32_t cb_target_enabled_4bit;
	uint32_t sx_mrt_blend_opt[8];
	uint32_t cb_blend_control[8];

	uint32_t spi_shader_col_format;
	uint32_t cb_shader_mask;
	uint32_t db_alpha_to_mask;

	uint32_t commutative_4bit;

	bool single_cb_enable;
	bool mrt0_is_dual_src;
};

struct radv_dsa_order_invariance {
	/* Whether the final result in Z/S buffers is guaranteed to be
	 * invariant under changes to the order in which fragments arrive.
	 */
	bool zs;

	/* Whether the set of fragments that pass the combined Z/S test is
	 * guaranteed to be invariant under changes to the order in which
	 * fragments arrive.
	 */
	bool pass_set;
};

struct radv_tessellation_state {
	uint32_t ls_hs_config;
	unsigned num_patches;
	unsigned lds_size;
	uint32_t tf_param;
};

struct radv_gs_state {
	uint32_t vgt_gs_onchip_cntl;
	uint32_t vgt_gs_max_prims_per_subgroup;
	uint32_t vgt_esgs_ring_itemsize;
	uint32_t lds_size;
};

static void
radv_pipeline_destroy(struct radv_device *device,
		      struct radv_pipeline *pipeline,
		      const VkAllocationCallbacks *allocator)
{
	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i)
		if (pipeline->shaders[i])
			radv_shader_variant_destroy(device, pipeline->shaders[i]);

	if (pipeline->gs_copy_shader)
		radv_shader_variant_destroy(device, pipeline->gs_copy_shader);

	free(pipeline->cs.buf);
	vk_free2(&device->alloc, allocator, pipeline);
}

void radv_DestroyPipeline(
	VkDevice                                    _device,
	VkPipeline                                  _pipeline,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	if (!_pipeline)
		return;

	radv_pipeline_destroy(device, pipeline, pAllocator);
}

static uint32_t get_hash_flags(struct radv_device *device)
{
	uint32_t hash_flags = 0;

	if (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH)
		hash_flags |= RADV_HASH_SHADER_UNSAFE_MATH;
	if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
		hash_flags |= RADV_HASH_SHADER_SISCHED;
	return hash_flags;
}
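
/* Illustrative note on the per-stage wave bound computed below: assuming the
 * GCN layout of 4 SIMDs per CU and 256 VGPRs per SIMD, a shader using
 * 32 VGPRs on a 64-CU part would be limited to roughly
 * 4 * 64 * (256 / 32) = 2048 waves before scratch usage is considered.
 */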

static VkResult
radv_pipeline_scratch_init(struct radv_device *device,
			   struct radv_pipeline *pipeline)
{
	unsigned scratch_bytes_per_wave = 0;
	unsigned max_waves = 0;
	unsigned min_waves = 1;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i]) {
			unsigned max_stage_waves = device->scratch_waves;

			scratch_bytes_per_wave = MAX2(scratch_bytes_per_wave,
						      pipeline->shaders[i]->config.scratch_bytes_per_wave);

			max_stage_waves = MIN2(max_stage_waves,
					       4 * device->physical_device->rad_info.num_good_compute_units *
					       (256 / pipeline->shaders[i]->config.num_vgprs));
			max_waves = MAX2(max_waves, max_stage_waves);
		}
	}

	if (pipeline->shaders[MESA_SHADER_COMPUTE]) {
		unsigned group_size = pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[0] *
				      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[1] *
				      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[2];
		min_waves = MAX2(min_waves, round_up_u32(group_size, 64));
	}

	if (scratch_bytes_per_wave)
		max_waves = MIN2(max_waves, 0xffffffffu / scratch_bytes_per_wave);

	if (scratch_bytes_per_wave && max_waves < min_waves) {
		/* Not really true at this moment, but will be true on first
		 * execution. Avoid having hanging shaders. */
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}

	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
	pipeline->max_waves = max_waves;
	return VK_SUCCESS;
}

static uint32_t si_translate_blend_logic_op(VkLogicOp op)
{
	switch (op) {
	case VK_LOGIC_OP_CLEAR:
		return V_028808_ROP3_CLEAR;
	case VK_LOGIC_OP_AND:
		return V_028808_ROP3_AND;
	case VK_LOGIC_OP_AND_REVERSE:
		return V_028808_ROP3_AND_REVERSE;
	case VK_LOGIC_OP_COPY:
		return V_028808_ROP3_COPY;
	case VK_LOGIC_OP_AND_INVERTED:
		return V_028808_ROP3_AND_INVERTED;
	case VK_LOGIC_OP_NO_OP:
		return V_028808_ROP3_NO_OP;
	case VK_LOGIC_OP_XOR:
		return V_028808_ROP3_XOR;
	case VK_LOGIC_OP_OR:
		return V_028808_ROP3_OR;
	case VK_LOGIC_OP_NOR:
		return V_028808_ROP3_NOR;
	case VK_LOGIC_OP_EQUIVALENT:
		return V_028808_ROP3_EQUIVALENT;
	case VK_LOGIC_OP_INVERT:
		return V_028808_ROP3_INVERT;
	case VK_LOGIC_OP_OR_REVERSE:
		return V_028808_ROP3_OR_REVERSE;
	case VK_LOGIC_OP_COPY_INVERTED:
		return V_028808_ROP3_COPY_INVERTED;
	case VK_LOGIC_OP_OR_INVERTED:
		return V_028808_ROP3_OR_INVERTED;
	case VK_LOGIC_OP_NAND:
		return V_028808_ROP3_NAND;
	case VK_LOGIC_OP_SET:
		return V_028808_ROP3_SET;
	default:
		unreachable("Unhandled logic op");
	}
}

static uint32_t si_translate_blend_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case VK_BLEND_OP_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case VK_BLEND_OP_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case VK_BLEND_OP_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		return 0;
	}
}

static uint32_t si_translate_blend_factor(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case VK_BLEND_FACTOR_ONE:
		return V_028780_BLEND_ONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case VK_BLEND_FACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case VK_BLEND_FACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case VK_BLEND_FACTOR_CONSTANT_COLOR:
		return V_028780_BLEND_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
		return V_028780_BLEND_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case VK_BLEND_FACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case VK_BLEND_FACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		return 0;
	}
}

static uint32_t si_translate_blend_opt_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028760_OPT_COMB_ADD;
	case VK_BLEND_OP_SUBTRACT:
		return V_028760_OPT_COMB_SUBTRACT;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028760_OPT_COMB_REVSUBTRACT;
	case VK_BLEND_OP_MIN:
		return V_028760_OPT_COMB_MIN;
	case VK_BLEND_OP_MAX:
		return V_028760_OPT_COMB_MAX;
	default:
		return V_028760_OPT_COMB_BLEND_DISABLED;
	}
}

static uint32_t si_translate_blend_opt_factor(VkBlendFactor factor, bool is_alpha)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_ALL;
	case VK_BLEND_FACTOR_ONE:
		return V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0
				: V_028760_BLEND_OPT_PRESERVE_C1_IGNORE_C0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1
				: V_028760_BLEND_OPT_PRESERVE_C0_IGNORE_C1;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE
				: V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;
	default:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
	}
}

/**
 * Get rid of DST in the blend factors by commuting the operands:
 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
 */
static void si_blend_remove_dst(unsigned *func, unsigned *src_factor,
				unsigned *dst_factor, unsigned expected_dst,
				unsigned replacement_src)
{
	if (*src_factor == expected_dst &&
	    *dst_factor == VK_BLEND_FACTOR_ZERO) {
		*src_factor = VK_BLEND_FACTOR_ZERO;
		*dst_factor = replacement_src;

		/* Commuting the operands requires reversing subtractions. */
		if (*func == VK_BLEND_OP_SUBTRACT)
			*func = VK_BLEND_OP_REVERSE_SUBTRACT;
		else if (*func == VK_BLEND_OP_REVERSE_SUBTRACT)
			*func = VK_BLEND_OP_SUBTRACT;
	}
}

static bool si_blend_factor_uses_dst(unsigned factor)
{
	return factor == VK_BLEND_FACTOR_DST_COLOR ||
	       factor == VK_BLEND_FACTOR_DST_ALPHA ||
	       factor == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
	       factor == VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA ||
	       factor == VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
}

static bool is_dual_src(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_SRC1_COLOR:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
	case VK_BLEND_FACTOR_SRC1_ALPHA:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return true;
	default:
		return false;
	}
}

static unsigned si_choose_spi_color_format(VkFormat vk_format,
					   bool blend_enable,
					   bool blend_need_alpha)
{
	const struct vk_format_description *desc = vk_format_description(vk_format);
	unsigned format, ntype, swap;

	/* Alpha is needed for alpha-to-coverage.
	 * Blending may be with or without alpha.
	 */
	unsigned normal = 0; /* most optimal, may not support blending or export alpha */
	unsigned alpha = 0; /* exports alpha, but may not support blending */
	unsigned blend = 0; /* supports blending, but may not export alpha */
	unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

	format = radv_translate_colorformat(vk_format);
	ntype = radv_translate_color_numformat(vk_format, desc,
					       vk_format_get_first_non_void_channel(vk_format));
	swap = radv_translate_colorswap(vk_format, false);

	/* Choose the SPI color formats. These are required values for Stoney/RB+.
	 * Other chips have multiple choices, though they are not necessarily better.
	 */
	switch (format) {
	case V_028C70_COLOR_5_6_5:
	case V_028C70_COLOR_1_5_5_5:
	case V_028C70_COLOR_5_5_5_1:
	case V_028C70_COLOR_4_4_4_4:
	case V_028C70_COLOR_10_11_11:
	case V_028C70_COLOR_11_11_10:
	case V_028C70_COLOR_8:
	case V_028C70_COLOR_8_8:
	case V_028C70_COLOR_8_8_8_8:
	case V_028C70_COLOR_10_10_10_2:
	case V_028C70_COLOR_2_10_10_10:
		if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_16:
	case V_028C70_COLOR_16_16:
	case V_028C70_COLOR_16_16_16_16:
		if (ntype == V_028C70_NUMBER_UNORM ||
		    ntype == V_028C70_NUMBER_SNORM) {
			/* UNORM16 and SNORM16 don't support blending */
			if (ntype == V_028C70_NUMBER_UNORM)
				normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
			else
				normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

			/* Use 32 bits per channel for blending. */
			if (format == V_028C70_COLOR_16) {
				if (swap == V_028C70_SWAP_STD) { /* R */
					blend = V_028714_SPI_SHADER_32_R;
					blend_alpha = V_028714_SPI_SHADER_32_AR;
				} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
			} else if (format == V_028C70_COLOR_16_16) {
				if (swap == V_028C70_SWAP_STD) { /* RG */
					blend = V_028714_SPI_SHADER_32_GR;
					blend_alpha = V_028714_SPI_SHADER_32_ABGR;
				} else if (swap == V_028C70_SWAP_ALT) /* RA */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
			} else /* 16_16_16_16 */
				blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_FLOAT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_32:
		if (swap == V_028C70_SWAP_STD) { /* R */
			blend = normal = V_028714_SPI_SHADER_32_R;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
		} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		break;

	case V_028C70_COLOR_32_32:
		if (swap == V_028C70_SWAP_STD) { /* RG */
			blend = normal = V_028714_SPI_SHADER_32_GR;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (swap == V_028C70_SWAP_ALT) /* RA */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		break;

	case V_028C70_COLOR_32_32_32_32:
	case V_028C70_COLOR_8_24:
	case V_028C70_COLOR_24_8:
	case V_028C70_COLOR_X24_8_32_FLOAT:
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
		break;

	default:
		unreachable("unhandled blend format");
	}

	if (blend_enable && blend_need_alpha)
		return blend_alpha;
	else if(blend_need_alpha)
		return alpha;
	else if(blend_enable)
		return blend;
	else
		return normal;
}

static void
radv_pipeline_compute_spi_color_formats(struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					struct radv_blend_state *blend)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned col_format = 0;
	unsigned num_targets;

	for (unsigned i = 0; i < (blend->single_cb_enable ? 1 : subpass->color_count); ++i) {
		unsigned cf;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
			cf = V_028714_SPI_SHADER_ZERO;
		} else {
			struct radv_render_pass_attachment *attachment = pass->attachments + subpass->color_attachments[i].attachment;
			bool blend_enable =
				blend->blend_enable_4bit & (0xfu << (i * 4));

			cf = si_choose_spi_color_format(attachment->format,
							blend_enable,
							blend->need_src_alpha & (1 << i));
		}

		col_format |= cf << (4 * i);
	}

	if (!col_format && blend->need_src_alpha & (1 << 0)) {
		/* When a subpass doesn't have any color attachments, write the
		 * alpha channel of MRT0 when alpha coverage is enabled because
		 * the depth attachment needs it.
		 */
		col_format |= V_028714_SPI_SHADER_32_AR;
	}

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	num_targets = (util_last_bit(col_format) + 3) / 4;
	for (unsigned i = 0; i < num_targets; i++) {
		if (!(col_format & (0xf << (i * 4)))) {
			col_format |= V_028714_SPI_SHADER_32_R << (i * 4);
		}
	}

	blend->cb_shader_mask = ac_get_cb_shader_mask(col_format);

	if (blend->mrt0_is_dual_src)
		col_format |= (col_format & 0xf) << 4;
	blend->spi_shader_col_format = col_format;
}

static bool
format_is_int8(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);
	int channel = vk_format_get_first_non_void_channel(format);

	return channel >= 0 && desc->channel[channel].pure_integer &&
	       desc->channel[channel].size == 8;
}

static bool
format_is_int10(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);

	if (desc->nr_channels != 4)
		return false;
	for (unsigned i = 0; i < 4; i++) {
		if (desc->channel[i].pure_integer && desc->channel[i].size == 10)
			return true;
	}
	return false;
}
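
/* Note: radv_format_meta_fs_key() below derives its index from the SPI color
 * export format (skipping the ZERO and 32_AR entries) plus an offset of 3 for
 * int8 formats and 5 for int10 formats; the exemplar table is ordered to stay
 * consistent with that computation.
 */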

/*
 * Ordered so that for each i,
 * radv_format_meta_fs_key(radv_fs_key_format_exemplars[i]) == i.
 */
const VkFormat radv_fs_key_format_exemplars[NUM_META_FS_KEYS] = {
	VK_FORMAT_R32_SFLOAT,
	VK_FORMAT_R32G32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UNORM,
	VK_FORMAT_R16G16B16A16_UNORM,
	VK_FORMAT_R16G16B16A16_SNORM,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_A2R10G10B10_UINT_PACK32,
	VK_FORMAT_A2R10G10B10_SINT_PACK32,
};

unsigned radv_format_meta_fs_key(VkFormat format)
{
	unsigned col_format = si_choose_spi_color_format(format, false, false);

	assert(col_format != V_028714_SPI_SHADER_32_AR);
	if (col_format >= V_028714_SPI_SHADER_32_AR)
		--col_format; /* Skip V_028714_SPI_SHADER_32_AR since there is no such VkFormat */

	--col_format; /* Skip V_028714_SPI_SHADER_ZERO */
	bool is_int8 = format_is_int8(format);
	bool is_int10 = format_is_int10(format);

	return col_format + (is_int8 ? 3 : is_int10 ? 5 : 0);
}

static void
radv_pipeline_compute_get_int_clamp(const VkGraphicsPipelineCreateInfo *pCreateInfo,
				    unsigned *is_int8, unsigned *is_int10)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	*is_int8 = 0;
	*is_int10 = 0;

	for (unsigned i = 0; i < subpass->color_count; ++i) {
		struct radv_render_pass_attachment *attachment;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
			continue;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		if (format_is_int8(attachment->format))
			*is_int8 |= 1 << i;
		if (format_is_int10(attachment->format))
			*is_int10 |= 1 << i;
	}
}
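
/* Background for the check below: blending commutes across fragment order
 * only for a restricted set of states, e.g. dst = max(dst, src) gives the
 * same result regardless of fragment order, whereas classic alpha blending
 * dst = src * a + dst * (1 - a) does not.
 */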

static void
radv_blend_check_commutativity(struct radv_blend_state *blend,
			       VkBlendOp op, VkBlendFactor src,
			       VkBlendFactor dst, unsigned chanmask)
{
	/* Src factor is allowed when it does not depend on Dst. */
	static const uint32_t src_allowed =
		(1u << VK_BLEND_FACTOR_ONE) |
		(1u << VK_BLEND_FACTOR_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA_SATURATE) |
		(1u << VK_BLEND_FACTOR_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC1_ALPHA) |
		(1u << VK_BLEND_FACTOR_ZERO) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);

	if (dst == VK_BLEND_FACTOR_ONE &&
	    (src_allowed & (1u << src))) {
		/* Addition is commutative, but floating point addition isn't
		 * associative: subtle changes can be introduced via different
		 * rounding. Be conservative, only enable for min and max.
		 */
		if (op == VK_BLEND_OP_MAX || op == VK_BLEND_OP_MIN)
			blend->commutative_4bit |= chanmask;
	}
}

static struct radv_blend_state
radv_pipeline_init_blend_state(struct radv_pipeline *pipeline,
			       const VkGraphicsPipelineCreateInfo *pCreateInfo,
			       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_blend_state blend = {0};
	unsigned mode = V_028808_CB_NORMAL;
	int i;

	if (extra && extra->custom_blend_mode) {
		blend.single_cb_enable = true;
		mode = extra->custom_blend_mode;
	}
	blend.cb_color_control = 0;
	if (vkblend->logicOpEnable)
		blend.cb_color_control |= S_028808_ROP3(si_translate_blend_logic_op(vkblend->logicOp));
	else
		blend.cb_color_control |= S_028808_ROP3(V_028808_ROP3_COPY);

	blend.db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(3) |
		S_028B70_ALPHA_TO_MASK_OFFSET1(1) |
		S_028B70_ALPHA_TO_MASK_OFFSET2(0) |
		S_028B70_ALPHA_TO_MASK_OFFSET3(2) |
		S_028B70_OFFSET_ROUND(1);

	if (vkms && vkms->alphaToCoverageEnable) {
		blend.db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);
		blend.need_src_alpha |= 0x1;
	}

	blend.cb_target_mask = 0;
	for (i = 0; i < vkblend->attachmentCount; i++) {
		const VkPipelineColorBlendAttachmentState *att = &vkblend->pAttachments[i];
		unsigned blend_cntl = 0;
		unsigned srcRGB_opt, dstRGB_opt, srcA_opt, dstA_opt;
		VkBlendOp eqRGB = att->colorBlendOp;
		VkBlendFactor srcRGB = att->srcColorBlendFactor;
		VkBlendFactor dstRGB = att->dstColorBlendFactor;
		VkBlendOp eqA = att->alphaBlendOp;
		VkBlendFactor srcA = att->srcAlphaBlendFactor;
		VkBlendFactor dstA = att->dstAlphaBlendFactor;

		blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);

		if (!att->colorWriteMask)
			continue;

		blend.cb_target_mask |= (unsigned)att->colorWriteMask << (4 * i);
		blend.cb_target_enabled_4bit |= 0xf << (4 * i);
		if (!att->blendEnable) {
			blend.cb_blend_control[i] = blend_cntl;
			continue;
		}

		if (is_dual_src(srcRGB) || is_dual_src(dstRGB) || is_dual_src(srcA) || is_dual_src(dstA))
			if (i == 0)
				blend.mrt0_is_dual_src = true;

		if (eqRGB == VK_BLEND_OP_MIN || eqRGB == VK_BLEND_OP_MAX) {
			srcRGB = VK_BLEND_FACTOR_ONE;
			dstRGB = VK_BLEND_FACTOR_ONE;
		}
		if (eqA == VK_BLEND_OP_MIN || eqA == VK_BLEND_OP_MAX) {
			srcA = VK_BLEND_FACTOR_ONE;
			dstA = VK_BLEND_FACTOR_ONE;
		}

		radv_blend_check_commutativity(&blend, eqRGB, srcRGB, dstRGB,
					       0x7 << (4 * i));
		radv_blend_check_commutativity(&blend, eqA, srcA, dstA,
					       0x8 << (4 * i));

		/* Blending optimizations for RB+.
		 * These transformations don't change the behavior.
		 *
		 * First, get rid of DST in the blend factors:
		 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
		 */
		si_blend_remove_dst(&eqRGB, &srcRGB, &dstRGB,
				    VK_BLEND_FACTOR_DST_COLOR,
				    VK_BLEND_FACTOR_SRC_COLOR);

		si_blend_remove_dst(&eqA, &srcA, &dstA,
				    VK_BLEND_FACTOR_DST_COLOR,
				    VK_BLEND_FACTOR_SRC_COLOR);

		si_blend_remove_dst(&eqA, &srcA, &dstA,
				    VK_BLEND_FACTOR_DST_ALPHA,
				    VK_BLEND_FACTOR_SRC_ALPHA);

		/* Look up the ideal settings from tables. */
		srcRGB_opt = si_translate_blend_opt_factor(srcRGB, false);
		dstRGB_opt = si_translate_blend_opt_factor(dstRGB, false);
		srcA_opt = si_translate_blend_opt_factor(srcA, true);
		dstA_opt = si_translate_blend_opt_factor(dstA, true);

		/* Handle interdependencies. */
		if (si_blend_factor_uses_dst(srcRGB))
			dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
		if (si_blend_factor_uses_dst(srcA))
			dstA_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE &&
		    (dstRGB == VK_BLEND_FACTOR_ZERO ||
		     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE))
			dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;

		/* Set the final value. */
		blend.sx_mrt_blend_opt[i] =
			S_028760_COLOR_SRC_OPT(srcRGB_opt) |
			S_028760_COLOR_DST_OPT(dstRGB_opt) |
			S_028760_COLOR_COMB_FCN(si_translate_blend_opt_function(eqRGB)) |
			S_028760_ALPHA_SRC_OPT(srcA_opt) |
			S_028760_ALPHA_DST_OPT(dstA_opt) |
			S_028760_ALPHA_COMB_FCN(si_translate_blend_opt_function(eqA));
		blend_cntl |= S_028780_ENABLE(1);

		blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
		blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(srcRGB));
		blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(dstRGB));
		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
			blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
			blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
			blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(srcA));
			blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(dstA));
		}
		blend.cb_blend_control[i] = blend_cntl;

		blend.blend_enable_4bit |= 0xfu << (i * 4);

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    srcRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
			blend.need_src_alpha |= 1 << i;
	}
	for (i = vkblend->attachmentCount; i < 8; i++) {
		blend.cb_blend_control[i] = 0;
		blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);
	}

	if (pipeline->device->physical_device->has_rbplus) {
		/* Disable RB+ blend optimizations for dual source blending. */
		if (blend.mrt0_is_dual_src) {
			for (i = 0; i < 8; i++) {
				blend.sx_mrt_blend_opt[i] =
					S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_NONE) |
					S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_NONE);
			}
		}

		/* RB+ doesn't work with dual source blending, logic op and
		 * RESOLVE.
		 */
		if (blend.mrt0_is_dual_src || vkblend->logicOpEnable ||
		    mode == V_028808_CB_RESOLVE)
			blend.cb_color_control |= S_028808_DISABLE_DUAL_QUAD(1);
	}

	if (blend.cb_target_mask)
		blend.cb_color_control |= S_028808_MODE(mode);
	else
		blend.cb_color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	radv_pipeline_compute_spi_color_formats(pipeline, pCreateInfo, &blend);
	return blend;
}

static uint32_t si_translate_stencil_op(enum VkStencilOp op)
{
	switch (op) {
	case VK_STENCIL_OP_KEEP:
		return V_02842C_STENCIL_KEEP;
	case VK_STENCIL_OP_ZERO:
		return V_02842C_STENCIL_ZERO;
	case VK_STENCIL_OP_REPLACE:
		return V_02842C_STENCIL_REPLACE_TEST;
	case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
		return V_02842C_STENCIL_ADD_CLAMP;
	case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
		return V_02842C_STENCIL_SUB_CLAMP;
	case VK_STENCIL_OP_INVERT:
		return V_02842C_STENCIL_INVERT;
	case VK_STENCIL_OP_INCREMENT_AND_WRAP:
		return V_02842C_STENCIL_ADD_WRAP;
	case VK_STENCIL_OP_DECREMENT_AND_WRAP:
		return V_02842C_STENCIL_SUB_WRAP;
	default:
		return 0;
	}
}

static uint32_t si_translate_fill(VkPolygonMode func)
{
	switch(func) {
	case VK_POLYGON_MODE_FILL:
		return V_028814_X_DRAW_TRIANGLES;
	case VK_POLYGON_MODE_LINE:
		return V_028814_X_DRAW_LINES;
	case VK_POLYGON_MODE_POINT:
		return V_028814_X_DRAW_POINTS;
	default:
		return V_028814_X_DRAW_POINTS;
	}
}
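
/* Example of the sample-shading computation below: with minSampleShading =
 * 0.5 and 8 rasterization samples, ceil(0.5 * 8) = 4 per-sample invocations;
 * the result is then rounded up to a power of two, which 4 already is.
 */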

static uint8_t radv_pipeline_get_ps_iter_samples(const VkPipelineMultisampleStateCreateInfo *vkms)
{
	uint32_t num_samples = vkms->rasterizationSamples;
	uint32_t ps_iter_samples = 1;

	if (vkms->sampleShadingEnable) {
		ps_iter_samples = ceil(vkms->minSampleShading * num_samples);
		ps_iter_samples = util_next_power_of_two(ps_iter_samples);
	}
	return ps_iter_samples;
}

static bool
radv_is_depth_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->depthTestEnable &&
	       pCreateInfo->depthWriteEnable &&
	       pCreateInfo->depthCompareOp != VK_COMPARE_OP_NEVER;
}

static bool
radv_writes_stencil(const VkStencilOpState *state)
{
	return state->writeMask &&
	       (state->failOp != VK_STENCIL_OP_KEEP ||
		state->passOp != VK_STENCIL_OP_KEEP ||
		state->depthFailOp != VK_STENCIL_OP_KEEP);
}

static bool
radv_is_stencil_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->stencilTestEnable &&
	       (radv_writes_stencil(&pCreateInfo->front) ||
		radv_writes_stencil(&pCreateInfo->back));
}

static bool
radv_is_ds_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return radv_is_depth_write_enabled(pCreateInfo) ||
	       radv_is_stencil_write_enabled(pCreateInfo);
}

static bool
radv_order_invariant_stencil_op(VkStencilOp op)
{
	/* REPLACE is normally order invariant, except when the stencil
	 * reference value is written by the fragment shader. Tracking this
	 * interaction does not seem worth the effort, so be conservative.
	 */
	return op != VK_STENCIL_OP_INCREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_DECREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_REPLACE;
}

static bool
radv_order_invariant_stencil_state(const VkStencilOpState *state)
{
	/* Compute whether, assuming Z writes are disabled, this stencil state
	 * is order invariant in the sense that the set of passing fragments as
	 * well as the final stencil buffer result does not depend on the order
	 * of fragments.
	 */
	return !state->writeMask ||
	       /* The following assumes that Z writes are disabled. */
	       (state->compareOp == VK_COMPARE_OP_ALWAYS &&
		radv_order_invariant_stencil_op(state->passOp) &&
		radv_order_invariant_stencil_op(state->depthFailOp)) ||
	       (state->compareOp == VK_COMPARE_OP_NEVER &&
		radv_order_invariant_stencil_op(state->failOp));
}
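
/* In the function below, order_invariance[0] describes invariance for a
 * depth-only attachment and order_invariance[1] for an attachment that also
 * has stencil; the applicable entry is selected with
 * order_invariance[has_stencil].
 */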

static bool
radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
				struct radv_blend_state *blend,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned colormask = blend->cb_target_enabled_4bit;

	if (!pipeline->device->physical_device->out_of_order_rast_allowed)
		return false;

	/* Be conservative if a logic operation is enabled with color buffers. */
	if (colormask && pCreateInfo->pColorBlendState->logicOpEnable)
		return false;

	/* Default depth/stencil invariance when no attachment is bound. */
	struct radv_dsa_order_invariance dsa_order_invariant = {
		.zs = true, .pass_set = true
	};

	if (pCreateInfo->pDepthStencilState &&
	    subpass->depth_stencil_attachment) {
		const VkPipelineDepthStencilStateCreateInfo *vkds =
			pCreateInfo->pDepthStencilState;
		struct radv_render_pass_attachment *attachment =
			pass->attachments + subpass->depth_stencil_attachment->attachment;
		bool has_stencil = vk_format_is_stencil(attachment->format);
		struct radv_dsa_order_invariance order_invariance[2];
		struct radv_shader_variant *ps =
			pipeline->shaders[MESA_SHADER_FRAGMENT];

		/* Compute depth/stencil order invariance in order to know if
		 * it's safe to enable out-of-order.
		 */
		bool zfunc_is_ordered =
			vkds->depthCompareOp == VK_COMPARE_OP_NEVER ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS_OR_EQUAL ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER_OR_EQUAL;

		bool nozwrite_and_order_invariant_stencil =
			!radv_is_ds_write_enabled(vkds) ||
			(!radv_is_depth_write_enabled(vkds) &&
			 radv_order_invariant_stencil_state(&vkds->front) &&
			 radv_order_invariant_stencil_state(&vkds->back));

		order_invariance[1].zs =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 zfunc_is_ordered);
		order_invariance[0].zs =
			!radv_is_depth_write_enabled(vkds) || zfunc_is_ordered;

		order_invariance[1].pass_set =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 (vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			  vkds->depthCompareOp == VK_COMPARE_OP_NEVER));
		order_invariance[0].pass_set =
			!radv_is_depth_write_enabled(vkds) ||
			(vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			 vkds->depthCompareOp == VK_COMPARE_OP_NEVER);

		dsa_order_invariant = order_invariance[has_stencil];
		if (!dsa_order_invariant.zs)
			return false;

		/* The set of PS invocations is always order invariant,
		 * except when early Z/S tests are requested.
		 */
		if (ps &&
		    ps->info.info.ps.writes_memory &&
		    ps->info.fs.early_fragment_test &&
		    !dsa_order_invariant.pass_set)
			return false;

		/* Determine if out-of-order rasterization should be disabled
		 * when occlusion queries are used.
		 */
		pipeline->graphics.disable_out_of_order_rast_for_occlusion =
			!dsa_order_invariant.pass_set;
	}

	/* No color buffers are enabled for writing. */
	if (!colormask)
		return true;

	unsigned blendmask = colormask & blend->blend_enable_4bit;

	if (blendmask) {
		/* Only commutative blending. */
		if (blendmask & ~blend->commutative_4bit)
			return false;

		if (!dsa_order_invariant.pass_set)
			return false;
	}

	if (colormask & ~blendmask)
		return false;

	return true;
}

static void
radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
				     struct radv_blend_state *blend,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	unsigned num_tile_pipes = pipeline->device->physical_device->rad_info.num_tile_pipes;
	bool out_of_order_rast = false;
	int ps_iter_samples = 1;
	uint32_t mask = 0xffff;

	if (vkms)
		ms->num_samples = vkms->rasterizationSamples;
	else
		ms->num_samples = 1;

	if (vkms)
		ps_iter_samples = radv_pipeline_get_ps_iter_samples(vkms);
	if (vkms && !vkms->sampleShadingEnable && pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.force_persample) {
		ps_iter_samples = ms->num_samples;
	}

	const struct VkPipelineRasterizationStateRasterizationOrderAMD *raster_order =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext, PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD);
	if (raster_order && raster_order->rasterizationOrder == VK_RASTERIZATION_ORDER_RELAXED_AMD) {
		/* Out-of-order rasterization is explicitly enabled by the
		 * application.
		 */
		out_of_order_rast = true;
	} else {
		/* Determine if the driver can enable out-of-order
		 * rasterization internally.
		 */
		out_of_order_rast =
			radv_pipeline_out_of_order_rast(pipeline, blend, pCreateInfo);
	}

	ms->pa_sc_line_cntl = S_028BDC_DX10_DIAMOND_TEST_ENA(1);
	ms->pa_sc_aa_config = 0;
	ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
		S_028804_INCOHERENT_EQAA_READS(1) |
		S_028804_INTERPOLATE_COMP_Z(1) |
		S_028804_STATIC_ANCHOR_ASSOCIATIONS(1);
	ms->pa_sc_mode_cntl_1 =
		S_028A4C_WALK_FENCE_ENABLE(1) | //TODO linear dst fixes
		S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
		S_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(out_of_order_rast) |
		S_028A4C_OUT_OF_ORDER_WATER_MARK(0x7) |
		/* always 1: */
		S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1) |
		S_028A4C_SUPERTILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_TILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE(1) |
		S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		S_028A4C_FORCE_EOV_REZ_ENABLE(1);
	ms->pa_sc_mode_cntl_0 = S_028A48_ALTERNATE_RBS_PER_TILE(pipeline->device->physical_device->rad_info.chip_class >= GFX9) |
		S_028A48_VPORT_SCISSOR_ENABLE(1);

	if (ms->num_samples > 1) {
		unsigned log_samples = util_logbase2(ms->num_samples);
		unsigned log_ps_iter_samples = util_logbase2(ps_iter_samples);
		ms->pa_sc_mode_cntl_0 |= S_028A48_MSAA_ENABLE(1);
		ms->pa_sc_line_cntl |= S_028BDC_EXPAND_LINE_WIDTH(1); /* CM_R_028BDC_PA_SC_LINE_CNTL */
		ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_samples) |
			S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
			S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
			S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
		ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
			S_028BE0_MAX_SAMPLE_DIST(radv_cayman_get_maxdist(log_samples)) |
			S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples); /* CM_R_028BE0_PA_SC_AA_CONFIG */
		ms->pa_sc_mode_cntl_1 |= S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
		if (ps_iter_samples > 1)
			pipeline->graphics.spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
	}

	if (vkms && vkms->pSampleMask) {
		mask = vkms->pSampleMask[0] & 0xffff;
	}

	ms->pa_sc_aa_mask[0] = mask | (mask << 16);
	ms->pa_sc_aa_mask[1] = mask | (mask << 16);
}

static bool
radv_prim_can_use_guardband(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return false;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return true;
	default:
		unreachable("unhandled primitive type");
	}
}

static uint32_t
si_translate_prim(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
		return V_008958_DI_PT_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
		return V_008958_DI_PT_LINELIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
		return V_008958_DI_PT_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
		return V_008958_DI_PT_TRILIST;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
		return V_008958_DI_PT_TRISTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
		return V_008958_DI_PT_TRIFAN;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_LINELIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_LINESTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_TRILIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_TRISTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_008958_DI_PT_PATCH;
	default:
		assert(0);
		return 0;
	}
}

static uint32_t
si_conv_gl_prim_to_gs_out(unsigned gl_prim)
{
	switch (gl_prim) {
	case 0: /* GL_POINTS */
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case 1: /* GL_LINES */
	case 3: /* GL_LINE_STRIP */
	case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
	case 0x8E7A: /* GL_ISOLINES */
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;

	case 4: /* GL_TRIANGLES */
	case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
	case 5: /* GL_TRIANGLE_STRIP */
	case 7: /* GL_QUADS */
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}

static uint32_t
si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}

static unsigned si_map_swizzle(unsigned swizzle)
{
	switch (swizzle) {
	case VK_SWIZZLE_Y:
		return V_008F0C_SQ_SEL_Y;
	case VK_SWIZZLE_Z:
		return V_008F0C_SQ_SEL_Z;
	case VK_SWIZZLE_W:
		return V_008F0C_SQ_SEL_W;
	case VK_SWIZZLE_0:
		return V_008F0C_SQ_SEL_0;
	case VK_SWIZZLE_1:
		return V_008F0C_SQ_SEL_1;
	default: /* VK_SWIZZLE_X */
		return V_008F0C_SQ_SEL_X;
	}
}

static unsigned radv_dynamic_state_mask(VkDynamicState state)
{
	switch(state) {
	case VK_DYNAMIC_STATE_VIEWPORT:
		return RADV_DYNAMIC_VIEWPORT;
	case VK_DYNAMIC_STATE_SCISSOR:
		return RADV_DYNAMIC_SCISSOR;
	case VK_DYNAMIC_STATE_LINE_WIDTH:
		return RADV_DYNAMIC_LINE_WIDTH;
	case VK_DYNAMIC_STATE_DEPTH_BIAS:
		return RADV_DYNAMIC_DEPTH_BIAS;
	case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
		return RADV_DYNAMIC_BLEND_CONSTANTS;
	case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
		return RADV_DYNAMIC_DEPTH_BOUNDS;
	case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
		return RADV_DYNAMIC_STENCIL_COMPARE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
		return RADV_DYNAMIC_STENCIL_WRITE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
		return RADV_DYNAMIC_STENCIL_REFERENCE;
	case VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT:
		return RADV_DYNAMIC_DISCARD_RECTANGLE;
	default:
		unreachable("Unhandled dynamic state");
	}
}

static uint32_t radv_pipeline_needed_dynamic_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t states = RADV_DYNAMIC_ALL;

	/* If rasterization is disabled we do not care about any of the dynamic states,
	 * since they are all rasterization related only. */
	if (pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
		return 0;

	if (!pCreateInfo->pRasterizationState->depthBiasEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BIAS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->depthBoundsTestEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BOUNDS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->stencilTestEnable)
		states &= ~(RADV_DYNAMIC_STENCIL_COMPARE_MASK |
			    RADV_DYNAMIC_STENCIL_WRITE_MASK |
			    RADV_DYNAMIC_STENCIL_REFERENCE);

	if (!vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_DISCARD_RECTANGLE;

	/* TODO: blend constants & line width. */

	return states;
}

static void
radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
				 const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t needed_states = radv_pipeline_needed_dynamic_state(pCreateInfo);
	uint32_t states = needed_states;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

	pipeline->dynamic_state = default_dynamic_state;
	pipeline->graphics.needed_dynamic_state = needed_states;

	if (pCreateInfo->pDynamicState) {
		/* Remove all of the states that are marked as dynamic */
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t s = 0; s < count; s++)
			states &= ~radv_dynamic_state_mask(pCreateInfo->pDynamicState->pDynamicStates[s]);
	}

	struct radv_dynamic_state *dynamic = &pipeline->dynamic_state;

	if (needed_states & RADV_DYNAMIC_VIEWPORT) {
		assert(pCreateInfo->pViewportState);

		dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
		if (states & RADV_DYNAMIC_VIEWPORT) {
			typed_memcpy(dynamic->viewport.viewports,
				     pCreateInfo->pViewportState->pViewports,
				     pCreateInfo->pViewportState->viewportCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SCISSOR) {
		dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
		if (states & RADV_DYNAMIC_SCISSOR) {
			typed_memcpy(dynamic->scissor.scissors,
				     pCreateInfo->pViewportState->pScissors,
				     pCreateInfo->pViewportState->scissorCount);
		}
	}

	if (states & RADV_DYNAMIC_LINE_WIDTH) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
	}

	if (states & RADV_DYNAMIC_DEPTH_BIAS) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->depth_bias.bias =
			pCreateInfo->pRasterizationState->depthBiasConstantFactor;
		dynamic->depth_bias.clamp =
			pCreateInfo->pRasterizationState->depthBiasClamp;
		dynamic->depth_bias.slope =
			pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
	}

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pColorBlendState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is
	 *    created against does not use any color attachments.
	 */
	if (subpass->has_color_att && states & RADV_DYNAMIC_BLEND_CONSTANTS) {
		assert(pCreateInfo->pColorBlendState);
		typed_memcpy(dynamic->blend_constants,
			     pCreateInfo->pColorBlendState->blendConstants, 4);
	}

	/* If there is no depthstencil attachment, then don't read
	 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
	 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
	 * no need to override the depthstencil defaults in
	 * radv_pipeline::dynamic_state when there is no depthstencil attachment.
	 *
	 * Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pDepthStencilState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is created
	 *    against does not use a depth/stencil attachment.
	 */
	if (needed_states && subpass->depth_stencil_attachment) {
		assert(pCreateInfo->pDepthStencilState);

		if (states & RADV_DYNAMIC_DEPTH_BOUNDS) {
			dynamic->depth_bounds.min =
				pCreateInfo->pDepthStencilState->minDepthBounds;
			dynamic->depth_bounds.max =
				pCreateInfo->pDepthStencilState->maxDepthBounds;
		}

		if (states & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
			dynamic->stencil_compare_mask.front =
				pCreateInfo->pDepthStencilState->front.compareMask;
			dynamic->stencil_compare_mask.back =
				pCreateInfo->pDepthStencilState->back.compareMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
			dynamic->stencil_write_mask.front =
				pCreateInfo->pDepthStencilState->front.writeMask;
			dynamic->stencil_write_mask.back =
				pCreateInfo->pDepthStencilState->back.writeMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_REFERENCE) {
			dynamic->stencil_reference.front =
				pCreateInfo->pDepthStencilState->front.reference;
			dynamic->stencil_reference.back =
				pCreateInfo->pDepthStencilState->back.reference;
		}
	}

	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
	if (states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
		dynamic->discard_rectangle.count = discard_rectangle_info->discardRectangleCount;
		typed_memcpy(dynamic->discard_rectangle.rectangles,
			     discard_rectangle_info->pDiscardRectangles,
			     discard_rectangle_info->discardRectangleCount);
	}

	pipeline->dynamic_state.mask = states;
}
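
/* Rough example of the LDS sizing done below: with an ESGS itemsize of
 * 4 dwords, triangles (3 vertices in) and the ideal 64 GS prims per subgroup,
 * the worst case is 64 * 3 = 192 ES vertices, i.e. 192 * 4 = 768 dwords of
 * LDS, comfortably below the 8K dword budget, so no repartitioning is needed
 * in that case.
 */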

static struct radv_gs_state
calculate_gs_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
		  const struct radv_pipeline *pipeline)
{
	struct radv_gs_state gs = {0};
	struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
	struct radv_es_output_info *es_info;
	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		es_info = radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	else
		es_info = radv_pipeline_has_tess(pipeline) ?
			&pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.es_info :
			&pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.es_info;

	unsigned gs_num_invocations = MAX2(gs_info->gs.invocations, 1);
	bool uses_adjacency;
	switch(pCreateInfo->pInputAssemblyState->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space. */
	const unsigned max_lds_size = 8 * 1024;
	const unsigned esgs_itemsize = es_info->esgs_itemsize / 4;
	unsigned esgs_lds_size;

	/* All these are per subgroup: */
	const unsigned max_out_prims = 32 * 1024;
	const unsigned max_es_verts = 255;
	const unsigned ideal_gs_prims = 64;
	unsigned max_gs_prims, gs_prims;
	unsigned min_es_verts, es_verts, worst_case_es_verts;

	if (uses_adjacency || gs_num_invocations > 1)
		max_gs_prims = 127 / gs_num_invocations;
	else
		max_gs_prims = 255;

	/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
	 * Make sure we don't go over the maximum value.
	 */
	if (gs_info->gs.vertices_out > 0) {
		max_gs_prims = MIN2(max_gs_prims,
				    max_out_prims /
				    (gs_info->gs.vertices_out * gs_num_invocations));
	}
	assert(max_gs_prims > 0);

	/* If the primitive has adjacency, halve the number of vertices
	 * that will be reused in multiple primitives.
	 */
	min_es_verts = gs_info->gs.vertices_in / (uses_adjacency ? 2 : 1);

	gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
	worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

	/* Compute ESGS LDS size based on the worst case number of ES vertices
	 * needed to create the target number of GS prims per subgroup.
	 */
	esgs_lds_size = esgs_itemsize * worst_case_es_verts;

	/* If total LDS usage is too big, refactor partitions based on ratio
	 * of ESGS item sizes.
	 */
	if (esgs_lds_size > max_lds_size) {
		/* Our target GS Prims Per Subgroup was too large. Calculate
		 * the maximum number of GS Prims Per Subgroup that will fit
		 * into LDS, capped by the maximum that the hardware can support.
		 */
		gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
				max_gs_prims);
		assert(gs_prims > 0);
		worst_case_es_verts = MIN2(min_es_verts * gs_prims,
					   max_es_verts);

		esgs_lds_size = esgs_itemsize * worst_case_es_verts;
		assert(esgs_lds_size <= max_lds_size);
	}

	/* Now calculate remaining ESGS information. */
	if (esgs_lds_size)
		es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
	else
		es_verts = max_es_verts;

	/* Vertices for adjacency primitives are not always reused, so restore
	 * it for ES_VERTS_PER_SUBGRP.
	 */
	min_es_verts = gs_info->gs.vertices_in;

	/* For normal primitives, the VGT only checks if they are past the ES
	 * verts per subgroup after allocating a full GS primitive and if they
	 * are, kick off a new subgroup. But if those additional ES verts are
	 * unique (e.g. not reused) we need to make sure there is enough LDS
	 * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
	 */
	es_verts -= min_es_verts - 1;

	uint32_t es_verts_per_subgroup = es_verts;
	uint32_t gs_prims_per_subgroup = gs_prims;
	uint32_t gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
	uint32_t max_prims_per_subgroup = gs_inst_prims_in_subgroup * gs_info->gs.vertices_out;
	gs.lds_size = align(esgs_lds_size, 128) / 128;
	gs.vgt_gs_onchip_cntl = S_028A44_ES_VERTS_PER_SUBGRP(es_verts_per_subgroup) |
				S_028A44_GS_PRIMS_PER_SUBGRP(gs_prims_per_subgroup) |
				S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_inst_prims_in_subgroup);
	gs.vgt_gs_max_prims_per_subgroup = S_028A94_MAX_PRIMS_PER_SUBGROUP(max_prims_per_subgroup);
	gs.vgt_esgs_ring_itemsize = esgs_itemsize;
	assert(max_prims_per_subgroup <= max_out_prims);

	return gs;
}

static void
calculate_gs_ring_sizes(struct radv_pipeline *pipeline, const struct radv_gs_state *gs)
{
	struct radv_device *device = pipeline->device;
	unsigned num_se = device->physical_device->rad_info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	/* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
	 * On VI+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
	 */
	unsigned gs_vertex_reuse =
		(device->physical_device->rad_info.chip_class >= VI ? 32 : 16) * num_se;
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
	struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;

	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(gs->vgt_esgs_ring_itemsize * 4 * gs_vertex_reuse *
					    wave_size, alignment);
	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
		gs->vgt_esgs_ring_itemsize * 4 * gs_info->gs.vertices_in;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
		gs_info->gs.max_gsvs_emit_size;

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	if (pipeline->device->physical_device->rad_info.chip_class <= VI)
		pipeline->graphics.esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);

	pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}

static void si_multiwave_lds_size_workaround(struct radv_device *device,
					     unsigned *lds_size)
{
	/* If tessellation is all offchip and on-chip GS isn't used, this
	 * workaround is not needed.
	 */
	return;

	/* SPI barrier management bug:
	 * Make sure we have at least 4k of LDS in use to avoid the bug.
	 * It applies to workgroup sizes of more than one wavefront.
	 */
	if (device->physical_device->rad_info.family == CHIP_BONAIRE ||
	    device->physical_device->rad_info.family == CHIP_KABINI ||
	    device->physical_device->rad_info.family == CHIP_MULLINS)
		*lds_size = MAX2(*lds_size, 8);
}

struct radv_shader_variant *
radv_get_shader(struct radv_pipeline *pipeline,
		gl_shader_stage stage)
{
	if (stage == MESA_SHADER_VERTEX) {
		if (pipeline->shaders[MESA_SHADER_VERTEX])
			return pipeline->shaders[MESA_SHADER_VERTEX];
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
			return pipeline->shaders[MESA_SHADER_TESS_CTRL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	} else if (stage == MESA_SHADER_TESS_EVAL) {
		if (!radv_pipeline_has_tess(pipeline))
			return NULL;
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
			return pipeline->shaders[MESA_SHADER_TESS_EVAL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	}
	return pipeline->shaders[stage];
}

static struct radv_tessellation_state
calculate_tess_state(struct radv_pipeline *pipeline,
		     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	unsigned num_tcs_input_cp;
	unsigned num_tcs_output_cp;
	unsigned lds_size;
	unsigned num_patches;
	struct radv_tessellation_state tess = {0};

	num_tcs_input_cp = pCreateInfo->pTessellationState->patchControlPoints;
	num_tcs_output_cp = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.tcs_vertices_out; //TCS VERTICES OUT
	num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;

	lds_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.lds_size;

	if (pipeline->device->physical_device->rad_info.chip_class >= CIK) {
		assert(lds_size <= 65536);
		lds_size = align(lds_size, 512) / 512;
	} else {
		assert(lds_size <= 32768);
		lds_size = align(lds_size, 256) / 256;
	}
	si_multiwave_lds_size_workaround(pipeline->device, &lds_size);

	tess.lds_size = lds_size;

	tess.ls_hs_config = S_028B58_NUM_PATCHES(num_patches) |
		S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
		S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
	tess.num_patches = num_patches;

	struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);
	unsigned type = 0, partitioning = 0, topology = 0, distribution_mode = 0;

	switch (tes->info.tes.primitive_mode) {
	case GL_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case GL_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	case GL_ISOLINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	}

	switch (tes->info.tes.spacing) {
	case TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	case TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	default:
		break;
	}

	bool ccw = tes->info.tes.ccw;
	const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
		vk_find_struct_const(pCreateInfo->pTessellationState,
				     PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);

	if (domain_origin_state && domain_origin_state->domainOrigin != VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
		ccw = !ccw;

	if (tes->info.tes.point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes->info.tes.primitive_mode == GL_ISOLINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (ccw)
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	if (pipeline->device->has_distributed_tess) {
		if (pipeline->device->physical_device->rad_info.family == CHIP_FIJI ||
		    pipeline->device->physical_device->rad_info.family >= CHIP_POLARIS10)
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
		else
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
	} else
		distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;

	tess.tf_param = S_028B6C_TYPE(type) |
		S_028B6C_PARTITIONING(partitioning) |
		S_028B6C_TOPOLOGY(topology) |
		S_028B6C_DISTRIBUTION_MODE(distribution_mode);

	return tess;
}

static const struct radv_prim_vertex_count prim_size_table[] = {
	[V_008958_DI_PT_NONE] = {0, 0},
	[V_008958_DI_PT_POINTLIST] = {1, 1},
	[V_008958_DI_PT_LINELIST] = {2, 2},
	[V_008958_DI_PT_LINESTRIP] = {2, 1},
	[V_008958_DI_PT_TRILIST] = {3, 3},
	[V_008958_DI_PT_TRIFAN] = {3, 1},
	[V_008958_DI_PT_TRISTRIP] = {3, 1},
	[V_008958_DI_PT_LINELIST_ADJ] = {4, 4},
	[V_008958_DI_PT_LINESTRIP_ADJ] = {4, 1},
	[V_008958_DI_PT_TRILIST_ADJ] = {6, 6},
	[V_008958_DI_PT_TRISTRIP_ADJ] = {6, 2},
	[V_008958_DI_PT_RECTLIST] = {3, 3},
	[V_008958_DI_PT_LINELOOP] = {2, 1},
	[V_008958_DI_PT_POLYGON] = {3, 1},
	[V_008958_DI_PT_2D_TRI_STRIP] = {0, 0},
};
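
/* Each entry above is { min, incr }: how many vertices the primitive type
 * needs to emit its first primitive, and how many additional vertices each
 * further primitive costs. E.g. a triangle strip needs 3 vertices up front
 * and then 1 more per extra triangle, while a triangle list needs 3 per
 * triangle.
 */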

static const struct radv_vs_output_info *get_vs_output_info(const struct radv_pipeline *pipeline)
{
	if (radv_pipeline_has_gs(pipeline))
		return &pipeline->gs_copy_shader->info.vs.outinfo;
	else if (radv_pipeline_has_tess(pipeline))
		return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.outinfo;
	else
		return &pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.outinfo;
}
1771 radv_link_shaders(struct radv_pipeline
*pipeline
, nir_shader
**shaders
)
1773 nir_shader
* ordered_shaders
[MESA_SHADER_STAGES
];
1774 int shader_count
= 0;
1776 if(shaders
[MESA_SHADER_FRAGMENT
]) {
1777 ordered_shaders
[shader_count
++] = shaders
[MESA_SHADER_FRAGMENT
];
1779 if(shaders
[MESA_SHADER_GEOMETRY
]) {
1780 ordered_shaders
[shader_count
++] = shaders
[MESA_SHADER_GEOMETRY
];
1782 if(shaders
[MESA_SHADER_TESS_EVAL
]) {
1783 ordered_shaders
[shader_count
++] = shaders
[MESA_SHADER_TESS_EVAL
];
1785 if(shaders
[MESA_SHADER_TESS_CTRL
]) {
1786 ordered_shaders
[shader_count
++] = shaders
[MESA_SHADER_TESS_CTRL
];
1788 if(shaders
[MESA_SHADER_VERTEX
]) {
1789 ordered_shaders
[shader_count
++] = shaders
[MESA_SHADER_VERTEX
];
1792 if (shader_count
> 1) {
1793 unsigned first
= ordered_shaders
[shader_count
- 1]->info
.stage
;
1794 unsigned last
= ordered_shaders
[0]->info
.stage
;
1796 if (ordered_shaders
[0]->info
.stage
== MESA_SHADER_FRAGMENT
&&
1797 ordered_shaders
[1]->info
.has_transform_feedback_varyings
)
1798 nir_link_xfb_varyings(ordered_shaders
[1], ordered_shaders
[0]);
1800 for (int i
= 0; i
< shader_count
; ++i
) {
1801 nir_variable_mode mask
= 0;
1803 if (ordered_shaders
[i
]->info
.stage
!= first
)
1804 mask
= mask
| nir_var_shader_in
;
1806 if (ordered_shaders
[i
]->info
.stage
!= last
)
1807 mask
= mask
| nir_var_shader_out
;
1809 nir_lower_io_to_scalar_early(ordered_shaders
[i
], mask
);
1810 radv_optimize_nir(ordered_shaders
[i
], false, false);
1814 for (int i
= 1; i
< shader_count
; ++i
) {
1815 nir_lower_io_arrays_to_elements(ordered_shaders
[i
],
1816 ordered_shaders
[i
- 1]);
1818 if (nir_link_opt_varyings(ordered_shaders
[i
],
1819 ordered_shaders
[i
- 1]))
1820 radv_optimize_nir(ordered_shaders
[i
- 1], false, false);
1822 nir_remove_dead_variables(ordered_shaders
[i
],
1823 nir_var_shader_out
);
1824 nir_remove_dead_variables(ordered_shaders
[i
- 1],
1827 bool progress
= nir_remove_unused_varyings(ordered_shaders
[i
],
1828 ordered_shaders
[i
- 1]);
1830 nir_compact_varyings(ordered_shaders
[i
],
1831 ordered_shaders
[i
- 1], true);
1834 if (nir_lower_global_vars_to_local(ordered_shaders
[i
])) {
1835 ac_lower_indirect_derefs(ordered_shaders
[i
],
1836 pipeline
->device
->physical_device
->rad_info
.chip_class
);
1838 radv_optimize_nir(ordered_shaders
[i
], false, false);
1840 if (nir_lower_global_vars_to_local(ordered_shaders
[i
- 1])) {
1841 ac_lower_indirect_derefs(ordered_shaders
[i
- 1],
1842 pipeline
->device
->physical_device
->rad_info
.chip_class
);
1844 radv_optimize_nir(ordered_shaders
[i
- 1], false, false);
1850 static struct radv_pipeline_key
1851 radv_generate_graphics_pipeline_key(struct radv_pipeline
*pipeline
,
1852 const VkGraphicsPipelineCreateInfo
*pCreateInfo
,
1853 const struct radv_blend_state
*blend
,
1854 bool has_view_index
)
1856 const VkPipelineVertexInputStateCreateInfo
*input_state
=
1857 pCreateInfo
->pVertexInputState
;
1858 const VkPipelineVertexInputDivisorStateCreateInfoEXT
*divisor_state
=
1859 vk_find_struct_const(input_state
->pNext
, PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT
);
1861 struct radv_pipeline_key key
;
1862 memset(&key
, 0, sizeof(key
));
1864 if (pCreateInfo
->flags
& VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
)
1865 key
.optimisations_disabled
= 1;
1867 key
.has_multiview_view_index
= has_view_index
;
1869 uint32_t binding_input_rate
= 0;
1870 uint32_t instance_rate_divisors
[MAX_VERTEX_ATTRIBS
];
1871 for (unsigned i
= 0; i
< input_state
->vertexBindingDescriptionCount
; ++i
) {
1872 if (input_state
->pVertexBindingDescriptions
[i
].inputRate
) {
1873 unsigned binding
= input_state
->pVertexBindingDescriptions
[i
].binding
;
1874 binding_input_rate
|= 1u << binding
;
1875 instance_rate_divisors
[binding
] = 1;
1878 if (divisor_state
) {
1879 for (unsigned i
= 0; i
< divisor_state
->vertexBindingDivisorCount
; ++i
) {
1880 instance_rate_divisors
[divisor_state
->pVertexBindingDivisors
[i
].binding
] =
1881 divisor_state
->pVertexBindingDivisors
[i
].divisor
;
1885 for (unsigned i
= 0; i
< input_state
->vertexAttributeDescriptionCount
; ++i
) {
1886 const VkVertexInputAttributeDescription
*desc
=
1887 &input_state
->pVertexAttributeDescriptions
[i
];
1888 const struct vk_format_description
*format_desc
;
1889 unsigned location
= desc
->location
;
1890 unsigned binding
= desc
->binding
;
1891 unsigned num_format
, data_format
;
1894 if (binding_input_rate
& (1u << binding
)) {
1895 key
.instance_rate_inputs
|= 1u << location
;
1896 key
.instance_rate_divisors
[location
] = instance_rate_divisors
[binding
];
1899 format_desc
= vk_format_description(desc
->format
);
1900 first_non_void
= vk_format_get_first_non_void_channel(desc
->format
);
1902 num_format
= radv_translate_buffer_numformat(format_desc
, first_non_void
);
1903 data_format
= radv_translate_buffer_dataformat(format_desc
, first_non_void
);
1905 key
.vertex_attribute_formats
[location
] = data_format
| (num_format
<< 4);
1906 key
.vertex_attribute_bindings
[location
] = desc
->binding
;
1907 key
.vertex_attribute_offsets
[location
] = desc
->offset
;
1908 key
.vertex_attribute_strides
[location
] = input_state
->pVertexBindingDescriptions
[desc
->binding
].stride
;
1910 if (pipeline
->device
->physical_device
->rad_info
.chip_class
<= VI
&&
1911 pipeline
->device
->physical_device
->rad_info
.family
!= CHIP_STONEY
) {
1912 VkFormat format
= input_state
->pVertexAttributeDescriptions
[i
].format
;
1915 case VK_FORMAT_A2R10G10B10_SNORM_PACK32
:
1916 case VK_FORMAT_A2B10G10R10_SNORM_PACK32
:
1917 adjust
= RADV_ALPHA_ADJUST_SNORM
;
1919 case VK_FORMAT_A2R10G10B10_SSCALED_PACK32
:
1920 case VK_FORMAT_A2B10G10R10_SSCALED_PACK32
:
1921 adjust
= RADV_ALPHA_ADJUST_SSCALED
;
1923 case VK_FORMAT_A2R10G10B10_SINT_PACK32
:
1924 case VK_FORMAT_A2B10G10R10_SINT_PACK32
:
1925 adjust
= RADV_ALPHA_ADJUST_SINT
;
1931 key
.vertex_alpha_adjust
|= adjust
<< (2 * location
);
1934 switch (desc
->format
) {
1935 case VK_FORMAT_B8G8R8A8_UNORM
:
1936 case VK_FORMAT_B8G8R8A8_SNORM
:
1937 case VK_FORMAT_B8G8R8A8_USCALED
:
1938 case VK_FORMAT_B8G8R8A8_SSCALED
:
1939 case VK_FORMAT_B8G8R8A8_UINT
:
1940 case VK_FORMAT_B8G8R8A8_SINT
:
1941 case VK_FORMAT_B8G8R8A8_SRGB
:
1942 case VK_FORMAT_A2R10G10B10_UNORM_PACK32
:
1943 case VK_FORMAT_A2R10G10B10_SNORM_PACK32
:
1944 case VK_FORMAT_A2R10G10B10_USCALED_PACK32
:
1945 case VK_FORMAT_A2R10G10B10_SSCALED_PACK32
:
1946 case VK_FORMAT_A2R10G10B10_UINT_PACK32
:
1947 case VK_FORMAT_A2R10G10B10_SINT_PACK32
:
1948 key
.vertex_post_shuffle
|= 1 << location
;
1955 if (pCreateInfo
->pTessellationState
)
1956 key
.tess_input_vertices
= pCreateInfo
->pTessellationState
->patchControlPoints
;
1959 if (pCreateInfo
->pMultisampleState
&&
1960 pCreateInfo
->pMultisampleState
->rasterizationSamples
> 1) {
1961 uint32_t num_samples
= pCreateInfo
->pMultisampleState
->rasterizationSamples
;
1962 uint32_t ps_iter_samples
= radv_pipeline_get_ps_iter_samples(pCreateInfo
->pMultisampleState
);
1963 key
.num_samples
= num_samples
;
1964 key
.log2_ps_iter_samples
= util_logbase2(ps_iter_samples
);
1967 key
.col_format
= blend
->spi_shader_col_format
;
1968 if (pipeline
->device
->physical_device
->rad_info
.chip_class
< VI
)
1969 radv_pipeline_compute_get_int_clamp(pCreateInfo
, &key
.is_int8
, &key
.is_int10
);
1975 radv_fill_shader_keys(struct radv_shader_variant_key
*keys
,
1976 const struct radv_pipeline_key
*key
,
1979 keys
[MESA_SHADER_VERTEX
].vs
.instance_rate_inputs
= key
->instance_rate_inputs
;
1980 keys
[MESA_SHADER_VERTEX
].vs
.alpha_adjust
= key
->vertex_alpha_adjust
;
1981 keys
[MESA_SHADER_VERTEX
].vs
.post_shuffle
= key
->vertex_post_shuffle
;
1982 for (unsigned i
= 0; i
< MAX_VERTEX_ATTRIBS
; ++i
) {
1983 keys
[MESA_SHADER_VERTEX
].vs
.instance_rate_divisors
[i
] = key
->instance_rate_divisors
[i
];
1984 keys
[MESA_SHADER_VERTEX
].vs
.vertex_attribute_formats
[i
] = key
->vertex_attribute_formats
[i
];
1985 keys
[MESA_SHADER_VERTEX
].vs
.vertex_attribute_bindings
[i
] = key
->vertex_attribute_bindings
[i
];
1986 keys
[MESA_SHADER_VERTEX
].vs
.vertex_attribute_offsets
[i
] = key
->vertex_attribute_offsets
[i
];
1987 keys
[MESA_SHADER_VERTEX
].vs
.vertex_attribute_strides
[i
] = key
->vertex_attribute_strides
[i
];
1990 if (nir
[MESA_SHADER_TESS_CTRL
]) {
1991 keys
[MESA_SHADER_VERTEX
].vs
.as_ls
= true;
1992 keys
[MESA_SHADER_TESS_CTRL
].tcs
.num_inputs
= 0;
1993 keys
[MESA_SHADER_TESS_CTRL
].tcs
.input_vertices
= key
->tess_input_vertices
;
1994 keys
[MESA_SHADER_TESS_CTRL
].tcs
.primitive_mode
= nir
[MESA_SHADER_TESS_EVAL
]->info
.tess
.primitive_mode
;
1996 keys
[MESA_SHADER_TESS_CTRL
].tcs
.tes_reads_tess_factors
= !!(nir
[MESA_SHADER_TESS_EVAL
]->info
.inputs_read
& (VARYING_BIT_TESS_LEVEL_INNER
| VARYING_BIT_TESS_LEVEL_OUTER
));
1999 if (nir
[MESA_SHADER_GEOMETRY
]) {
2000 if (nir
[MESA_SHADER_TESS_CTRL
])
2001 keys
[MESA_SHADER_TESS_EVAL
].tes
.as_es
= true;
2003 keys
[MESA_SHADER_VERTEX
].vs
.as_es
= true;
2006 for(int i
= 0; i
< MESA_SHADER_STAGES
; ++i
)
2007 keys
[i
].has_multiview_view_index
= key
->has_multiview_view_index
;
2009 keys
[MESA_SHADER_FRAGMENT
].fs
.col_format
= key
->col_format
;
2010 keys
[MESA_SHADER_FRAGMENT
].fs
.is_int8
= key
->is_int8
;
2011 keys
[MESA_SHADER_FRAGMENT
].fs
.is_int10
= key
->is_int10
;
2012 keys
[MESA_SHADER_FRAGMENT
].fs
.log2_ps_iter_samples
= key
->log2_ps_iter_samples
;
2013 keys
[MESA_SHADER_FRAGMENT
].fs
.num_samples
= key
->num_samples
;
2017 merge_tess_info(struct shader_info
*tes_info
,
2018 const struct shader_info
*tcs_info
)
2020 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
2022 * "PointMode. Controls generation of points rather than triangles
2023 * or lines. This functionality defaults to disabled, and is
2024 * enabled if either shader stage includes the execution mode.
2026 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
2027 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
2028 * and OutputVertices, it says:
2030 * "One mode must be set in at least one of the tessellation
2033 * So, the fields can be set in either the TCS or TES, but they must
2034 * agree if set in both. Our backend looks at TES, so bitwise-or in
2035 * the values from the TCS.
2037 assert(tcs_info
->tess
.tcs_vertices_out
== 0 ||
2038 tes_info
->tess
.tcs_vertices_out
== 0 ||
2039 tcs_info
->tess
.tcs_vertices_out
== tes_info
->tess
.tcs_vertices_out
);
2040 tes_info
->tess
.tcs_vertices_out
|= tcs_info
->tess
.tcs_vertices_out
;
2042 assert(tcs_info
->tess
.spacing
== TESS_SPACING_UNSPECIFIED
||
2043 tes_info
->tess
.spacing
== TESS_SPACING_UNSPECIFIED
||
2044 tcs_info
->tess
.spacing
== tes_info
->tess
.spacing
);
2045 tes_info
->tess
.spacing
|= tcs_info
->tess
.spacing
;
2047 assert(tcs_info
->tess
.primitive_mode
== 0 ||
2048 tes_info
->tess
.primitive_mode
== 0 ||
2049 tcs_info
->tess
.primitive_mode
== tes_info
->tess
.primitive_mode
);
2050 tes_info
->tess
.primitive_mode
|= tcs_info
->tess
.primitive_mode
;
2051 tes_info
->tess
.ccw
|= tcs_info
->tess
.ccw
;
2052 tes_info
->tess
.point_mode
|= tcs_info
->tess
.point_mode
;
2056 void radv_create_shaders(struct radv_pipeline
*pipeline
,
2057 struct radv_device
*device
,
2058 struct radv_pipeline_cache
*cache
,
2059 const struct radv_pipeline_key
*key
,
2060 const VkPipelineShaderStageCreateInfo
**pStages
,
2061 const VkPipelineCreateFlags flags
)
2063 struct radv_shader_module fs_m
= {0};
2064 struct radv_shader_module
*modules
[MESA_SHADER_STAGES
] = { 0, };
2065 nir_shader
*nir
[MESA_SHADER_STAGES
] = {0};
2066 void *codes
[MESA_SHADER_STAGES
] = {0};
2067 unsigned code_sizes
[MESA_SHADER_STAGES
] = {0};
2068 struct radv_shader_variant_key keys
[MESA_SHADER_STAGES
] = {{{{0}}}};
2069 unsigned char hash
[20], gs_copy_hash
[20];
2071 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2073 modules
[i
] = radv_shader_module_from_handle(pStages
[i
]->module
);
2074 if (modules
[i
]->nir
)
2075 _mesa_sha1_compute(modules
[i
]->nir
->info
.name
,
2076 strlen(modules
[i
]->nir
->info
.name
),
2079 pipeline
->active_stages
|= mesa_to_vk_shader_stage(i
);
2083 radv_hash_shaders(hash
, pStages
, pipeline
->layout
, key
, get_hash_flags(device
));
2084 memcpy(gs_copy_hash
, hash
, 20);
2085 gs_copy_hash
[0] ^= 1;
2087 if (modules
[MESA_SHADER_GEOMETRY
]) {
2088 struct radv_shader_variant
*variants
[MESA_SHADER_STAGES
] = {0};
2089 radv_create_shader_variants_from_pipeline_cache(device
, cache
, gs_copy_hash
, variants
);
2090 pipeline
->gs_copy_shader
= variants
[MESA_SHADER_GEOMETRY
];
2093 if (radv_create_shader_variants_from_pipeline_cache(device
, cache
, hash
, pipeline
->shaders
) &&
2094 (!modules
[MESA_SHADER_GEOMETRY
] || pipeline
->gs_copy_shader
)) {
2098 if (!modules
[MESA_SHADER_FRAGMENT
] && !modules
[MESA_SHADER_COMPUTE
]) {
2100 nir_builder_init_simple_shader(&fs_b
, NULL
, MESA_SHADER_FRAGMENT
, NULL
);
2101 fs_b
.shader
->info
.name
= ralloc_strdup(fs_b
.shader
, "noop_fs");
2102 fs_m
.nir
= fs_b
.shader
;
2103 modules
[MESA_SHADER_FRAGMENT
] = &fs_m
;
2106 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2107 const VkPipelineShaderStageCreateInfo
*stage
= pStages
[i
];
2112 nir
[i
] = radv_shader_compile_to_nir(device
, modules
[i
],
2113 stage
? stage
->pName
: "main", i
,
2114 stage
? stage
->pSpecializationInfo
: NULL
,
2117 /* We don't want to alter meta shaders IR directly so clone it
2120 if (nir
[i
]->info
.name
) {
2121 nir
[i
] = nir_shader_clone(NULL
, nir
[i
]);
2125 if (nir
[MESA_SHADER_TESS_CTRL
]) {
2126 nir_lower_patch_vertices(nir
[MESA_SHADER_TESS_EVAL
], nir
[MESA_SHADER_TESS_CTRL
]->info
.tess
.tcs_vertices_out
, NULL
);
2127 merge_tess_info(&nir
[MESA_SHADER_TESS_EVAL
]->info
, &nir
[MESA_SHADER_TESS_CTRL
]->info
);
2130 if (!(flags
& VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
))
2131 radv_link_shaders(pipeline
, nir
);
2133 for (int i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2135 NIR_PASS_V(nir
[i
], nir_lower_bool_to_int32
);
2138 if (radv_can_dump_shader(device
, modules
[i
], false))
2139 nir_print_shader(nir
[i
], stderr
);
2142 radv_fill_shader_keys(keys
, key
, nir
);
2144 if (nir
[MESA_SHADER_FRAGMENT
]) {
2145 if (!pipeline
->shaders
[MESA_SHADER_FRAGMENT
]) {
2146 pipeline
->shaders
[MESA_SHADER_FRAGMENT
] =
2147 radv_shader_variant_create(device
, modules
[MESA_SHADER_FRAGMENT
], &nir
[MESA_SHADER_FRAGMENT
], 1,
2148 pipeline
->layout
, keys
+ MESA_SHADER_FRAGMENT
,
2149 &codes
[MESA_SHADER_FRAGMENT
], &code_sizes
[MESA_SHADER_FRAGMENT
]);
2152 /* TODO: These are no longer used as keys we should refactor this */
2153 keys
[MESA_SHADER_VERTEX
].vs
.export_prim_id
=
2154 pipeline
->shaders
[MESA_SHADER_FRAGMENT
]->info
.info
.ps
.prim_id_input
;
2155 keys
[MESA_SHADER_VERTEX
].vs
.export_layer_id
=
2156 pipeline
->shaders
[MESA_SHADER_FRAGMENT
]->info
.info
.ps
.layer_input
;
2157 keys
[MESA_SHADER_TESS_EVAL
].tes
.export_prim_id
=
2158 pipeline
->shaders
[MESA_SHADER_FRAGMENT
]->info
.info
.ps
.prim_id_input
;
2159 keys
[MESA_SHADER_TESS_EVAL
].tes
.export_layer_id
=
2160 pipeline
->shaders
[MESA_SHADER_FRAGMENT
]->info
.info
.ps
.layer_input
;
2163 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
&& modules
[MESA_SHADER_TESS_CTRL
]) {
2164 if (!pipeline
->shaders
[MESA_SHADER_TESS_CTRL
]) {
2165 struct nir_shader
*combined_nir
[] = {nir
[MESA_SHADER_VERTEX
], nir
[MESA_SHADER_TESS_CTRL
]};
2166 struct radv_shader_variant_key key
= keys
[MESA_SHADER_TESS_CTRL
];
2167 key
.tcs
.vs_key
= keys
[MESA_SHADER_VERTEX
].vs
;
2168 pipeline
->shaders
[MESA_SHADER_TESS_CTRL
] = radv_shader_variant_create(device
, modules
[MESA_SHADER_TESS_CTRL
], combined_nir
, 2,
2170 &key
, &codes
[MESA_SHADER_TESS_CTRL
],
2171 &code_sizes
[MESA_SHADER_TESS_CTRL
]);
2173 modules
[MESA_SHADER_VERTEX
] = NULL
;
2174 keys
[MESA_SHADER_TESS_EVAL
].tes
.num_patches
= pipeline
->shaders
[MESA_SHADER_TESS_CTRL
]->info
.tcs
.num_patches
;
2175 keys
[MESA_SHADER_TESS_EVAL
].tes
.tcs_num_outputs
= util_last_bit64(pipeline
->shaders
[MESA_SHADER_TESS_CTRL
]->info
.info
.tcs
.outputs_written
);
2178 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
&& modules
[MESA_SHADER_GEOMETRY
]) {
2179 gl_shader_stage pre_stage
= modules
[MESA_SHADER_TESS_EVAL
] ? MESA_SHADER_TESS_EVAL
: MESA_SHADER_VERTEX
;
2180 if (!pipeline
->shaders
[MESA_SHADER_GEOMETRY
]) {
2181 struct nir_shader
*combined_nir
[] = {nir
[pre_stage
], nir
[MESA_SHADER_GEOMETRY
]};
2182 pipeline
->shaders
[MESA_SHADER_GEOMETRY
] = radv_shader_variant_create(device
, modules
[MESA_SHADER_GEOMETRY
], combined_nir
, 2,
2184 &keys
[pre_stage
] , &codes
[MESA_SHADER_GEOMETRY
],
2185 &code_sizes
[MESA_SHADER_GEOMETRY
]);
2187 modules
[pre_stage
] = NULL
;
2190 for (int i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2191 if(modules
[i
] && !pipeline
->shaders
[i
]) {
2192 if (i
== MESA_SHADER_TESS_CTRL
) {
2193 keys
[MESA_SHADER_TESS_CTRL
].tcs
.num_inputs
= util_last_bit64(pipeline
->shaders
[MESA_SHADER_VERTEX
]->info
.info
.vs
.ls_outputs_written
);
2195 if (i
== MESA_SHADER_TESS_EVAL
) {
2196 keys
[MESA_SHADER_TESS_EVAL
].tes
.num_patches
= pipeline
->shaders
[MESA_SHADER_TESS_CTRL
]->info
.tcs
.num_patches
;
2197 keys
[MESA_SHADER_TESS_EVAL
].tes
.tcs_num_outputs
= util_last_bit64(pipeline
->shaders
[MESA_SHADER_TESS_CTRL
]->info
.info
.tcs
.outputs_written
);
2199 pipeline
->shaders
[i
] = radv_shader_variant_create(device
, modules
[i
], &nir
[i
], 1,
2201 keys
+ i
, &codes
[i
],
2206 if(modules
[MESA_SHADER_GEOMETRY
]) {
2207 void *gs_copy_code
= NULL
;
2208 unsigned gs_copy_code_size
= 0;
2209 if (!pipeline
->gs_copy_shader
) {
2210 pipeline
->gs_copy_shader
= radv_create_gs_copy_shader(
2211 device
, nir
[MESA_SHADER_GEOMETRY
], &gs_copy_code
,
2213 keys
[MESA_SHADER_GEOMETRY
].has_multiview_view_index
);
2216 if (pipeline
->gs_copy_shader
) {
2217 void *code
[MESA_SHADER_STAGES
] = {0};
2218 unsigned code_size
[MESA_SHADER_STAGES
] = {0};
2219 struct radv_shader_variant
*variants
[MESA_SHADER_STAGES
] = {0};
2221 code
[MESA_SHADER_GEOMETRY
] = gs_copy_code
;
2222 code_size
[MESA_SHADER_GEOMETRY
] = gs_copy_code_size
;
2223 variants
[MESA_SHADER_GEOMETRY
] = pipeline
->gs_copy_shader
;
2225 radv_pipeline_cache_insert_shaders(device
, cache
,
2234 radv_pipeline_cache_insert_shaders(device
, cache
, hash
, pipeline
->shaders
,
2235 (const void**)codes
, code_sizes
);
2237 for (int i
= 0; i
< MESA_SHADER_STAGES
; ++i
) {
2240 if (!pipeline
->device
->keep_shader_info
)
2241 ralloc_free(nir
[i
]);
2243 if (radv_can_dump_shader_stats(device
, modules
[i
]))
2244 radv_shader_dump_stats(device
,
2245 pipeline
->shaders
[i
],
2251 ralloc_free(fs_m
.nir
);
}

static uint32_t
radv_pipeline_stage_to_user_data_0(struct radv_pipeline *pipeline,
				   gl_shader_stage stage, enum chip_class chip_class)
{
	bool has_gs = radv_pipeline_has_gs(pipeline);
	bool has_tess = radv_pipeline_has_tess(pipeline);
	switch (stage) {
	case MESA_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	case MESA_SHADER_VERTEX:
		if (chip_class >= GFX9) {
			return has_tess ? R_00B430_SPI_SHADER_USER_DATA_LS_0 :
			       has_gs ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
			       R_00B130_SPI_SHADER_USER_DATA_VS_0;
		}
		if (has_tess)
			return R_00B530_SPI_SHADER_USER_DATA_LS_0;
		else
			return has_gs ? R_00B330_SPI_SHADER_USER_DATA_ES_0 : R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
		       R_00B230_SPI_SHADER_USER_DATA_GS_0;
	case MESA_SHADER_COMPUTE:
		return R_00B900_COMPUTE_USER_DATA_0;
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= GFX9 ? R_00B430_SPI_SHADER_USER_DATA_LS_0 :
		       R_00B430_SPI_SHADER_USER_DATA_HS_0;
	case MESA_SHADER_TESS_EVAL:
		if (chip_class >= GFX9) {
			return has_gs ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
			       R_00B130_SPI_SHADER_USER_DATA_VS_0;
		}
		if (has_gs)
			return R_00B330_SPI_SHADER_USER_DATA_ES_0;
		else
			return R_00B130_SPI_SHADER_USER_DATA_VS_0;
	default:
		unreachable("unknown shader");
	}
}
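
/* Example of the mapping above: a vertex shader on GFX9 in a pipeline that
 * uses tessellation runs as the hardware LS stage, so its user SGPRs start at
 * R_00B430_SPI_SHADER_USER_DATA_LS_0; the same shader without tessellation
 * but with a GS runs as ES and uses R_00B330_SPI_SHADER_USER_DATA_ES_0.
 */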
2295 struct radv_bin_size_entry
{
2301 radv_compute_bin_size(struct radv_pipeline
*pipeline
, const VkGraphicsPipelineCreateInfo
*pCreateInfo
)
2303 static const struct radv_bin_size_entry color_size_table
[][3][9] = {
2307 /* One shader engine */
2313 { UINT_MAX
, { 0, 0}},
2316 /* Two shader engines */
2322 { UINT_MAX
, { 0, 0}},
2325 /* Four shader engines */
2330 { UINT_MAX
, { 0, 0}},
2336 /* One shader engine */
2342 { UINT_MAX
, { 0, 0}},
2345 /* Two shader engines */
2351 { UINT_MAX
, { 0, 0}},
2354 /* Four shader engines */
2361 { UINT_MAX
, { 0, 0}},
2367 /* One shader engine */
2374 { UINT_MAX
, { 0, 0}},
2377 /* Two shader engines */
2385 { UINT_MAX
, { 0, 0}},
2388 /* Four shader engines */
2396 { UINT_MAX
, { 0, 0}},
2400 static const struct radv_bin_size_entry ds_size_table
[][3][9] = {
2404 // One shader engine
2411 { UINT_MAX
, { 0, 0}},
2414 // Two shader engines
2422 { UINT_MAX
, { 0, 0}},
2425 // Four shader engines
2433 { UINT_MAX
, { 0, 0}},
2439 // One shader engine
2447 { UINT_MAX
, { 0, 0}},
2450 // Two shader engines
2459 { UINT_MAX
, { 0, 0}},
2462 // Four shader engines
2471 { UINT_MAX
, { 0, 0}},
2477 // One shader engine
2485 { UINT_MAX
, { 0, 0}},
2488 // Two shader engines
2497 { UINT_MAX
, { 0, 0}},
2500 // Four shader engines
2508 { UINT_MAX
, { 0, 0}},
2513 RADV_FROM_HANDLE(radv_render_pass
, pass
, pCreateInfo
->renderPass
);
2514 struct radv_subpass
*subpass
= pass
->subpasses
+ pCreateInfo
->subpass
;
2515 VkExtent2D extent
= {512, 512};
2517 unsigned log_num_rb_per_se
=
2518 util_logbase2_ceil(pipeline
->device
->physical_device
->rad_info
.num_render_backends
/
2519 pipeline
->device
->physical_device
->rad_info
.max_se
);
2520 unsigned log_num_se
= util_logbase2_ceil(pipeline
->device
->physical_device
->rad_info
.max_se
);
2522 unsigned total_samples
= 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline
->graphics
.ms
.pa_sc_aa_config
);
2523 unsigned ps_iter_samples
= 1u << G_028804_PS_ITER_SAMPLES(pipeline
->graphics
.ms
.db_eqaa
);
2524 unsigned effective_samples
= total_samples
;
2525 unsigned color_bytes_per_pixel
= 0;
2527 const VkPipelineColorBlendStateCreateInfo
*vkblend
= pCreateInfo
->pColorBlendState
;
2529 for (unsigned i
= 0; i
< subpass
->color_count
; i
++) {
2530 if (!vkblend
->pAttachments
[i
].colorWriteMask
)
2533 if (subpass
->color_attachments
[i
].attachment
== VK_ATTACHMENT_UNUSED
)
2536 VkFormat format
= pass
->attachments
[subpass
->color_attachments
[i
].attachment
].format
;
2537 color_bytes_per_pixel
+= vk_format_get_blocksize(format
);
2540 /* MSAA images typically don't use all samples all the time. */
2541 if (effective_samples
>= 2 && ps_iter_samples
<= 1)
2542 effective_samples
= 2;
2543 color_bytes_per_pixel
*= effective_samples
;
2546 const struct radv_bin_size_entry
*color_entry
= color_size_table
[log_num_rb_per_se
][log_num_se
];
2547 while(color_entry
[1].bpp
<= color_bytes_per_pixel
)
2550 extent
= color_entry
->extent
;
2552 if (subpass
->depth_stencil_attachment
) {
2553 struct radv_render_pass_attachment
*attachment
= pass
->attachments
+ subpass
->depth_stencil_attachment
->attachment
;
2555 /* Coefficients taken from AMDVLK */
2556 unsigned depth_coeff
= vk_format_is_depth(attachment
->format
) ? 5 : 0;
2557 unsigned stencil_coeff
= vk_format_is_stencil(attachment
->format
) ? 1 : 0;
2558 unsigned ds_bytes_per_pixel
= 4 * (depth_coeff
+ stencil_coeff
) * total_samples
;
2560 const struct radv_bin_size_entry
*ds_entry
= ds_size_table
[log_num_rb_per_se
][log_num_se
];
2561 while(ds_entry
[1].bpp
<= ds_bytes_per_pixel
)
2564 extent
.width
= MIN2(extent
.width
, ds_entry
->extent
.width
);
2565 extent
.height
= MIN2(extent
.height
, ds_entry
->extent
.height
);
2572 radv_pipeline_generate_binning_state(struct radeon_cmdbuf
*ctx_cs
,
2573 struct radv_pipeline
*pipeline
,
2574 const VkGraphicsPipelineCreateInfo
*pCreateInfo
)
2576 if (pipeline
->device
->physical_device
->rad_info
.chip_class
< GFX9
)
2579 uint32_t pa_sc_binner_cntl_0
=
2580 S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC
) |
2581 S_028C44_DISABLE_START_OF_PRIM(1);
2582 uint32_t db_dfsm_control
= S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF
);
2584 VkExtent2D bin_size
= radv_compute_bin_size(pipeline
, pCreateInfo
);
2586 unsigned context_states_per_bin
; /* allowed range: [1, 6] */
2587 unsigned persistent_states_per_bin
; /* allowed range: [1, 32] */
2588 unsigned fpovs_per_batch
; /* allowed range: [0, 255], 0 = unlimited */
2590 switch (pipeline
->device
->physical_device
->rad_info
.family
) {
2594 context_states_per_bin
= 1;
2595 persistent_states_per_bin
= 1;
2596 fpovs_per_batch
= 63;
2600 context_states_per_bin
= 6;
2601 persistent_states_per_bin
= 32;
2602 fpovs_per_batch
= 63;
2605 unreachable("unhandled family while determining binning state.");
2608 if (pipeline
->device
->pbb_allowed
&& bin_size
.width
&& bin_size
.height
) {
2609 pa_sc_binner_cntl_0
=
2610 S_028C44_BINNING_MODE(V_028C44_BINNING_ALLOWED
) |
2611 S_028C44_BIN_SIZE_X(bin_size
.width
== 16) |
2612 S_028C44_BIN_SIZE_Y(bin_size
.height
== 16) |
2613 S_028C44_BIN_SIZE_X_EXTEND(util_logbase2(MAX2(bin_size
.width
, 32)) - 5) |
2614 S_028C44_BIN_SIZE_Y_EXTEND(util_logbase2(MAX2(bin_size
.height
, 32)) - 5) |
2615 S_028C44_CONTEXT_STATES_PER_BIN(context_states_per_bin
- 1) |
2616 S_028C44_PERSISTENT_STATES_PER_BIN(persistent_states_per_bin
- 1) |
2617 S_028C44_DISABLE_START_OF_PRIM(1) |
2618 S_028C44_FPOVS_PER_BATCH(fpovs_per_batch
) |
2619 S_028C44_OPTIMAL_BIN_SELECTION(1);
2622 radeon_set_context_reg(ctx_cs
, R_028C44_PA_SC_BINNER_CNTL_0
,
2623 pa_sc_binner_cntl_0
);
2624 radeon_set_context_reg(ctx_cs
, R_028060_DB_DFSM_CONTROL
,
2630 radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf
*ctx_cs
,
2631 struct radv_pipeline
*pipeline
,
2632 const VkGraphicsPipelineCreateInfo
*pCreateInfo
,
2633 const struct radv_graphics_pipeline_create_info
*extra
)
2635 const VkPipelineDepthStencilStateCreateInfo
*vkds
= pCreateInfo
->pDepthStencilState
;
2636 RADV_FROM_HANDLE(radv_render_pass
, pass
, pCreateInfo
->renderPass
);
2637 struct radv_subpass
*subpass
= pass
->subpasses
+ pCreateInfo
->subpass
;
2638 struct radv_shader_variant
*ps
= pipeline
->shaders
[MESA_SHADER_FRAGMENT
];
2639 struct radv_render_pass_attachment
*attachment
= NULL
;
2640 uint32_t db_depth_control
= 0, db_stencil_control
= 0;
2641 uint32_t db_render_control
= 0, db_render_override2
= 0;
2642 uint32_t db_render_override
= 0;
2644 if (subpass
->depth_stencil_attachment
)
2645 attachment
= pass
->attachments
+ subpass
->depth_stencil_attachment
->attachment
;
2647 bool has_depth_attachment
= attachment
&& vk_format_is_depth(attachment
->format
);
2648 bool has_stencil_attachment
= attachment
&& vk_format_is_stencil(attachment
->format
);
2650 if (vkds
&& has_depth_attachment
) {
2651 db_depth_control
= S_028800_Z_ENABLE(vkds
->depthTestEnable
? 1 : 0) |
2652 S_028800_Z_WRITE_ENABLE(vkds
->depthWriteEnable
? 1 : 0) |
2653 S_028800_ZFUNC(vkds
->depthCompareOp
) |
2654 S_028800_DEPTH_BOUNDS_ENABLE(vkds
->depthBoundsTestEnable
? 1 : 0);
2656 /* from amdvlk: For 4xAA and 8xAA need to decompress on flush for better performance */
2657 db_render_override2
|= S_028010_DECOMPRESS_Z_ON_FLUSH(attachment
->samples
> 2);
2660 if (has_stencil_attachment
&& vkds
&& vkds
->stencilTestEnable
) {
2661 db_depth_control
|= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
2662 db_depth_control
|= S_028800_STENCILFUNC(vkds
->front
.compareOp
);
2663 db_stencil_control
|= S_02842C_STENCILFAIL(si_translate_stencil_op(vkds
->front
.failOp
));
2664 db_stencil_control
|= S_02842C_STENCILZPASS(si_translate_stencil_op(vkds
->front
.passOp
));
2665 db_stencil_control
|= S_02842C_STENCILZFAIL(si_translate_stencil_op(vkds
->front
.depthFailOp
));
2667 db_depth_control
|= S_028800_STENCILFUNC_BF(vkds
->back
.compareOp
);
2668 db_stencil_control
|= S_02842C_STENCILFAIL_BF(si_translate_stencil_op(vkds
->back
.failOp
));
2669 db_stencil_control
|= S_02842C_STENCILZPASS_BF(si_translate_stencil_op(vkds
->back
.passOp
));
2670 db_stencil_control
|= S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(vkds
->back
.depthFailOp
));
2673 if (attachment
&& extra
) {
2674 db_render_control
|= S_028000_DEPTH_CLEAR_ENABLE(extra
->db_depth_clear
);
2675 db_render_control
|= S_028000_STENCIL_CLEAR_ENABLE(extra
->db_stencil_clear
);
2677 db_render_control
|= S_028000_RESUMMARIZE_ENABLE(extra
->db_resummarize
);
2678 db_render_control
|= S_028000_DEPTH_COMPRESS_DISABLE(extra
->db_flush_depth_inplace
);
2679 db_render_control
|= S_028000_STENCIL_COMPRESS_DISABLE(extra
->db_flush_stencil_inplace
);
2680 db_render_override2
|= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra
->db_depth_disable_expclear
);
2681 db_render_override2
|= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra
->db_stencil_disable_expclear
);
2684 db_render_override
|= S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE
) |
2685 S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE
);
2687 if (!pCreateInfo
->pRasterizationState
->depthClampEnable
&&
2688 ps
->info
.info
.ps
.writes_z
) {
2689 /* From VK_EXT_depth_range_unrestricted spec:
2691 * "The behavior described in Primitive Clipping still applies.
2692 * If depth clamping is disabled the depth values are still
2693 * clipped to 0 ≤ zc ≤ wc before the viewport transform. If
2694 * depth clamping is enabled the above equation is ignored and
2695 * the depth values are instead clamped to the VkViewport
2696 * minDepth and maxDepth values, which in the case of this
2697 * extension can be outside of the 0.0 to 1.0 range."
2699 db_render_override
|= S_02800C_DISABLE_VIEWPORT_CLAMP(1);
2702 radeon_set_context_reg(ctx_cs
, R_028800_DB_DEPTH_CONTROL
, db_depth_control
);
2703 radeon_set_context_reg(ctx_cs
, R_02842C_DB_STENCIL_CONTROL
, db_stencil_control
);
2705 radeon_set_context_reg(ctx_cs
, R_028000_DB_RENDER_CONTROL
, db_render_control
);
2706 radeon_set_context_reg(ctx_cs
, R_02800C_DB_RENDER_OVERRIDE
, db_render_override
);
2707 radeon_set_context_reg(ctx_cs
, R_028010_DB_RENDER_OVERRIDE2
, db_render_override2
);
}

static void
radv_pipeline_generate_blend_state(struct radeon_cmdbuf *ctx_cs,
				   struct radv_pipeline *pipeline,
				   const struct radv_blend_state *blend)
{
	radeon_set_context_reg_seq(ctx_cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(ctx_cs, blend->cb_blend_control, 8);

	radeon_set_context_reg(ctx_cs, R_028808_CB_COLOR_CONTROL, blend->cb_color_control);
	radeon_set_context_reg(ctx_cs, R_028B70_DB_ALPHA_TO_MASK, blend->db_alpha_to_mask);

	if (pipeline->device->physical_device->has_rbplus) {
		radeon_set_context_reg_seq(ctx_cs, R_028760_SX_MRT0_BLEND_OPT, 8);
		radeon_emit_array(ctx_cs, blend->sx_mrt_blend_opt, 8);
	}

	radeon_set_context_reg(ctx_cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(ctx_cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(ctx_cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	pipeline->graphics.col_format = blend->spi_shader_col_format;
	pipeline->graphics.cb_target_mask = blend->cb_target_mask;
}

static const VkConservativeRasterizationModeEXT
radv_get_conservative_raster_mode(const VkPipelineRasterizationStateCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationConservativeStateCreateInfoEXT *conservative_raster =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT);

	if (!conservative_raster)
		return VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT;
	return conservative_raster->conservativeRasterizationMode;
}
2748 radv_pipeline_generate_raster_state(struct radeon_cmdbuf
*ctx_cs
,
2749 struct radv_pipeline
*pipeline
,
2750 const VkGraphicsPipelineCreateInfo
*pCreateInfo
)
2752 const VkPipelineRasterizationStateCreateInfo
*vkraster
= pCreateInfo
->pRasterizationState
;
2753 const VkConservativeRasterizationModeEXT mode
=
2754 radv_get_conservative_raster_mode(vkraster
);
2755 uint32_t pa_sc_conservative_rast
= S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1);
2756 bool depth_clip_disable
= vkraster
->depthClampEnable
;
2758 const VkPipelineRasterizationDepthClipStateCreateInfoEXT
*depth_clip_state
=
2759 vk_find_struct_const(vkraster
->pNext
, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT
);
2760 if (depth_clip_state
) {
2761 depth_clip_disable
= !depth_clip_state
->depthClipEnable
;
2764 radeon_set_context_reg(ctx_cs
, R_028810_PA_CL_CLIP_CNTL
,
2765 S_028810_DX_CLIP_SPACE_DEF(1) | // vulkan uses DX conventions.
2766 S_028810_ZCLIP_NEAR_DISABLE(depth_clip_disable
? 1 : 0) |
2767 S_028810_ZCLIP_FAR_DISABLE(depth_clip_disable
? 1 : 0) |
2768 S_028810_DX_RASTERIZATION_KILL(vkraster
->rasterizerDiscardEnable
? 1 : 0) |
2769 S_028810_DX_LINEAR_ATTR_CLIP_ENA(1));
2771 radeon_set_context_reg(ctx_cs
, R_0286D4_SPI_INTERP_CONTROL_0
,
2772 S_0286D4_FLAT_SHADE_ENA(1) |
2773 S_0286D4_PNT_SPRITE_ENA(1) |
2774 S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S
) |
2775 S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T
) |
2776 S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0
) |
2777 S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1
) |
2778 S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
2780 radeon_set_context_reg(ctx_cs
, R_028BE4_PA_SU_VTX_CNTL
,
2781 S_028BE4_PIX_CENTER(1) | // TODO verify
2782 S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN
) |
2783 S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH
));
2785 radeon_set_context_reg(ctx_cs
, R_028814_PA_SU_SC_MODE_CNTL
,
2786 S_028814_FACE(vkraster
->frontFace
) |
2787 S_028814_CULL_FRONT(!!(vkraster
->cullMode
& VK_CULL_MODE_FRONT_BIT
)) |
2788 S_028814_CULL_BACK(!!(vkraster
->cullMode
& VK_CULL_MODE_BACK_BIT
)) |
2789 S_028814_POLY_MODE(vkraster
->polygonMode
!= VK_POLYGON_MODE_FILL
) |
2790 S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(vkraster
->polygonMode
)) |
2791 S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(vkraster
->polygonMode
)) |
2792 S_028814_POLY_OFFSET_FRONT_ENABLE(vkraster
->depthBiasEnable
? 1 : 0) |
2793 S_028814_POLY_OFFSET_BACK_ENABLE(vkraster
->depthBiasEnable
? 1 : 0) |
2794 S_028814_POLY_OFFSET_PARA_ENABLE(vkraster
->depthBiasEnable
? 1 : 0));
2796 /* Conservative rasterization. */
2797 if (mode
!= VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT
) {
2798 struct radv_multisample_state
*ms
= &pipeline
->graphics
.ms
;
2800 ms
->pa_sc_aa_config
|= S_028BE0_AA_MASK_CENTROID_DTMN(1);
2801 ms
->db_eqaa
|= S_028804_ENABLE_POSTZ_OVERRASTERIZATION(1) |
2802 S_028804_OVERRASTERIZATION_AMOUNT(4);
2804 pa_sc_conservative_rast
= S_028C4C_PREZ_AA_MASK_ENABLE(1) |
2805 S_028C4C_POSTZ_AA_MASK_ENABLE(1) |
2806 S_028C4C_CENTROID_SAMPLE_OVERRIDE(1);
2808 if (mode
== VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT
) {
2809 pa_sc_conservative_rast
|=
2810 S_028C4C_OVER_RAST_ENABLE(1) |
2811 S_028C4C_OVER_RAST_SAMPLE_SELECT(0) |
2812 S_028C4C_UNDER_RAST_ENABLE(0) |
2813 S_028C4C_UNDER_RAST_SAMPLE_SELECT(1) |
2814 S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(1);
2816 assert(mode
== VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT
);
2817 pa_sc_conservative_rast
|=
2818 S_028C4C_OVER_RAST_ENABLE(0) |
2819 S_028C4C_OVER_RAST_SAMPLE_SELECT(1) |
2820 S_028C4C_UNDER_RAST_ENABLE(1) |
2821 S_028C4C_UNDER_RAST_SAMPLE_SELECT(0) |
2822 S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(0);
2826 radeon_set_context_reg(ctx_cs
, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL
,
2827 pa_sc_conservative_rast
);
}

static void
radv_pipeline_generate_multisample_state(struct radeon_cmdbuf *ctx_cs,
					 struct radv_pipeline *pipeline)
{
	struct radv_multisample_state *ms = &pipeline->graphics.ms;

	radeon_set_context_reg_seq(ctx_cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(ctx_cs, R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(ctx_cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

	/* The exclusion bits can be set to improve rasterization efficiency
	 * if no sample lies on the pixel boundary (-8 sample offset). It's
	 * currently always TRUE because the driver doesn't support 16 samples.
	 */
	bool exclusion = pipeline->device->physical_device->rad_info.chip_class >= CIK;
	radeon_set_context_reg(ctx_cs, R_02882C_PA_SU_PRIM_FILTER_CNTL,
			       S_02882C_XMAX_RIGHT_EXCLUSION(exclusion) |
			       S_02882C_YMAX_BOTTOM_EXCLUSION(exclusion));
}

static void
radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
				   struct radv_pipeline *pipeline)
{
	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);

	uint32_t vgt_primitiveid_en = false;
	uint32_t vgt_gs_mode = 0;

	if (radv_pipeline_has_gs(pipeline)) {
		const struct radv_shader_variant *gs =
			pipeline->shaders[MESA_SHADER_GEOMETRY];

		vgt_gs_mode = ac_vgt_gs_mode(gs->info.gs.vertices_out,
					     pipeline->device->physical_device->rad_info.chip_class);
	} else if (outinfo->export_prim_id) {
		vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
		vgt_primitiveid_en = true;
	}

	radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
	radeon_set_context_reg(ctx_cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
}
2879 radv_pipeline_generate_hw_vs(struct radeon_cmdbuf
*ctx_cs
,
2880 struct radeon_cmdbuf
*cs
,
2881 struct radv_pipeline
*pipeline
,
2882 struct radv_shader_variant
*shader
)
2884 uint64_t va
= radv_buffer_get_va(shader
->bo
) + shader
->bo_offset
;
2886 radeon_set_sh_reg_seq(cs
, R_00B120_SPI_SHADER_PGM_LO_VS
, 4);
2887 radeon_emit(cs
, va
>> 8);
2888 radeon_emit(cs
, S_00B124_MEM_BASE(va
>> 40));
2889 radeon_emit(cs
, shader
->rsrc1
);
2890 radeon_emit(cs
, shader
->rsrc2
);
2892 const struct radv_vs_output_info
*outinfo
= get_vs_output_info(pipeline
);
2893 unsigned clip_dist_mask
, cull_dist_mask
, total_mask
;
2894 clip_dist_mask
= outinfo
->clip_dist_mask
;
2895 cull_dist_mask
= outinfo
->cull_dist_mask
;
2896 total_mask
= clip_dist_mask
| cull_dist_mask
;
2897 bool misc_vec_ena
= outinfo
->writes_pointsize
||
2898 outinfo
->writes_layer
||
2899 outinfo
->writes_viewport_index
;
2901 radeon_set_context_reg(ctx_cs
, R_0286C4_SPI_VS_OUT_CONFIG
,
2902 S_0286C4_VS_EXPORT_COUNT(MAX2(1, outinfo
->param_exports
) - 1));
2904 radeon_set_context_reg(ctx_cs
, R_02870C_SPI_SHADER_POS_FORMAT
,
2905 S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP
) |
2906 S_02870C_POS1_EXPORT_FORMAT(outinfo
->pos_exports
> 1 ?
2907 V_02870C_SPI_SHADER_4COMP
:
2908 V_02870C_SPI_SHADER_NONE
) |
2909 S_02870C_POS2_EXPORT_FORMAT(outinfo
->pos_exports
> 2 ?
2910 V_02870C_SPI_SHADER_4COMP
:
2911 V_02870C_SPI_SHADER_NONE
) |
2912 S_02870C_POS3_EXPORT_FORMAT(outinfo
->pos_exports
> 3 ?
2913 V_02870C_SPI_SHADER_4COMP
:
2914 V_02870C_SPI_SHADER_NONE
));
2916 radeon_set_context_reg(ctx_cs
, R_028818_PA_CL_VTE_CNTL
,
2917 S_028818_VTX_W0_FMT(1) |
2918 S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
2919 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
2920 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
2922 radeon_set_context_reg(ctx_cs
, R_02881C_PA_CL_VS_OUT_CNTL
,
2923 S_02881C_USE_VTX_POINT_SIZE(outinfo
->writes_pointsize
) |
2924 S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo
->writes_layer
) |
2925 S_02881C_USE_VTX_VIEWPORT_INDX(outinfo
->writes_viewport_index
) |
2926 S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena
) |
2927 S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena
) |
2928 S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask
& 0x0f) != 0) |
2929 S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask
& 0xf0) != 0) |
2930 cull_dist_mask
<< 8 |
2933 if (pipeline
->device
->physical_device
->rad_info
.chip_class
<= VI
)
2934 radeon_set_context_reg(ctx_cs
, R_028AB4_VGT_REUSE_OFF
,
2935 outinfo
->writes_viewport_index
);
}

static void
radv_pipeline_generate_hw_es(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
	radeon_emit(cs, shader->rsrc1);
	radeon_emit(cs, shader->rsrc2);
}

static void
radv_pipeline_generate_hw_ls(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader,
			     const struct radv_tessellation_state *tess)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	uint32_t rsrc2 = shader->rsrc2;

	radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));

	rsrc2 |= S_00B52C_LDS_SIZE(tess->lds_size);
	if (pipeline->device->physical_device->rad_info.chip_class == CIK &&
	    pipeline->device->physical_device->rad_info.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);

	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, shader->rsrc1);
	radeon_emit(cs, rsrc2);
}

static void
radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader,
			     const struct radv_tessellation_state *tess)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_sh_reg_seq(cs, R_00B410_SPI_SHADER_PGM_LO_LS, 2);
		radeon_emit(cs, va >> 8);
		radeon_emit(cs, S_00B414_MEM_BASE(va >> 40));

		radeon_set_sh_reg_seq(cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
		radeon_emit(cs, shader->rsrc1);
		radeon_emit(cs, shader->rsrc2 |
			        S_00B42C_LDS_SIZE(tess->lds_size));
	} else {
		radeon_set_sh_reg_seq(cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
		radeon_emit(cs, va >> 8);
		radeon_emit(cs, S_00B424_MEM_BASE(va >> 40));
		radeon_emit(cs, shader->rsrc1);
		radeon_emit(cs, shader->rsrc2);
	}
}

static void
radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
				     struct radeon_cmdbuf *cs,
				     struct radv_pipeline *pipeline,
				     const struct radv_tessellation_state *tess)
{
	struct radv_shader_variant *vs;

	/* Skip shaders merged into HS/GS */
	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	if (!vs)
		return;

	if (vs->info.vs.as_ls)
		radv_pipeline_generate_hw_ls(cs, pipeline, vs, tess);
	else if (vs->info.vs.as_es)
		radv_pipeline_generate_hw_es(cs, pipeline, vs);
	else
		radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, vs);
}

static void
radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *ctx_cs,
				    struct radeon_cmdbuf *cs,
				    struct radv_pipeline *pipeline,
				    const struct radv_tessellation_state *tess)
{
	if (!radv_pipeline_has_tess(pipeline))
		return;

	struct radv_shader_variant *tes, *tcs;

	tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
	tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];

	if (tes) {
		if (tes->info.tes.as_es)
			radv_pipeline_generate_hw_es(cs, pipeline, tes);
		else
			radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, tes);
	}

	radv_pipeline_generate_hw_hs(cs, pipeline, tcs, tess);

	radeon_set_context_reg(ctx_cs, R_028B6C_VGT_TF_PARAM,
			       tess->tf_param);

	if (pipeline->device->physical_device->rad_info.chip_class >= CIK)
		radeon_set_context_reg_idx(ctx_cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   tess->ls_hs_config);
	else
		radeon_set_context_reg(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
				       tess->ls_hs_config);
}
3057 radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf
*ctx_cs
,
3058 struct radeon_cmdbuf
*cs
,
3059 struct radv_pipeline
*pipeline
,
3060 const struct radv_gs_state
*gs_state
)
3062 struct radv_shader_variant
*gs
;
3063 unsigned gs_max_out_vertices
;
3064 uint8_t *num_components
;
3069 gs
= pipeline
->shaders
[MESA_SHADER_GEOMETRY
];
3073 gs_max_out_vertices
= gs
->info
.gs
.vertices_out
;
3074 max_stream
= gs
->info
.info
.gs
.max_stream
;
3075 num_components
= gs
->info
.info
.gs
.num_stream_output_components
;
3077 offset
= num_components
[0] * gs_max_out_vertices
;
3079 radeon_set_context_reg_seq(ctx_cs
, R_028A60_VGT_GSVS_RING_OFFSET_1
, 3);
3080 radeon_emit(ctx_cs
, offset
);
3081 if (max_stream
>= 1)
3082 offset
+= num_components
[1] * gs_max_out_vertices
;
3083 radeon_emit(ctx_cs
, offset
);
3084 if (max_stream
>= 2)
3085 offset
+= num_components
[2] * gs_max_out_vertices
;
3086 radeon_emit(ctx_cs
, offset
);
3087 if (max_stream
>= 3)
3088 offset
+= num_components
[3] * gs_max_out_vertices
;
3089 radeon_set_context_reg(ctx_cs
, R_028AB0_VGT_GSVS_RING_ITEMSIZE
, offset
);
3091 radeon_set_context_reg(ctx_cs
, R_028B38_VGT_GS_MAX_VERT_OUT
, gs
->info
.gs
.vertices_out
);
3093 radeon_set_context_reg_seq(ctx_cs
, R_028B5C_VGT_GS_VERT_ITEMSIZE
, 4);
3094 radeon_emit(ctx_cs
, num_components
[0]);
3095 radeon_emit(ctx_cs
, (max_stream
>= 1) ? num_components
[1] : 0);
3096 radeon_emit(ctx_cs
, (max_stream
>= 2) ? num_components
[2] : 0);
3097 radeon_emit(ctx_cs
, (max_stream
>= 3) ? num_components
[3] : 0);
3099 uint32_t gs_num_invocations
= gs
->info
.gs
.invocations
;
3100 radeon_set_context_reg(ctx_cs
, R_028B90_VGT_GS_INSTANCE_CNT
,
3101 S_028B90_CNT(MIN2(gs_num_invocations
, 127)) |
3102 S_028B90_ENABLE(gs_num_invocations
> 0));
3104 radeon_set_context_reg(ctx_cs
, R_028AAC_VGT_ESGS_RING_ITEMSIZE
,
3105 gs_state
->vgt_esgs_ring_itemsize
);
3107 va
= radv_buffer_get_va(gs
->bo
) + gs
->bo_offset
;
3109 if (pipeline
->device
->physical_device
->rad_info
.chip_class
>= GFX9
) {
3110 radeon_set_sh_reg_seq(cs
, R_00B210_SPI_SHADER_PGM_LO_ES
, 2);
3111 radeon_emit(cs
, va
>> 8);
3112 radeon_emit(cs
, S_00B214_MEM_BASE(va
>> 40));
3114 radeon_set_sh_reg_seq(cs
, R_00B228_SPI_SHADER_PGM_RSRC1_GS
, 2);
3115 radeon_emit(cs
, gs
->rsrc1
);
3116 radeon_emit(cs
, gs
->rsrc2
| S_00B22C_LDS_SIZE(gs_state
->lds_size
));
3118 radeon_set_context_reg(ctx_cs
, R_028A44_VGT_GS_ONCHIP_CNTL
, gs_state
->vgt_gs_onchip_cntl
);
3119 radeon_set_context_reg(ctx_cs
, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP
, gs_state
->vgt_gs_max_prims_per_subgroup
);
3121 radeon_set_sh_reg_seq(cs
, R_00B220_SPI_SHADER_PGM_LO_GS
, 4);
3122 radeon_emit(cs
, va
>> 8);
3123 radeon_emit(cs
, S_00B224_MEM_BASE(va
>> 40));
3124 radeon_emit(cs
, gs
->rsrc1
);
3125 radeon_emit(cs
, gs
->rsrc2
);
3128 radv_pipeline_generate_hw_vs(ctx_cs
, cs
, pipeline
, pipeline
->gs_copy_shader
);
}

static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade, bool float16)
{
	uint32_t ps_input_cntl;
	if (offset <= AC_EXP_PARAM_OFFSET_31) {
		ps_input_cntl = S_028644_OFFSET(offset);
		if (flat_shade)
			ps_input_cntl |= S_028644_FLAT_SHADE(1);
		if (float16) {
			ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
					 S_028644_ATTR0_VALID(1);
		}
	} else {
		/* The input is a DEFAULT_VAL constant. */
		assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
		       offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
		offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
		ps_input_cntl = S_028644_OFFSET(0x20) |
			S_028644_DEFAULT_VAL(offset);
	}
	return ps_input_cntl;
}
3154 radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf
*ctx_cs
,
3155 struct radv_pipeline
*pipeline
)
3157 struct radv_shader_variant
*ps
= pipeline
->shaders
[MESA_SHADER_FRAGMENT
];
3158 const struct radv_vs_output_info
*outinfo
= get_vs_output_info(pipeline
);
3159 uint32_t ps_input_cntl
[32];
3161 unsigned ps_offset
= 0;
3163 if (ps
->info
.info
.ps
.prim_id_input
) {
3164 unsigned vs_offset
= outinfo
->vs_output_param_offset
[VARYING_SLOT_PRIMITIVE_ID
];
3165 if (vs_offset
!= AC_EXP_PARAM_UNDEFINED
) {
3166 ps_input_cntl
[ps_offset
] = offset_to_ps_input(vs_offset
, true, false);
3171 if (ps
->info
.info
.ps
.layer_input
||
3172 ps
->info
.info
.ps
.uses_input_attachments
||
3173 ps
->info
.info
.needs_multiview_view_index
) {
3174 unsigned vs_offset
= outinfo
->vs_output_param_offset
[VARYING_SLOT_LAYER
];
3175 if (vs_offset
!= AC_EXP_PARAM_UNDEFINED
)
3176 ps_input_cntl
[ps_offset
] = offset_to_ps_input(vs_offset
, true, false);
3178 ps_input_cntl
[ps_offset
] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000
, true, false);
3182 if (ps
->info
.info
.ps
.has_pcoord
) {
3184 val
= S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
3185 ps_input_cntl
[ps_offset
] = val
;
3189 if (ps
->info
.info
.ps
.num_input_clips_culls
) {
3192 vs_offset
= outinfo
->vs_output_param_offset
[VARYING_SLOT_CLIP_DIST0
];
3193 if (vs_offset
!= AC_EXP_PARAM_UNDEFINED
) {
3194 ps_input_cntl
[ps_offset
] = offset_to_ps_input(vs_offset
, false, false);
3198 vs_offset
= outinfo
->vs_output_param_offset
[VARYING_SLOT_CLIP_DIST1
];
3199 if (vs_offset
!= AC_EXP_PARAM_UNDEFINED
&&
3200 ps
->info
.info
.ps
.num_input_clips_culls
> 4) {
3201 ps_input_cntl
[ps_offset
] = offset_to_ps_input(vs_offset
, false, false);
3206 for (unsigned i
= 0; i
< 32 && (1u << i
) <= ps
->info
.fs
.input_mask
; ++i
) {
3210 if (!(ps
->info
.fs
.input_mask
& (1u << i
)))
3213 vs_offset
= outinfo
->vs_output_param_offset
[VARYING_SLOT_VAR0
+ i
];
3214 if (vs_offset
== AC_EXP_PARAM_UNDEFINED
) {
3215 ps_input_cntl
[ps_offset
] = S_028644_OFFSET(0x20);
3220 flat_shade
= !!(ps
->info
.fs
.flat_shaded_mask
& (1u << ps_offset
));
3221 float16
= !!(ps
->info
.fs
.float16_shaded_mask
& (1u << ps_offset
));
3223 ps_input_cntl
[ps_offset
] = offset_to_ps_input(vs_offset
, flat_shade
, float16
);
3228 radeon_set_context_reg_seq(ctx_cs
, R_028644_SPI_PS_INPUT_CNTL_0
, ps_offset
);
3229 for (unsigned i
= 0; i
< ps_offset
; i
++) {
3230 radeon_emit(ctx_cs
, ps_input_cntl
[i
]);
3236 radv_compute_db_shader_control(const struct radv_device
*device
,
3237 const struct radv_pipeline
*pipeline
,
3238 const struct radv_shader_variant
*ps
)
3241 if (ps
->info
.fs
.early_fragment_test
|| !ps
->info
.info
.ps
.writes_memory
)
3242 z_order
= V_02880C_EARLY_Z_THEN_LATE_Z
;
3244 z_order
= V_02880C_LATE_Z
;
3246 bool disable_rbplus
= device
->physical_device
->has_rbplus
&&
3247 !device
->physical_device
->rbplus_allowed
;
3249 /* It shouldn't be needed to export gl_SampleMask when MSAA is disabled
3250 * but this appears to break Project Cars (DXVK). See
3251 * https://bugs.freedesktop.org/show_bug.cgi?id=109401
3253 bool mask_export_enable
= ps
->info
.info
.ps
.writes_sample_mask
;
3255 return S_02880C_Z_EXPORT_ENABLE(ps
->info
.info
.ps
.writes_z
) |
3256 S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps
->info
.info
.ps
.writes_stencil
) |
3257 S_02880C_KILL_ENABLE(!!ps
->info
.fs
.can_discard
) |
3258 S_02880C_MASK_EXPORT_ENABLE(mask_export_enable
) |
3259 S_02880C_Z_ORDER(z_order
) |
3260 S_02880C_DEPTH_BEFORE_SHADER(ps
->info
.fs
.early_fragment_test
) |
3261 S_02880C_EXEC_ON_HIER_FAIL(ps
->info
.info
.ps
.writes_memory
) |
3262 S_02880C_EXEC_ON_NOOP(ps
->info
.info
.ps
.writes_memory
) |
3263 S_02880C_DUAL_QUAD_DISABLE(disable_rbplus
);
3267 radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf
*ctx_cs
,
3268 struct radeon_cmdbuf
*cs
,
3269 struct radv_pipeline
*pipeline
)
3271 struct radv_shader_variant
*ps
;
3273 assert (pipeline
->shaders
[MESA_SHADER_FRAGMENT
]);
3275 ps
= pipeline
->shaders
[MESA_SHADER_FRAGMENT
];
3276 va
= radv_buffer_get_va(ps
->bo
) + ps
->bo_offset
;
3278 radeon_set_sh_reg_seq(cs
, R_00B020_SPI_SHADER_PGM_LO_PS
, 4);
3279 radeon_emit(cs
, va
>> 8);
3280 radeon_emit(cs
, S_00B024_MEM_BASE(va
>> 40));
3281 radeon_emit(cs
, ps
->rsrc1
);
3282 radeon_emit(cs
, ps
->rsrc2
);
3284 radeon_set_context_reg(ctx_cs
, R_02880C_DB_SHADER_CONTROL
,
3285 radv_compute_db_shader_control(pipeline
->device
,
3288 radeon_set_context_reg(ctx_cs
, R_0286CC_SPI_PS_INPUT_ENA
,
3289 ps
->config
.spi_ps_input_ena
);
3291 radeon_set_context_reg(ctx_cs
, R_0286D0_SPI_PS_INPUT_ADDR
,
3292 ps
->config
.spi_ps_input_addr
);
3294 radeon_set_context_reg(ctx_cs
, R_0286D8_SPI_PS_IN_CONTROL
,
3295 S_0286D8_NUM_INTERP(ps
->info
.fs
.num_interp
));
3297 radeon_set_context_reg(ctx_cs
, R_0286E0_SPI_BARYC_CNTL
, pipeline
->graphics
.spi_baryc_cntl
);
3299 radeon_set_context_reg(ctx_cs
, R_028710_SPI_SHADER_Z_FORMAT
,
3300 ac_get_spi_shader_z_format(ps
->info
.info
.ps
.writes_z
,
3301 ps
->info
.info
.ps
.writes_stencil
,
3302 ps
->info
.info
.ps
.writes_sample_mask
));
3304 if (pipeline
->device
->dfsm_allowed
) {
3305 /* optimise this? */
3306 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
3307 radeon_emit(cs
, EVENT_TYPE(V_028A90_FLUSH_DFSM
) | EVENT_INDEX(0));
	}
}

static void
radv_pipeline_generate_vgt_vertex_reuse(struct radeon_cmdbuf *ctx_cs,
					struct radv_pipeline *pipeline)
{
	if (pipeline->device->physical_device->rad_info.family < CHIP_POLARIS10)
		return;

	unsigned vtx_reuse_depth = 30;
	if (radv_pipeline_has_tess(pipeline) &&
	    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD) {
		vtx_reuse_depth = 14;
	}
	radeon_set_context_reg(ctx_cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
			       S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
}

static uint32_t
radv_compute_vgt_shader_stages_en(const struct radv_pipeline *pipeline)
{
	uint32_t stages = 0;
	if (radv_pipeline_has_tess(pipeline)) {
		stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
			S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);

		if (radv_pipeline_has_gs(pipeline))
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
				S_028B54_GS_EN(1) |
				S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
		else
			stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);

	} else if (radv_pipeline_has_gs(pipeline))
		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
			S_028B54_GS_EN(1) |
			S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);

	return stages;
}
static uint32_t
radv_compute_cliprect_rule(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
			vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);

	if (!discard_rectangle_info)
		return 0xffff;

	unsigned mask = 0;

	for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
		/* Interpret i as a bitmask, and then set the bit in the mask if
		 * that combination of rectangles in which the pixel is contained
		 * should pass the cliprect test. */
		unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);

		if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
		    !relevant_subset)
			continue;

		if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
		    relevant_subset)
			continue;

		mask |= 1u << i;
	}

	return mask;
}
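/* Build the pipeline's PM4 packets. Context-register writes are recorded in
 * pipeline->ctx_cs and hashed, presumably so redundant context state can be
 * skipped when binding pipelines; everything else goes into pipeline->cs. */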
static void
radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
			   const VkGraphicsPipelineCreateInfo *pCreateInfo,
			   const struct radv_graphics_pipeline_create_info *extra,
			   const struct radv_blend_state *blend,
			   const struct radv_tessellation_state *tess,
			   const struct radv_gs_state *gs,
			   unsigned prim, unsigned gs_out)
{
	struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
	struct radeon_cmdbuf *cs = &pipeline->cs;

	cs->max_dw = 64;
	ctx_cs->max_dw = 256;
	cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw));
	ctx_cs->buf = cs->buf + cs->max_dw;

	radv_pipeline_generate_depth_stencil_state(ctx_cs, pipeline, pCreateInfo, extra);
	radv_pipeline_generate_blend_state(ctx_cs, pipeline, blend);
	radv_pipeline_generate_raster_state(ctx_cs, pipeline, pCreateInfo);
	radv_pipeline_generate_multisample_state(ctx_cs, pipeline);
	radv_pipeline_generate_vgt_gs_mode(ctx_cs, pipeline);
	radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline, tess);
	radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline, tess);
	radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline, gs);
	radv_pipeline_generate_fragment_shader(ctx_cs, cs, pipeline);
	radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
	radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
	radv_pipeline_generate_binning_state(ctx_cs, pipeline, pCreateInfo);

	radeon_set_context_reg(ctx_cs, R_0286E8_SPI_TMPRING_SIZE,
			       S_0286E8_WAVES(pipeline->max_waves) |
			       S_0286E8_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));

	if (pipeline->device->physical_device->rad_info.chip_class >= CIK) {
		radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
	} else {
		radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
	}

	radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);

	radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, radv_compute_cliprect_rule(pCreateInfo));

	pipeline->ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);

	assert(ctx_cs->cdw <= ctx_cs->max_dw);
	assert(cs->cdw <= cs->max_dw);
}
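/* Precompute the IA_MULTI_VGT_PARAM fields that only depend on the pipeline:
 * primgroup size, WD_SWITCH_ON_EOP, SWITCH_ON_EOI and the partial ES/VS wave
 * flags, including several per-family hardware workarounds. */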
static struct radv_ia_multi_vgt_param_helpers
radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
					const struct radv_tessellation_state *tess,
					uint32_t prim)
{
	struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param = {0};
	const struct radv_device *device = pipeline->device;

	if (radv_pipeline_has_tess(pipeline))
		ia_multi_vgt_param.primgroup_size = tess->num_patches;
	else if (radv_pipeline_has_gs(pipeline))
		ia_multi_vgt_param.primgroup_size = 64;
	else
		ia_multi_vgt_param.primgroup_size = 128; /* recommended without a GS */

	/* GS requirement. */
	ia_multi_vgt_param.partial_es_wave = false;
	if (radv_pipeline_has_gs(pipeline) && device->physical_device->rad_info.chip_class <= VI)
		if (SI_GS_PER_ES / ia_multi_vgt_param.primgroup_size >= pipeline->device->gs_table_depth - 3)
			ia_multi_vgt_param.partial_es_wave = true;

	ia_multi_vgt_param.wd_switch_on_eop = false;
	if (device->physical_device->rad_info.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (device->physical_device->rad_info.max_se < 4 ||
		    prim == V_008958_DI_PT_POLYGON ||
		    prim == V_008958_DI_PT_LINELOOP ||
		    prim == V_008958_DI_PT_TRIFAN ||
		    prim == V_008958_DI_PT_TRISTRIP_ADJ ||
		    (pipeline->graphics.prim_restart_enable &&
		     (device->physical_device->rad_info.family < CHIP_POLARIS10 ||
		      (prim != V_008958_DI_PT_POINTLIST &&
		       prim != V_008958_DI_PT_LINESTRIP))))
			ia_multi_vgt_param.wd_switch_on_eop = true;
	}

	ia_multi_vgt_param.ia_switch_on_eoi = false;
	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_gs(pipeline) &&
	    pipeline->shaders[MESA_SHADER_GEOMETRY]->info.info.uses_prim_id)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_tess(pipeline)) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.uses_prim_id ||
		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.info.uses_prim_id)
			ia_multi_vgt_param.ia_switch_on_eoi = true;
	}

	ia_multi_vgt_param.partial_vs_wave = false;
	if (radv_pipeline_has_tess(pipeline)) {
		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((device->physical_device->rad_info.family == CHIP_TAHITI ||
		     device->physical_device->rad_info.family == CHIP_PITCAIRN ||
		     device->physical_device->rad_info.family == CHIP_BONAIRE) &&
		    radv_pipeline_has_gs(pipeline))
			ia_multi_vgt_param.partial_vs_wave = true;
		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (device->has_distributed_tess) {
			if (radv_pipeline_has_gs(pipeline)) {
				if (device->physical_device->rad_info.chip_class <= VI)
					ia_multi_vgt_param.partial_es_wave = true;
			} else {
				ia_multi_vgt_param.partial_vs_wave = true;
			}
		}
	}

	/* Workaround for a VGT hang when strip primitive types are used with
	 * primitive restart.
	 */
	if (pipeline->graphics.prim_restart_enable &&
	    (prim == V_008958_DI_PT_LINESTRIP ||
	     prim == V_008958_DI_PT_TRISTRIP ||
	     prim == V_008958_DI_PT_LINESTRIP_ADJ ||
	     prim == V_008958_DI_PT_TRISTRIP_ADJ)) {
		ia_multi_vgt_param.partial_vs_wave = true;
	}

	if (radv_pipeline_has_gs(pipeline)) {
		/* On these chips there is the possibility of a hang if the
		 * pipeline uses a GS and partial_vs_wave is not set.
		 *
		 * This mostly does not hit 4-SE chips, as those typically set
		 * ia_switch_on_eoi and then partial_vs_wave is set for pipelines
		 * with GS due to another workaround.
		 *
		 * Reproducer: https://bugs.freedesktop.org/show_bug.cgi?id=109242
		 */
		if (device->physical_device->rad_info.family == CHIP_TONGA ||
		    device->physical_device->rad_info.family == CHIP_FIJI ||
		    device->physical_device->rad_info.family == CHIP_POLARIS10 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS11 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS12 ||
		    device->physical_device->rad_info.family == CHIP_VEGAM) {
			ia_multi_vgt_param.partial_vs_wave = true;
		}
	}

	ia_multi_vgt_param.base =
		S_028AA8_PRIMGROUP_SIZE(ia_multi_vgt_param.primgroup_size - 1) |
		/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
		S_028AA8_MAX_PRIMGRP_IN_WAVE(device->physical_device->rad_info.chip_class == VI ? 2 : 0) |
		S_030960_EN_INST_OPT_BASIC(device->physical_device->rad_info.chip_class >= GFX9) |
		S_030960_EN_INST_OPT_ADV(device->physical_device->rad_info.chip_class >= GFX9);

	return ia_multi_vgt_param;
}
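/* Translate the Vulkan vertex input description into pre-packed hardware
 * state: one buffer-descriptor dword (DST_SEL/NUM_FORMAT/DATA_FORMAT) plus
 * format size, offset and binding per attribute, and the per-binding strides,
 * presumably so vertex buffer descriptors can be filled in cheaply at draw
 * time. */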
static void
radv_compute_vertex_input_state(struct radv_pipeline *pipeline,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineVertexInputStateCreateInfo *vi_info =
		pCreateInfo->pVertexInputState;
	struct radv_vertex_elements_info *velems = &pipeline->vertex_elements;

	for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
		const VkVertexInputAttributeDescription *desc =
			&vi_info->pVertexAttributeDescriptions[i];
		unsigned loc = desc->location;
		const struct vk_format_description *format_desc;
		int first_non_void;
		uint32_t num_format, data_format;
		format_desc = vk_format_description(desc->format);
		first_non_void = vk_format_get_first_non_void_channel(desc->format);

		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);

		velems->rsrc_word3[loc] = S_008F0C_DST_SEL_X(si_map_swizzle(format_desc->swizzle[0])) |
			S_008F0C_DST_SEL_Y(si_map_swizzle(format_desc->swizzle[1])) |
			S_008F0C_DST_SEL_Z(si_map_swizzle(format_desc->swizzle[2])) |
			S_008F0C_DST_SEL_W(si_map_swizzle(format_desc->swizzle[3])) |
			S_008F0C_NUM_FORMAT(num_format) |
			S_008F0C_DATA_FORMAT(data_format);
		velems->format_size[loc] = format_desc->block.bits / 8;
		velems->offset[loc] = desc->offset;
		velems->binding[loc] = desc->binding;
		velems->count = MAX2(velems->count, loc + 1);
	}

	for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *desc =
			&vi_info->pVertexBindingDescriptions[i];

		pipeline->binding_stride[desc->binding] = desc->stride;
	}
}
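/* Return the last geometry-processing stage (GS, then TES, then VS) that
 * writes transform feedback outputs, or NULL if streamout is not used. */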
static struct radv_shader_variant *
radv_pipeline_get_streamout_shader(struct radv_pipeline *pipeline)
{
	int i;

	for (i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
		struct radv_shader_variant *shader =
			radv_get_shader(pipeline, i);

		if (shader && shader->info.info.so.num_outputs > 0)
			return shader;
	}

	return NULL;
}
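/* Top-level graphics pipeline initialization: compile the shaders, derive
 * blend/multisample/tessellation/GS state, the IA_MULTI_VGT_PARAM helpers,
 * the vertex input layout and the draw-time user SGPR locations, initialize
 * scratch, and finally record the pipeline's PM4 packets. */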
static VkResult
radv_pipeline_init(struct radv_pipeline *pipeline,
		   struct radv_device *device,
		   struct radv_pipeline_cache *cache,
		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   const struct radv_graphics_pipeline_create_info *extra)
{
	VkResult result;
	bool has_view_index = false;

	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	if (subpass->view_mask)
		has_view_index = true;

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	struct radv_blend_state blend = radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);

	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
		pStages[stage] = &pCreateInfo->pStages[i];
	}

	struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend, has_view_index);
	radv_create_shaders(pipeline, device, cache, &key, pStages, pCreateInfo->flags);

	pipeline->graphics.spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	radv_pipeline_init_multisample_state(pipeline, &blend, pCreateInfo);
	uint32_t gs_out;
	uint32_t prim = si_translate_prim(pCreateInfo->pInputAssemblyState->topology);

	pipeline->graphics.can_use_guardband = radv_prim_can_use_guardband(pCreateInfo->pInputAssemblyState->topology);

	if (radv_pipeline_has_gs(pipeline)) {
		gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
		pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	} else {
		gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
	}
	if (extra && extra->use_rectlist) {
		prim = V_008958_DI_PT_RECTLIST;
		gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
		pipeline->graphics.can_use_guardband = true;
	}
	pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;
	/* prim vertex count will need TESS changes */
	pipeline->graphics.prim_vertex_count = prim_size_table[prim];

	radv_pipeline_init_dynamic_state(pipeline, pCreateInfo);

	/* Ensure that some export memory is always allocated, for two reasons:
	 *
	 * 1) Correctness: The hardware ignores the EXEC mask if no export
	 *    memory is allocated, so KILL and alpha test do not work correctly
	 *    without this.
	 * 2) Performance: Every shader needs at least a NULL export, even when
	 *    it writes no color/depth output. The NULL export instruction
	 *    stalls without this setting.
	 *
	 * Don't add this to CB_SHADER_MASK.
	 */
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	if (!blend.spi_shader_col_format) {
		if (!ps->info.info.ps.writes_z &&
		    !ps->info.info.ps.writes_stencil &&
		    !ps->info.info.ps.writes_sample_mask)
			blend.spi_shader_col_format = V_028714_SPI_SHADER_32_R;
	}

	for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
		if (pipeline->shaders[i]) {
			pipeline->need_indirect_descriptor_sets |= pipeline->shaders[i]->info.need_indirect_descriptor_sets;
		}
	}

	struct radv_gs_state gs = {0};
	if (radv_pipeline_has_gs(pipeline)) {
		gs = calculate_gs_info(pCreateInfo, pipeline);
		calculate_gs_ring_sizes(pipeline, &gs);
	}

	struct radv_tessellation_state tess = {0};
	if (radv_pipeline_has_tess(pipeline)) {
		if (prim == V_008958_DI_PT_PATCH) {
			pipeline->graphics.prim_vertex_count.min = pCreateInfo->pTessellationState->patchControlPoints;
			pipeline->graphics.prim_vertex_count.incr = 1;
		}
		tess = calculate_tess_state(pipeline, pCreateInfo);
	}

	pipeline->graphics.ia_multi_vgt_param = radv_compute_ia_multi_vgt_param_helpers(pipeline, &tess, prim);

	radv_compute_vertex_input_state(pipeline, pCreateInfo);

	for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
		pipeline->user_data_0[i] = radv_pipeline_stage_to_user_data_0(pipeline, i, device->physical_device->rad_info.chip_class);
	struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX,
								AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	if (loc->sgpr_idx != -1) {
		pipeline->graphics.vtx_base_sgpr = pipeline->user_data_0[MESA_SHADER_VERTEX];
		pipeline->graphics.vtx_base_sgpr += loc->sgpr_idx * 4;
		if (radv_get_shader(pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id)
			pipeline->graphics.vtx_emit_num = 3;
		else
			pipeline->graphics.vtx_emit_num = 2;
	}

	/* Find the last vertex shader stage that eventually uses streamout. */
	pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);

	result = radv_pipeline_scratch_init(device, pipeline);
	radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, &gs, prim, gs_out);

	return result;
}
VkResult
radv_graphics_pipeline_create(
	VkDevice _device,
	VkPipelineCache _cache,
	const VkGraphicsPipelineCreateInfo *pCreateInfo,
	const struct radv_graphics_pipeline_create_info *extra,
	const VkAllocationCallbacks *pAllocator,
	VkPipeline *pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	result = radv_pipeline_init(pipeline, device, cache,
				    pCreateInfo, extra);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}
VkResult radv_CreateGraphicsPipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkGraphicsPipelineCreateInfo*         pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_graphics_pipeline_create(_device,
						  pipelineCache,
						  &pCreateInfos[i],
						  NULL, pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}
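/* Emit the compute pipeline's PM4 packets: the shader address, RSRC1/RSRC2,
 * the scratch (TMPRING) size, the compute resource limits and the threadgroup
 * dimensions, all into a small fixed-size command stream. */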
static void
radv_compute_generate_pm4(struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *compute_shader;
	struct radv_device *device = pipeline->device;
	unsigned compute_resource_limits;
	unsigned waves_per_threadgroup;
	uint64_t va;

	pipeline->cs.buf = malloc(20 * 4);
	pipeline->cs.max_dw = 20;

	compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(&pipeline->cs, va >> 8);
	radeon_emit(&pipeline->cs, S_00B834_DATA(va >> 40));

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(&pipeline->cs, compute_shader->rsrc1);
	radeon_emit(&pipeline->cs, compute_shader->rsrc2);

	radeon_set_sh_reg(&pipeline->cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(pipeline->max_waves) |
			  S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	/* Calculate best compute resource limits. */
	waves_per_threadgroup =
		DIV_ROUND_UP(compute_shader->info.cs.block_size[0] *
			     compute_shader->info.cs.block_size[1] *
			     compute_shader->info.cs.block_size[2], 64);
	compute_resource_limits =
		S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);

	if (device->physical_device->rad_info.chip_class >= CIK) {
		unsigned num_cu_per_se =
			device->physical_device->rad_info.num_good_compute_units /
			device->physical_device->rad_info.max_se;

		/* Force even distribution on all SIMDs in CU if the workgroup
		 * size is 64. This has shown some good improvements if # of
		 * CUs per SE is not a multiple of 4.
		 */
		if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
			compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
	}

	radeon_set_sh_reg(&pipeline->cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  compute_resource_limits);

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));

	assert(pipeline->cs.cdw <= pipeline->cs.max_dw);
}
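/* Create a compute pipeline: allocate the object, compile the compute shader
 * through radv_create_shaders(), initialize scratch and record the PM4. */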
static VkResult radv_compute_pipeline_create(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkComputePipelineCreateInfo*          pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	pStages[MESA_SHADER_COMPUTE] = &pCreateInfo->stage;
	radv_create_shaders(pipeline, device, cache, &(struct radv_pipeline_key) {0}, pStages, pCreateInfo->flags);

	pipeline->user_data_0[MESA_SHADER_COMPUTE] = radv_pipeline_stage_to_user_data_0(pipeline, MESA_SHADER_COMPUTE, device->physical_device->rad_info.chip_class);
	pipeline->need_indirect_descriptor_sets |= pipeline->shaders[MESA_SHADER_COMPUTE]->info.need_indirect_descriptor_sets;
	result = radv_pipeline_scratch_init(device, pipeline);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	radv_compute_generate_pm4(pipeline);

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}
VkResult radv_CreateComputePipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkComputePipelineCreateInfo*          pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;

	unsigned i = 0;
	for (; i < count; i++) {
		VkResult r;
		r = radv_compute_pipeline_create(_device, pipelineCache,
						 &pCreateInfos[i],
						 pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}