/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
28 #include "util/mesa-sha1.h"
29 #include "util/u_atomic.h"
30 #include "radv_debug.h"
31 #include "radv_private.h"
33 #include "radv_shader.h"
35 #include "nir/nir_builder.h"
36 #include "nir/nir_xfb_info.h"
37 #include "spirv/nir_spirv.h"
40 #include <llvm-c/Core.h>
41 #include <llvm-c/TargetMachine.h>
44 #include "ac_binary.h"
45 #include "ac_llvm_util.h"
46 #include "ac_nir_to_llvm.h"
47 #include "vk_format.h"
48 #include "util/debug.h"
49 #include "ac_exp_param.h"
50 #include "ac_shader_util.h"
51 #include "main/menums.h"
struct radv_blend_state {
	uint32_t blend_enable_4bit;
	uint32_t need_src_alpha;

	uint32_t cb_color_control;
	uint32_t cb_target_mask;
	uint32_t cb_target_enabled_4bit;
	uint32_t sx_mrt_blend_opt[8];
	uint32_t cb_blend_control[8];

	uint32_t spi_shader_col_format;
	uint32_t cb_shader_mask;
	uint32_t db_alpha_to_mask;

	uint32_t commutative_4bit;

	bool single_cb_enable;
	bool mrt0_is_dual_src;
};
struct radv_dsa_order_invariance {
	/* Whether the final result in Z/S buffers is guaranteed to be
	 * invariant under changes to the order in which fragments arrive.
	 */
	bool zs;

	/* Whether the set of fragments that pass the combined Z/S test is
	 * guaranteed to be invariant under changes to the order in which
	 * fragments arrive.
	 */
	bool pass_set;
};
struct radv_tessellation_state {
	uint32_t ls_hs_config;
	unsigned num_patches;
	unsigned lds_size;
	uint32_t tf_param;
};
struct radv_gs_state {
	uint32_t vgt_gs_onchip_cntl;
	uint32_t vgt_gs_max_prims_per_subgroup;
	uint32_t vgt_esgs_ring_itemsize;
	uint32_t lds_size;
};
struct radv_ngg_state {
	uint16_t ngg_emit_size; /* in dwords */
	uint32_t hw_max_esverts;
	uint32_t max_gsprims;
	uint32_t max_out_verts;
	uint32_t prim_amp_factor;
	uint32_t vgt_esgs_ring_itemsize;
	bool max_vert_out_per_gs_instance;
};
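
/* Whether this pipeline uses the NGG (primitive shader) path. The flag is
 * taken from the last pre-rasterization stage that was compiled (GS, then
 * TES, then VS), since that stage is the one that runs as the hardware
 * geometry engine shader.
 */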
bool radv_pipeline_has_ngg(const struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *variant = NULL;
	if (pipeline->shaders[MESA_SHADER_GEOMETRY])
		variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
	else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
		variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
	else if (pipeline->shaders[MESA_SHADER_VERTEX])
		variant = pipeline->shaders[MESA_SHADER_VERTEX];
	else
		return false;
	return variant->info.is_ngg;
}
bool radv_pipeline_has_gs_copy_shader(const struct radv_pipeline *pipeline)
{
	if (!radv_pipeline_has_gs(pipeline))
		return false;

	/* The GS copy shader is required if the pipeline has GS on GFX6-GFX9.
	 * On GFX10, it might be required in rare cases if it's not possible to
	 * enable NGG.
	 */
	if (radv_pipeline_has_ngg(pipeline))
		return false;

	assert(pipeline->gs_copy_shader);
	return true;
}
static void
radv_pipeline_destroy(struct radv_device *device,
		      struct radv_pipeline *pipeline,
		      const VkAllocationCallbacks *allocator)
{
	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i)
		if (pipeline->shaders[i])
			radv_shader_variant_destroy(device, pipeline->shaders[i]);

	if (pipeline->gs_copy_shader)
		radv_shader_variant_destroy(device, pipeline->gs_copy_shader);

	free(pipeline->cs.buf);
	vk_free2(&device->alloc, allocator, pipeline);
}
void radv_DestroyPipeline(
	VkDevice                                    _device,
	VkPipeline                                  _pipeline,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	if (!pipeline)
		return;

	radv_pipeline_destroy(device, pipeline, pAllocator);
}
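
/* Debug/perftest options that change the generated code are folded into the
 * shader hash via these RADV_HASH_SHADER_* flags, so cached binaries built
 * with different settings are not reused for one another.
 */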
static uint32_t get_hash_flags(struct radv_device *device)
{
	uint32_t hash_flags = 0;

	if (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH)
		hash_flags |= RADV_HASH_SHADER_UNSAFE_MATH;
	if (device->instance->debug_flags & RADV_DEBUG_NO_NGG)
		hash_flags |= RADV_HASH_SHADER_NO_NGG;
	if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
		hash_flags |= RADV_HASH_SHADER_SISCHED;
	if (device->physical_device->cs_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_CS_WAVE32;
	if (device->physical_device->ps_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_PS_WAVE32;
	if (device->physical_device->ge_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_GE_WAVE32;
	return hash_flags;
}
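
/* Compute the per-wave scratch size and the wave limits for this pipeline.
 * The wave count per stage is bounded by the VGPR budget: each SIMD has 256
 * VGPRs, so a stage using N VGPRs can keep at most 256/N waves in flight per
 * SIMD (times 4 SIMDs per CU below).
 */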
static VkResult
radv_pipeline_scratch_init(struct radv_device *device,
                           struct radv_pipeline *pipeline)
{
	unsigned scratch_bytes_per_wave = 0;
	unsigned max_waves = 0;
	unsigned min_waves = 1;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i]) {
			unsigned max_stage_waves = device->scratch_waves;

			scratch_bytes_per_wave = MAX2(scratch_bytes_per_wave,
			                              pipeline->shaders[i]->config.scratch_bytes_per_wave);

			max_stage_waves = MIN2(max_stage_waves,
			          4 * device->physical_device->rad_info.num_good_compute_units *
			          (256 / pipeline->shaders[i]->config.num_vgprs));
			max_waves = MAX2(max_waves, max_stage_waves);
		}
	}

	if (pipeline->shaders[MESA_SHADER_COMPUTE]) {
		unsigned group_size = pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[0] *
		                      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[1] *
		                      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[2];
		min_waves = MAX2(min_waves, round_up_u32(group_size, 64));
	}

	if (scratch_bytes_per_wave)
		max_waves = MIN2(max_waves, 0xffffffffu / scratch_bytes_per_wave);

	if (scratch_bytes_per_wave && max_waves < min_waves) {
		/* Not really true at this moment, but will be true on first
		 * execution. Avoid having hanging shaders. */
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}
	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
	pipeline->max_waves = max_waves;
	return VK_SUCCESS;
}
static uint32_t si_translate_blend_logic_op(VkLogicOp op)
{
	switch (op) {
	case VK_LOGIC_OP_CLEAR:
		return V_028808_ROP3_CLEAR;
	case VK_LOGIC_OP_AND:
		return V_028808_ROP3_AND;
	case VK_LOGIC_OP_AND_REVERSE:
		return V_028808_ROP3_AND_REVERSE;
	case VK_LOGIC_OP_COPY:
		return V_028808_ROP3_COPY;
	case VK_LOGIC_OP_AND_INVERTED:
		return V_028808_ROP3_AND_INVERTED;
	case VK_LOGIC_OP_NO_OP:
		return V_028808_ROP3_NO_OP;
	case VK_LOGIC_OP_XOR:
		return V_028808_ROP3_XOR;
	case VK_LOGIC_OP_OR:
		return V_028808_ROP3_OR;
	case VK_LOGIC_OP_NOR:
		return V_028808_ROP3_NOR;
	case VK_LOGIC_OP_EQUIVALENT:
		return V_028808_ROP3_EQUIVALENT;
	case VK_LOGIC_OP_INVERT:
		return V_028808_ROP3_INVERT;
	case VK_LOGIC_OP_OR_REVERSE:
		return V_028808_ROP3_OR_REVERSE;
	case VK_LOGIC_OP_COPY_INVERTED:
		return V_028808_ROP3_COPY_INVERTED;
	case VK_LOGIC_OP_OR_INVERTED:
		return V_028808_ROP3_OR_INVERTED;
	case VK_LOGIC_OP_NAND:
		return V_028808_ROP3_NAND;
	case VK_LOGIC_OP_SET:
		return V_028808_ROP3_SET;
	default:
		unreachable("Unhandled logic op");
	}
}
static uint32_t si_translate_blend_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case VK_BLEND_OP_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case VK_BLEND_OP_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case VK_BLEND_OP_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		return 0;
	}
}
static uint32_t si_translate_blend_factor(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case VK_BLEND_FACTOR_ONE:
		return V_028780_BLEND_ONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case VK_BLEND_FACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case VK_BLEND_FACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case VK_BLEND_FACTOR_CONSTANT_COLOR:
		return V_028780_BLEND_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
		return V_028780_BLEND_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case VK_BLEND_FACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case VK_BLEND_FACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		return 0;
	}
}
static uint32_t si_translate_blend_opt_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028760_OPT_COMB_ADD;
	case VK_BLEND_OP_SUBTRACT:
		return V_028760_OPT_COMB_SUBTRACT;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028760_OPT_COMB_REVSUBTRACT;
	case VK_BLEND_OP_MIN:
		return V_028760_OPT_COMB_MIN;
	case VK_BLEND_OP_MAX:
		return V_028760_OPT_COMB_MAX;
	default:
		return V_028760_OPT_COMB_BLEND_DISABLED;
	}
}
static uint32_t si_translate_blend_opt_factor(VkBlendFactor factor, bool is_alpha)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_ALL;
	case VK_BLEND_FACTOR_ONE:
		return V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0
				: V_028760_BLEND_OPT_PRESERVE_C1_IGNORE_C0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1
				: V_028760_BLEND_OPT_PRESERVE_C0_IGNORE_C1;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE
				: V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;
	default:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
	}
}
/**
 * Get rid of DST in the blend factors by commuting the operands:
 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
 */
static void si_blend_remove_dst(unsigned *func, unsigned *src_factor,
				unsigned *dst_factor, unsigned expected_dst,
				unsigned replacement_src)
{
	if (*src_factor == expected_dst &&
	    *dst_factor == VK_BLEND_FACTOR_ZERO) {
		*src_factor = VK_BLEND_FACTOR_ZERO;
		*dst_factor = replacement_src;

		/* Commuting the operands requires reversing subtractions. */
		if (*func == VK_BLEND_OP_SUBTRACT)
			*func = VK_BLEND_OP_REVERSE_SUBTRACT;
		else if (*func == VK_BLEND_OP_REVERSE_SUBTRACT)
			*func = VK_BLEND_OP_SUBTRACT;
	}
}
static bool si_blend_factor_uses_dst(unsigned factor)
{
	return factor == VK_BLEND_FACTOR_DST_COLOR ||
	       factor == VK_BLEND_FACTOR_DST_ALPHA ||
	       factor == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
	       factor == VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA ||
	       factor == VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
}
static bool is_dual_src(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_SRC1_COLOR:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
	case VK_BLEND_FACTOR_SRC1_ALPHA:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return true;
	default:
		return false;
	}
}
static unsigned si_choose_spi_color_format(VkFormat vk_format,
					    bool blend_enable,
					    bool blend_need_alpha)
{
	const struct vk_format_description *desc = vk_format_description(vk_format);
	unsigned format, ntype, swap;

	/* Alpha is needed for alpha-to-coverage.
	 * Blending may be with or without alpha.
	 */
	unsigned normal = 0; /* most optimal, may not support blending or export alpha */
	unsigned alpha = 0; /* exports alpha, but may not support blending */
	unsigned blend = 0; /* supports blending, but may not export alpha */
	unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

	format = radv_translate_colorformat(vk_format);
	ntype = radv_translate_color_numformat(vk_format, desc,
					       vk_format_get_first_non_void_channel(vk_format));
	swap = radv_translate_colorswap(vk_format, false);

	/* Choose the SPI color formats. These are required values for Stoney/RB+.
	 * Other chips have multiple choices, though they are not necessarily better.
	 */
	switch (format) {
	case V_028C70_COLOR_5_6_5:
	case V_028C70_COLOR_1_5_5_5:
	case V_028C70_COLOR_5_5_5_1:
	case V_028C70_COLOR_4_4_4_4:
	case V_028C70_COLOR_10_11_11:
	case V_028C70_COLOR_11_11_10:
	case V_028C70_COLOR_8:
	case V_028C70_COLOR_8_8:
	case V_028C70_COLOR_8_8_8_8:
	case V_028C70_COLOR_10_10_10_2:
	case V_028C70_COLOR_2_10_10_10:
		if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_16:
	case V_028C70_COLOR_16_16:
	case V_028C70_COLOR_16_16_16_16:
		if (ntype == V_028C70_NUMBER_UNORM ||
		    ntype == V_028C70_NUMBER_SNORM) {
			/* UNORM16 and SNORM16 don't support blending */
			if (ntype == V_028C70_NUMBER_UNORM)
				normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
			else
				normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

			/* Use 32 bits per channel for blending. */
			if (format == V_028C70_COLOR_16) {
				if (swap == V_028C70_SWAP_STD) { /* R */
					blend = V_028714_SPI_SHADER_32_R;
					blend_alpha = V_028714_SPI_SHADER_32_AR;
				} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else if (format == V_028C70_COLOR_16_16) {
				if (swap == V_028C70_SWAP_STD) { /* RG */
					blend = V_028714_SPI_SHADER_32_GR;
					blend_alpha = V_028714_SPI_SHADER_32_ABGR;
				} else if (swap == V_028C70_SWAP_ALT) /* RA */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else /* 16_16_16_16 */
				blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_FLOAT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32:
		if (swap == V_028C70_SWAP_STD) { /* R */
			blend = normal = V_028714_SPI_SHADER_32_R;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
		} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32:
		if (swap == V_028C70_SWAP_STD) { /* RG */
			blend = normal = V_028714_SPI_SHADER_32_GR;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (swap == V_028C70_SWAP_ALT) /* RA */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32_32_32:
	case V_028C70_COLOR_8_24:
	case V_028C70_COLOR_24_8:
	case V_028C70_COLOR_X24_8_32_FLOAT:
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
		break;

	default:
		unreachable("unhandled blend format");
	}

	if (blend_enable && blend_need_alpha)
		return blend_alpha;
	else if (blend_need_alpha)
		return alpha;
	else if (blend_enable)
		return blend;
	else
		return normal;
}
static void
radv_pipeline_compute_spi_color_formats(struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					struct radv_blend_state *blend)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned col_format = 0;
	unsigned num_targets;

	for (unsigned i = 0; i < (blend->single_cb_enable ? 1 : subpass->color_count); ++i) {
		unsigned cf;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
			cf = V_028714_SPI_SHADER_ZERO;
		} else {
			struct radv_render_pass_attachment *attachment = pass->attachments + subpass->color_attachments[i].attachment;
			bool blend_enable =
				blend->blend_enable_4bit & (0xfu << (i * 4));

			cf = si_choose_spi_color_format(attachment->format,
							blend_enable,
							blend->need_src_alpha & (1 << i));
		}

		col_format |= cf << (4 * i);
	}

	if (!(col_format & 0xf) && blend->need_src_alpha & (1 << 0)) {
		/* When a subpass doesn't have any color attachments, write the
		 * alpha channel of MRT0 when alpha coverage is enabled because
		 * the depth attachment needs it.
		 */
		col_format |= V_028714_SPI_SHADER_32_AR;
	}

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	num_targets = (util_last_bit(col_format) + 3) / 4;
	for (unsigned i = 0; i < num_targets; i++) {
		if (!(col_format & (0xf << (i * 4)))) {
			col_format |= V_028714_SPI_SHADER_32_R << (i * 4);
		}
	}

	/* The output for dual source blending should have the same format as
	 * the first output.
	 */
	if (blend->mrt0_is_dual_src)
		col_format |= (col_format & 0xf) << 4;

	blend->cb_shader_mask = ac_get_cb_shader_mask(col_format);
	blend->spi_shader_col_format = col_format;
}
static bool
format_is_int8(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);
	int channel = vk_format_get_first_non_void_channel(format);

	return channel >= 0 && desc->channel[channel].pure_integer &&
	       desc->channel[channel].size == 8;
}
static bool
format_is_int10(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);

	if (desc->nr_channels != 4)
		return false;
	for (unsigned i = 0; i < 4; i++) {
		if (desc->channel[i].pure_integer && desc->channel[i].size == 10)
			return true;
	}
	return false;
}
/*
 * Ordered so that for each i,
 * radv_format_meta_fs_key(radv_fs_key_format_exemplars[i]) == i.
 */
const VkFormat radv_fs_key_format_exemplars[NUM_META_FS_KEYS] = {
	VK_FORMAT_R32_SFLOAT,
	VK_FORMAT_R32G32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UNORM,
	VK_FORMAT_R16G16B16A16_UNORM,
	VK_FORMAT_R16G16B16A16_SNORM,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_A2R10G10B10_UINT_PACK32,
	VK_FORMAT_A2R10G10B10_SINT_PACK32,
};
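
/* The meta FS key is derived from the SPI color export format chosen for the
 * attachment format: the base V_028714_* value (with SPI_SHADER_ZERO and the
 * unreachable SPI_SHADER_32_AR skipped), plus an offset of 3 for int8 formats
 * and 5 for int10 formats, matching the ordering of
 * radv_fs_key_format_exemplars above.
 */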
unsigned radv_format_meta_fs_key(VkFormat format)
{
	unsigned col_format = si_choose_spi_color_format(format, false, false);

	assert(col_format != V_028714_SPI_SHADER_32_AR);
	if (col_format >= V_028714_SPI_SHADER_32_AR)
		--col_format; /* Skip V_028714_SPI_SHADER_32_AR since there is no such VkFormat */

	--col_format; /* Skip V_028714_SPI_SHADER_ZERO */
	bool is_int8 = format_is_int8(format);
	bool is_int10 = format_is_int10(format);

	return col_format + (is_int8 ? 3 : is_int10 ? 5 : 0);
}
static void
radv_pipeline_compute_get_int_clamp(const VkGraphicsPipelineCreateInfo *pCreateInfo,
				    unsigned *is_int8, unsigned *is_int10)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	*is_int8 = 0;
	*is_int10 = 0;

	for (unsigned i = 0; i < subpass->color_count; ++i) {
		struct radv_render_pass_attachment *attachment;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
			continue;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		if (format_is_int8(attachment->format))
			*is_int8 |= 1 << i;
		if (format_is_int10(attachment->format))
			*is_int10 |= 1 << i;
	}
}
static void
radv_blend_check_commutativity(struct radv_blend_state *blend,
			       VkBlendOp op, VkBlendFactor src,
			       VkBlendFactor dst, unsigned chanmask)
{
	/* Src factor is allowed when it does not depend on Dst. */
	static const uint32_t src_allowed =
		(1u << VK_BLEND_FACTOR_ONE) |
		(1u << VK_BLEND_FACTOR_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA_SATURATE) |
		(1u << VK_BLEND_FACTOR_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC1_ALPHA) |
		(1u << VK_BLEND_FACTOR_ZERO) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);

	if (dst == VK_BLEND_FACTOR_ONE &&
	    (src_allowed & (1u << src))) {
		/* Addition is commutative, but floating point addition isn't
		 * associative: subtle changes can be introduced via different
		 * rounding. Be conservative, only enable for min and max.
		 */
		if (op == VK_BLEND_OP_MAX || op == VK_BLEND_OP_MIN)
			blend->commutative_4bit |= chanmask;
	}
}
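
/* Translate VkPipelineColorBlendStateCreateInfo into the CB_* and
 * SX_MRT*_BLEND_OPT register values, including the RB+ blend optimizations
 * and the SPI color export formats.
 */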
static struct radv_blend_state
radv_pipeline_init_blend_state(struct radv_pipeline *pipeline,
			       const VkGraphicsPipelineCreateInfo *pCreateInfo,
			       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_blend_state blend = {0};
	unsigned mode = V_028808_CB_NORMAL;
	int i;

	if (!vkblend)
		return blend;

	if (extra && extra->custom_blend_mode) {
		blend.single_cb_enable = true;
		mode = extra->custom_blend_mode;
	}
	blend.cb_color_control = 0;
	if (vkblend->logicOpEnable)
		blend.cb_color_control |= S_028808_ROP3(si_translate_blend_logic_op(vkblend->logicOp));
	else
		blend.cb_color_control |= S_028808_ROP3(V_028808_ROP3_COPY);

	blend.db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(3) |
		S_028B70_ALPHA_TO_MASK_OFFSET1(1) |
		S_028B70_ALPHA_TO_MASK_OFFSET2(0) |
		S_028B70_ALPHA_TO_MASK_OFFSET3(2) |
		S_028B70_OFFSET_ROUND(1);

	if (vkms && vkms->alphaToCoverageEnable) {
		blend.db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);
		blend.need_src_alpha |= 0x1;
	}

	blend.cb_target_mask = 0;
	for (i = 0; i < vkblend->attachmentCount; i++) {
		const VkPipelineColorBlendAttachmentState *att = &vkblend->pAttachments[i];
		unsigned blend_cntl = 0;
		unsigned srcRGB_opt, dstRGB_opt, srcA_opt, dstA_opt;
		VkBlendOp eqRGB = att->colorBlendOp;
		VkBlendFactor srcRGB = att->srcColorBlendFactor;
		VkBlendFactor dstRGB = att->dstColorBlendFactor;
		VkBlendOp eqA = att->alphaBlendOp;
		VkBlendFactor srcA = att->srcAlphaBlendFactor;
		VkBlendFactor dstA = att->dstAlphaBlendFactor;

		blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);

		if (!att->colorWriteMask)
			continue;

		blend.cb_target_mask |= (unsigned)att->colorWriteMask << (4 * i);
		blend.cb_target_enabled_4bit |= 0xf << (4 * i);
		if (!att->blendEnable) {
			blend.cb_blend_control[i] = blend_cntl;
			continue;
		}

		if (is_dual_src(srcRGB) || is_dual_src(dstRGB) || is_dual_src(srcA) || is_dual_src(dstA))
			if (i == 0)
				blend.mrt0_is_dual_src = true;

		if (eqRGB == VK_BLEND_OP_MIN || eqRGB == VK_BLEND_OP_MAX) {
			srcRGB = VK_BLEND_FACTOR_ONE;
			dstRGB = VK_BLEND_FACTOR_ONE;
		}
		if (eqA == VK_BLEND_OP_MIN || eqA == VK_BLEND_OP_MAX) {
			srcA = VK_BLEND_FACTOR_ONE;
			dstA = VK_BLEND_FACTOR_ONE;
		}

		radv_blend_check_commutativity(&blend, eqRGB, srcRGB, dstRGB,
					       0x7 << (4 * i));
		radv_blend_check_commutativity(&blend, eqA, srcA, dstA,
					       0x8 << (4 * i));

		/* Blending optimizations for RB+.
		 * These transformations don't change the behavior.
		 *
		 * First, get rid of DST in the blend factors:
		 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
		 */
		si_blend_remove_dst(&eqRGB, &srcRGB, &dstRGB,
				    VK_BLEND_FACTOR_DST_COLOR,
				    VK_BLEND_FACTOR_SRC_COLOR);

		si_blend_remove_dst(&eqA, &srcA, &dstA,
				    VK_BLEND_FACTOR_DST_COLOR,
				    VK_BLEND_FACTOR_SRC_COLOR);

		si_blend_remove_dst(&eqA, &srcA, &dstA,
				    VK_BLEND_FACTOR_DST_ALPHA,
				    VK_BLEND_FACTOR_SRC_ALPHA);

		/* Look up the ideal settings from tables. */
		srcRGB_opt = si_translate_blend_opt_factor(srcRGB, false);
		dstRGB_opt = si_translate_blend_opt_factor(dstRGB, false);
		srcA_opt = si_translate_blend_opt_factor(srcA, true);
		dstA_opt = si_translate_blend_opt_factor(dstA, true);

		/* Handle interdependencies. */
		if (si_blend_factor_uses_dst(srcRGB))
			dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
		if (si_blend_factor_uses_dst(srcA))
			dstA_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE &&
		    (dstRGB == VK_BLEND_FACTOR_ZERO ||
		     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE))
			dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;

		/* Set the final value. */
		blend.sx_mrt_blend_opt[i] =
			S_028760_COLOR_SRC_OPT(srcRGB_opt) |
			S_028760_COLOR_DST_OPT(dstRGB_opt) |
			S_028760_COLOR_COMB_FCN(si_translate_blend_opt_function(eqRGB)) |
			S_028760_ALPHA_SRC_OPT(srcA_opt) |
			S_028760_ALPHA_DST_OPT(dstA_opt) |
			S_028760_ALPHA_COMB_FCN(si_translate_blend_opt_function(eqA));
		blend_cntl |= S_028780_ENABLE(1);

		blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
		blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(srcRGB));
		blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(dstRGB));
		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
			blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
			blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
			blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(srcA));
			blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(dstA));
		}
		blend.cb_blend_control[i] = blend_cntl;

		blend.blend_enable_4bit |= 0xfu << (i * 4);

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    srcRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
			blend.need_src_alpha |= 1 << i;
	}
	for (i = vkblend->attachmentCount; i < 8; i++) {
		blend.cb_blend_control[i] = 0;
		blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);
	}

	if (pipeline->device->physical_device->rad_info.has_rbplus) {
		/* Disable RB+ blend optimizations for dual source blending. */
		if (blend.mrt0_is_dual_src) {
			for (i = 0; i < 8; i++) {
				blend.sx_mrt_blend_opt[i] =
					S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_NONE) |
					S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_NONE);
			}
		}

		/* RB+ doesn't work with dual source blending, logic op and
		 * RESOLVE.
		 */
		if (blend.mrt0_is_dual_src || vkblend->logicOpEnable ||
		    mode == V_028808_CB_RESOLVE)
			blend.cb_color_control |= S_028808_DISABLE_DUAL_QUAD(1);
	}

	if (blend.cb_target_mask)
		blend.cb_color_control |= S_028808_MODE(mode);
	else
		blend.cb_color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	radv_pipeline_compute_spi_color_formats(pipeline, pCreateInfo, &blend);
	return blend;
}
static uint32_t si_translate_stencil_op(enum VkStencilOp op)
{
	switch (op) {
	case VK_STENCIL_OP_KEEP:
		return V_02842C_STENCIL_KEEP;
	case VK_STENCIL_OP_ZERO:
		return V_02842C_STENCIL_ZERO;
	case VK_STENCIL_OP_REPLACE:
		return V_02842C_STENCIL_REPLACE_TEST;
	case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
		return V_02842C_STENCIL_ADD_CLAMP;
	case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
		return V_02842C_STENCIL_SUB_CLAMP;
	case VK_STENCIL_OP_INVERT:
		return V_02842C_STENCIL_INVERT;
	case VK_STENCIL_OP_INCREMENT_AND_WRAP:
		return V_02842C_STENCIL_ADD_WRAP;
	case VK_STENCIL_OP_DECREMENT_AND_WRAP:
		return V_02842C_STENCIL_SUB_WRAP;
	default:
		return 0;
	}
}
static uint32_t si_translate_fill(VkPolygonMode func)
{
	switch (func) {
	case VK_POLYGON_MODE_FILL:
		return V_028814_X_DRAW_TRIANGLES;
	case VK_POLYGON_MODE_LINE:
		return V_028814_X_DRAW_LINES;
	case VK_POLYGON_MODE_POINT:
		return V_028814_X_DRAW_POINTS;
	default:
		assert(0);
		return V_028814_X_DRAW_POINTS;
	}
}
static uint8_t radv_pipeline_get_ps_iter_samples(const VkPipelineMultisampleStateCreateInfo *vkms)
{
	uint32_t num_samples = vkms->rasterizationSamples;
	uint32_t ps_iter_samples = 1;

	if (vkms->sampleShadingEnable) {
		ps_iter_samples = ceil(vkms->minSampleShading * num_samples);
		ps_iter_samples = util_next_power_of_two(ps_iter_samples);
	}
	return ps_iter_samples;
}
static bool
radv_is_depth_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->depthTestEnable &&
	       pCreateInfo->depthWriteEnable &&
	       pCreateInfo->depthCompareOp != VK_COMPARE_OP_NEVER;
}

static bool
radv_writes_stencil(const VkStencilOpState *state)
{
	return state->writeMask &&
	       (state->failOp != VK_STENCIL_OP_KEEP ||
		state->passOp != VK_STENCIL_OP_KEEP ||
		state->depthFailOp != VK_STENCIL_OP_KEEP);
}

static bool
radv_is_stencil_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->stencilTestEnable &&
	       (radv_writes_stencil(&pCreateInfo->front) ||
		radv_writes_stencil(&pCreateInfo->back));
}

static bool
radv_is_ds_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return radv_is_depth_write_enabled(pCreateInfo) ||
	       radv_is_stencil_write_enabled(pCreateInfo);
}

static bool
radv_order_invariant_stencil_op(VkStencilOp op)
{
	/* REPLACE is normally order invariant, except when the stencil
	 * reference value is written by the fragment shader. Tracking this
	 * interaction does not seem worth the effort, so be conservative.
	 */
	return op != VK_STENCIL_OP_INCREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_DECREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_REPLACE;
}

static bool
radv_order_invariant_stencil_state(const VkStencilOpState *state)
{
	/* Compute whether, assuming Z writes are disabled, this stencil state
	 * is order invariant in the sense that the set of passing fragments as
	 * well as the final stencil buffer result does not depend on the order
	 * of fragments.
	 */
	return !state->writeMask ||
	       /* The following assumes that Z writes are disabled. */
	       (state->compareOp == VK_COMPARE_OP_ALWAYS &&
		radv_order_invariant_stencil_op(state->passOp) &&
		radv_order_invariant_stencil_op(state->depthFailOp)) ||
	       (state->compareOp == VK_COMPARE_OP_NEVER &&
		radv_order_invariant_stencil_op(state->failOp));
}
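
/* Decide whether the driver can enable out-of-order rasterization on its own.
 * This is only safe when the final Z/S result and the set of passing fragments
 * are order invariant, and when any enabled blending is commutative (see
 * radv_blend_check_commutativity).
 */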
static bool
radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
				struct radv_blend_state *blend,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned colormask = blend->cb_target_enabled_4bit;

	if (!pipeline->device->physical_device->out_of_order_rast_allowed)
		return false;

	/* Be conservative if a logic operation is enabled with color buffers. */
	if (colormask && pCreateInfo->pColorBlendState->logicOpEnable)
		return false;

	/* Default depth/stencil invariance when no attachment is bound. */
	struct radv_dsa_order_invariance dsa_order_invariant = {
		.zs = true, .pass_set = true
	};

	if (pCreateInfo->pDepthStencilState &&
	    subpass->depth_stencil_attachment) {
		const VkPipelineDepthStencilStateCreateInfo *vkds =
			pCreateInfo->pDepthStencilState;
		struct radv_render_pass_attachment *attachment =
			pass->attachments + subpass->depth_stencil_attachment->attachment;
		bool has_stencil = vk_format_is_stencil(attachment->format);
		struct radv_dsa_order_invariance order_invariance[2];
		struct radv_shader_variant *ps =
			pipeline->shaders[MESA_SHADER_FRAGMENT];

		/* Compute depth/stencil order invariance in order to know if
		 * it's safe to enable out-of-order.
		 */
		bool zfunc_is_ordered =
			vkds->depthCompareOp == VK_COMPARE_OP_NEVER ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS_OR_EQUAL ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER_OR_EQUAL;

		bool nozwrite_and_order_invariant_stencil =
			!radv_is_ds_write_enabled(vkds) ||
			(!radv_is_depth_write_enabled(vkds) &&
			 radv_order_invariant_stencil_state(&vkds->front) &&
			 radv_order_invariant_stencil_state(&vkds->back));

		order_invariance[1].zs =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 zfunc_is_ordered);
		order_invariance[0].zs =
			!radv_is_depth_write_enabled(vkds) || zfunc_is_ordered;

		order_invariance[1].pass_set =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 (vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			  vkds->depthCompareOp == VK_COMPARE_OP_NEVER));
		order_invariance[0].pass_set =
			!radv_is_depth_write_enabled(vkds) ||
			(vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			 vkds->depthCompareOp == VK_COMPARE_OP_NEVER);

		dsa_order_invariant = order_invariance[has_stencil];
		if (!dsa_order_invariant.zs)
			return false;

		/* The set of PS invocations is always order invariant,
		 * except when early Z/S tests are requested.
		 */
		if (ps &&
		    ps->info.ps.writes_memory &&
		    ps->info.ps.early_fragment_test &&
		    !dsa_order_invariant.pass_set)
			return false;

		/* Determine if out-of-order rasterization should be disabled
		 * when occlusion queries are used.
		 */
		pipeline->graphics.disable_out_of_order_rast_for_occlusion =
			!dsa_order_invariant.pass_set;
	}

	/* No color buffers are enabled for writing. */
	if (!colormask)
		return true;

	unsigned blendmask = colormask & blend->blend_enable_4bit;

	if (blendmask) {
		/* Only commutative blending. */
		if (blendmask & ~blend->commutative_4bit)
			return false;

		if (!dsa_order_invariant.pass_set)
			return false;
	}

	if (colormask & ~blendmask)
		return false;

	return true;
}
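
/* Fill the PA_SC_ and DB_EQAA multisample registers and decide whether
 * out-of-order rasterization is enabled, either explicitly through
 * VK_AMD_rasterization_order or automatically by the driver.
 */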
static void
radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
				     struct radv_blend_state *blend,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	unsigned num_tile_pipes = pipeline->device->physical_device->rad_info.num_tile_pipes;
	bool out_of_order_rast = false;
	int ps_iter_samples = 1;
	uint32_t mask = 0xffff;

	if (vkms)
		ms->num_samples = vkms->rasterizationSamples;
	else
		ms->num_samples = 1;

	if (vkms)
		ps_iter_samples = radv_pipeline_get_ps_iter_samples(vkms);
	if (vkms && !vkms->sampleShadingEnable && pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.force_persample) {
		ps_iter_samples = ms->num_samples;
	}

	const struct VkPipelineRasterizationStateRasterizationOrderAMD *raster_order =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext, PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD);
	if (raster_order && raster_order->rasterizationOrder == VK_RASTERIZATION_ORDER_RELAXED_AMD) {
		/* Out-of-order rasterization is explicitly enabled by the
		 * application.
		 */
		out_of_order_rast = true;
	} else {
		/* Determine if the driver can enable out-of-order
		 * rasterization internally.
		 */
		out_of_order_rast =
			radv_pipeline_out_of_order_rast(pipeline, blend, pCreateInfo);
	}

	ms->pa_sc_line_cntl = S_028BDC_DX10_DIAMOND_TEST_ENA(1);
	ms->pa_sc_aa_config = 0;
	ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
		      S_028804_INCOHERENT_EQAA_READS(1) |
		      S_028804_INTERPOLATE_COMP_Z(1) |
		      S_028804_STATIC_ANCHOR_ASSOCIATIONS(1);
	ms->pa_sc_mode_cntl_1 =
		S_028A4C_WALK_FENCE_ENABLE(1) | //TODO linear dst fixes
		S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
		S_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(out_of_order_rast) |
		S_028A4C_OUT_OF_ORDER_WATER_MARK(0x7) |
		/* always 1: */
		S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1) |
		S_028A4C_SUPERTILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_TILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE(1) |
		S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		S_028A4C_FORCE_EOV_REZ_ENABLE(1);
	ms->pa_sc_mode_cntl_0 = S_028A48_ALTERNATE_RBS_PER_TILE(pipeline->device->physical_device->rad_info.chip_class >= GFX9) |
	                        S_028A48_VPORT_SCISSOR_ENABLE(1);

	if (ms->num_samples > 1) {
		unsigned log_samples = util_logbase2(ms->num_samples);
		unsigned log_ps_iter_samples = util_logbase2(ps_iter_samples);
		ms->pa_sc_mode_cntl_0 |= S_028A48_MSAA_ENABLE(1);
		ms->pa_sc_line_cntl |= S_028BDC_EXPAND_LINE_WIDTH(1); /* CM_R_028BDC_PA_SC_LINE_CNTL */
		ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_samples) |
			S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
			S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
			S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
		ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
			S_028BE0_MAX_SAMPLE_DIST(radv_get_default_max_sample_dist(log_samples)) |
			S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples); /* CM_R_028BE0_PA_SC_AA_CONFIG */
		ms->pa_sc_mode_cntl_1 |= S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
		if (ps_iter_samples > 1)
			pipeline->graphics.spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
	}

	if (vkms && vkms->pSampleMask) {
		mask = vkms->pSampleMask[0] & 0xffff;
	}

	ms->pa_sc_aa_mask[0] = mask | (mask << 16);
	ms->pa_sc_aa_mask[1] = mask | (mask << 16);
}
static bool
radv_prim_can_use_guardband(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return false;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return true;
	default:
		unreachable("unhandled primitive type");
	}
}
static uint32_t
si_translate_prim(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
		return V_008958_DI_PT_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
		return V_008958_DI_PT_LINELIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
		return V_008958_DI_PT_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
		return V_008958_DI_PT_TRILIST;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
		return V_008958_DI_PT_TRISTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
		return V_008958_DI_PT_TRIFAN;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_LINELIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_LINESTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_TRILIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_TRISTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_008958_DI_PT_PATCH;
	default:
		assert(0);
		return 0;
	}
}
static uint32_t
si_conv_gl_prim_to_gs_out(unsigned gl_prim)
{
	switch (gl_prim) {
	case 0: /* GL_POINTS */
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case 1: /* GL_LINES */
	case 3: /* GL_LINE_STRIP */
	case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
	case 0x8E7A: /* GL_ISOLINES */
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;

	case 4: /* GL_TRIANGLES */
	case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
	case 5: /* GL_TRIANGLE_STRIP */
	case 7: /* GL_QUADS */
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}
static uint32_t
si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}
static unsigned radv_dynamic_state_mask(VkDynamicState state)
{
	switch (state) {
	case VK_DYNAMIC_STATE_VIEWPORT:
		return RADV_DYNAMIC_VIEWPORT;
	case VK_DYNAMIC_STATE_SCISSOR:
		return RADV_DYNAMIC_SCISSOR;
	case VK_DYNAMIC_STATE_LINE_WIDTH:
		return RADV_DYNAMIC_LINE_WIDTH;
	case VK_DYNAMIC_STATE_DEPTH_BIAS:
		return RADV_DYNAMIC_DEPTH_BIAS;
	case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
		return RADV_DYNAMIC_BLEND_CONSTANTS;
	case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
		return RADV_DYNAMIC_DEPTH_BOUNDS;
	case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
		return RADV_DYNAMIC_STENCIL_COMPARE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
		return RADV_DYNAMIC_STENCIL_WRITE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
		return RADV_DYNAMIC_STENCIL_REFERENCE;
	case VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT:
		return RADV_DYNAMIC_DISCARD_RECTANGLE;
	case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
		return RADV_DYNAMIC_SAMPLE_LOCATIONS;
	default:
		unreachable("Unhandled dynamic state");
	}
}
static uint32_t radv_pipeline_needed_dynamic_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t states = RADV_DYNAMIC_ALL;

	/* If rasterization is disabled we do not care about any of the dynamic states,
	 * since they are all rasterization related only. */
	if (pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
		return 0;

	if (!pCreateInfo->pRasterizationState->depthBiasEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BIAS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->depthBoundsTestEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BOUNDS;

	if (!pCreateInfo->pDepthStencilState ||
	    !pCreateInfo->pDepthStencilState->stencilTestEnable)
		states &= ~(RADV_DYNAMIC_STENCIL_COMPARE_MASK |
			    RADV_DYNAMIC_STENCIL_WRITE_MASK |
			    RADV_DYNAMIC_STENCIL_REFERENCE);

	if (!vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_DISCARD_RECTANGLE;

	if (!pCreateInfo->pMultisampleState ||
	    !vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
				  PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_SAMPLE_LOCATIONS;

	/* TODO: blend constants & line width. */

	return states;
}
static void
radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
				 const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t needed_states = radv_pipeline_needed_dynamic_state(pCreateInfo);
	uint32_t states = needed_states;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

	pipeline->dynamic_state = default_dynamic_state;
	pipeline->graphics.needed_dynamic_state = needed_states;

	if (pCreateInfo->pDynamicState) {
		/* Remove all of the states that are marked as dynamic */
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t s = 0; s < count; s++)
			states &= ~radv_dynamic_state_mask(pCreateInfo->pDynamicState->pDynamicStates[s]);
	}

	struct radv_dynamic_state *dynamic = &pipeline->dynamic_state;

	if (needed_states & RADV_DYNAMIC_VIEWPORT) {
		assert(pCreateInfo->pViewportState);

		dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
		if (states & RADV_DYNAMIC_VIEWPORT) {
			typed_memcpy(dynamic->viewport.viewports,
				     pCreateInfo->pViewportState->pViewports,
				     pCreateInfo->pViewportState->viewportCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SCISSOR) {
		dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
		if (states & RADV_DYNAMIC_SCISSOR) {
			typed_memcpy(dynamic->scissor.scissors,
				     pCreateInfo->pViewportState->pScissors,
				     pCreateInfo->pViewportState->scissorCount);
		}
	}

	if (states & RADV_DYNAMIC_LINE_WIDTH) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
	}

	if (states & RADV_DYNAMIC_DEPTH_BIAS) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->depth_bias.bias =
			pCreateInfo->pRasterizationState->depthBiasConstantFactor;
		dynamic->depth_bias.clamp =
			pCreateInfo->pRasterizationState->depthBiasClamp;
		dynamic->depth_bias.slope =
			pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
	}

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pColorBlendState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is
	 *    created against does not use any color attachments.
	 */
	if (subpass->has_color_att && states & RADV_DYNAMIC_BLEND_CONSTANTS) {
		assert(pCreateInfo->pColorBlendState);
		typed_memcpy(dynamic->blend_constants,
			     pCreateInfo->pColorBlendState->blendConstants, 4);
	}

	/* If there is no depthstencil attachment, then don't read
	 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
	 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
	 * no need to override the depthstencil defaults in
	 * radv_pipeline::dynamic_state when there is no depthstencil attachment.
	 *
	 * Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pDepthStencilState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is created
	 *    against does not use a depth/stencil attachment.
	 */
	if (needed_states && subpass->depth_stencil_attachment) {
		assert(pCreateInfo->pDepthStencilState);

		if (states & RADV_DYNAMIC_DEPTH_BOUNDS) {
			dynamic->depth_bounds.min =
				pCreateInfo->pDepthStencilState->minDepthBounds;
			dynamic->depth_bounds.max =
				pCreateInfo->pDepthStencilState->maxDepthBounds;
		}

		if (states & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
			dynamic->stencil_compare_mask.front =
				pCreateInfo->pDepthStencilState->front.compareMask;
			dynamic->stencil_compare_mask.back =
				pCreateInfo->pDepthStencilState->back.compareMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
			dynamic->stencil_write_mask.front =
				pCreateInfo->pDepthStencilState->front.writeMask;
			dynamic->stencil_write_mask.back =
				pCreateInfo->pDepthStencilState->back.writeMask;
		}

		if (states & RADV_DYNAMIC_STENCIL_REFERENCE) {
			dynamic->stencil_reference.front =
				pCreateInfo->pDepthStencilState->front.reference;
			dynamic->stencil_reference.back =
				pCreateInfo->pDepthStencilState->back.reference;
		}
	}

	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
	if (needed_states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
		dynamic->discard_rectangle.count = discard_rectangle_info->discardRectangleCount;
		if (states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
			typed_memcpy(dynamic->discard_rectangle.rectangles,
				     discard_rectangle_info->pDiscardRectangles,
				     discard_rectangle_info->discardRectangleCount);
		}
	}

	if (needed_states & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
		const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_info =
			vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
					     PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
		/* If sampleLocationsEnable is VK_FALSE, the default sample
		 * locations are used and the values specified in
		 * sampleLocationsInfo are ignored.
		 */
		if (sample_location_info->sampleLocationsEnable) {
			const VkSampleLocationsInfoEXT *pSampleLocationsInfo =
				&sample_location_info->sampleLocationsInfo;

			assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);

			dynamic->sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
			dynamic->sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
			dynamic->sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
			typed_memcpy(&dynamic->sample_location.locations[0],
				     pSampleLocationsInfo->pSampleLocations,
				     pSampleLocationsInfo->sampleLocationsCount);
		}
	}

	pipeline->dynamic_state.mask = states;
}
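
/* Size the on-chip (LDS-based) ESGS subgroups for the legacy GS path: pick
 * ES_VERTS_PER_SUBGRP / GS_PRIMS_PER_SUBGRP so that the worst-case ESGS ring
 * usage fits in the LDS budget shared with other stages.
 */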
static struct radv_gs_state
calculate_gs_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
		  const struct radv_pipeline *pipeline)
{
	struct radv_gs_state gs = {0};
	struct radv_shader_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
	struct radv_es_output_info *es_info;
	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		es_info = radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	else
		es_info = radv_pipeline_has_tess(pipeline) ?
			&pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.es_info :
			&pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.es_info;

	unsigned gs_num_invocations = MAX2(gs_info->gs.invocations, 1);
	bool uses_adjacency;
	switch (pCreateInfo->pInputAssemblyState->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space. */
	const unsigned max_lds_size = 8 * 1024;
	const unsigned esgs_itemsize = es_info->esgs_itemsize / 4;
	unsigned esgs_lds_size;

	/* All these are per subgroup: */
	const unsigned max_out_prims = 32 * 1024;
	const unsigned max_es_verts = 255;
	const unsigned ideal_gs_prims = 64;
	unsigned max_gs_prims, gs_prims;
	unsigned min_es_verts, es_verts, worst_case_es_verts;

	if (uses_adjacency || gs_num_invocations > 1)
		max_gs_prims = 127 / gs_num_invocations;
	else
		max_gs_prims = 255;

	/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
	 * Make sure we don't go over the maximum value.
	 */
	if (gs_info->gs.vertices_out > 0) {
		max_gs_prims = MIN2(max_gs_prims,
				    max_out_prims /
				    (gs_info->gs.vertices_out * gs_num_invocations));
	}
	assert(max_gs_prims > 0);

	/* If the primitive has adjacency, halve the number of vertices
	 * that will be reused in multiple primitives.
	 */
	min_es_verts = gs_info->gs.vertices_in / (uses_adjacency ? 2 : 1);

	gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
	worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

	/* Compute ESGS LDS size based on the worst case number of ES vertices
	 * needed to create the target number of GS prims per subgroup.
	 */
	esgs_lds_size = esgs_itemsize * worst_case_es_verts;

	/* If total LDS usage is too big, refactor partitions based on ratio
	 * of ESGS item sizes.
	 */
	if (esgs_lds_size > max_lds_size) {
		/* Our target GS Prims Per Subgroup was too large. Calculate
		 * the maximum number of GS Prims Per Subgroup that will fit
		 * into LDS, capped by the maximum that the hardware can support.
		 */
		gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
				max_gs_prims);
		assert(gs_prims > 0);
		worst_case_es_verts = MIN2(min_es_verts * gs_prims,
					   max_es_verts);

		esgs_lds_size = esgs_itemsize * worst_case_es_verts;
		assert(esgs_lds_size <= max_lds_size);
	}

	/* Now calculate remaining ESGS information. */
	if (esgs_lds_size)
		es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
	else
		es_verts = max_es_verts;

	/* Vertices for adjacency primitives are not always reused, so restore
	 * it for ES_VERTS_PER_SUBGRP.
	 */
	min_es_verts = gs_info->gs.vertices_in;

	/* For normal primitives, the VGT only checks if they are past the ES
	 * verts per subgroup after allocating a full GS primitive and if they
	 * are, kick off a new subgroup. But if those additional ES verts are
	 * unique (e.g. not reused) we need to make sure there is enough LDS
	 * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
	 */
	es_verts -= min_es_verts - 1;

	uint32_t es_verts_per_subgroup = es_verts;
	uint32_t gs_prims_per_subgroup = gs_prims;
	uint32_t gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
	uint32_t max_prims_per_subgroup = gs_inst_prims_in_subgroup * gs_info->gs.vertices_out;
	gs.lds_size = align(esgs_lds_size, 128) / 128;
	gs.vgt_gs_onchip_cntl = S_028A44_ES_VERTS_PER_SUBGRP(es_verts_per_subgroup) |
				S_028A44_GS_PRIMS_PER_SUBGRP(gs_prims_per_subgroup) |
				S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_inst_prims_in_subgroup);
	gs.vgt_gs_max_prims_per_subgroup = S_028A94_MAX_PRIMS_PER_SUBGROUP(max_prims_per_subgroup);
	gs.vgt_esgs_ring_itemsize = esgs_itemsize;
	assert(max_prims_per_subgroup <= max_out_prims);

	return gs;
}
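
/* With vertex reuse, each additional GS prim needs at most one new ES vertex
 * beyond the first primitive, so GS prims are clamped to
 * 1 + (max_esverts - min_verts_per_prim).
 */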
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
static unsigned
radv_get_num_input_vertices(struct radv_pipeline *pipeline)
{
	if (radv_pipeline_has_gs(pipeline)) {
		struct radv_shader_variant *gs =
			radv_get_shader(pipeline, MESA_SHADER_GEOMETRY);

		return gs->info.gs.vertices_in;
	}

	if (radv_pipeline_has_tess(pipeline)) {
		struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);

		if (tes->info.tes.point_mode)
			return 1;
		if (tes->info.tes.primitive_mode == GL_ISOLINES)
			return 2;
		return 3;
	}

	return 1;
}
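
/* Compute the NGG subgroup configuration for GFX10. This mirrors the legacy
 * ESGS sizing above: ES vertices and GS primitives per subgroup are chosen so
 * that the LDS used for vertex/primitive data stays within the budget, with
 * extra hardware-specific clamps on GE_CNTL.VERT_GRP_SIZE.
 */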
static struct radv_ngg_state
calculate_ngg_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   struct radv_pipeline *pipeline)
{
	struct radv_ngg_state ngg = {0};
	struct radv_shader_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
	struct radv_es_output_info *es_info =
		radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
	unsigned gs_type = radv_pipeline_has_gs(pipeline) ? MESA_SHADER_GEOMETRY : MESA_SHADER_VERTEX;
	unsigned max_verts_per_prim = radv_get_num_input_vertices(pipeline);
	unsigned min_verts_per_prim =
		gs_type == MESA_SHADER_GEOMETRY ? max_verts_per_prim : 1;
	unsigned gs_num_invocations = radv_pipeline_has_gs(pipeline) ? MAX2(gs_info->gs.invocations, 1) : 1;
	bool uses_adjacency;
	switch(pCreateInfo->pInputAssemblyState->topology) {
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		uses_adjacency = true;
		break;
	default:
		uses_adjacency = false;
		break;
	}

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * Streamout can increase the ESGS buffer size later on, so be more
	 * conservative with streamout and use 4K dwords. This may be suboptimal.
	 *
	 * Otherwise, use the limit of 7K dwords. The reason is that we need
	 * to leave some headroom for the max_esverts increase at the end.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 * account. The linker will fail if the size is greater than
	 * 8K dwords.
	 */
	const unsigned max_lds_size = (0 /*gs_info->info.so.num_outputs*/ ? 4 : 7) * 1024 - 128;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 256;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
	 *  - at most 252 for any line input primitive type
	 *  - at most 251 for any quad input primitive type
	 *  - at most 251 for triangle strips with adjacency (this happens to
	 *    be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == MESA_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_info->gs.vertices_out * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_info->gs.vertices_out;
		}

		esvert_lds_size = es_info->esgs_itemsize / 4;
		gsprim_lds_size = (gs_info->gs.gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* TODO: This needs to be adjusted once LDS use for compaction
		 * after culling is implemented. */
		/*
		if (es_info->info.so.num_outputs)
			esvert_lds_size = 4 * es_info->info.so.num_outputs + 1;
		*/
	}

	/* LDS size for passing data from GS to ES.
	 * GS stores Primitive IDs (one DWORD) into LDS at the address
	 * corresponding to the ES thread of the provoking vertex. All
	 * ES threads load and export PrimitiveID for their thread.
	 */
	if (!radv_pipeline_has_tess(pipeline) &&
	    pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.export_prim_id)
		esvert_lds_size = MAX2(esvert_lds_size, 1);

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, uses_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = pipeline->device->physical_device->ge_wave_size;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, uses_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_info->gs.vertices_out :
		gs_type == MESA_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_info->gs.vertices_out :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == MESA_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_info->gs.vertices_out;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	ngg.max_gsprims = max_gsprims;
	ngg.max_out_verts = max_out_vertices;
	ngg.prim_amp_factor = prim_amp_factor;
	ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
	ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

	if (gs_type == MESA_SHADER_GEOMETRY) {
		ngg.vgt_esgs_ring_itemsize = es_info->esgs_itemsize / 4;
	} else {
		ngg.vgt_esgs_ring_itemsize = 1;
	}

	pipeline->graphics.esgs_ring_size = 4 * max_esverts * esvert_lds_size;

	assert(ngg.hw_max_esverts >= 24); /* HW limitation */

	return ngg;
}
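
/* Legacy (non-NGG) geometry shading goes through off-chip ESGS/GSVS ring
 * buffers; pick ring sizes that can keep the maximum number of GS waves
 * busy while respecting the per-SE size limit.
 */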
static void
calculate_gs_ring_sizes(struct radv_pipeline *pipeline, const struct radv_gs_state *gs)
{
	struct radv_device *device = pipeline->device;
	unsigned num_se = device->physical_device->rad_info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	/* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
	 * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
	 */
	unsigned gs_vertex_reuse =
		(device->physical_device->rad_info.chip_class >= GFX8 ? 32 : 16) * num_se;
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
	struct radv_shader_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;

	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(gs->vgt_esgs_ring_itemsize * 4 * gs_vertex_reuse *
					    wave_size, alignment);
	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
				  gs->vgt_esgs_ring_itemsize * 4 * gs_info->gs.vertices_in;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
				  gs_info->gs.max_gsvs_emit_size;

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
		pipeline->graphics.esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);

	pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}
static void si_multiwave_lds_size_workaround(struct radv_device *device,
					     unsigned *lds_size)
{
	/* If tessellation is all offchip and on-chip GS isn't used, this
	 * workaround is not needed.
	 */
	return;

	/* SPI barrier management bug:
	 *   Make sure we have at least 4k of LDS in use to avoid the bug.
	 *   It applies to workgroup sizes of more than one wavefront.
	 */
	if (device->physical_device->rad_info.family == CHIP_BONAIRE ||
	    device->physical_device->rad_info.family == CHIP_KABINI)
		*lds_size = MAX2(*lds_size, 8);
}
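
/* Return the shader variant that actually executes a given logical stage.
 * On chips with merged shader stages, the vertex or tess-eval work may live
 * in the TCS or GS binary, so fall through to the next enabled stage.
 */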
struct radv_shader_variant *
radv_get_shader(struct radv_pipeline *pipeline,
		gl_shader_stage stage)
{
	if (stage == MESA_SHADER_VERTEX) {
		if (pipeline->shaders[MESA_SHADER_VERTEX])
			return pipeline->shaders[MESA_SHADER_VERTEX];
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
			return pipeline->shaders[MESA_SHADER_TESS_CTRL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	} else if (stage == MESA_SHADER_TESS_EVAL) {
		if (!radv_pipeline_has_tess(pipeline))
			return NULL;
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
			return pipeline->shaders[MESA_SHADER_TESS_EVAL];
		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
			return pipeline->shaders[MESA_SHADER_GEOMETRY];
	}
	return pipeline->shaders[stage];
}
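
/* Translate the tessellation state (patch counts, TCS LDS size, TES
 * primitive mode/spacing/winding) into the LS_HS_CONFIG and TF_PARAM
 * register values.
 */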
static struct radv_tessellation_state
calculate_tess_state(struct radv_pipeline *pipeline,
		     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	unsigned num_tcs_input_cp;
	unsigned num_tcs_output_cp;
	unsigned lds_size;
	unsigned num_patches;
	struct radv_tessellation_state tess = {0};

	num_tcs_input_cp = pCreateInfo->pTessellationState->patchControlPoints;
	num_tcs_output_cp = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.tcs_vertices_out; //TCS VERTICES OUT
	num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;

	lds_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.lds_size;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
		assert(lds_size <= 65536);
		lds_size = align(lds_size, 512) / 512;
	} else {
		assert(lds_size <= 32768);
		lds_size = align(lds_size, 256) / 256;
	}
	si_multiwave_lds_size_workaround(pipeline->device, &lds_size);

	tess.lds_size = lds_size;

	tess.ls_hs_config = S_028B58_NUM_PATCHES(num_patches) |
		S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
		S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
	tess.num_patches = num_patches;

	struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);
	unsigned type = 0, partitioning = 0, topology = 0, distribution_mode = 0;

	switch (tes->info.tes.primitive_mode) {
	case GL_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case GL_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	case GL_ISOLINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	}

	switch (tes->info.tes.spacing) {
	case TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	case TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	}

	bool ccw = tes->info.tes.ccw;
	const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
		vk_find_struct_const(pCreateInfo->pTessellationState,
				     PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);

	if (domain_origin_state && domain_origin_state->domainOrigin != VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
		ccw = !ccw;

	if (tes->info.tes.point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes->info.tes.primitive_mode == GL_ISOLINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (ccw)
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	if (pipeline->device->physical_device->rad_info.has_distributed_tess) {
		if (pipeline->device->physical_device->rad_info.family == CHIP_FIJI ||
		    pipeline->device->physical_device->rad_info.family >= CHIP_POLARIS10)
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
		else
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
	} else
		distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;

	tess.tf_param = S_028B6C_TYPE(type) |
		S_028B6C_PARTITIONING(partitioning) |
		S_028B6C_TOPOLOGY(topology) |
		S_028B6C_DISTRIBUTION_MODE(distribution_mode);

	return tess;
}
static const struct radv_prim_vertex_count prim_size_table[] = {
	[V_008958_DI_PT_NONE] = {0, 0},
	[V_008958_DI_PT_POINTLIST] = {1, 1},
	[V_008958_DI_PT_LINELIST] = {2, 2},
	[V_008958_DI_PT_LINESTRIP] = {2, 1},
	[V_008958_DI_PT_TRILIST] = {3, 3},
	[V_008958_DI_PT_TRIFAN] = {3, 1},
	[V_008958_DI_PT_TRISTRIP] = {3, 1},
	[V_008958_DI_PT_LINELIST_ADJ] = {4, 4},
	[V_008958_DI_PT_LINESTRIP_ADJ] = {4, 1},
	[V_008958_DI_PT_TRILIST_ADJ] = {6, 6},
	[V_008958_DI_PT_TRISTRIP_ADJ] = {6, 2},
	[V_008958_DI_PT_RECTLIST] = {3, 3},
	[V_008958_DI_PT_LINELOOP] = {2, 1},
	[V_008958_DI_PT_POLYGON] = {3, 1},
	[V_008958_DI_PT_2D_TRI_STRIP] = {0, 0},
};
static const struct radv_vs_output_info *get_vs_output_info(const struct radv_pipeline *pipeline)
{
	if (radv_pipeline_has_gs(pipeline))
		if (radv_pipeline_has_ngg(pipeline))
			return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.vs.outinfo;
		else
			return &pipeline->gs_copy_shader->info.vs.outinfo;
	else if (radv_pipeline_has_tess(pipeline))
		return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.outinfo;
	else
		return &pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.outinfo;
}
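
/* Link the NIR shaders across stages: order them from last to first, handle
 * transform feedback varyings on the last pre-fragment stage, scalarize
 * cross-stage I/O, and then remove and compact unused varyings between each
 * producer/consumer pair.
 */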
static void
radv_link_shaders(struct radv_pipeline *pipeline, nir_shader **shaders)
{
	nir_shader* ordered_shaders[MESA_SHADER_STAGES];
	int shader_count = 0;

	if(shaders[MESA_SHADER_FRAGMENT]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_FRAGMENT];
	}
	if(shaders[MESA_SHADER_GEOMETRY]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_GEOMETRY];
	}
	if(shaders[MESA_SHADER_TESS_EVAL]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_EVAL];
	}
	if(shaders[MESA_SHADER_TESS_CTRL]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_CTRL];
	}
	if(shaders[MESA_SHADER_VERTEX]) {
		ordered_shaders[shader_count++] = shaders[MESA_SHADER_VERTEX];
	}

	if (shader_count > 1) {
		unsigned first = ordered_shaders[shader_count - 1]->info.stage;
		unsigned last = ordered_shaders[0]->info.stage;

		if (ordered_shaders[0]->info.stage == MESA_SHADER_FRAGMENT &&
		    ordered_shaders[1]->info.has_transform_feedback_varyings)
			nir_link_xfb_varyings(ordered_shaders[1], ordered_shaders[0]);

		for (int i = 0; i < shader_count; ++i) {
			nir_variable_mode mask = 0;

			if (ordered_shaders[i]->info.stage != first)
				mask = mask | nir_var_shader_in;

			if (ordered_shaders[i]->info.stage != last)
				mask = mask | nir_var_shader_out;

			nir_lower_io_to_scalar_early(ordered_shaders[i], mask);
			radv_optimize_nir(ordered_shaders[i], false, false);
		}
	}

	for (int i = 1; i < shader_count; ++i) {
		nir_lower_io_arrays_to_elements(ordered_shaders[i],
						ordered_shaders[i - 1]);

		if (nir_link_opt_varyings(ordered_shaders[i],
					  ordered_shaders[i - 1]))
			radv_optimize_nir(ordered_shaders[i - 1], false, false);

		nir_remove_dead_variables(ordered_shaders[i],
					  nir_var_shader_out);
		nir_remove_dead_variables(ordered_shaders[i - 1],
					  nir_var_shader_in);

		bool progress = nir_remove_unused_varyings(ordered_shaders[i],
							   ordered_shaders[i - 1]);

		nir_compact_varyings(ordered_shaders[i],
				     ordered_shaders[i - 1], true);

		if (progress) {
			if (nir_lower_global_vars_to_local(ordered_shaders[i])) {
				ac_lower_indirect_derefs(ordered_shaders[i],
							 pipeline->device->physical_device->rad_info.chip_class);
			}
			radv_optimize_nir(ordered_shaders[i], false, false);

			if (nir_lower_global_vars_to_local(ordered_shaders[i - 1])) {
				ac_lower_indirect_derefs(ordered_shaders[i - 1],
							 pipeline->device->physical_device->rad_info.chip_class);
			}
			radv_optimize_nir(ordered_shaders[i - 1], false, false);
		}
	}
}
static uint32_t
radv_get_attrib_stride(const VkPipelineVertexInputStateCreateInfo *input_state,
		       uint32_t attrib_binding)
{
	for (uint32_t i = 0; i < input_state->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *input_binding =
			&input_state->pVertexBindingDescriptions[i];

		if (input_binding->binding == attrib_binding)
			return input_binding->stride;
	}

	return 0;
}
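
/* Collect the pipeline state that affects shader compilation (vertex
 * attribute formats and strides, instance rate divisors, sample counts,
 * color formats) into a single key structure.
 */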
static struct radv_pipeline_key
radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
				    const VkGraphicsPipelineCreateInfo *pCreateInfo,
				    const struct radv_blend_state *blend,
				    bool has_view_index)
{
	const VkPipelineVertexInputStateCreateInfo *input_state =
		pCreateInfo->pVertexInputState;
	const VkPipelineVertexInputDivisorStateCreateInfoEXT *divisor_state =
		vk_find_struct_const(input_state->pNext, PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);

	struct radv_pipeline_key key;
	memset(&key, 0, sizeof(key));

	if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
		key.optimisations_disabled = 1;

	key.has_multiview_view_index = has_view_index;

	uint32_t binding_input_rate = 0;
	uint32_t instance_rate_divisors[MAX_VERTEX_ATTRIBS];
	for (unsigned i = 0; i < input_state->vertexBindingDescriptionCount; ++i) {
		if (input_state->pVertexBindingDescriptions[i].inputRate) {
			unsigned binding = input_state->pVertexBindingDescriptions[i].binding;
			binding_input_rate |= 1u << binding;
			instance_rate_divisors[binding] = 1;
		}
	}
	if (divisor_state) {
		for (unsigned i = 0; i < divisor_state->vertexBindingDivisorCount; ++i) {
			instance_rate_divisors[divisor_state->pVertexBindingDivisors[i].binding] =
				divisor_state->pVertexBindingDivisors[i].divisor;
		}
	}

	for (unsigned i = 0; i < input_state->vertexAttributeDescriptionCount; ++i) {
		const VkVertexInputAttributeDescription *desc =
			&input_state->pVertexAttributeDescriptions[i];
		const struct vk_format_description *format_desc;
		unsigned location = desc->location;
		unsigned binding = desc->binding;
		unsigned num_format, data_format;
		int first_non_void;

		if (binding_input_rate & (1u << binding)) {
			key.instance_rate_inputs |= 1u << location;
			key.instance_rate_divisors[location] = instance_rate_divisors[binding];
		}

		format_desc = vk_format_description(desc->format);
		first_non_void = vk_format_get_first_non_void_channel(desc->format);

		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);

		key.vertex_attribute_formats[location] = data_format | (num_format << 4);
		key.vertex_attribute_bindings[location] = desc->binding;
		key.vertex_attribute_offsets[location] = desc->offset;
		key.vertex_attribute_strides[location] = radv_get_attrib_stride(input_state, desc->binding);

		if (pipeline->device->physical_device->rad_info.chip_class <= GFX8 &&
		    pipeline->device->physical_device->rad_info.family != CHIP_STONEY) {
			VkFormat format = input_state->pVertexAttributeDescriptions[i].format;
			uint64_t adjust;
			switch(format) {
			case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
			case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
				adjust = RADV_ALPHA_ADJUST_SNORM;
				break;
			case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
			case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
				adjust = RADV_ALPHA_ADJUST_SSCALED;
				break;
			case VK_FORMAT_A2R10G10B10_SINT_PACK32:
			case VK_FORMAT_A2B10G10R10_SINT_PACK32:
				adjust = RADV_ALPHA_ADJUST_SINT;
				break;
			default:
				adjust = 0;
				break;
			}
			key.vertex_alpha_adjust |= adjust << (2 * location);
		}

		switch (desc->format) {
		case VK_FORMAT_B8G8R8A8_UNORM:
		case VK_FORMAT_B8G8R8A8_SNORM:
		case VK_FORMAT_B8G8R8A8_USCALED:
		case VK_FORMAT_B8G8R8A8_SSCALED:
		case VK_FORMAT_B8G8R8A8_UINT:
		case VK_FORMAT_B8G8R8A8_SINT:
		case VK_FORMAT_B8G8R8A8_SRGB:
		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
			key.vertex_post_shuffle |= 1 << location;
			break;
		default:
			break;
		}
	}

	if (pCreateInfo->pTessellationState)
		key.tess_input_vertices = pCreateInfo->pTessellationState->patchControlPoints;

	if (pCreateInfo->pMultisampleState &&
	    pCreateInfo->pMultisampleState->rasterizationSamples > 1) {
		uint32_t num_samples = pCreateInfo->pMultisampleState->rasterizationSamples;
		uint32_t ps_iter_samples = radv_pipeline_get_ps_iter_samples(pCreateInfo->pMultisampleState);
		key.num_samples = num_samples;
		key.log2_ps_iter_samples = util_logbase2(ps_iter_samples);
	}

	key.col_format = blend->spi_shader_col_format;
	if (pipeline->device->physical_device->rad_info.chip_class < GFX8)
		radv_pipeline_compute_get_int_clamp(pCreateInfo, &key.is_int8, &key.is_int10);

	return key;
}
static bool
radv_nir_stage_uses_xfb(const nir_shader *nir)
{
	nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
	bool uses_xfb = !!xfb;

	ralloc_free(xfb);
	return uses_xfb;
}
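
/* Decide the per-stage compile options: which stage runs as ES/LS on merged
 * hardware stages, whether NGG can be enabled, and the fragment shader
 * output format keys.
 */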
static void
radv_fill_shader_keys(struct radv_device *device,
		      struct radv_shader_variant_key *keys,
		      const struct radv_pipeline_key *key,
		      nir_shader **nir)
{
	keys[MESA_SHADER_VERTEX].vs.instance_rate_inputs = key->instance_rate_inputs;
	keys[MESA_SHADER_VERTEX].vs.alpha_adjust = key->vertex_alpha_adjust;
	keys[MESA_SHADER_VERTEX].vs.post_shuffle = key->vertex_post_shuffle;
	for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; ++i) {
		keys[MESA_SHADER_VERTEX].vs.instance_rate_divisors[i] = key->instance_rate_divisors[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_formats[i] = key->vertex_attribute_formats[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_bindings[i] = key->vertex_attribute_bindings[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_offsets[i] = key->vertex_attribute_offsets[i];
		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_strides[i] = key->vertex_attribute_strides[i];
	}

	if (nir[MESA_SHADER_TESS_CTRL]) {
		keys[MESA_SHADER_VERTEX].vs_common_out.as_ls = true;
		keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = 0;
		keys[MESA_SHADER_TESS_CTRL].tcs.input_vertices = key->tess_input_vertices;
		keys[MESA_SHADER_TESS_CTRL].tcs.primitive_mode = nir[MESA_SHADER_TESS_EVAL]->info.tess.primitive_mode;

		keys[MESA_SHADER_TESS_CTRL].tcs.tes_reads_tess_factors = !!(nir[MESA_SHADER_TESS_EVAL]->info.inputs_read & (VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER));
	}

	if (nir[MESA_SHADER_GEOMETRY]) {
		if (nir[MESA_SHADER_TESS_CTRL])
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_es = true;
		else
			keys[MESA_SHADER_VERTEX].vs_common_out.as_es = true;
	}

	if (device->physical_device->rad_info.chip_class >= GFX10 &&
	    device->physical_device->rad_info.family != CHIP_NAVI14 &&
	    !(device->instance->debug_flags & RADV_DEBUG_NO_NGG)) {
		if (nir[MESA_SHADER_TESS_CTRL]) {
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = true;
		} else {
			keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = true;
		}

		if (nir[MESA_SHADER_TESS_CTRL] &&
		    nir[MESA_SHADER_GEOMETRY] &&
		    nir[MESA_SHADER_GEOMETRY]->info.gs.invocations *
		    nir[MESA_SHADER_GEOMETRY]->info.gs.vertices_out > 256) {
			/* Fallback to the legacy path if tessellation is
			 * enabled with extreme geometry because
			 * EN_MAX_VERT_OUT_PER_GS_INSTANCE doesn't work and it
			 * might hang.
			 */
			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
		}

		/*
		 * Disable NGG with geometry shaders. There are a bunch of
		 * issues still:
		 *   * GS primitives in pipeline statistic queries do not get
		 *     updates. See dEQP-VK.query_pool.statistics_query.geometry_shader_primitives
		 *   * dEQP-VK.clipping.user_defined.clip_cull_distance_dynamic_index.*geom* failures
		 *   * Interactions with tessellation failing:
		 *     dEQP-VK.tessellation.geometry_interaction.passthrough.tessellate_isolines_passthrough_geometry_no_change
		 *   * General issues with the last primitive missing/corrupt:
		 *     https://bugs.freedesktop.org/show_bug.cgi?id=111248
		 *
		 * Furthermore, XGL/AMDVLK also disables this as of 9b632ef.
		 */
		if (nir[MESA_SHADER_GEOMETRY]) {
			if (nir[MESA_SHADER_TESS_CTRL])
				keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
			else
				keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = false;
		}

		/* TODO: Implement streamout support for NGG. */
		gl_shader_stage last_xfb_stage = MESA_SHADER_VERTEX;

		for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
			if (nir[i])
				last_xfb_stage = i;
		}

		if (nir[last_xfb_stage] &&
		    radv_nir_stage_uses_xfb(nir[last_xfb_stage])) {
			if (nir[MESA_SHADER_TESS_CTRL])
				keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
			else
				keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = false;
		}
	}

	for(int i = 0; i < MESA_SHADER_STAGES; ++i)
		keys[i].has_multiview_view_index = key->has_multiview_view_index;

	keys[MESA_SHADER_FRAGMENT].fs.col_format = key->col_format;
	keys[MESA_SHADER_FRAGMENT].fs.is_int8 = key->is_int8;
	keys[MESA_SHADER_FRAGMENT].fs.is_int10 = key->is_int10;
	keys[MESA_SHADER_FRAGMENT].fs.log2_ps_iter_samples = key->log2_ps_iter_samples;
	keys[MESA_SHADER_FRAGMENT].fs.num_samples = key->num_samples;
}
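
/* Run the NIR shader info pass for every active stage. On GFX9+ the VS/TCS
 * and (VS or TES)/GS pairs are merged, so their info is accumulated into the
 * second stage of each pair.
 */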
static void
radv_fill_shader_info(struct radv_pipeline *pipeline,
		      struct radv_shader_variant_key *keys,
		      struct radv_shader_info *infos,
		      nir_shader **nir)
{
	unsigned active_stages = 0;
	unsigned filled_stages = 0;

	for (int i = 0; i < MESA_SHADER_STAGES; i++) {
		if (nir[i])
			active_stages |= (1 << i);
	}

	if (nir[MESA_SHADER_FRAGMENT]) {
		radv_nir_shader_info_init(&infos[MESA_SHADER_FRAGMENT]);
		radv_nir_shader_info_pass(nir[MESA_SHADER_FRAGMENT],
					  pipeline->layout,
					  &keys[MESA_SHADER_FRAGMENT],
					  &infos[MESA_SHADER_FRAGMENT]);

		/* TODO: These are no longer used as keys we should refactor this */
		keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id =
			infos[MESA_SHADER_FRAGMENT].ps.prim_id_input;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_layer_id =
			infos[MESA_SHADER_FRAGMENT].ps.layer_input;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_clip_dists =
			!!infos[MESA_SHADER_FRAGMENT].ps.num_input_clips_culls;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_prim_id =
			infos[MESA_SHADER_FRAGMENT].ps.prim_id_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_layer_id =
			infos[MESA_SHADER_FRAGMENT].ps.layer_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_clip_dists =
			!!infos[MESA_SHADER_FRAGMENT].ps.num_input_clips_culls;

		filled_stages |= (1 << MESA_SHADER_FRAGMENT);
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9 &&
	    nir[MESA_SHADER_TESS_CTRL]) {
		struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
		struct radv_shader_variant_key key = keys[MESA_SHADER_TESS_CTRL];
		key.tcs.vs_key = keys[MESA_SHADER_VERTEX].vs;

		radv_nir_shader_info_init(&infos[MESA_SHADER_TESS_CTRL]);

		for (int i = 0; i < 2; i++) {
			radv_nir_shader_info_pass(combined_nir[i],
						  pipeline->layout, &key,
						  &infos[MESA_SHADER_TESS_CTRL]);
		}

		keys[MESA_SHADER_TESS_EVAL].tes.num_patches =
			infos[MESA_SHADER_TESS_CTRL].tcs.num_patches;
		keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs =
			util_last_bit64(infos[MESA_SHADER_TESS_CTRL].tcs.outputs_written);

		filled_stages |= (1 << MESA_SHADER_VERTEX);
		filled_stages |= (1 << MESA_SHADER_TESS_CTRL);
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9 &&
	    nir[MESA_SHADER_GEOMETRY]) {
		gl_shader_stage pre_stage = nir[MESA_SHADER_TESS_EVAL] ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
		struct nir_shader *combined_nir[] = {nir[pre_stage], nir[MESA_SHADER_GEOMETRY]};

		radv_nir_shader_info_init(&infos[MESA_SHADER_GEOMETRY]);

		for (int i = 0; i < 2; i++) {
			radv_nir_shader_info_pass(combined_nir[i],
						  pipeline->layout,
						  &keys[pre_stage],
						  &infos[MESA_SHADER_GEOMETRY]);
		}

		filled_stages |= (1 << pre_stage);
		filled_stages |= (1 << MESA_SHADER_GEOMETRY);
	}

	active_stages ^= filled_stages;
	while (active_stages) {
		int i = u_bit_scan(&active_stages);

		if (i == MESA_SHADER_TESS_CTRL) {
			keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs =
				util_last_bit64(infos[MESA_SHADER_VERTEX].vs.ls_outputs_written);
		}

		if (i == MESA_SHADER_TESS_EVAL) {
			keys[MESA_SHADER_TESS_EVAL].tes.num_patches =
				infos[MESA_SHADER_TESS_CTRL].tcs.num_patches;
			keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs =
				util_last_bit64(infos[MESA_SHADER_TESS_CTRL].tcs.outputs_written);
		}

		radv_nir_shader_info_init(&infos[i]);
		radv_nir_shader_info_pass(nir[i], pipeline->layout,
					  &keys[i], &infos[i]);
	}
}
static void
merge_tess_info(struct shader_info *tes_info,
		const struct shader_info *tcs_info)
{
	/* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
	 *
	 *    "PointMode. Controls generation of points rather than triangles
	 *     or lines. This functionality defaults to disabled, and is
	 *     enabled if either shader stage includes the execution mode.
	 *
	 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
	 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
	 * and OutputVertices, it says:
	 *
	 *    "One mode must be set in at least one of the tessellation
	 *     shader stages."
	 *
	 * So, the fields can be set in either the TCS or TES, but they must
	 * agree if set in both.  Our backend looks at TES, so bitwise-or in
	 * the values from the TCS.
	 */
	assert(tcs_info->tess.tcs_vertices_out == 0 ||
	       tes_info->tess.tcs_vertices_out == 0 ||
	       tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
	tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;

	assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
	       tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
	       tcs_info->tess.spacing == tes_info->tess.spacing);
	tes_info->tess.spacing |= tcs_info->tess.spacing;

	assert(tcs_info->tess.primitive_mode == 0 ||
	       tes_info->tess.primitive_mode == 0 ||
	       tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
	tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
	tes_info->tess.ccw |= tcs_info->tess.ccw;
	tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}
static
void radv_init_feedback(const VkPipelineCreationFeedbackCreateInfoEXT *ext)
{
	if (!ext)
		return;

	if (ext->pPipelineCreationFeedback) {
		ext->pPipelineCreationFeedback->flags = 0;
		ext->pPipelineCreationFeedback->duration = 0;
	}

	for (unsigned i = 0; i < ext->pipelineStageCreationFeedbackCount; ++i) {
		ext->pPipelineStageCreationFeedbacks[i].flags = 0;
		ext->pPipelineStageCreationFeedbacks[i].duration = 0;
	}
}
static
void radv_start_feedback(VkPipelineCreationFeedbackEXT *feedback)
{
	if (!feedback)
		return;

	feedback->duration -= radv_get_current_time();
	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
}
static
void radv_stop_feedback(VkPipelineCreationFeedbackEXT *feedback, bool cache_hit)
{
	if (!feedback)
		return;

	feedback->duration += radv_get_current_time();
	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT |
			  (cache_hit ? VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT : 0);
}
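
/* Compile all shader variants for a pipeline, or fetch them from the
 * pipeline cache. The GS copy shader is cached under a second hash that is
 * derived from the main shader hash by flipping its first byte.
 */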
void radv_create_shaders(struct radv_pipeline *pipeline,
			 struct radv_device *device,
			 struct radv_pipeline_cache *cache,
			 const struct radv_pipeline_key *key,
			 const VkPipelineShaderStageCreateInfo **pStages,
			 const VkPipelineCreateFlags flags,
			 VkPipelineCreationFeedbackEXT *pipeline_feedback,
			 VkPipelineCreationFeedbackEXT **stage_feedbacks)
{
	struct radv_shader_module fs_m = {0};
	struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
	nir_shader *nir[MESA_SHADER_STAGES] = {0};
	struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
	struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{{0}}}}};
	struct radv_shader_info infos[MESA_SHADER_STAGES] = {0};
	unsigned char hash[20], gs_copy_hash[20];
	bool keep_executable_info = (flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) || device->keep_shader_info;

	radv_start_feedback(pipeline_feedback);

	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pStages[i]) {
			modules[i] = radv_shader_module_from_handle(pStages[i]->module);
			if (modules[i]->nir)
				_mesa_sha1_compute(modules[i]->nir->info.name,
						   strlen(modules[i]->nir->info.name),
						   modules[i]->sha1);

			pipeline->active_stages |= mesa_to_vk_shader_stage(i);
		}
	}

	radv_hash_shaders(hash, pStages, pipeline->layout, key, get_hash_flags(device));
	memcpy(gs_copy_hash, hash, 20);
	gs_copy_hash[0] ^= 1;

	bool found_in_application_cache = true;
	if (modules[MESA_SHADER_GEOMETRY] && !keep_executable_info) {
		struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
		radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants,
								&found_in_application_cache);
		pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
	}

	if (!keep_executable_info &&
	    radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
							    &found_in_application_cache) &&
	    (!modules[MESA_SHADER_GEOMETRY] || pipeline->gs_copy_shader)) {
		radv_stop_feedback(pipeline_feedback, found_in_application_cache);
		return;
	}

	if (!modules[MESA_SHADER_FRAGMENT] && !modules[MESA_SHADER_COMPUTE]) {
		nir_builder fs_b;
		nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
		fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "noop_fs");
		fs_m.nir = fs_b.shader;
		modules[MESA_SHADER_FRAGMENT] = &fs_m;
	}

	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
		const VkPipelineShaderStageCreateInfo *stage = pStages[i];

		if (!modules[i])
			continue;

		radv_start_feedback(stage_feedbacks[i]);

		nir[i] = radv_shader_compile_to_nir(device, modules[i],
						    stage ? stage->pName : "main", i,
						    stage ? stage->pSpecializationInfo : NULL,
						    flags, pipeline->layout);

		/* We don't want to alter meta shaders IR directly so clone it
		 * first.
		 */
		if (nir[i]->info.name) {
			nir[i] = nir_shader_clone(NULL, nir[i]);
		}

		radv_stop_feedback(stage_feedbacks[i], false);
	}

	if (nir[MESA_SHADER_TESS_CTRL]) {
		nir_lower_patch_vertices(nir[MESA_SHADER_TESS_EVAL], nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
		merge_tess_info(&nir[MESA_SHADER_TESS_EVAL]->info, &nir[MESA_SHADER_TESS_CTRL]->info);
	}

	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_link_shaders(pipeline, nir);

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (nir[i]) {
			NIR_PASS_V(nir[i], nir_lower_non_uniform_access,
				   nir_lower_non_uniform_ubo_access |
				   nir_lower_non_uniform_ssbo_access |
				   nir_lower_non_uniform_texture_access |
				   nir_lower_non_uniform_image_access);
			NIR_PASS_V(nir[i], nir_lower_bool_to_int32);
		}

		if (radv_can_dump_shader(device, modules[i], false))
			nir_print_shader(nir[i], stderr);
	}

	if (nir[MESA_SHADER_FRAGMENT])
		radv_lower_fs_io(nir[MESA_SHADER_FRAGMENT]);

	radv_fill_shader_keys(device, keys, key, nir);

	radv_fill_shader_info(pipeline, keys, infos, nir);

	if (nir[MESA_SHADER_FRAGMENT]) {
		if (!pipeline->shaders[MESA_SHADER_FRAGMENT]) {
			radv_start_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT]);

			pipeline->shaders[MESA_SHADER_FRAGMENT] =
				radv_shader_variant_compile(device, modules[MESA_SHADER_FRAGMENT], &nir[MESA_SHADER_FRAGMENT], 1,
							    pipeline->layout, keys + MESA_SHADER_FRAGMENT,
							    infos + MESA_SHADER_FRAGMENT,
							    keep_executable_info, &binaries[MESA_SHADER_FRAGMENT]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT], false);
		}

		/* TODO: These are no longer used as keys we should refactor this */
		keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.prim_id_input;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_layer_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.layer_input;
		keys[MESA_SHADER_VERTEX].vs_common_out.export_clip_dists =
			!!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.num_input_clips_culls;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_prim_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.prim_id_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_layer_id =
			pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.layer_input;
		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_clip_dists =
			!!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.num_input_clips_culls;
	}

	if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_TESS_CTRL]) {
		if (!pipeline->shaders[MESA_SHADER_TESS_CTRL]) {
			struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
			struct radv_shader_variant_key key = keys[MESA_SHADER_TESS_CTRL];
			key.tcs.vs_key = keys[MESA_SHADER_VERTEX].vs;

			radv_start_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL]);

			pipeline->shaders[MESA_SHADER_TESS_CTRL] = radv_shader_variant_compile(device, modules[MESA_SHADER_TESS_CTRL], combined_nir, 2,
											       pipeline->layout,
											       &key, &infos[MESA_SHADER_TESS_CTRL], keep_executable_info,
											       &binaries[MESA_SHADER_TESS_CTRL]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL], false);
		}
		modules[MESA_SHADER_VERTEX] = NULL;
		keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
		keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.outputs_written);
	}

	if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_GEOMETRY]) {
		gl_shader_stage pre_stage = modules[MESA_SHADER_TESS_EVAL] ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
		if (!pipeline->shaders[MESA_SHADER_GEOMETRY]) {
			struct nir_shader *combined_nir[] = {nir[pre_stage], nir[MESA_SHADER_GEOMETRY]};

			radv_start_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY]);

			pipeline->shaders[MESA_SHADER_GEOMETRY] = radv_shader_variant_compile(device, modules[MESA_SHADER_GEOMETRY], combined_nir, 2,
											      pipeline->layout,
											      &keys[pre_stage], &infos[MESA_SHADER_GEOMETRY], keep_executable_info,
											      &binaries[MESA_SHADER_GEOMETRY]);

			radv_stop_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY], false);
		}
		modules[pre_stage] = NULL;
	}

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if(modules[i] && !pipeline->shaders[i]) {
			if (i == MESA_SHADER_TESS_CTRL) {
				keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = util_last_bit64(pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.ls_outputs_written);
			}
			if (i == MESA_SHADER_TESS_EVAL) {
				keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
				keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.outputs_written);
			}

			radv_start_feedback(stage_feedbacks[i]);

			pipeline->shaders[i] = radv_shader_variant_compile(device, modules[i], &nir[i], 1,
									   pipeline->layout,
									   keys + i, infos + i, keep_executable_info,
									   &binaries[i]);

			radv_stop_feedback(stage_feedbacks[i], false);
		}
	}

	if(modules[MESA_SHADER_GEOMETRY]) {
		struct radv_shader_binary *gs_copy_binary = NULL;
		if (!pipeline->gs_copy_shader &&
		    !radv_pipeline_has_ngg(pipeline)) {
			struct radv_shader_info info = {};
			struct radv_shader_variant_key key = {};

			key.has_multiview_view_index =
				keys[MESA_SHADER_GEOMETRY].has_multiview_view_index;

			radv_nir_shader_info_pass(nir[MESA_SHADER_GEOMETRY],
						  pipeline->layout, &key,
						  &info);
			pipeline->gs_copy_shader = radv_create_gs_copy_shader(
					device, nir[MESA_SHADER_GEOMETRY], &info,
					&gs_copy_binary, keep_executable_info,
					keys[MESA_SHADER_GEOMETRY].has_multiview_view_index);
		}

		if (!keep_executable_info && pipeline->gs_copy_shader) {
			struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
			struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};

			binaries[MESA_SHADER_GEOMETRY] = gs_copy_binary;
			variants[MESA_SHADER_GEOMETRY] = pipeline->gs_copy_shader;

			radv_pipeline_cache_insert_shaders(device, cache,
							   gs_copy_hash,
							   variants, binaries);
		}
		free(gs_copy_binary);
	}

	if (!keep_executable_info) {
		radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
						   binaries);
	}

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		free(binaries[i]);
		if (nir[i]) {
			ralloc_free(nir[i]);

			if (radv_can_dump_shader_stats(device, modules[i]))
				radv_shader_dump_stats(device,
						       pipeline->shaders[i],
						       i, stderr);
		}
	}

	if (fs_m.nir)
		ralloc_free(fs_m.nir);

	radv_stop_feedback(pipeline_feedback, false);
}
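
/* Map a shader stage to the base SGPR register used for its user data. The
 * answer depends on which hardware stage the logical stage ends up running
 * on (LS/HS/ES/GS/VS) for the given chip generation.
 */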
static uint32_t
radv_pipeline_stage_to_user_data_0(struct radv_pipeline *pipeline,
				   gl_shader_stage stage, enum chip_class chip_class)
{
	bool has_gs = radv_pipeline_has_gs(pipeline);
	bool has_tess = radv_pipeline_has_tess(pipeline);
	bool has_ngg = radv_pipeline_has_ngg(pipeline);

	switch (stage) {
	case MESA_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	case MESA_SHADER_VERTEX:
		if (has_tess) {
			if (chip_class >= GFX10) {
				return R_00B430_SPI_SHADER_USER_DATA_HS_0;
			} else if (chip_class == GFX9) {
				return R_00B430_SPI_SHADER_USER_DATA_LS_0;
			} else {
				return R_00B530_SPI_SHADER_USER_DATA_LS_0;
			}
		}

		if (has_gs) {
			if (chip_class >= GFX10) {
				return R_00B230_SPI_SHADER_USER_DATA_GS_0;
			} else {
				return R_00B330_SPI_SHADER_USER_DATA_ES_0;
			}
		}

		if (has_ngg)
			return R_00B230_SPI_SHADER_USER_DATA_GS_0;

		return R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case MESA_SHADER_GEOMETRY:
		return chip_class == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
					    R_00B230_SPI_SHADER_USER_DATA_GS_0;
	case MESA_SHADER_COMPUTE:
		return R_00B900_COMPUTE_USER_DATA_0;
	case MESA_SHADER_TESS_CTRL:
		return chip_class == GFX9 ? R_00B430_SPI_SHADER_USER_DATA_LS_0 :
					    R_00B430_SPI_SHADER_USER_DATA_HS_0;
	case MESA_SHADER_TESS_EVAL:
		if (has_gs) {
			return chip_class >= GFX10 ? R_00B230_SPI_SHADER_USER_DATA_GS_0 :
						     R_00B330_SPI_SHADER_USER_DATA_ES_0;
		} else if (has_ngg) {
			return R_00B230_SPI_SHADER_USER_DATA_GS_0;
		} else {
			return R_00B130_SPI_SHADER_USER_DATA_VS_0;
		}
	default:
		unreachable("unknown shader");
	}
}
struct radv_bin_size_entry {
	unsigned bpp;
	VkExtent2D extent;
};

static VkExtent2D
radv_gfx9_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	static const struct radv_bin_size_entry color_size_table[][3][9] = {
		{
			{
				/* One shader engine */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Two shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Four shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			{
				/* One shader engine */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Two shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Four shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			{
				/* One shader engine */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Two shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
			{
				/* Four shader engines */
				/* ... */
				{ UINT_MAX, { 0, 0}},
			},
		},
	};
	static const struct radv_bin_size_entry ds_size_table[][3][9] = {
		{
			{
				// One shader engine
				// ...
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Two shader engines
				// ...
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Four shader engines
				// ...
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			{
				// One shader engine
				// ...
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Two shader engines
				// ...
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Four shader engines
				// ...
				{ UINT_MAX, { 0, 0}},
			},
		},
		{
			{
				// One shader engine
				// ...
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Two shader engines
				// ...
				{ UINT_MAX, { 0, 0}},
			},
			{
				// Four shader engines
				// ...
				{ UINT_MAX, { 0, 0}},
			},
		},
	};

	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	VkExtent2D extent = {512, 512};

	unsigned log_num_rb_per_se =
		util_logbase2_ceil(pipeline->device->physical_device->rad_info.num_render_backends /
				   pipeline->device->physical_device->rad_info.max_se);
	unsigned log_num_se = util_logbase2_ceil(pipeline->device->physical_device->rad_info.max_se);

	unsigned total_samples = 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline->graphics.ms.pa_sc_aa_config);
	unsigned ps_iter_samples = 1u << G_028804_PS_ITER_SAMPLES(pipeline->graphics.ms.db_eqaa);
	unsigned effective_samples = total_samples;
	unsigned color_bytes_per_pixel = 0;

	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	if (vkblend) {
		for (unsigned i = 0; i < subpass->color_count; i++) {
			if (!vkblend->pAttachments[i].colorWriteMask)
				continue;

			if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
				continue;

			VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
			color_bytes_per_pixel += vk_format_get_blocksize(format);
		}

		/* MSAA images typically don't use all samples all the time. */
		if (effective_samples >= 2 && ps_iter_samples <= 1)
			effective_samples = 2;
		color_bytes_per_pixel *= effective_samples;
	}

	const struct radv_bin_size_entry *color_entry = color_size_table[log_num_rb_per_se][log_num_se];
	while(color_entry[1].bpp <= color_bytes_per_pixel)
		++color_entry;

	extent = color_entry->extent;

	if (subpass->depth_stencil_attachment) {
		struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

		/* Coefficients taken from AMDVLK */
		unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
		unsigned stencil_coeff = vk_format_is_stencil(attachment->format) ? 1 : 0;
		unsigned ds_bytes_per_pixel = 4 * (depth_coeff + stencil_coeff) * total_samples;

		const struct radv_bin_size_entry *ds_entry = ds_size_table[log_num_rb_per_se][log_num_se];
		while(ds_entry[1].bpp <= ds_bytes_per_pixel)
			++ds_entry;

		if (ds_entry->extent.width * ds_entry->extent.height < extent.width * extent.height)
			extent = ds_entry->extent;
	}

	return extent;
}
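
/* GFX10 derives bin sizes from cache tag capacity instead of lookup tables.
 * Rough worked example (hypothetical configuration, single-sampled, one
 * RGBA8 attachment): with rb_count = 16 and sdp_interface_count = 16,
 * color_tag_part is (31 * 16 / 16) * 1024 * 16 = 507904 bytes; at 4 bytes
 * per pixel the pixel-count log2 is 16, i.e. a 256x256 bin before the
 * fmask/depth limits and the 128x64 minimum are applied.
 */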
static VkExtent2D
radv_gfx10_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	VkExtent2D extent = {512, 512};

	unsigned sdp_interface_count;

	switch(pipeline->device->physical_device->rad_info.family) {
	case CHIP_NAVI10:
	case CHIP_NAVI12:
		sdp_interface_count = 16;
		break;
	case CHIP_NAVI14:
		sdp_interface_count = 8;
		break;
	default:
		unreachable("Unhandled GFX10 chip");
	}

	const unsigned db_tag_size = 64;
	const unsigned db_tag_count = 312;
	const unsigned color_tag_size = 1024;
	const unsigned color_tag_count = 31;
	const unsigned fmask_tag_size = 256;
	const unsigned fmask_tag_count = 44;

	const unsigned rb_count = pipeline->device->physical_device->rad_info.num_render_backends;
	const unsigned pipe_count = MAX2(rb_count, sdp_interface_count);

	const unsigned db_tag_part = (db_tag_count * rb_count / pipe_count) * db_tag_size * pipe_count;
	const unsigned color_tag_part = (color_tag_count * rb_count / pipe_count) * color_tag_size * pipe_count;
	const unsigned fmask_tag_part = (fmask_tag_count * rb_count / pipe_count) * fmask_tag_size * pipe_count;

	const unsigned total_samples = 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline->graphics.ms.pa_sc_aa_config);
	const unsigned samples_log = util_logbase2_ceil(total_samples);

	unsigned color_bytes_per_pixel = 0;
	unsigned fmask_bytes_per_pixel = 0;

	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	if (vkblend) {
		for (unsigned i = 0; i < subpass->color_count; i++) {
			if (!vkblend->pAttachments[i].colorWriteMask)
				continue;

			if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
				continue;

			VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
			color_bytes_per_pixel += vk_format_get_blocksize(format);

			if (total_samples > 1) {
				const unsigned fmask_array[] = {0, 1, 1, 4};
				fmask_bytes_per_pixel += fmask_array[samples_log];
			}
		}

		color_bytes_per_pixel *= total_samples;
	}
	color_bytes_per_pixel = MAX2(color_bytes_per_pixel, 1);

	const unsigned color_pixel_count_log = util_logbase2(color_tag_part / color_bytes_per_pixel);
	extent.width = 1ull << ((color_pixel_count_log + 1) / 2);
	extent.height = 1ull << (color_pixel_count_log / 2);

	if (fmask_bytes_per_pixel) {
		const unsigned fmask_pixel_count_log = util_logbase2(fmask_tag_part / fmask_bytes_per_pixel);

		const VkExtent2D fmask_extent = (VkExtent2D){
			.width = 1ull << ((fmask_pixel_count_log + 1) / 2),
			.height = 1ull << (color_pixel_count_log / 2)
		};

		if (fmask_extent.width * fmask_extent.height < extent.width * extent.height)
			extent = fmask_extent;
	}

	if (subpass->depth_stencil_attachment) {
		struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

		/* Coefficients taken from AMDVLK */
		unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
		unsigned stencil_coeff = vk_format_is_stencil(attachment->format) ? 1 : 0;
		unsigned db_bytes_per_pixel = (depth_coeff + stencil_coeff) * total_samples;

		const unsigned db_pixel_count_log = util_logbase2(db_tag_part / db_bytes_per_pixel);

		const VkExtent2D db_extent = (VkExtent2D){
			.width = 1ull << ((db_pixel_count_log + 1) / 2),
			.height = 1ull << (color_pixel_count_log / 2)
		};

		if (db_extent.width * db_extent.height < extent.width * extent.height)
			extent = db_extent;
	}

	extent.width = MAX2(extent.width, 128);
	extent.height = MAX2(extent.width, 64);

	return extent;
}
static void
radv_pipeline_generate_disabled_binning_state(struct radeon_cmdbuf *ctx_cs,
					      struct radv_pipeline *pipeline,
					      const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t pa_sc_binner_cntl_0 =
		S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
		S_028C44_DISABLE_START_OF_PRIM(1);
	uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
		RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
		struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
		const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
		unsigned min_bytes_per_pixel = 0;

		if (vkblend) {
			for (unsigned i = 0; i < subpass->color_count; i++) {
				if (!vkblend->pAttachments[i].colorWriteMask)
					continue;

				if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
					continue;

				VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
				unsigned bytes = vk_format_get_blocksize(format);
				if (!min_bytes_per_pixel || bytes < min_bytes_per_pixel)
					min_bytes_per_pixel = bytes;
			}
		}

		pa_sc_binner_cntl_0 =
			S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_NEW_SC) |
			S_028C44_BIN_SIZE_X(0) |
			S_028C44_BIN_SIZE_Y(0) |
			S_028C44_BIN_SIZE_X_EXTEND(2) | /* 128 */
			S_028C44_BIN_SIZE_Y_EXTEND(min_bytes_per_pixel <= 4 ? 2 : 1) | /* 128 or 64 */
			S_028C44_DISABLE_START_OF_PRIM(1);
	}

	pipeline->graphics.binning.pa_sc_binner_cntl_0 = pa_sc_binner_cntl_0;
	pipeline->graphics.binning.db_dfsm_control = db_dfsm_control;
}
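
/* Choose the binning bin size for GFX9+ and program PA_SC_BINNER_CNTL_0 and
 * DB_DFSM_CONTROL; when primitive binning cannot be used, fall back to the
 * disabled-binning state instead.
 */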
static void
radv_pipeline_generate_binning_state(struct radeon_cmdbuf *ctx_cs,
				     struct radv_pipeline *pipeline,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
		return;

	VkExtent2D bin_size;
	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
		bin_size = radv_gfx10_compute_bin_size(pipeline, pCreateInfo);
	} else if (pipeline->device->physical_device->rad_info.chip_class == GFX9) {
		bin_size = radv_gfx9_compute_bin_size(pipeline, pCreateInfo);
	} else
		unreachable("Unhandled generation for binning bin size calculation");

	if (pipeline->device->pbb_allowed && bin_size.width && bin_size.height) {
		unsigned context_states_per_bin; /* allowed range: [1, 6] */
		unsigned persistent_states_per_bin; /* allowed range: [1, 32] */
		unsigned fpovs_per_batch; /* allowed range: [0, 255], 0 = unlimited */

		if (pipeline->device->physical_device->rad_info.has_dedicated_vram) {
			context_states_per_bin = 1;
			persistent_states_per_bin = 1;
			fpovs_per_batch = 63;
		} else {
			/* The context states are affected by the scissor bug. */
			context_states_per_bin = pipeline->device->physical_device->rad_info.has_gfx9_scissor_bug ? 1 : 6;
			/* 32 causes hangs for RAVEN. */
			persistent_states_per_bin = 16;
			fpovs_per_batch = 63;
		}

		const uint32_t pa_sc_binner_cntl_0 =
			S_028C44_BINNING_MODE(V_028C44_BINNING_ALLOWED) |
			S_028C44_BIN_SIZE_X(bin_size.width == 16) |
			S_028C44_BIN_SIZE_Y(bin_size.height == 16) |
			S_028C44_BIN_SIZE_X_EXTEND(util_logbase2(MAX2(bin_size.width, 32)) - 5) |
			S_028C44_BIN_SIZE_Y_EXTEND(util_logbase2(MAX2(bin_size.height, 32)) - 5) |
			S_028C44_CONTEXT_STATES_PER_BIN(context_states_per_bin - 1) |
			S_028C44_PERSISTENT_STATES_PER_BIN(persistent_states_per_bin - 1) |
			S_028C44_DISABLE_START_OF_PRIM(1) |
			S_028C44_FPOVS_PER_BATCH(fpovs_per_batch) |
			S_028C44_OPTIMAL_BIN_SELECTION(1);

		uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);

		pipeline->graphics.binning.pa_sc_binner_cntl_0 = pa_sc_binner_cntl_0;
		pipeline->graphics.binning.db_dfsm_control = db_dfsm_control;
	} else
		radv_pipeline_generate_disabled_binning_state(ctx_cs, pipeline, pCreateInfo);
}
static void
radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *ctx_cs,
					   struct radv_pipeline *pipeline,
					   const VkGraphicsPipelineCreateInfo *pCreateInfo,
					   const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineDepthStencilStateCreateInfo *vkds = pCreateInfo->pDepthStencilState;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	struct radv_render_pass_attachment *attachment = NULL;
	uint32_t db_depth_control = 0, db_stencil_control = 0;
	uint32_t db_render_control = 0, db_render_override2 = 0;
	uint32_t db_render_override = 0;

	if (subpass->depth_stencil_attachment)
		attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;

	bool has_depth_attachment = attachment && vk_format_is_depth(attachment->format);
	bool has_stencil_attachment = attachment && vk_format_is_stencil(attachment->format);

	if (vkds && has_depth_attachment) {
		db_depth_control = S_028800_Z_ENABLE(vkds->depthTestEnable ? 1 : 0) |
				   S_028800_Z_WRITE_ENABLE(vkds->depthWriteEnable ? 1 : 0) |
				   S_028800_ZFUNC(vkds->depthCompareOp) |
				   S_028800_DEPTH_BOUNDS_ENABLE(vkds->depthBoundsTestEnable ? 1 : 0);

		/* from amdvlk: For 4xAA and 8xAA need to decompress on flush for better performance */
		db_render_override2 |= S_028010_DECOMPRESS_Z_ON_FLUSH(attachment->samples > 2);
	}

	if (has_stencil_attachment && vkds && vkds->stencilTestEnable) {
		db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
		db_depth_control |= S_028800_STENCILFUNC(vkds->front.compareOp);
		db_stencil_control |= S_02842C_STENCILFAIL(si_translate_stencil_op(vkds->front.failOp));
		db_stencil_control |= S_02842C_STENCILZPASS(si_translate_stencil_op(vkds->front.passOp));
		db_stencil_control |= S_02842C_STENCILZFAIL(si_translate_stencil_op(vkds->front.depthFailOp));

		db_depth_control |= S_028800_STENCILFUNC_BF(vkds->back.compareOp);
		db_stencil_control |= S_02842C_STENCILFAIL_BF(si_translate_stencil_op(vkds->back.failOp));
		db_stencil_control |= S_02842C_STENCILZPASS_BF(si_translate_stencil_op(vkds->back.passOp));
		db_stencil_control |= S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(vkds->back.depthFailOp));
	}

	if (attachment && extra) {
		db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(extra->db_depth_clear);
		db_render_control |= S_028000_STENCIL_CLEAR_ENABLE(extra->db_stencil_clear);

		db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->db_resummarize);
		db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->db_flush_depth_inplace);
		db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->db_flush_stencil_inplace);
		db_render_override2 |= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra->db_depth_disable_expclear);
		db_render_override2 |= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra->db_stencil_disable_expclear);
	}

	db_render_override |= S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
			      S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE);

	if (!pCreateInfo->pRasterizationState->depthClampEnable) {
		/* From VK_EXT_depth_range_unrestricted spec:
		 *
		 * "The behavior described in Primitive Clipping still applies.
		 *  If depth clamping is disabled the depth values are still
		 *  clipped to 0 ≤ zc ≤ wc before the viewport transform. If
		 *  depth clamping is enabled the above equation is ignored and
		 *  the depth values are instead clamped to the VkViewport
		 *  minDepth and maxDepth values, which in the case of this
		 *  extension can be outside of the 0.0 to 1.0 range."
		 */
		db_render_override |= S_02800C_DISABLE_VIEWPORT_CLAMP(1);
	}

	radeon_set_context_reg(ctx_cs, R_028800_DB_DEPTH_CONTROL, db_depth_control);
	radeon_set_context_reg(ctx_cs, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);

	radeon_set_context_reg(ctx_cs, R_028000_DB_RENDER_CONTROL, db_render_control);
	radeon_set_context_reg(ctx_cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
	radeon_set_context_reg(ctx_cs, R_028010_DB_RENDER_OVERRIDE2, db_render_override2);
}
static void
radv_pipeline_generate_blend_state(struct radeon_cmdbuf *ctx_cs,
				   struct radv_pipeline *pipeline,
				   const struct radv_blend_state *blend)
{
	radeon_set_context_reg_seq(ctx_cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(ctx_cs, blend->cb_blend_control, 8);

	radeon_set_context_reg(ctx_cs, R_028808_CB_COLOR_CONTROL, blend->cb_color_control);
	radeon_set_context_reg(ctx_cs, R_028B70_DB_ALPHA_TO_MASK, blend->db_alpha_to_mask);

	if (pipeline->device->physical_device->rad_info.has_rbplus) {
		radeon_set_context_reg_seq(ctx_cs, R_028760_SX_MRT0_BLEND_OPT, 8);
		radeon_emit_array(ctx_cs, blend->sx_mrt_blend_opt, 8);
	}

	radeon_set_context_reg(ctx_cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(ctx_cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(ctx_cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	pipeline->graphics.col_format = blend->spi_shader_col_format;
	pipeline->graphics.cb_target_mask = blend->cb_target_mask;
}
static const VkConservativeRasterizationModeEXT
radv_get_conservative_raster_mode(const VkPipelineRasterizationStateCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationConservativeStateCreateInfoEXT *conservative_raster =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT);

	if (!conservative_raster)
		return VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT;
	return conservative_raster->conservativeRasterizationMode;
}
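
/* Emit the fixed-function rasterizer registers: clip/cull controls, point
 * sprite overrides, polygon mode, depth bias enables, and the optional
 * VK_EXT_conservative_rasterization state.
 */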
static void
radv_pipeline_generate_raster_state(struct radeon_cmdbuf *ctx_cs,
				    struct radv_pipeline *pipeline,
				    const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationStateCreateInfo *vkraster = pCreateInfo->pRasterizationState;
	const VkConservativeRasterizationModeEXT mode =
		radv_get_conservative_raster_mode(vkraster);
	uint32_t pa_sc_conservative_rast = S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1);
	bool depth_clip_disable = vkraster->depthClampEnable;

	const VkPipelineRasterizationDepthClipStateCreateInfoEXT *depth_clip_state =
		vk_find_struct_const(vkraster->pNext, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
	if (depth_clip_state) {
		depth_clip_disable = !depth_clip_state->depthClipEnable;
	}

	radeon_set_context_reg(ctx_cs, R_028810_PA_CL_CLIP_CNTL,
			       S_028810_DX_CLIP_SPACE_DEF(1) | // vulkan uses DX conventions.
			       S_028810_ZCLIP_NEAR_DISABLE(depth_clip_disable ? 1 : 0) |
			       S_028810_ZCLIP_FAR_DISABLE(depth_clip_disable ? 1 : 0) |
			       S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
			       S_028810_DX_LINEAR_ATTR_CLIP_ENA(1));

	radeon_set_context_reg(ctx_cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       S_0286D4_FLAT_SHADE_ENA(1) |
			       S_0286D4_PNT_SPRITE_ENA(1) |
			       S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
			       S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
			       S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
			       S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
			       S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */

	radeon_set_context_reg(ctx_cs, R_028BE4_PA_SU_VTX_CNTL,
			       S_028BE4_PIX_CENTER(1) | // TODO verify
			       S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
			       S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));

	radeon_set_context_reg(ctx_cs, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_FACE(vkraster->frontFace) |
			       S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
			       S_028814_CULL_BACK(!!(vkraster->cullMode & VK_CULL_MODE_BACK_BIT)) |
			       S_028814_POLY_MODE(vkraster->polygonMode != VK_POLYGON_MODE_FILL) |
			       S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(vkraster->polygonMode)) |
			       S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(vkraster->polygonMode)) |
			       S_028814_POLY_OFFSET_FRONT_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
			       S_028814_POLY_OFFSET_BACK_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
			       S_028814_POLY_OFFSET_PARA_ENABLE(vkraster->depthBiasEnable ? 1 : 0));

	/* Conservative rasterization. */
	if (mode != VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT) {
		struct radv_multisample_state *ms = &pipeline->graphics.ms;

		ms->pa_sc_aa_config |= S_028BE0_AA_MASK_CENTROID_DTMN(1);
		ms->db_eqaa |= S_028804_ENABLE_POSTZ_OVERRASTERIZATION(1) |
			       S_028804_OVERRASTERIZATION_AMOUNT(4);

		pa_sc_conservative_rast = S_028C4C_PREZ_AA_MASK_ENABLE(1) |
					  S_028C4C_POSTZ_AA_MASK_ENABLE(1) |
					  S_028C4C_CENTROID_SAMPLE_OVERRIDE(1);

		if (mode == VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT) {
			pa_sc_conservative_rast |=
				S_028C4C_OVER_RAST_ENABLE(1) |
				S_028C4C_OVER_RAST_SAMPLE_SELECT(0) |
				S_028C4C_UNDER_RAST_ENABLE(0) |
				S_028C4C_UNDER_RAST_SAMPLE_SELECT(1) |
				S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(1);
		} else {
			assert(mode == VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT);
			pa_sc_conservative_rast |=
				S_028C4C_OVER_RAST_ENABLE(0) |
				S_028C4C_OVER_RAST_SAMPLE_SELECT(1) |
				S_028C4C_UNDER_RAST_ENABLE(1) |
				S_028C4C_UNDER_RAST_SAMPLE_SELECT(0) |
				S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(0);
		}
	}

	radeon_set_context_reg(ctx_cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
			       pa_sc_conservative_rast);
}
static void
radv_pipeline_generate_multisample_state(struct radeon_cmdbuf *ctx_cs,
					 struct radv_pipeline *pipeline)
{
	struct radv_multisample_state *ms = &pipeline->graphics.ms;

	radeon_set_context_reg_seq(ctx_cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(ctx_cs, R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(ctx_cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

	/* The exclusion bits can be set to improve rasterization efficiency
	 * if no sample lies on the pixel boundary (-8 sample offset). It's
	 * currently always TRUE because the driver doesn't support 16 samples.
	 */
	bool exclusion = pipeline->device->physical_device->rad_info.chip_class >= GFX7;
	radeon_set_context_reg(ctx_cs, R_02882C_PA_SU_PRIM_FILTER_CNTL,
			       S_02882C_XMAX_RIGHT_EXCLUSION(exclusion) |
			       S_02882C_YMAX_BOTTOM_EXCLUSION(exclusion));
}
static void
radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
				   struct radv_pipeline *pipeline)
{
	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	const struct radv_shader_variant *vs =
		pipeline->shaders[MESA_SHADER_TESS_EVAL] ?
		pipeline->shaders[MESA_SHADER_TESS_EVAL] :
		pipeline->shaders[MESA_SHADER_VERTEX];
	unsigned vgt_primitiveid_en = 0;
	uint32_t vgt_gs_mode = 0;

	if (radv_pipeline_has_ngg(pipeline))
		return;

	if (radv_pipeline_has_gs(pipeline)) {
		const struct radv_shader_variant *gs =
			pipeline->shaders[MESA_SHADER_GEOMETRY];

		vgt_gs_mode = ac_vgt_gs_mode(gs->info.gs.vertices_out,
					     pipeline->device->physical_device->rad_info.chip_class);
	} else if (outinfo->export_prim_id || vs->info.uses_prim_id) {
		vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
		vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
	}

	radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
	radeon_set_context_reg(ctx_cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
}
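
/* Program a shader that runs on the hardware VS stage: shader address and
 * resource words, plus the SPI/PA export configuration derived from the
 * shader's output info (position, params, clip/cull distances).
 */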
static void
radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *ctx_cs,
			     struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B124_MEM_BASE(va >> 40));
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);

	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	unsigned clip_dist_mask, cull_dist_mask, total_mask;
	clip_dist_mask = outinfo->clip_dist_mask;
	cull_dist_mask = outinfo->cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;
	bool misc_vec_ena = outinfo->writes_pointsize ||
		outinfo->writes_layer ||
		outinfo->writes_viewport_index;
	unsigned spi_vs_out_config, nparams;

	/* VS is required to export at least one param. */
	nparams = MAX2(outinfo->param_exports, 1);
	spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
		spi_vs_out_config |= S_0286C4_NO_PC_EXPORT(outinfo->param_exports == 0);
	}

	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG, spi_vs_out_config);

	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
			       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
			       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       cull_dist_mask << 8 |
			       clip_dist_mask);

	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
		radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF,
				       outinfo->writes_viewport_index);
}
static void
radv_pipeline_generate_hw_es(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);
}
static void
radv_pipeline_generate_hw_ls(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader,
			     const struct radv_tessellation_state *tess)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	uint32_t rsrc2 = shader->config.rsrc2;

	radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));

	rsrc2 |= S_00B52C_LDS_SIZE(tess->lds_size);
	if (pipeline->device->physical_device->rad_info.chip_class == GFX7 &&
	    pipeline->device->physical_device->rad_info.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);

	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, rsrc2);
}
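
/* Program a shader compiled for the GFX10 NGG (primitive shader) path. The
 * same helper is used whether the stage feeding NGG is VS, TES or GS.
 */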
static void
radv_pipeline_generate_hw_ngg(struct radeon_cmdbuf *ctx_cs,
			      struct radeon_cmdbuf *cs,
			      struct radv_pipeline *pipeline,
			      struct radv_shader_variant *shader,
			      const struct radv_ngg_state *ngg_state)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	gl_shader_stage es_type =
		radv_pipeline_has_tess(pipeline) ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
	struct radv_shader_variant *es =
		es_type == MESA_SHADER_TESS_EVAL ? pipeline->shaders[MESA_SHADER_TESS_EVAL] : pipeline->shaders[MESA_SHADER_VERTEX];

	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
	radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);

	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	unsigned clip_dist_mask, cull_dist_mask, total_mask;
	clip_dist_mask = outinfo->clip_dist_mask;
	cull_dist_mask = outinfo->cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;
	bool misc_vec_ena = outinfo->writes_pointsize ||
		outinfo->writes_layer ||
		outinfo->writes_viewport_index;
	bool es_enable_prim_id = outinfo->export_prim_id ||
				 (es && es->info.uses_prim_id);
	bool break_wave_at_eoi = false;
	unsigned ge_cntl;
	unsigned nparams;

	if (es_type == MESA_SHADER_TESS_EVAL) {
		struct radv_shader_variant *gs =
			pipeline->shaders[MESA_SHADER_GEOMETRY];

		if (es_enable_prim_id || (gs && gs->info.uses_prim_id))
			break_wave_at_eoi = true;
	}

	nparams = MAX2(outinfo->param_exports, 1);
	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(nparams - 1) |
			       S_0286C4_NO_PC_EXPORT(outinfo->param_exports == 0));

	radeon_set_context_reg(ctx_cs, R_028708_SPI_SHADER_IDX_FORMAT,
			       S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP));
	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
			       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
			       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       cull_dist_mask << 8 |
			       clip_dist_mask);

	radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN,
			       S_028A84_PRIMITIVEID_EN(es_enable_prim_id) |
			       S_028A84_NGG_DISABLE_PROVOK_REUSE(es_enable_prim_id));

	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       ngg_state->vgt_esgs_ring_itemsize);

	/* NGG specific registers. */
	struct radv_shader_variant *gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	uint32_t gs_num_invocations = gs ? gs->info.gs.invocations : 1;

	radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
			       S_028A44_ES_VERTS_PER_SUBGRP(ngg_state->hw_max_esverts) |
			       S_028A44_GS_PRIMS_PER_SUBGRP(ngg_state->max_gsprims) |
			       S_028A44_GS_INST_PRIMS_IN_SUBGRP(ngg_state->max_gsprims * gs_num_invocations));
	radeon_set_context_reg(ctx_cs, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
			       S_0287FC_MAX_VERTS_PER_SUBGROUP(ngg_state->max_out_verts));
	radeon_set_context_reg(ctx_cs, R_028B4C_GE_NGG_SUBGRP_CNTL,
			       S_028B4C_PRIM_AMP_FACTOR(ngg_state->prim_amp_factor) |
			       S_028B4C_THDS_PER_SUBGRP(0)); /* for fast launch */
	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(gs_num_invocations) |
			       S_028B90_ENABLE(gs_num_invocations > 1) |
			       S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(ngg_state->max_vert_out_per_gs_instance));

	/* User edge flags are set by the pos exports. If user edge flags are
	 * not used, we must use hw-generated edge flags and pass them via
	 * the prim export to prevent drawing lines on internal edges of
	 * decomposed primitives (such as quads) with polygon mode = lines.
	 *
	 * TODO: We should combine hw-generated edge flags with user edge
	 *       flags in the shader.
	 */
	radeon_set_context_reg(ctx_cs, R_028838_PA_CL_NGG_CNTL,
			       S_028838_INDEX_BUF_EDGE_FLAG_ENA(!radv_pipeline_has_tess(pipeline) &&
								!radv_pipeline_has_gs(pipeline)));

	ge_cntl = S_03096C_PRIM_GRP_SIZE(ngg_state->max_gsprims) |
		  S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts) |
		  S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);

	/* Bug workaround for a possible hang with non-tessellation cases.
	 * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0
	 *
	 * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
	 */
	if ((pipeline->device->physical_device->rad_info.family == CHIP_NAVI10 ||
	     pipeline->device->physical_device->rad_info.family == CHIP_NAVI12 ||
	     pipeline->device->physical_device->rad_info.family == CHIP_NAVI14) &&
	    !radv_pipeline_has_tess(pipeline) &&
	    ngg_state->hw_max_esverts != 256) {
		ge_cntl &= C_03096C_VERT_GRP_SIZE;

		if (ngg_state->hw_max_esverts > 5) {
			ge_cntl |= S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts - 5);
		}
	}

	radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL, ge_cntl);
}
static void
radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *shader,
			     const struct radv_tessellation_state *tess)
{
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
		unsigned hs_rsrc2 = shader->config.rsrc2;

		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(tess->lds_size);
		} else {
			hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(tess->lds_size);
		}

		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));
		} else {
			radeon_set_sh_reg_seq(cs, R_00B410_SPI_SHADER_PGM_LO_LS, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B414_MEM_BASE(va >> 40));
		}

		radeon_set_sh_reg_seq(cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
		radeon_emit(cs, shader->config.rsrc1);
		radeon_emit(cs, hs_rsrc2);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
		radeon_emit(cs, va >> 8);
		radeon_emit(cs, S_00B424_MEM_BASE(va >> 40));
		radeon_emit(cs, shader->config.rsrc1);
		radeon_emit(cs, shader->config.rsrc2);
	}
}
static void
radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
				     struct radeon_cmdbuf *cs,
				     struct radv_pipeline *pipeline,
				     const struct radv_tessellation_state *tess,
				     const struct radv_ngg_state *ngg)
{
	struct radv_shader_variant *vs;

	/* Skip shaders merged into HS/GS */
	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	if (!vs)
		return;

	if (vs->info.vs.as_ls)
		radv_pipeline_generate_hw_ls(cs, pipeline, vs, tess);
	else if (vs->info.vs.as_es)
		radv_pipeline_generate_hw_es(cs, pipeline, vs);
	else if (vs->info.is_ngg)
		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, vs, ngg);
	else
		radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, vs);
}
static void
radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *ctx_cs,
				    struct radeon_cmdbuf *cs,
				    struct radv_pipeline *pipeline,
				    const struct radv_tessellation_state *tess,
				    const struct radv_ngg_state *ngg)
{
	if (!radv_pipeline_has_tess(pipeline))
		return;

	struct radv_shader_variant *tes, *tcs;

	tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
	tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];

	if (tes) {
		if (tes->info.is_ngg) {
			radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, tes, ngg);
		} else if (tes->info.tes.as_es)
			radv_pipeline_generate_hw_es(cs, pipeline, tes);
		else
			radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, tes);
	}

	radv_pipeline_generate_hw_hs(cs, pipeline, tcs, tess);

	radeon_set_context_reg(ctx_cs, R_028B6C_VGT_TF_PARAM,
			       tess->tf_param);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7)
		radeon_set_context_reg_idx(ctx_cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   tess->ls_hs_config);
	else
		radeon_set_context_reg(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
				       tess->ls_hs_config);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 &&
	    !radv_pipeline_has_gs(pipeline) && !radv_pipeline_has_ngg(pipeline)) {
		radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
				       S_028A44_ES_VERTS_PER_SUBGRP(250) |
				       S_028A44_GS_PRIMS_PER_SUBGRP(126) |
				       S_028A44_GS_INST_PRIMS_IN_SUBGRP(126));
	}
}
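
/* Program a legacy (non-NGG) geometry shader: GSVS ring offsets and itemsizes
 * per stream, the GS instance count, and the ES/GS program registers, followed
 * by the VS copy shader that drains the GSVS ring.
 */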
static void
radv_pipeline_generate_hw_gs(struct radeon_cmdbuf *ctx_cs,
			     struct radeon_cmdbuf *cs,
			     struct radv_pipeline *pipeline,
			     struct radv_shader_variant *gs,
			     const struct radv_gs_state *gs_state)
{
	unsigned gs_max_out_vertices;
	uint8_t *num_components;
	uint8_t max_stream;
	unsigned offset;
	uint64_t va;

	gs_max_out_vertices = gs->info.gs.vertices_out;
	max_stream = gs->info.gs.max_stream;
	num_components = gs->info.gs.num_stream_output_components;

	offset = num_components[0] * gs_max_out_vertices;

	radeon_set_context_reg_seq(ctx_cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 1)
		offset += num_components[1] * gs_max_out_vertices;
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 2)
		offset += num_components[2] * gs_max_out_vertices;
	radeon_emit(ctx_cs, offset);
	if (max_stream >= 3)
		offset += num_components[3] * gs_max_out_vertices;
	radeon_set_context_reg(ctx_cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);

	radeon_set_context_reg_seq(ctx_cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
	radeon_emit(ctx_cs, num_components[0]);
	radeon_emit(ctx_cs, (max_stream >= 1) ? num_components[1] : 0);
	radeon_emit(ctx_cs, (max_stream >= 2) ? num_components[2] : 0);
	radeon_emit(ctx_cs, (max_stream >= 3) ? num_components[3] : 0);

	uint32_t gs_num_invocations = gs->info.gs.invocations;
	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
			       S_028B90_ENABLE(gs_num_invocations > 0));

	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       gs_state->vgt_esgs_ring_itemsize);

	va = radv_buffer_get_va(gs->bo) + gs->bo_offset;

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
			radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
		} else {
			radeon_set_sh_reg_seq(cs, R_00B210_SPI_SHADER_PGM_LO_ES, 2);
			radeon_emit(cs, va >> 8);
			radeon_emit(cs, S_00B214_MEM_BASE(va >> 40));
		}

		radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
		radeon_emit(cs, gs->config.rsrc1);
		radeon_emit(cs, gs->config.rsrc2 | S_00B22C_LDS_SIZE(gs_state->lds_size));

		radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL, gs_state->vgt_gs_onchip_cntl);
		radeon_set_context_reg(ctx_cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, gs_state->vgt_gs_max_prims_per_subgroup);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
		radeon_emit(cs, va >> 8);
		radeon_emit(cs, S_00B224_MEM_BASE(va >> 40));
		radeon_emit(cs, gs->config.rsrc1);
		radeon_emit(cs, gs->config.rsrc2);
	}

	radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, pipeline->gs_copy_shader);
}
static void
radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *ctx_cs,
				       struct radeon_cmdbuf *cs,
				       struct radv_pipeline *pipeline,
				       const struct radv_gs_state *gs_state,
				       const struct radv_ngg_state *ngg_state)
{
	struct radv_shader_variant *gs;

	gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
	if (!gs)
		return;

	if (gs->info.is_ngg)
		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, gs, ngg_state);
	else
		radv_pipeline_generate_hw_gs(ctx_cs, cs, pipeline, gs, gs_state);

	radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT,
			       gs->info.gs.vertices_out);
}
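
/* Convert a vertex-output export parameter offset into an SPI_PS_INPUT_CNTL
 * value; offsets beyond AC_EXP_PARAM_OFFSET_31 encode a DEFAULT_VAL constant.
 */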
static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade, bool float16)
{
	uint32_t ps_input_cntl;
	if (offset <= AC_EXP_PARAM_OFFSET_31) {
		ps_input_cntl = S_028644_OFFSET(offset);
		if (flat_shade)
			ps_input_cntl |= S_028644_FLAT_SHADE(1);
		if (float16) {
			ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
					 S_028644_ATTR0_VALID(1);
		}
	} else {
		/* The input is a DEFAULT_VAL constant. */
		assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
		       offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
		offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
		ps_input_cntl = S_028644_OFFSET(0x20) |
			S_028644_DEFAULT_VAL(offset);
	}
	return ps_input_cntl;
}
static void
radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
				 struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
	uint32_t ps_input_cntl[32];

	unsigned ps_offset = 0;

	if (ps->info.ps.prim_id_input) {
		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false);
			++ps_offset;
		}
	}

	if (ps->info.ps.layer_input ||
	    ps->info.needs_multiview_view_index) {
		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_LAYER];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED)
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false);
		else
			ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true, false);
		++ps_offset;
	}

	if (ps->info.ps.has_pcoord) {
		unsigned val;
		val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
		ps_input_cntl[ps_offset] = val;
		ps_offset++;
	}

	if (ps->info.ps.num_input_clips_culls) {
		unsigned vs_offset;

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false);
			++ps_offset;
		}

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1];
		if (vs_offset != AC_EXP_PARAM_UNDEFINED &&
		    ps->info.ps.num_input_clips_culls > 4) {
			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false);
			++ps_offset;
		}
	}

	for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.ps.input_mask; ++i) {
		unsigned vs_offset;
		bool flat_shade;
		bool float16;
		if (!(ps->info.ps.input_mask & (1u << i)))
			continue;

		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VAR0 + i];
		if (vs_offset == AC_EXP_PARAM_UNDEFINED) {
			ps_input_cntl[ps_offset] = S_028644_OFFSET(0x20);
			++ps_offset;
			continue;
		}

		flat_shade = !!(ps->info.ps.flat_shaded_mask & (1u << ps_offset));
		float16 = !!(ps->info.ps.float16_shaded_mask & (1u << ps_offset));

		ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, flat_shade, float16);
		++ps_offset;
	}

	if (ps_offset) {
		radeon_set_context_reg_seq(ctx_cs, R_028644_SPI_PS_INPUT_CNTL_0, ps_offset);
		for (unsigned i = 0; i < ps_offset; i++) {
			radeon_emit(ctx_cs, ps_input_cntl[i]);
		}
	}
}
static uint32_t
radv_compute_db_shader_control(const struct radv_device *device,
			       const struct radv_pipeline *pipeline,
			       const struct radv_shader_variant *ps)
{
	unsigned z_order;
	if (ps->info.ps.early_fragment_test || !ps->info.ps.writes_memory)
		z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
	else
		z_order = V_02880C_LATE_Z;

	bool disable_rbplus = device->physical_device->rad_info.has_rbplus &&
			      !device->physical_device->rad_info.rbplus_allowed;

	/* It shouldn't be needed to export gl_SampleMask when MSAA is disabled
	 * but this appears to break Project Cars (DXVK). See
	 * https://bugs.freedesktop.org/show_bug.cgi?id=109401
	 */
	bool mask_export_enable = ps->info.ps.writes_sample_mask;

	return  S_02880C_Z_EXPORT_ENABLE(ps->info.ps.writes_z) |
		S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.ps.writes_stencil) |
		S_02880C_KILL_ENABLE(!!ps->info.ps.can_discard) |
		S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
		S_02880C_Z_ORDER(z_order) |
		S_02880C_DEPTH_BEFORE_SHADER(ps->info.ps.early_fragment_test) |
		S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(ps->info.ps.post_depth_coverage) |
		S_02880C_EXEC_ON_HIER_FAIL(ps->info.ps.writes_memory) |
		S_02880C_EXEC_ON_NOOP(ps->info.ps.writes_memory) |
		S_02880C_DUAL_QUAD_DISABLE(disable_rbplus);
}
static void
radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *ctx_cs,
				       struct radeon_cmdbuf *cs,
				       struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *ps;
	uint64_t va;
	assert (pipeline->shaders[MESA_SHADER_FRAGMENT]);

	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	va = radv_buffer_get_va(ps->bo) + ps->bo_offset;

	radeon_set_sh_reg_seq(cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B024_MEM_BASE(va >> 40));
	radeon_emit(cs, ps->config.rsrc1);
	radeon_emit(cs, ps->config.rsrc2);

	radeon_set_context_reg(ctx_cs, R_02880C_DB_SHADER_CONTROL,
			       radv_compute_db_shader_control(pipeline->device,
							      pipeline, ps));

	radeon_set_context_reg(ctx_cs, R_0286CC_SPI_PS_INPUT_ENA,
			       ps->config.spi_ps_input_ena);

	radeon_set_context_reg(ctx_cs, R_0286D0_SPI_PS_INPUT_ADDR,
			       ps->config.spi_ps_input_addr);

	radeon_set_context_reg(ctx_cs, R_0286D8_SPI_PS_IN_CONTROL,
			       S_0286D8_NUM_INTERP(ps->info.ps.num_interp) |
			       S_0286D8_PS_W32_EN(ps->info.wave_size == 32));

	radeon_set_context_reg(ctx_cs, R_0286E0_SPI_BARYC_CNTL, pipeline->graphics.spi_baryc_cntl);

	radeon_set_context_reg(ctx_cs, R_028710_SPI_SHADER_Z_FORMAT,
			       ac_get_spi_shader_z_format(ps->info.ps.writes_z,
							  ps->info.ps.writes_stencil,
							  ps->info.ps.writes_sample_mask));

	if (pipeline->device->dfsm_allowed) {
		/* optimise this? */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}
}
static void
radv_pipeline_generate_vgt_vertex_reuse(struct radeon_cmdbuf *ctx_cs,
					struct radv_pipeline *pipeline)
{
	if (pipeline->device->physical_device->rad_info.family < CHIP_POLARIS10 ||
	    pipeline->device->physical_device->rad_info.chip_class >= GFX10)
		return;

	unsigned vtx_reuse_depth = 30;
	if (radv_pipeline_has_tess(pipeline) &&
	    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD) {
		vtx_reuse_depth = 14;
	}
	radeon_set_context_reg(ctx_cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
			       S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
}
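
/* Compute VGT_SHADER_STAGES_EN from which hardware stages are in use and
 * whether the pipeline needs tessellation, a GS copy shader, NGG, or Wave32
 * shader sizes on GFX10.
 */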
static uint32_t
radv_compute_vgt_shader_stages_en(const struct radv_pipeline *pipeline)
{
	uint32_t stages = 0;
	if (radv_pipeline_has_tess(pipeline)) {
		stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
			S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);

		if (radv_pipeline_has_gs(pipeline))
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
				  S_028B54_GS_EN(1);
		else if (radv_pipeline_has_ngg(pipeline))
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
		else
			stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
	} else if (radv_pipeline_has_gs(pipeline)) {
		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
			  S_028B54_GS_EN(1);
	} else if (radv_pipeline_has_ngg(pipeline)) {
		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
	}

	if (radv_pipeline_has_ngg(pipeline)) {
		stages |= S_028B54_PRIMGEN_EN(1);
	} else if (radv_pipeline_has_gs(pipeline)) {
		stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
	}

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
		stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
		uint8_t hs_size = 64, gs_size = 64, vs_size = 64;

		if (radv_pipeline_has_tess(pipeline))
			hs_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.wave_size;

		if (pipeline->shaders[MESA_SHADER_GEOMETRY]) {
			vs_size = gs_size = pipeline->shaders[MESA_SHADER_GEOMETRY]->info.wave_size;
			if (pipeline->gs_copy_shader)
				vs_size = pipeline->gs_copy_shader->info.wave_size;
		} else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
			vs_size = pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.wave_size;
		else if (pipeline->shaders[MESA_SHADER_VERTEX])
			vs_size = pipeline->shaders[MESA_SHADER_VERTEX]->info.wave_size;

		if (radv_pipeline_has_ngg(pipeline))
			gs_size = vs_size;

		/* legacy GS only supports Wave64 */
		stages |= S_028B54_HS_W32_EN(hs_size == 32 ? 1 : 0) |
			  S_028B54_GS_W32_EN(gs_size == 32 ? 1 : 0) |
			  S_028B54_VS_W32_EN(vs_size == 32 ? 1 : 0);
	}

	return stages;
}
static uint32_t
radv_compute_cliprect_rule(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);

	if (!discard_rectangle_info)
		return 0xffff;

	uint32_t mask = 0;

	for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
		/* Interpret i as a bitmask, and then set the bit in the mask if
		 * that combination of rectangles in which the pixel is contained
		 * should pass the cliprect test. */
		unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);

		if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
		    !relevant_subset)
			continue;

		if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
		    relevant_subset)
			continue;

		mask |= 1u << i;
	}

	return mask;
}
static void
gfx10_pipeline_generate_ge_cntl(struct radeon_cmdbuf *ctx_cs,
				struct radv_pipeline *pipeline,
				const struct radv_tessellation_state *tess,
				const struct radv_gs_state *gs_state)
{
	bool break_wave_at_eoi = false;
	unsigned primgroup_size;
	unsigned vertgroup_size;

	if (radv_pipeline_has_tess(pipeline)) {
		primgroup_size = tess->num_patches; /* must be a multiple of NUM_PATCHES */
		vertgroup_size = 0;
	} else if (radv_pipeline_has_gs(pipeline)) {
		unsigned vgt_gs_onchip_cntl = gs_state->vgt_gs_onchip_cntl;
		primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
		vertgroup_size = G_028A44_ES_VERTS_PER_SUBGRP(vgt_gs_onchip_cntl);
	} else {
		primgroup_size = 128; /* recommended without a GS and tess */
		vertgroup_size = 0;
	}

	if (radv_pipeline_has_tess(pipeline)) {
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.uses_prim_id ||
		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.uses_prim_id)
			break_wave_at_eoi = true;
	}

	radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL,
			       S_03096C_PRIM_GRP_SIZE(primgroup_size) |
			       S_03096C_VERT_GRP_SIZE(vertgroup_size) |
			       S_03096C_PACKET_TO_ONE_PA(0) /* line stipple */ |
			       S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi));
}
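
/* Build the pipeline's PM4 command streams: ctx_cs holds the context registers
 * (hashed so identical context state can be detected), cs holds SH registers
 * and packets.
 */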
static void
radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
			   const VkGraphicsPipelineCreateInfo *pCreateInfo,
			   const struct radv_graphics_pipeline_create_info *extra,
			   const struct radv_blend_state *blend,
			   const struct radv_tessellation_state *tess,
			   const struct radv_gs_state *gs,
			   const struct radv_ngg_state *ngg,
			   unsigned prim, unsigned gs_out)
{
	struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
	struct radeon_cmdbuf *cs = &pipeline->cs;

	cs->max_dw = 64;
	ctx_cs->max_dw = 256;
	cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw));
	ctx_cs->buf = cs->buf + cs->max_dw;

	radv_pipeline_generate_depth_stencil_state(ctx_cs, pipeline, pCreateInfo, extra);
	radv_pipeline_generate_blend_state(ctx_cs, pipeline, blend);
	radv_pipeline_generate_raster_state(ctx_cs, pipeline, pCreateInfo);
	radv_pipeline_generate_multisample_state(ctx_cs, pipeline);
	radv_pipeline_generate_vgt_gs_mode(ctx_cs, pipeline);
	radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline, tess, ngg);
	radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline, tess, ngg);
	radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline, gs, ngg);
	radv_pipeline_generate_fragment_shader(ctx_cs, cs, pipeline);
	radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
	radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
	radv_pipeline_generate_binning_state(ctx_cs, pipeline, pCreateInfo);

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 && !radv_pipeline_has_ngg(pipeline))
		gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline, tess, gs);

	radeon_set_context_reg(ctx_cs, R_0286E8_SPI_TMPRING_SIZE,
			       S_0286E8_WAVES(pipeline->max_waves) |
			       S_0286E8_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));

	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
		radeon_set_uconfig_reg_idx(pipeline->device->physical_device,
					   cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
	} else {
		radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
	}
	radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);

	radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, radv_compute_cliprect_rule(pCreateInfo));

	pipeline->ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);

	assert(ctx_cs->cdw <= ctx_cs->max_dw);
	assert(cs->cdw <= cs->max_dw);
}
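
/* Precompute the IA_MULTI_VGT_PARAM fields together with the workarounds that
 * depend on the primitive type, tessellation/GS usage and the GPU family.
 */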
static struct radv_ia_multi_vgt_param_helpers
radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
					const struct radv_tessellation_state *tess,
					uint32_t prim)
{
	struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param = {0};
	const struct radv_device *device = pipeline->device;

	if (radv_pipeline_has_tess(pipeline))
		ia_multi_vgt_param.primgroup_size = tess->num_patches;
	else if (radv_pipeline_has_gs(pipeline))
		ia_multi_vgt_param.primgroup_size = 64;
	else
		ia_multi_vgt_param.primgroup_size = 128; /* recommended without a GS */

	/* GS requirement. */
	ia_multi_vgt_param.partial_es_wave = false;
	if (radv_pipeline_has_gs(pipeline) && device->physical_device->rad_info.chip_class <= GFX8)
		if (SI_GS_PER_ES / ia_multi_vgt_param.primgroup_size >= pipeline->device->gs_table_depth - 3)
			ia_multi_vgt_param.partial_es_wave = true;

	ia_multi_vgt_param.wd_switch_on_eop = false;
	if (device->physical_device->rad_info.chip_class >= GFX7) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (device->physical_device->rad_info.max_se < 4 ||
		    prim == V_008958_DI_PT_POLYGON ||
		    prim == V_008958_DI_PT_LINELOOP ||
		    prim == V_008958_DI_PT_TRIFAN ||
		    prim == V_008958_DI_PT_TRISTRIP_ADJ ||
		    (pipeline->graphics.prim_restart_enable &&
		     (device->physical_device->rad_info.family < CHIP_POLARIS10 ||
		      (prim != V_008958_DI_PT_POINTLIST &&
		       prim != V_008958_DI_PT_LINESTRIP))))
			ia_multi_vgt_param.wd_switch_on_eop = true;
	}

	ia_multi_vgt_param.ia_switch_on_eoi = false;
	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.prim_id_input)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_gs(pipeline) &&
	    pipeline->shaders[MESA_SHADER_GEOMETRY]->info.uses_prim_id)
		ia_multi_vgt_param.ia_switch_on_eoi = true;
	if (radv_pipeline_has_tess(pipeline)) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.uses_prim_id ||
		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.uses_prim_id)
			ia_multi_vgt_param.ia_switch_on_eoi = true;
	}

	ia_multi_vgt_param.partial_vs_wave = false;
	if (radv_pipeline_has_tess(pipeline)) {
		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((device->physical_device->rad_info.family == CHIP_TAHITI ||
		     device->physical_device->rad_info.family == CHIP_PITCAIRN ||
		     device->physical_device->rad_info.family == CHIP_BONAIRE) &&
		    radv_pipeline_has_gs(pipeline))
			ia_multi_vgt_param.partial_vs_wave = true;
		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (device->physical_device->rad_info.has_distributed_tess) {
			if (radv_pipeline_has_gs(pipeline)) {
				if (device->physical_device->rad_info.chip_class <= GFX8)
					ia_multi_vgt_param.partial_es_wave = true;
			} else {
				ia_multi_vgt_param.partial_vs_wave = true;
			}
		}
	}

	/* Workaround for a VGT hang when strip primitive types are used with
	 * primitive restart.
	 */
	if (pipeline->graphics.prim_restart_enable &&
	    (prim == V_008958_DI_PT_LINESTRIP ||
	     prim == V_008958_DI_PT_TRISTRIP ||
	     prim == V_008958_DI_PT_LINESTRIP_ADJ ||
	     prim == V_008958_DI_PT_TRISTRIP_ADJ)) {
		ia_multi_vgt_param.partial_vs_wave = true;
	}

	if (radv_pipeline_has_gs(pipeline)) {
		/* On these chips there is the possibility of a hang if the
		 * pipeline uses a GS and partial_vs_wave is not set.
		 *
		 * This mostly does not hit 4-SE chips, as those typically set
		 * ia_switch_on_eoi and then partial_vs_wave is set for pipelines
		 * with GS due to another workaround.
		 *
		 * Reproducer: https://bugs.freedesktop.org/show_bug.cgi?id=109242
		 */
		if (device->physical_device->rad_info.family == CHIP_TONGA ||
		    device->physical_device->rad_info.family == CHIP_FIJI ||
		    device->physical_device->rad_info.family == CHIP_POLARIS10 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS11 ||
		    device->physical_device->rad_info.family == CHIP_POLARIS12 ||
		    device->physical_device->rad_info.family == CHIP_VEGAM) {
			ia_multi_vgt_param.partial_vs_wave = true;
		}
	}

	ia_multi_vgt_param.base =
		S_028AA8_PRIMGROUP_SIZE(ia_multi_vgt_param.primgroup_size - 1) |
		/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
		S_028AA8_MAX_PRIMGRP_IN_WAVE(device->physical_device->rad_info.chip_class == GFX8 ? 2 : 0) |
		S_030960_EN_INST_OPT_BASIC(device->physical_device->rad_info.chip_class >= GFX9) |
		S_030960_EN_INST_OPT_ADV(device->physical_device->rad_info.chip_class >= GFX9);

	return ia_multi_vgt_param;
}
static void
radv_compute_vertex_input_state(struct radv_pipeline *pipeline,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineVertexInputStateCreateInfo *vi_info =
		pCreateInfo->pVertexInputState;
	struct radv_vertex_elements_info *velems = &pipeline->vertex_elements;

	for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
		const VkVertexInputAttributeDescription *desc =
			&vi_info->pVertexAttributeDescriptions[i];
		unsigned loc = desc->location;
		const struct vk_format_description *format_desc;

		format_desc = vk_format_description(desc->format);

		velems->format_size[loc] = format_desc->block.bits / 8;
	}

	for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *desc =
			&vi_info->pVertexBindingDescriptions[i];

		pipeline->binding_stride[desc->binding] = desc->stride;
		pipeline->num_vertex_bindings =
			MAX2(pipeline->num_vertex_bindings, desc->binding + 1);
	}
}
static struct radv_shader_variant *
radv_pipeline_get_streamout_shader(struct radv_pipeline *pipeline)
{
	int i;

	for (i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
		struct radv_shader_variant *shader =
			radv_get_shader(pipeline, i);

		if (shader && shader->info.so.num_outputs > 0)
			return shader;
	}

	return NULL;
}
static VkResult
radv_pipeline_init(struct radv_pipeline *pipeline,
		   struct radv_device *device,
		   struct radv_pipeline_cache *cache,
		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   const struct radv_graphics_pipeline_create_info *extra)
{
	VkResult result;
	bool has_view_index = false;

	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	if (subpass->view_mask)
		has_view_index = true;

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	struct radv_blend_state blend = radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;

	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
		pStages[stage] = &pCreateInfo->pStages[i];
		if(creation_feedback)
			stage_feedbacks[stage] = &creation_feedback->pPipelineStageCreationFeedbacks[i];
	}

	struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend, has_view_index);
	radv_create_shaders(pipeline, device, cache, &key, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);

	pipeline->graphics.spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	radv_pipeline_init_multisample_state(pipeline, &blend, pCreateInfo);
	uint32_t gs_out;
	uint32_t prim = si_translate_prim(pCreateInfo->pInputAssemblyState->topology);

	pipeline->graphics.can_use_guardband = radv_prim_can_use_guardband(pCreateInfo->pInputAssemblyState->topology);

	if (radv_pipeline_has_gs(pipeline)) {
		gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
		pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	} else if (radv_pipeline_has_tess(pipeline)) {
		if (pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.point_mode)
			gs_out = V_028A6C_OUTPRIM_TYPE_POINTLIST;
		else
			gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.primitive_mode);
		pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	} else {
		gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
	}
	if (extra && extra->use_rectlist) {
		prim = V_008958_DI_PT_RECTLIST;
		gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
		pipeline->graphics.can_use_guardband = true;
		if (radv_pipeline_has_ngg(pipeline))
			gs_out = V_028A6C_VGT_OUT_RECT_V0;
	}
	pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;
	/* prim vertex count will need TESS changes */
	pipeline->graphics.prim_vertex_count = prim_size_table[prim];

	radv_pipeline_init_dynamic_state(pipeline, pCreateInfo);

	/* Ensure that some export memory is always allocated, for two reasons:
	 *
	 * 1) Correctness: The hardware ignores the EXEC mask if no export
	 *    memory is allocated, so KILL and alpha test do not work correctly
	 *
	 * 2) Performance: Every shader needs at least a NULL export, even when
	 *    it writes no color/depth output. The NULL export instruction
	 *    stalls without this setting.
	 *
	 * Don't add this to CB_SHADER_MASK.
	 *
	 * GFX10 supports pixel shaders without exports by setting both the
	 * color and Z formats to SPI_SHADER_ZERO. The hw will skip export
	 * instructions if any are present.
	 */
	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	if ((pipeline->device->physical_device->rad_info.chip_class <= GFX9 ||
	     ps->info.ps.can_discard) &&
	    !blend.spi_shader_col_format) {
		if (!ps->info.ps.writes_z &&
		    !ps->info.ps.writes_stencil &&
		    !ps->info.ps.writes_sample_mask)
			blend.spi_shader_col_format = V_028714_SPI_SHADER_32_R;
	}

	for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
		if (pipeline->shaders[i]) {
			pipeline->need_indirect_descriptor_sets |= pipeline->shaders[i]->info.need_indirect_descriptor_sets;
		}
	}

	struct radv_ngg_state ngg = {0};
	struct radv_gs_state gs = {0};

	if (radv_pipeline_has_ngg(pipeline)) {
		ngg = calculate_ngg_info(pCreateInfo, pipeline);
	} else if (radv_pipeline_has_gs(pipeline)) {
		gs = calculate_gs_info(pCreateInfo, pipeline);
		calculate_gs_ring_sizes(pipeline, &gs);
	}

	struct radv_tessellation_state tess = {0};
	if (radv_pipeline_has_tess(pipeline)) {
		if (prim == V_008958_DI_PT_PATCH) {
			pipeline->graphics.prim_vertex_count.min = pCreateInfo->pTessellationState->patchControlPoints;
			pipeline->graphics.prim_vertex_count.incr = 1;
		}
		tess = calculate_tess_state(pipeline, pCreateInfo);
	}

	pipeline->graphics.ia_multi_vgt_param = radv_compute_ia_multi_vgt_param_helpers(pipeline, &tess, prim);

	radv_compute_vertex_input_state(pipeline, pCreateInfo);

	for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
		pipeline->user_data_0[i] = radv_pipeline_stage_to_user_data_0(pipeline, i, device->physical_device->rad_info.chip_class);

	struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX,
							       AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	if (loc->sgpr_idx != -1) {
		pipeline->graphics.vtx_base_sgpr = pipeline->user_data_0[MESA_SHADER_VERTEX];
		pipeline->graphics.vtx_base_sgpr += loc->sgpr_idx * 4;
		if (radv_get_shader(pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id)
			pipeline->graphics.vtx_emit_num = 3;
		else
			pipeline->graphics.vtx_emit_num = 2;
	}

	/* Find the last vertex shader stage that eventually uses streamout. */
	pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);

	result = radv_pipeline_scratch_init(device, pipeline);
	radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, &gs, &ngg, prim, gs_out);

	return result;
}
VkResult
radv_graphics_pipeline_create(
	VkDevice _device,
	VkPipelineCache _cache,
	const VkGraphicsPipelineCreateInfo *pCreateInfo,
	const struct radv_graphics_pipeline_create_info *extra,
	const VkAllocationCallbacks *pAllocator,
	VkPipeline *pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	result = radv_pipeline_init(pipeline, device, cache,
				    pCreateInfo, extra);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}
VkResult radv_CreateGraphicsPipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkGraphicsPipelineCreateInfo*         pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_graphics_pipeline_create(_device,
						  pipelineCache,
						  &pCreateInfos[i],
						  NULL, pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}
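
/* Build the PM4 stream for a compute pipeline: program address, resource
 * words, scratch ring size, resource limits and the threadgroup dimensions.
 */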
static void
radv_compute_generate_pm4(struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *compute_shader;
	struct radv_device *device = pipeline->device;
	unsigned threads_per_threadgroup;
	unsigned threadgroups_per_cu = 1;
	unsigned waves_per_threadgroup;
	unsigned max_waves_per_sh = 0;
	uint64_t va;

	/* 20 dwords are enough for the register writes emitted below; this is
	 * checked by the assert at the end of this function. */
	pipeline->cs.buf = malloc(20 * 4);
	pipeline->cs.max_dw = 20;

	compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;

	/* Shader code address, 256-byte aligned, split into low/high registers. */
	radeon_set_sh_reg_seq(&pipeline->cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(&pipeline->cs, va >> 8);
	radeon_emit(&pipeline->cs, S_00B834_DATA(va >> 40));

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(&pipeline->cs, compute_shader->config.rsrc1);
	radeon_emit(&pipeline->cs, compute_shader->config.rsrc2);

	radeon_set_sh_reg(&pipeline->cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(pipeline->max_waves) |
			  S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));

	/* Calculate best compute resource limits. */
	threads_per_threadgroup = compute_shader->info.cs.block_size[0] *
				  compute_shader->info.cs.block_size[1] *
				  compute_shader->info.cs.block_size[2];
	waves_per_threadgroup = DIV_ROUND_UP(threads_per_threadgroup,
					     device->physical_device->cs_wave_size);

	if (device->physical_device->rad_info.chip_class >= GFX10 &&
	    waves_per_threadgroup == 1)
		threadgroups_per_cu = 2;

	radeon_set_sh_reg(&pipeline->cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  ac_get_compute_resource_limits(&device->physical_device->rad_info,
							 waves_per_threadgroup,
							 max_waves_per_sh,
							 threadgroups_per_cu));
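	/* Worked example for the calculation above, assuming a wave size of 64: an
	 * 8x8x1 workgroup is 64 threads, i.e. DIV_ROUND_UP(64, 64) = 1 wave per
	 * threadgroup, so on GFX10 two threadgroups are allowed per CU. */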

	radeon_set_sh_reg_seq(&pipeline->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
	radeon_emit(&pipeline->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));

	assert(pipeline->cs.cdw <= pipeline->cs.max_dw);
}

static VkResult radv_compute_pipeline_create(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkComputePipelineCreateInfo*          pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;
	if (creation_feedback)
		stage_feedbacks[MESA_SHADER_COMPUTE] = &creation_feedback->pPipelineStageCreationFeedbacks[0];

	pStages[MESA_SHADER_COMPUTE] = &pCreateInfo->stage;
	radv_create_shaders(pipeline, device, cache, &(struct radv_pipeline_key) {0}, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);

	pipeline->user_data_0[MESA_SHADER_COMPUTE] = radv_pipeline_stage_to_user_data_0(pipeline, MESA_SHADER_COMPUTE, device->physical_device->rad_info.chip_class);
	pipeline->need_indirect_descriptor_sets |= pipeline->shaders[MESA_SHADER_COMPUTE]->info.need_indirect_descriptor_sets;
	result = radv_pipeline_scratch_init(device, pipeline);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	radv_compute_generate_pm4(pipeline);

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}

VkResult radv_CreateComputePipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkComputePipelineCreateInfo*          pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_compute_pipeline_create(_device, pipelineCache,
						 &pCreateInfos[i],
						 pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}

static uint32_t radv_get_executable_count(const struct radv_pipeline *pipeline)
{
	uint32_t ret = 0;
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i])
			ret += i == MESA_SHADER_GEOMETRY ? 2u : 1u;
	}
	return ret;
}
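
/* Executables are reported in shader-stage order. A geometry stage contributes
 * two entries because the separately compiled GS copy shader is exposed as its
 * own executable; see the "GS Copy Shader" entry below. */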
static struct radv_shader_variant *
radv_get_shader_from_executable_index(const struct radv_pipeline *pipeline, int index, gl_shader_stage *stage)
{
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!pipeline->shaders[i])
			continue;
		if (!index) {
			*stage = i;
			return pipeline->shaders[i];
		}

		--index;

		if (i == MESA_SHADER_GEOMETRY) {
			if (!index) {
				*stage = i;
				return pipeline->gs_copy_shader;
			}
			--index;
		}
	}

	*stage = -1;
	return NULL;
}

/* Basically strlcpy (which does not exist on Linux) specialized for
 * descriptions. */
static void desc_copy(char *desc, const char *src) {
	int len = strlen(src);
	assert(len < VK_MAX_DESCRIPTION_SIZE);
	memcpy(desc, src, len);
	memset(desc + len, 0, VK_MAX_DESCRIPTION_SIZE - len);
}
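
/* Example: desc_copy(pProperties[idx].name, "Vertex Shader") copies the string
 * and zero-pads the rest of the fixed-size array, so the name/description
 * fields used by VK_KHR_pipeline_executable_properties below are always
 * null-terminated. */
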
VkResult radv_GetPipelineExecutablePropertiesKHR(
	VkDevice                                    _device,
	const VkPipelineInfoKHR*                    pPipelineInfo,
	uint32_t*                                   pExecutableCount,
	VkPipelineExecutablePropertiesKHR*          pProperties)
{
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelineInfo->pipeline);
	const uint32_t total_count = radv_get_executable_count(pipeline);

	if (!pProperties) {
		*pExecutableCount = total_count;
		return VK_SUCCESS;
	}

	const uint32_t count = MIN2(total_count, *pExecutableCount);
	for (unsigned i = 0, executable_idx = 0;
	     i < MESA_SHADER_STAGES && executable_idx < count; ++i) {
		if (!pipeline->shaders[i])
			continue;
		pProperties[executable_idx].stages = mesa_to_vk_shader_stage(i);
		const char *name = NULL;
		const char *description = NULL;

		switch (i) {
		case MESA_SHADER_VERTEX:
			name = "Vertex Shader";
			description = "Vulkan Vertex Shader";
			break;
		case MESA_SHADER_TESS_CTRL:
			if (!pipeline->shaders[MESA_SHADER_VERTEX]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_VERTEX_BIT;
				name = "Vertex + Tessellation Control Shaders";
				description = "Combined Vulkan Vertex and Tessellation Control Shaders";
			} else {
				name = "Tessellation Control Shader";
				description = "Vulkan Tessellation Control Shader";
			}
			break;
		case MESA_SHADER_TESS_EVAL:
			name = "Tessellation Evaluation Shader";
			description = "Vulkan Tessellation Evaluation Shader";
			break;
		case MESA_SHADER_GEOMETRY:
			if (radv_pipeline_has_tess(pipeline) && !pipeline->shaders[MESA_SHADER_TESS_EVAL]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
				name = "Tessellation Evaluation + Geometry Shaders";
				description = "Combined Vulkan Tessellation Evaluation and Geometry Shaders";
			} else if (!radv_pipeline_has_tess(pipeline) && !pipeline->shaders[MESA_SHADER_VERTEX]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_VERTEX_BIT;
				name = "Vertex + Geometry Shader";
				description = "Combined Vulkan Vertex and Geometry Shaders";
			} else {
				name = "Geometry Shader";
				description = "Vulkan Geometry Shader";
			}
			break;
		case MESA_SHADER_FRAGMENT:
			name = "Fragment Shader";
			description = "Vulkan Fragment Shader";
			break;
		case MESA_SHADER_COMPUTE:
			name = "Compute Shader";
			description = "Vulkan Compute Shader";
			break;
		}

		desc_copy(pProperties[executable_idx].name, name);
		desc_copy(pProperties[executable_idx].description, description);
		++executable_idx;

		if (i == MESA_SHADER_GEOMETRY) {
			assert(pipeline->gs_copy_shader);
			if (executable_idx >= count)
				break;

			pProperties[executable_idx].stages = VK_SHADER_STAGE_GEOMETRY_BIT;
			desc_copy(pProperties[executable_idx].name, "GS Copy Shader");
			desc_copy(pProperties[executable_idx].description,
				  "Extra shader stage that loads the GS output ringbuffer into the rasterizer");
			++executable_idx;
		}
	}

	for (unsigned i = 0; i < count; ++i)
		pProperties[i].subgroupSize = 64;

	VkResult result = *pExecutableCount < total_count ? VK_INCOMPLETE : VK_SUCCESS;
	*pExecutableCount = count;
	return result;
}

VkResult radv_GetPipelineExecutableStatisticsKHR(
	VkDevice                                    _device,
	const VkPipelineExecutableInfoKHR*          pExecutableInfo,
	uint32_t*                                   pStatisticCount,
	VkPipelineExecutableStatisticKHR*           pStatistics)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
	gl_shader_stage stage;
	struct radv_shader_variant *shader = radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);

	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	/* config.lds_size is in allocation-granularity units: 512 bytes on GFX7+,
	 * 256 bytes on older chips. */
	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
	unsigned max_waves = radv_get_max_waves(device, shader, stage);

	VkPipelineExecutableStatisticKHR *s = pStatistics;
	VkPipelineExecutableStatisticKHR *end = s + (pStatistics ? *pStatisticCount : 0);
	VkResult result = VK_SUCCESS;

	if (s < end) {
		desc_copy(s->name, "SGPRs");
		desc_copy(s->description, "Number of SGPR registers allocated per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.num_sgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "VGPRs");
		desc_copy(s->description, "Number of VGPR registers allocated per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.num_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Spilled SGPRs");
		desc_copy(s->description, "Number of SGPR registers spilled per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.spilled_sgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Spilled VGPRs");
		desc_copy(s->description, "Number of VGPR registers spilled per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.spilled_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "PrivMem VGPRs");
		desc_copy(s->description, "Number of VGPRs stored in private memory per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->info.private_mem_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Code size");
		desc_copy(s->description, "Code size in bytes");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->exec_size;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "LDS size");
		desc_copy(s->description, "LDS size in bytes per workgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.lds_size * lds_increment;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Scratch size");
		desc_copy(s->description, "Private memory in bytes per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.scratch_bytes_per_wave;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Subgroups per SIMD");
		desc_copy(s->description, "The maximum number of subgroups in flight on a SIMD unit");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = max_waves;
	}
	++s;

	if (!pStatistics)
		*pStatisticCount = s - pStatistics;
	else if (s > end) {
		*pStatisticCount = end - pStatistics;
		result = VK_INCOMPLETE;
	} else {
		*pStatisticCount = s - pStatistics;
	}

	return result;
}

static VkResult radv_copy_representation(void *data, size_t *data_size, const char *src)
{
	size_t total_size = strlen(src) + 1;

	if (!data) {
		*data_size = total_size;
		return VK_SUCCESS;
	}

	size_t size = MIN2(total_size, *data_size);

	memcpy(data, src, size);
	if (size)
		*((char*)data + size - 1) = 0;
	return size < total_size ? VK_INCOMPLETE : VK_SUCCESS;
}
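
/* Callers use the usual two-call pattern: pass data == NULL to query the
 * required size in *data_size, then call again with a buffer; a too-small
 * buffer yields a truncated, null-terminated string and VK_INCOMPLETE. */
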
VkResult radv_GetPipelineExecutableInternalRepresentationsKHR(
	VkDevice                                    _device,
	const VkPipelineExecutableInfoKHR*          pExecutableInfo,
	uint32_t*                                   pInternalRepresentationCount,
	VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
{
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
	gl_shader_stage stage;
	struct radv_shader_variant *shader = radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);

	VkPipelineExecutableInternalRepresentationKHR *p = pInternalRepresentations;
	VkPipelineExecutableInternalRepresentationKHR *end = p + (pInternalRepresentations ? *pInternalRepresentationCount : 0);
	VkResult result = VK_SUCCESS;

	/* optimized NIR */
	if (p < end) {
		p->isText = true;
		desc_copy(p->name, "NIR Shader(s)");
		desc_copy(p->description, "The optimized NIR shader(s)");
		if (radv_copy_representation(p->pData, &p->dataSize, shader->nir_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	/* LLVM IR */
	if (p < end) {
		p->isText = true;
		desc_copy(p->name, "LLVM IR");
		desc_copy(p->description, "The LLVM IR after some optimizations");
		if (radv_copy_representation(p->pData, &p->dataSize, shader->llvm_ir_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	/* Disassembly */
	if (p < end) {
		p->isText = true;
		desc_copy(p->name, "Assembly");
		desc_copy(p->description, "Final Assembly");
		if (radv_copy_representation(p->pData, &p->dataSize, shader->disasm_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	if (!pInternalRepresentations)
		*pInternalRepresentationCount = p - pInternalRepresentations;
	else if (p > end) {
		result = VK_INCOMPLETE;
		*pInternalRepresentationCount = end - pInternalRepresentations;
	} else {
		*pInternalRepresentationCount = p - pInternalRepresentations;
	}

	return result;
}