/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>

#include "r600d_common.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"

void radv_shader_variant_destroy(struct radv_device *device,
				 struct radv_shader_variant *variant);

static const struct nir_shader_compiler_options nir_options = {
	.vertex_id_zero_based = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
};

VkResult radv_CreateShaderModule(
	VkDevice                                    _device,
	const VkShaderModuleCreateInfo*             pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkShaderModule*                             pShaderModule)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_shader_module *module;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	module = vk_alloc2(&device->alloc, pAllocator,
			   sizeof(*module) + pCreateInfo->codeSize, 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (module == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	module->size = pCreateInfo->codeSize;
	memcpy(module->data, pCreateInfo->pCode, module->size);

	_mesa_sha1_compute(module->data, module->size, module->sha1);

	*pShaderModule = radv_shader_module_to_handle(module);

	return VK_SUCCESS;
}

void radv_DestroyShaderModule(
	VkDevice                                    _device,
	VkShaderModule                              _module,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_shader_module, module, _module);

	if (!module)
		return;

	vk_free2(&device->alloc, pAllocator, module);
}

static void
radv_pipeline_destroy(struct radv_device *device,
		      struct radv_pipeline *pipeline,
		      const VkAllocationCallbacks* allocator)
{
	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i)
		if (pipeline->shaders[i])
			radv_shader_variant_destroy(device, pipeline->shaders[i]);

	vk_free2(&device->alloc, allocator, pipeline);
}

void radv_DestroyPipeline(
	VkDevice                                    _device,
	VkPipeline                                  _pipeline,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	if (!_pipeline)
		return;

	radv_pipeline_destroy(device, pipeline, pAllocator);
}

static void
radv_optimize_nir(struct nir_shader *shader)
{
	bool progress;

	do {
		progress = false;

		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
		NIR_PASS_V(shader, nir_lower_alu_to_scalar);
		NIR_PASS_V(shader, nir_lower_phis_to_scalar);

		NIR_PASS(progress, shader, nir_copy_prop);
		NIR_PASS(progress, shader, nir_opt_remove_phis);
		NIR_PASS(progress, shader, nir_opt_dce);
		NIR_PASS(progress, shader, nir_opt_dead_cf);
		NIR_PASS(progress, shader, nir_opt_cse);
		NIR_PASS(progress, shader, nir_opt_peephole_select, 8);
		NIR_PASS(progress, shader, nir_opt_algebraic);
		NIR_PASS(progress, shader, nir_opt_constant_folding);
		NIR_PASS(progress, shader, nir_opt_undef);
		NIR_PASS(progress, shader, nir_opt_conditional_discard);
	} while (progress);
}

static nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
			   struct radv_shader_module *module,
			   const char *entrypoint_name,
			   gl_shader_stage stage,
			   const VkSpecializationInfo *spec_info,
			   bool dump)
{
	if (strcmp(entrypoint_name, "main") != 0) {
		radv_finishme("Multiple shaders per module not really supported");
	}

	nir_shader *nir;
	nir_function *entry_point;
	if (module->nir) {
		/* Some things such as our meta clear/blit code will give us a NIR
		 * shader directly.  In that case, we just ignore the SPIR-V entirely
		 * and just use the NIR shader */
		nir = module->nir;
		nir->options = &nir_options;
		nir_validate_shader(nir);

		assert(exec_list_length(&nir->functions) == 1);
		struct exec_node *node = exec_list_get_head(&nir->functions);
		entry_point = exec_node_data(nir_function, node, node);
	} else {
		uint32_t *spirv = (uint32_t *) module->data;
		assert(module->size % 4 == 0);

		uint32_t num_spec_entries = 0;
		struct nir_spirv_specialization *spec_entries = NULL;
		if (spec_info && spec_info->mapEntryCount > 0) {
			num_spec_entries = spec_info->mapEntryCount;
			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
			for (uint32_t i = 0; i < num_spec_entries; i++) {
				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
				const void *data = spec_info->pData + entry.offset;
				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
				if (spec_info->dataSize == 8)
					spec_entries[i].data64 = *(const uint64_t *)data;
				else
					spec_entries[i].data32 = *(const uint32_t *)data;
			}
		}
		const struct nir_spirv_supported_extensions supported_ext = {
		};
		entry_point = spirv_to_nir(spirv, module->size / 4,
					   spec_entries, num_spec_entries,
					   stage, entrypoint_name, &supported_ext, &nir_options);
		nir = entry_point->shader;
		assert(nir->stage == stage);
		nir_validate_shader(nir);

		free(spec_entries);

		/* We have to lower away local constant initializers right before we
		 * inline functions.  That way they get properly initialized at the top
		 * of the function and not at the top of its caller.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
		NIR_PASS_V(nir, nir_lower_returns);
		NIR_PASS_V(nir, nir_inline_functions);

		/* Pick off the single entrypoint that we want */
		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
			if (func != entry_point)
				exec_node_remove(&func->node);
		}
		assert(exec_list_length(&nir->functions) == 1);
		entry_point->name = ralloc_strdup(entry_point, "main");

		NIR_PASS_V(nir, nir_remove_dead_variables,
			   nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

		/* Now that we've deleted all but the main function, we can go ahead and
		 * lower the rest of the constant initializers.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
		NIR_PASS_V(nir, nir_lower_system_values);
	}

	/* Vulkan uses the separate-shader linking model */
	nir->info->separate_shader = true;

	//   nir = brw_preprocess_nir(compiler, nir);

	nir_shader_gather_info(nir, entry_point->impl);

	nir_variable_mode indirect_mask = 0;
	//   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
	indirect_mask |= nir_var_shader_in;
	//   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
	indirect_mask |= nir_var_local;

	nir_lower_indirect_derefs(nir, indirect_mask);

	static const nir_lower_tex_options tex_options = {
		.lower_txp = ~0,
	};

	nir_lower_tex(nir, &tex_options);

	nir_lower_vars_to_ssa(nir);
	nir_lower_var_copies(nir);
	nir_lower_global_vars_to_local(nir);
	nir_remove_dead_variables(nir, nir_var_local);
	radv_optimize_nir(nir);

	if (dump)
		nir_print_shader(nir, stderr);

	return nir;
}

static const char *radv_get_shader_name(struct radv_shader_variant *var,
					gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX: return "Vertex Shader as VS";
	case MESA_SHADER_FRAGMENT: return "Pixel Shader";
	case MESA_SHADER_COMPUTE: return "Compute Shader";
	default:
		return "Unknown shader";
	}
}

static void radv_dump_pipeline_stats(struct radv_device *device, struct radv_pipeline *pipeline)
{
	unsigned lds_increment = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
	struct radv_shader_variant *var;
	struct ac_shader_config *conf;
	int i;
	FILE *file = stderr;
	unsigned max_simd_waves = 10;
	unsigned lds_per_wave = 0;

	for (i = 0; i < MESA_SHADER_STAGES; i++) {
		if (!pipeline->shaders[i])
			continue;
		var = pipeline->shaders[i];

		conf = &var->config;

		if (i == MESA_SHADER_FRAGMENT) {
			lds_per_wave = conf->lds_size * lds_increment +
				align(var->info.fs.num_interp * 48, lds_increment);
		}

		if (conf->num_sgprs) {
			if (device->physical_device->rad_info.chip_class >= VI)
				max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
			else
				max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
		}

		if (conf->num_vgprs)
			max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

		/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
		 * that PS can use.
		 */
		if (lds_per_wave)
			max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

		fprintf(file, "\n%s:\n",
			radv_get_shader_name(var, i));
		if (i == MESA_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}
		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs, var->code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}
}

void radv_shader_variant_destroy(struct radv_device *device,
				 struct radv_shader_variant *variant)
{
	if (__sync_fetch_and_sub(&variant->ref_count, 1) != 1)
		return;

	device->ws->buffer_destroy(variant->bo);
	free(variant);
}

static void radv_fill_shader_variant(struct radv_device *device,
				     struct radv_shader_variant *variant,
				     struct ac_shader_binary *binary,
				     gl_shader_stage stage)
{
	variant->code_size = binary->code_size;
	bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
	unsigned vgpr_comp_cnt = 0;

	if (scratch_enabled)
		radv_finishme("shader scratch space");

	switch (stage) {
	case MESA_SHADER_VERTEX:
		variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
			S_00B12C_SCRATCH_EN(scratch_enabled);
		vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
		break;
	case MESA_SHADER_FRAGMENT:
		variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
			S_00B12C_SCRATCH_EN(scratch_enabled);
		break;
	case MESA_SHADER_COMPUTE:
		variant->rsrc2 = S_00B84C_USER_SGPR(variant->info.num_user_sgprs) |
			S_00B84C_SCRATCH_EN(scratch_enabled) |
			S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
			S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
			S_00B84C_TG_SIZE_EN(1) |
			S_00B84C_LDS_SIZE(variant->config.lds_size);
		break;
	default:
		unreachable("unsupported shader type");
	}

	variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
		S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
		S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		S_00B848_DX10_CLAMP(1) |
		S_00B848_FLOAT_MODE(variant->config.float_mode);

	variant->bo = device->ws->buffer_create(device->ws, binary->code_size, 256,
						RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);

	void *ptr = device->ws->buffer_map(variant->bo);
	memcpy(ptr, binary->code, binary->code_size);
	device->ws->buffer_unmap(variant->bo);
}

static struct radv_shader_variant *radv_shader_variant_create(struct radv_device *device,
							       struct nir_shader *shader,
							       struct radv_pipeline_layout *layout,
							       const union ac_shader_variant_key *key,
							       void **code_out,
							       unsigned *code_size_out,
							       bool dump)
{
	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
	enum radeon_family chip_family = device->physical_device->rad_info.family;
	LLVMTargetMachineRef tm;
	if (!variant)
		return NULL;

	struct ac_nir_compiler_options options = {0};
	options.layout = layout;

	struct ac_shader_binary binary;

	options.unsafe_math = !!(device->debug_flags & RADV_DEBUG_UNSAFE_MATH);
	options.family = chip_family;
	options.chip_class = device->physical_device->rad_info.chip_class;
	options.supports_spill = false;
	tm = ac_create_target_machine(chip_family, false);
	ac_compile_nir_shader(tm, &binary, &variant->config,
			      &variant->info, shader, &options, dump);
	LLVMDisposeTargetMachine(tm);

	radv_fill_shader_variant(device, variant, &binary, shader->stage);

	*code_out = binary.code;
	*code_size_out = binary.code_size;

	free(binary.global_symbol_offsets);
	free(binary.disasm_string);
	variant->ref_count = 1;
	return variant;
}

static struct radv_shader_variant *
radv_pipeline_compile(struct radv_pipeline *pipeline,
		      struct radv_pipeline_cache *cache,
		      struct radv_shader_module *module,
		      const char *entrypoint,
		      gl_shader_stage stage,
		      const VkSpecializationInfo *spec_info,
		      struct radv_pipeline_layout *layout,
		      const union ac_shader_variant_key *key)
{
	unsigned char sha1[20];
	struct radv_shader_variant *variant;
	nir_shader *nir;
	void *code = NULL;
	unsigned code_size = 0;
	bool dump = (pipeline->device->debug_flags & RADV_DEBUG_DUMP_SHADERS);

	if (module->nir)
		_mesa_sha1_compute(module->nir->info->name,
				   strlen(module->nir->info->name),
				   module->sha1);

	radv_hash_shader(sha1, module, entrypoint, spec_info, layout, key);

	if (cache) {
		variant = radv_create_shader_variant_from_pipeline_cache(pipeline->device,
									 cache, sha1);
		if (variant)
			return variant;
	}

	nir = radv_shader_compile_to_nir(pipeline->device,
					 module, entrypoint, stage,
					 spec_info, dump);
	if (nir == NULL)
		return NULL;

	variant = radv_shader_variant_create(pipeline->device, nir, layout, key,
					     &code, &code_size, dump);
	if (!module->nir)
		ralloc_free(nir);

	if (variant && cache)
		variant = radv_pipeline_cache_insert_shader(cache, sha1, variant,
							    code, code_size);

	return variant;
}

static VkResult
radv_pipeline_scratch_init(struct radv_device *device,
			   struct radv_pipeline *pipeline)
{
	unsigned scratch_bytes_per_wave = 0;
	unsigned max_waves = 0;
	unsigned min_waves = 1;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i]) {
			unsigned max_stage_waves = device->scratch_waves;

			scratch_bytes_per_wave = MAX2(scratch_bytes_per_wave,
						      pipeline->shaders[i]->config.scratch_bytes_per_wave);

			max_stage_waves = MIN2(max_stage_waves,
					       4 * device->physical_device->rad_info.num_good_compute_units *
					       (256 / pipeline->shaders[i]->config.num_vgprs));
			max_waves = MAX2(max_waves, max_stage_waves);
		}
	}

	if (pipeline->shaders[MESA_SHADER_COMPUTE]) {
		unsigned group_size = pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[0] *
				      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[1] *
				      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[2];
		min_waves = MAX2(min_waves, round_up_u32(group_size, 64));
	}

	if (scratch_bytes_per_wave)
		max_waves = MIN2(max_waves, 0xffffffffu / scratch_bytes_per_wave);

	if (scratch_bytes_per_wave && max_waves < min_waves) {
		/* Not really true at this moment, but will be true on first
		 * execution. Avoid having hanging shaders. */
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}
	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
	pipeline->max_waves = max_waves;
	return VK_SUCCESS;
}

static uint32_t si_translate_blend_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case VK_BLEND_OP_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case VK_BLEND_OP_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case VK_BLEND_OP_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		return 0;
	}
}

static uint32_t si_translate_blend_factor(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case VK_BLEND_FACTOR_ONE:
		return V_028780_BLEND_ONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case VK_BLEND_FACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case VK_BLEND_FACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case VK_BLEND_FACTOR_CONSTANT_COLOR:
		return V_028780_BLEND_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
		return V_028780_BLEND_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case VK_BLEND_FACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case VK_BLEND_FACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		return 0;
	}
}

static bool is_dual_src(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_SRC1_COLOR:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
	case VK_BLEND_FACTOR_SRC1_ALPHA:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return true;
	default:
		return false;
	}
}

static unsigned si_choose_spi_color_format(VkFormat vk_format,
					   bool blend_enable,
					   bool blend_need_alpha)
{
	const struct vk_format_description *desc = vk_format_description(vk_format);
	unsigned format, ntype, swap;

	/* Alpha is needed for alpha-to-coverage.
	 * Blending may be with or without alpha.
	 */
	unsigned normal = 0; /* most optimal, may not support blending or export alpha */
	unsigned alpha = 0; /* exports alpha, but may not support blending */
	unsigned blend = 0; /* supports blending, but may not export alpha */
	unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

	format = radv_translate_colorformat(vk_format);
	ntype = radv_translate_color_numformat(vk_format, desc,
					       vk_format_get_first_non_void_channel(vk_format));
	swap = radv_translate_colorswap(vk_format, false);

	/* Choose the SPI color formats. These are required values for Stoney/RB+.
	 * Other chips have multiple choices, though they are not necessarily better.
	 */
	switch (format) {
	case V_028C70_COLOR_5_6_5:
	case V_028C70_COLOR_1_5_5_5:
	case V_028C70_COLOR_5_5_5_1:
	case V_028C70_COLOR_4_4_4_4:
	case V_028C70_COLOR_10_11_11:
	case V_028C70_COLOR_11_11_10:
	case V_028C70_COLOR_8:
	case V_028C70_COLOR_8_8:
	case V_028C70_COLOR_8_8_8_8:
	case V_028C70_COLOR_10_10_10_2:
	case V_028C70_COLOR_2_10_10_10:
		if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_16:
	case V_028C70_COLOR_16_16:
	case V_028C70_COLOR_16_16_16_16:
		if (ntype == V_028C70_NUMBER_UNORM ||
		    ntype == V_028C70_NUMBER_SNORM) {
			/* UNORM16 and SNORM16 don't support blending */
			if (ntype == V_028C70_NUMBER_UNORM)
				normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
			else
				normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

			/* Use 32 bits per channel for blending. */
			if (format == V_028C70_COLOR_16) {
				if (swap == V_028C70_SWAP_STD) { /* R */
					blend = V_028714_SPI_SHADER_32_R;
					blend_alpha = V_028714_SPI_SHADER_32_AR;
				} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
			} else if (format == V_028C70_COLOR_16_16) {
				if (swap == V_028C70_SWAP_STD) { /* RG */
					blend = V_028714_SPI_SHADER_32_GR;
					blend_alpha = V_028714_SPI_SHADER_32_ABGR;
				} else if (swap == V_028C70_SWAP_ALT) /* RA */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
			} else /* 16_16_16_16 */
				blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_FLOAT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_32:
		if (swap == V_028C70_SWAP_STD) { /* R */
			blend = normal = V_028714_SPI_SHADER_32_R;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
		} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		break;

	case V_028C70_COLOR_32_32:
		if (swap == V_028C70_SWAP_STD) { /* RG */
			blend = normal = V_028714_SPI_SHADER_32_GR;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (swap == V_028C70_SWAP_ALT) /* RA */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		break;

	case V_028C70_COLOR_32_32_32_32:
	case V_028C70_COLOR_8_24:
	case V_028C70_COLOR_24_8:
	case V_028C70_COLOR_X24_8_32_FLOAT:
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
		break;

	default:
		unreachable("unhandled blend format");
	}

	if (blend_enable && blend_need_alpha)
		return blend_alpha;
	else if(blend_need_alpha)
		return alpha;
	else if(blend_enable)
		return blend;
	else
		return normal;
}

static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
{
	unsigned i, cb_shader_mask = 0;

	for (i = 0; i < 8; i++) {
		switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
		case V_028714_SPI_SHADER_ZERO:
			break;
		case V_028714_SPI_SHADER_32_R:
			cb_shader_mask |= 0x1 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_GR:
			cb_shader_mask |= 0x3 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_AR:
			cb_shader_mask |= 0x9 << (i * 4);
			break;
		case V_028714_SPI_SHADER_FP16_ABGR:
		case V_028714_SPI_SHADER_UNORM16_ABGR:
		case V_028714_SPI_SHADER_SNORM16_ABGR:
		case V_028714_SPI_SHADER_UINT16_ABGR:
		case V_028714_SPI_SHADER_SINT16_ABGR:
		case V_028714_SPI_SHADER_32_ABGR:
			cb_shader_mask |= 0xf << (i * 4);
			break;
		}
	}
	return cb_shader_mask;
}

static void
radv_pipeline_compute_spi_color_formats(struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					uint32_t blend_enable,
					uint32_t blend_need_alpha,
					bool single_cb_enable,
					bool blend_mrt0_is_dual_src)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	unsigned col_format = 0;

	for (unsigned i = 0; i < (single_cb_enable ? 1 : subpass->color_count); ++i) {
		struct radv_render_pass_attachment *attachment;
		unsigned cf;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		cf = si_choose_spi_color_format(attachment->format,
						blend_enable & (1 << i),
						blend_need_alpha & (1 << i));

		col_format |= cf << (4 * i);
	}

	blend->cb_shader_mask = si_get_cb_shader_mask(col_format);

	if (blend_mrt0_is_dual_src)
		col_format |= (col_format & 0xf) << 4;
	if (!col_format)
		col_format |= V_028714_SPI_SHADER_32_R;
	blend->spi_shader_col_format = col_format;
}

static bool
format_is_int8(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);
	int channel = vk_format_get_first_non_void_channel(format);

	return channel >= 0 && desc->channel[channel].pure_integer &&
	       desc->channel[channel].size == 8;
}

unsigned radv_format_meta_fs_key(VkFormat format)
{
	unsigned col_format = si_choose_spi_color_format(format, false, false) - 1;
	bool is_int8 = format_is_int8(format);

	return col_format + (is_int8 ? 3 : 0);
}

static unsigned
radv_pipeline_compute_is_int8(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned is_int8 = 0;

	for (unsigned i = 0; i < subpass->color_count; ++i) {
		struct radv_render_pass_attachment *attachment;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		if (format_is_int8(attachment->format))
			is_int8 |= 1 << i;
	}

	return is_int8;
}

static void
radv_pipeline_init_blend_state(struct radv_pipeline *pipeline,
			       const VkGraphicsPipelineCreateInfo *pCreateInfo,
			       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	unsigned mode = V_028808_CB_NORMAL;
	uint32_t blend_enable = 0, blend_need_alpha = 0;
	bool blend_mrt0_is_dual_src = false;
	int i;
	bool single_cb_enable = false;

	if (extra && extra->custom_blend_mode) {
		single_cb_enable = true;
		mode = extra->custom_blend_mode;
	}
	blend->cb_color_control = 0;
	if (vkblend->logicOpEnable)
		blend->cb_color_control |= S_028808_ROP3(vkblend->logicOp | (vkblend->logicOp << 4));
	else
		blend->cb_color_control |= S_028808_ROP3(0xcc);

	blend->db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(2) |
		S_028B70_ALPHA_TO_MASK_OFFSET1(2) |
		S_028B70_ALPHA_TO_MASK_OFFSET2(2) |
		S_028B70_ALPHA_TO_MASK_OFFSET3(2);

	blend->cb_target_mask = 0;
	for (i = 0; i < vkblend->attachmentCount; i++) {
		const VkPipelineColorBlendAttachmentState *att = &vkblend->pAttachments[i];
		unsigned blend_cntl = 0;
		VkBlendOp eqRGB = att->colorBlendOp;
		VkBlendFactor srcRGB = att->srcColorBlendFactor;
		VkBlendFactor dstRGB = att->dstColorBlendFactor;
		VkBlendOp eqA = att->alphaBlendOp;
		VkBlendFactor srcA = att->srcAlphaBlendFactor;
		VkBlendFactor dstA = att->dstAlphaBlendFactor;

		blend->sx_mrt0_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);

		if (!att->colorWriteMask)
			continue;

		blend->cb_target_mask |= (unsigned)att->colorWriteMask << (4 * i);
		if (!att->blendEnable) {
			blend->cb_blend_control[i] = blend_cntl;
			continue;
		}

		if (is_dual_src(srcRGB) || is_dual_src(dstRGB) || is_dual_src(srcA) || is_dual_src(dstA))
			if (i == 0)
				blend_mrt0_is_dual_src = true;

		if (eqRGB == VK_BLEND_OP_MIN || eqRGB == VK_BLEND_OP_MAX) {
			srcRGB = VK_BLEND_FACTOR_ONE;
			dstRGB = VK_BLEND_FACTOR_ONE;
		}
		if (eqA == VK_BLEND_OP_MIN || eqA == VK_BLEND_OP_MAX) {
			srcA = VK_BLEND_FACTOR_ONE;
			dstA = VK_BLEND_FACTOR_ONE;
		}

		blend_cntl |= S_028780_ENABLE(1);

		blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
		blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(srcRGB));
		blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(dstRGB));
		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
			blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
			blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
			blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(srcA));
			blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(dstA));
		}
		blend->cb_blend_control[i] = blend_cntl;

		blend_enable |= 1 << i;

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    srcRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
			blend_need_alpha |= 1 << i;
	}
	for (i = vkblend->attachmentCount; i < 8; i++)
		blend->cb_blend_control[i] = 0;

	if (blend->cb_target_mask)
		blend->cb_color_control |= S_028808_MODE(mode);
	else
		blend->cb_color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	radv_pipeline_compute_spi_color_formats(pipeline, pCreateInfo,
						blend_enable, blend_need_alpha, single_cb_enable, blend_mrt0_is_dual_src);
}

static uint32_t si_translate_stencil_op(enum VkStencilOp op)
{
	switch (op) {
	case VK_STENCIL_OP_KEEP:
		return V_02842C_STENCIL_KEEP;
	case VK_STENCIL_OP_ZERO:
		return V_02842C_STENCIL_ZERO;
	case VK_STENCIL_OP_REPLACE:
		return V_02842C_STENCIL_REPLACE_TEST;
	case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
		return V_02842C_STENCIL_ADD_CLAMP;
	case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
		return V_02842C_STENCIL_SUB_CLAMP;
	case VK_STENCIL_OP_INVERT:
		return V_02842C_STENCIL_INVERT;
	case VK_STENCIL_OP_INCREMENT_AND_WRAP:
		return V_02842C_STENCIL_ADD_WRAP;
	case VK_STENCIL_OP_DECREMENT_AND_WRAP:
		return V_02842C_STENCIL_SUB_WRAP;
	default:
		return 0;
	}
}

static void
radv_pipeline_init_depth_stencil_state(struct radv_pipeline *pipeline,
				       const VkGraphicsPipelineCreateInfo *pCreateInfo,
				       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineDepthStencilStateCreateInfo *vkds = pCreateInfo->pDepthStencilState;
	struct radv_depth_stencil_state *ds = &pipeline->graphics.ds;

	memset(ds, 0, sizeof(*ds));
	if (!vkds)
		return;
	ds->db_depth_control = S_028800_Z_ENABLE(vkds->depthTestEnable ? 1 : 0) |
		S_028800_Z_WRITE_ENABLE(vkds->depthWriteEnable ? 1 : 0) |
		S_028800_ZFUNC(vkds->depthCompareOp) |
		S_028800_DEPTH_BOUNDS_ENABLE(vkds->depthBoundsTestEnable ? 1 : 0);

	if (vkds->stencilTestEnable) {
		ds->db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
		ds->db_depth_control |= S_028800_STENCILFUNC(vkds->front.compareOp);
		ds->db_stencil_control |= S_02842C_STENCILFAIL(si_translate_stencil_op(vkds->front.failOp));
		ds->db_stencil_control |= S_02842C_STENCILZPASS(si_translate_stencil_op(vkds->front.passOp));
		ds->db_stencil_control |= S_02842C_STENCILZFAIL(si_translate_stencil_op(vkds->front.depthFailOp));

		ds->db_depth_control |= S_028800_STENCILFUNC_BF(vkds->back.compareOp);
		ds->db_stencil_control |= S_02842C_STENCILFAIL_BF(si_translate_stencil_op(vkds->back.failOp));
		ds->db_stencil_control |= S_02842C_STENCILZPASS_BF(si_translate_stencil_op(vkds->back.passOp));
		ds->db_stencil_control |= S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(vkds->back.depthFailOp));
	}

	if (extra) {
		ds->db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(extra->db_depth_clear);
		ds->db_render_control |= S_028000_STENCIL_CLEAR_ENABLE(extra->db_stencil_clear);

		ds->db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->db_resummarize);
		ds->db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->db_flush_depth_inplace);
		ds->db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->db_flush_stencil_inplace);
		ds->db_render_override2 |= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra->db_depth_disable_expclear);
		ds->db_render_override2 |= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra->db_stencil_disable_expclear);
	}
}

static uint32_t si_translate_fill(VkPolygonMode func)
{
	switch(func) {
	case VK_POLYGON_MODE_FILL:
		return V_028814_X_DRAW_TRIANGLES;
	case VK_POLYGON_MODE_LINE:
		return V_028814_X_DRAW_LINES;
	case VK_POLYGON_MODE_POINT:
		return V_028814_X_DRAW_POINTS;
	default:
		assert(0);
		return V_028814_X_DRAW_POINTS;
	}
}

static void
radv_pipeline_init_raster_state(struct radv_pipeline *pipeline,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationStateCreateInfo *vkraster = pCreateInfo->pRasterizationState;
	struct radv_raster_state *raster = &pipeline->graphics.raster;

	memset(raster, 0, sizeof(*raster));

	raster->spi_interp_control =
		S_0286D4_FLAT_SHADE_ENA(1) |
		S_0286D4_PNT_SPRITE_ENA(1) |
		S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
		S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
		S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
		S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
		S_0286D4_PNT_SPRITE_TOP_1(0); // vulkan is top to bottom - 1.0 at bottom

	raster->pa_cl_vs_out_cntl = S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(1);
	raster->pa_cl_clip_cntl = S_028810_PS_UCP_MODE(3) |
		S_028810_DX_CLIP_SPACE_DEF(1) | // vulkan uses DX conventions.
		S_028810_ZCLIP_NEAR_DISABLE(vkraster->depthClampEnable ? 1 : 0) |
		S_028810_ZCLIP_FAR_DISABLE(vkraster->depthClampEnable ? 1 : 0) |
		S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1);

	raster->pa_su_vtx_cntl =
		S_028BE4_PIX_CENTER(1) | // TODO verify
		S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
		S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH);

	raster->pa_su_sc_mode_cntl =
		S_028814_FACE(vkraster->frontFace) |
		S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
		S_028814_CULL_BACK(!!(vkraster->cullMode & VK_CULL_MODE_BACK_BIT)) |
		S_028814_POLY_MODE(vkraster->polygonMode != VK_POLYGON_MODE_FILL) |
		S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(vkraster->polygonMode)) |
		S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(vkraster->polygonMode)) |
		S_028814_POLY_OFFSET_FRONT_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
		S_028814_POLY_OFFSET_BACK_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
		S_028814_POLY_OFFSET_PARA_ENABLE(vkraster->depthBiasEnable ? 1 : 0);
}

static void
radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	unsigned num_tile_pipes = pipeline->device->physical_device->rad_info.num_tile_pipes;
	int ps_iter_samples = 1;
	uint32_t mask = 0xffff;

	ms->num_samples = vkms->rasterizationSamples;

	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.fs.force_persample) {
		ps_iter_samples = vkms->rasterizationSamples;
	}

	ms->pa_sc_line_cntl = S_028BDC_DX10_DIAMOND_TEST_ENA(1);
	ms->pa_sc_aa_config = 0;
	ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
		S_028804_STATIC_ANCHOR_ASSOCIATIONS(1);
	ms->pa_sc_mode_cntl_1 =
		S_028A4C_WALK_FENCE_ENABLE(1) | //TODO linear dst fixes
		S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
		S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1) |
		S_028A4C_SUPERTILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_TILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE(1) |
		EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1);

	if (vkms->rasterizationSamples > 1) {
		unsigned log_samples = util_logbase2(vkms->rasterizationSamples);
		unsigned log_ps_iter_samples = util_logbase2(util_next_power_of_two(ps_iter_samples));
		ms->pa_sc_mode_cntl_0 = S_028A48_MSAA_ENABLE(1);
		ms->pa_sc_line_cntl |= S_028BDC_EXPAND_LINE_WIDTH(1); /* CM_R_028BDC_PA_SC_LINE_CNTL */
		ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_samples) |
			S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
			S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
			S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
		ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
			S_028BE0_MAX_SAMPLE_DIST(radv_cayman_get_maxdist(log_samples)) |
			S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples); /* CM_R_028BE0_PA_SC_AA_CONFIG */
		ms->pa_sc_mode_cntl_1 |= EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
	}

	if (vkms->alphaToCoverageEnable)
		blend->db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);

	if (vkms->pSampleMask) {
		mask = vkms->pSampleMask[0] & 0xffff;
	}

	ms->pa_sc_aa_mask[0] = mask | (mask << 16);
	ms->pa_sc_aa_mask[1] = mask | (mask << 16);
}

static uint32_t
si_translate_prim(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
		return V_008958_DI_PT_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
		return V_008958_DI_PT_LINELIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
		return V_008958_DI_PT_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
		return V_008958_DI_PT_TRILIST;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
		return V_008958_DI_PT_TRISTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
		return V_008958_DI_PT_TRIFAN;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_LINELIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_LINESTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_TRILIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_TRISTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_008958_DI_PT_PATCH;
	default:
		assert(0);
		return 0;
	}
}

static uint32_t
si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}

static unsigned si_map_swizzle(unsigned swizzle)
{
	switch (swizzle) {
	case VK_SWIZZLE_Y:
		return V_008F0C_SQ_SEL_Y;
	case VK_SWIZZLE_Z:
		return V_008F0C_SQ_SEL_Z;
	case VK_SWIZZLE_W:
		return V_008F0C_SQ_SEL_W;
	case VK_SWIZZLE_0:
		return V_008F0C_SQ_SEL_0;
	case VK_SWIZZLE_1:
		return V_008F0C_SQ_SEL_1;
	default: /* VK_SWIZZLE_X */
		return V_008F0C_SQ_SEL_X;
	}
}

static void
radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
				 const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	radv_cmd_dirty_mask_t states = RADV_CMD_DIRTY_DYNAMIC_ALL;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

	pipeline->dynamic_state = default_dynamic_state;

	if (pCreateInfo->pDynamicState) {
		/* Remove all of the states that are marked as dynamic */
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t s = 0; s < count; s++)
			states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
	}

	struct radv_dynamic_state *dynamic = &pipeline->dynamic_state;

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pViewportState is [...] NULL if the pipeline
	 *    has rasterization disabled.
	 */
	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
		assert(pCreateInfo->pViewportState);

		dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
		if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
			typed_memcpy(dynamic->viewport.viewports,
				     pCreateInfo->pViewportState->pViewports,
				     pCreateInfo->pViewportState->viewportCount);
		}

		dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
		if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
			typed_memcpy(dynamic->scissor.scissors,
				     pCreateInfo->pViewportState->pScissors,
				     pCreateInfo->pViewportState->scissorCount);
		}
	}

	if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
	}

	if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->depth_bias.bias =
			pCreateInfo->pRasterizationState->depthBiasConstantFactor;
		dynamic->depth_bias.clamp =
			pCreateInfo->pRasterizationState->depthBiasClamp;
		dynamic->depth_bias.slope =
			pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
	}

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pColorBlendState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is
	 *    created against does not use any color attachments.
	 */
	bool uses_color_att = false;
	for (unsigned i = 0; i < subpass->color_count; ++i) {
		if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
			uses_color_att = true;
			break;
		}
	}

	if (uses_color_att && states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
		assert(pCreateInfo->pColorBlendState);
		typed_memcpy(dynamic->blend_constants,
			     pCreateInfo->pColorBlendState->blendConstants, 4);
	}

	/* If there is no depthstencil attachment, then don't read
	 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
	 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
	 * no need to override the depthstencil defaults in
	 * radv_pipeline::dynamic_state when there is no depthstencil attachment.
	 *
	 * Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pDepthStencilState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is created
	 *    against does not use a depth/stencil attachment.
	 */
	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
	    subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
		assert(pCreateInfo->pDepthStencilState);

		if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
			dynamic->depth_bounds.min =
				pCreateInfo->pDepthStencilState->minDepthBounds;
			dynamic->depth_bounds.max =
				pCreateInfo->pDepthStencilState->maxDepthBounds;
		}

		if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
			dynamic->stencil_compare_mask.front =
				pCreateInfo->pDepthStencilState->front.compareMask;
			dynamic->stencil_compare_mask.back =
				pCreateInfo->pDepthStencilState->back.compareMask;
		}

		if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
			dynamic->stencil_write_mask.front =
				pCreateInfo->pDepthStencilState->front.writeMask;
			dynamic->stencil_write_mask.back =
				pCreateInfo->pDepthStencilState->back.writeMask;
		}

		if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
			dynamic->stencil_reference.front =
				pCreateInfo->pDepthStencilState->front.reference;
			dynamic->stencil_reference.back =
				pCreateInfo->pDepthStencilState->back.reference;
		}
	}

	pipeline->dynamic_state_mask = states;
}

static union ac_shader_variant_key
radv_compute_vs_key(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	union ac_shader_variant_key key;
	const VkPipelineVertexInputStateCreateInfo *input_state =
		pCreateInfo->pVertexInputState;

	memset(&key, 0, sizeof(key));
	key.vs.instance_rate_inputs = 0;

	for (unsigned i = 0; i < input_state->vertexAttributeDescriptionCount; ++i) {
		unsigned binding;
		binding = input_state->pVertexAttributeDescriptions[i].binding;
		if (input_state->pVertexBindingDescriptions[binding].inputRate)
			key.vs.instance_rate_inputs |= 1u << input_state->pVertexAttributeDescriptions[i].location;
	}
	return key;
}

static VkResult
radv_pipeline_init(struct radv_pipeline *pipeline,
		   struct radv_device *device,
		   struct radv_pipeline_cache *cache,
		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   const struct radv_graphics_pipeline_create_info *extra,
		   const VkAllocationCallbacks *alloc)
{
	struct radv_shader_module fs_m = {0};
	VkResult result;

	if (alloc == NULL)
		alloc = &device->alloc;

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);

	radv_pipeline_init_dynamic_state(pipeline, pCreateInfo);
	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
		pStages[stage] = &pCreateInfo->pStages[i];
		modules[stage] = radv_shader_module_from_handle(pStages[stage]->module);
	}

	radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);

	if (modules[MESA_SHADER_VERTEX]) {
		union ac_shader_variant_key key = radv_compute_vs_key(pCreateInfo);

		pipeline->shaders[MESA_SHADER_VERTEX] =
			radv_pipeline_compile(pipeline, cache, modules[MESA_SHADER_VERTEX],
					      pStages[MESA_SHADER_VERTEX]->pName,
					      MESA_SHADER_VERTEX,
					      pStages[MESA_SHADER_VERTEX]->pSpecializationInfo,
					      pipeline->layout, &key);

		pipeline->active_stages |= mesa_to_vk_shader_stage(MESA_SHADER_VERTEX);
	}

	if (!modules[MESA_SHADER_FRAGMENT]) {
		nir_builder fs_b;
		nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
		fs_b.shader->info->name = ralloc_strdup(fs_b.shader, "noop_fs");
		fs_m.nir = fs_b.shader;
		modules[MESA_SHADER_FRAGMENT] = &fs_m;
	}

	if (modules[MESA_SHADER_FRAGMENT]) {
		union ac_shader_variant_key key;
		key.fs.col_format = pipeline->graphics.blend.spi_shader_col_format;
		key.fs.is_int8 = radv_pipeline_compute_is_int8(pCreateInfo);

		const VkPipelineShaderStageCreateInfo *stage = pStages[MESA_SHADER_FRAGMENT];

		pipeline->shaders[MESA_SHADER_FRAGMENT] =
			radv_pipeline_compile(pipeline, cache, modules[MESA_SHADER_FRAGMENT],
					      stage ? stage->pName : "main",
					      MESA_SHADER_FRAGMENT,
					      stage ? stage->pSpecializationInfo : NULL,
					      pipeline->layout, &key);
		pipeline->active_stages |= mesa_to_vk_shader_stage(MESA_SHADER_FRAGMENT);
	}

	if (fs_m.nir)
		ralloc_free(fs_m.nir);

	radv_pipeline_init_depth_stencil_state(pipeline, pCreateInfo, extra);
	radv_pipeline_init_raster_state(pipeline, pCreateInfo);
	radv_pipeline_init_multisample_state(pipeline, pCreateInfo);
	pipeline->graphics.prim = si_translate_prim(pCreateInfo->pInputAssemblyState->topology);
	pipeline->graphics.gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
	if (extra && extra->use_rectlist) {
		pipeline->graphics.prim = V_008958_DI_PT_RECTLIST;
		pipeline->graphics.gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	}
	pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;

	const VkPipelineVertexInputStateCreateInfo *vi_info =
		pCreateInfo->pVertexInputState;
	for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
		const VkVertexInputAttributeDescription *desc =
			&vi_info->pVertexAttributeDescriptions[i];
		unsigned loc = desc->location;
		const struct vk_format_description *format_desc;
		int first_non_void;
		uint32_t num_format, data_format;
		format_desc = vk_format_description(desc->format);
		first_non_void = vk_format_get_first_non_void_channel(desc->format);

		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);

		pipeline->va_rsrc_word3[loc] = S_008F0C_DST_SEL_X(si_map_swizzle(format_desc->swizzle[0])) |
			S_008F0C_DST_SEL_Y(si_map_swizzle(format_desc->swizzle[1])) |
			S_008F0C_DST_SEL_Z(si_map_swizzle(format_desc->swizzle[2])) |
			S_008F0C_DST_SEL_W(si_map_swizzle(format_desc->swizzle[3])) |
			S_008F0C_NUM_FORMAT(num_format) |
			S_008F0C_DATA_FORMAT(data_format);
		pipeline->va_format_size[loc] = format_desc->block.bits / 8;
		pipeline->va_offset[loc] = desc->offset;
		pipeline->va_binding[loc] = desc->binding;
		pipeline->num_vertex_attribs = MAX2(pipeline->num_vertex_attribs, loc + 1);
	}

	for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *desc =
			&vi_info->pVertexBindingDescriptions[i];

		pipeline->binding_stride[desc->binding] = desc->stride;
	}

	if (device->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) {
		radv_dump_pipeline_stats(device, pipeline);
	}

	result = radv_pipeline_scratch_init(device, pipeline);
	return result;
}

VkResult
radv_graphics_pipeline_create(
	VkDevice _device,
	VkPipelineCache _cache,
	const VkGraphicsPipelineCreateInfo *pCreateInfo,
	const struct radv_graphics_pipeline_create_info *extra,
	const VkAllocationCallbacks *pAllocator,
	VkPipeline *pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pipeline, 0, sizeof(*pipeline));
	result = radv_pipeline_init(pipeline, device, cache,
				    pCreateInfo, extra, pAllocator);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}

VkResult radv_CreateGraphicsPipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkGraphicsPipelineCreateInfo*         pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_graphics_pipeline_create(_device,
						  pipelineCache,
						  &pCreateInfos[i],
						  NULL, pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}

static VkResult radv_compute_pipeline_create(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkComputePipelineCreateInfo*          pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	RADV_FROM_HANDLE(radv_shader_module, module, pCreateInfo->stage.module);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pipeline, 0, sizeof(*pipeline));
	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);

	pipeline->shaders[MESA_SHADER_COMPUTE] =
		radv_pipeline_compile(pipeline, cache, module,
				      pCreateInfo->stage.pName,
				      MESA_SHADER_COMPUTE,
				      pCreateInfo->stage.pSpecializationInfo,
				      pipeline->layout, NULL);

	result = radv_pipeline_scratch_init(device, pipeline);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	if (device->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) {
		radv_dump_pipeline_stats(device, pipeline);
	}
	return VK_SUCCESS;
}

VkResult radv_CreateComputePipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkComputePipelineCreateInfo*          pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_compute_pipeline_create(_device, pipelineCache,
						 &pCreateInfos[i],
						 pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}