/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>

#include "r600d_common.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"
void radv_shader_variant_destroy(struct radv_device *device,
				 struct radv_shader_variant *variant);
static const struct nir_shader_compiler_options nir_options = {
	.vertex_id_zero_based = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
};
VkResult radv_CreateShaderModule(
	VkDevice                                    _device,
	const VkShaderModuleCreateInfo*             pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkShaderModule*                             pShaderModule)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_shader_module *module;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	module = vk_alloc2(&device->alloc, pAllocator,
			   sizeof(*module) + pCreateInfo->codeSize, 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (module == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	module->size = pCreateInfo->codeSize;
	memcpy(module->data, pCreateInfo->pCode, module->size);

	_mesa_sha1_compute(module->data, module->size, module->sha1);

	*pShaderModule = radv_shader_module_to_handle(module);

	return VK_SUCCESS;
}
void radv_DestroyShaderModule(
	VkDevice                                    _device,
	VkShaderModule                              _module,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_shader_module, module, _module);

	vk_free2(&device->alloc, pAllocator, module);
}
static void
radv_pipeline_destroy(struct radv_device *device,
		      struct radv_pipeline *pipeline,
		      const VkAllocationCallbacks *allocator)
{
	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i)
		if (pipeline->shaders[i])
			radv_shader_variant_destroy(device, pipeline->shaders[i]);

	if (pipeline->gs_copy_shader)
		radv_shader_variant_destroy(device, pipeline->gs_copy_shader);

	vk_free2(&device->alloc, allocator, pipeline);
}
void radv_DestroyPipeline(
	VkDevice                                    _device,
	VkPipeline                                  _pipeline,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	radv_pipeline_destroy(device, pipeline, pAllocator);
}
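
/* Run the NIR optimization passes repeatedly until none of them reports
 * further progress. */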
static void
radv_optimize_nir(struct nir_shader *shader)
{
	bool progress;

	do {
		progress = false;

		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
		NIR_PASS_V(shader, nir_lower_alu_to_scalar);
		NIR_PASS_V(shader, nir_lower_phis_to_scalar);

		NIR_PASS(progress, shader, nir_copy_prop);
		NIR_PASS(progress, shader, nir_opt_remove_phis);
		NIR_PASS(progress, shader, nir_opt_dce);
		NIR_PASS(progress, shader, nir_opt_dead_cf);
		NIR_PASS(progress, shader, nir_opt_cse);
		NIR_PASS(progress, shader, nir_opt_peephole_select, 8);
		NIR_PASS(progress, shader, nir_opt_algebraic);
		NIR_PASS(progress, shader, nir_opt_constant_folding);
		NIR_PASS(progress, shader, nir_opt_undef);
		NIR_PASS(progress, shader, nir_opt_conditional_discard);
	} while (progress);
}
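
/* Translate a shader module (either SPIR-V, or a NIR shader handed to us
 * directly by the meta code) into an optimized NIR shader for one stage. */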
static nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
			   struct radv_shader_module *module,
			   const char *entrypoint_name,
			   gl_shader_stage stage,
			   const VkSpecializationInfo *spec_info,
			   bool dump)
{
	if (strcmp(entrypoint_name, "main") != 0) {
		radv_finishme("Multiple shaders per module not really supported");
	}

	nir_shader *nir;
	nir_function *entry_point;
	if (module->nir) {
		/* Some things such as our meta clear/blit code will give us a NIR
		 * shader directly. In that case, we just ignore the SPIR-V entirely
		 * and just use the NIR shader */
		nir = module->nir;
		nir->options = &nir_options;
		nir_validate_shader(nir);

		assert(exec_list_length(&nir->functions) == 1);
		struct exec_node *node = exec_list_get_head(&nir->functions);
		entry_point = exec_node_data(nir_function, node, node);
	} else {
		uint32_t *spirv = (uint32_t *) module->data;
		assert(module->size % 4 == 0);

		uint32_t num_spec_entries = 0;
		struct nir_spirv_specialization *spec_entries = NULL;
		if (spec_info && spec_info->mapEntryCount > 0) {
			num_spec_entries = spec_info->mapEntryCount;
			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
			for (uint32_t i = 0; i < num_spec_entries; i++) {
				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
				const void *data = spec_info->pData + entry.offset;
				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
				if (spec_info->dataSize == 8)
					spec_entries[i].data64 = *(const uint64_t *)data;
				else
					spec_entries[i].data32 = *(const uint32_t *)data;
			}
		}
		const struct nir_spirv_supported_extensions supported_ext = {
		};
		entry_point = spirv_to_nir(spirv, module->size / 4,
					   spec_entries, num_spec_entries,
					   stage, entrypoint_name, &supported_ext, &nir_options);
		nir = entry_point->shader;
		assert(nir->stage == stage);
		nir_validate_shader(nir);

		free(spec_entries);

		/* We have to lower away local constant initializers right before we
		 * inline functions. That way they get properly initialized at the top
		 * of the function and not at the top of its caller.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
		NIR_PASS_V(nir, nir_lower_returns);
		NIR_PASS_V(nir, nir_inline_functions);

		/* Pick off the single entrypoint that we want */
		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
			if (func != entry_point)
				exec_node_remove(&func->node);
		}
		assert(exec_list_length(&nir->functions) == 1);
		entry_point->name = ralloc_strdup(entry_point, "main");

		NIR_PASS_V(nir, nir_remove_dead_variables,
			   nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

		/* Now that we've deleted all but the main function, we can go ahead and
		 * lower the rest of the constant initializers.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
		NIR_PASS_V(nir, nir_lower_system_values);
	}

	/* Vulkan uses the separate-shader linking model */
	nir->info->separate_shader = true;

	//   nir = brw_preprocess_nir(compiler, nir);

	nir_shader_gather_info(nir, entry_point->impl);

	nir_variable_mode indirect_mask = 0;
	//   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
	indirect_mask |= nir_var_shader_in;
	//   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
	indirect_mask |= nir_var_local;

	nir_lower_indirect_derefs(nir, indirect_mask);

	static const nir_lower_tex_options tex_options = {
		.lower_txp = ~0,
	};

	nir_lower_tex(nir, &tex_options);

	nir_lower_vars_to_ssa(nir);
	nir_lower_var_copies(nir);
	nir_lower_global_vars_to_local(nir);
	nir_remove_dead_variables(nir, nir_var_local);
	radv_optimize_nir(nir);

	if (dump)
		nir_print_shader(nir, stderr);

	return nir;
}
static const char *radv_get_shader_name(struct radv_shader_variant *var,
					gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX: return var->info.vs.as_es ? "Vertex Shader as ES" : "Vertex Shader as VS";
	case MESA_SHADER_GEOMETRY: return "Geometry Shader";
	case MESA_SHADER_FRAGMENT: return "Pixel Shader";
	case MESA_SHADER_COMPUTE: return "Compute Shader";
	default:
		return "Unknown shader";
	}
}
static void radv_dump_pipeline_stats(struct radv_device *device, struct radv_pipeline *pipeline)
{
	unsigned lds_increment = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
	struct radv_shader_variant *var;
	struct ac_shader_config *conf;
	int i;
	FILE *file = stderr;
	unsigned max_simd_waves = 10;
	unsigned lds_per_wave = 0;

	for (i = 0; i < MESA_SHADER_STAGES; i++) {
		if (!pipeline->shaders[i])
			continue;
		var = pipeline->shaders[i];

		conf = &var->config;

		if (i == MESA_SHADER_FRAGMENT) {
			lds_per_wave = conf->lds_size * lds_increment +
				       align(var->info.fs.num_interp * 48, lds_increment);
		}

		if (conf->num_sgprs) {
			if (device->physical_device->rad_info.chip_class >= VI)
				max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
			else
				max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
		}

		if (conf->num_vgprs)
			max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

		/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
		 * that PS can use.
		 */
		if (lds_per_wave)
			max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

		fprintf(file, "\n%s:\n",
			radv_get_shader_name(var, i));
		if (i == MESA_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}
		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs, var->code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}
}
void radv_shader_variant_destroy(struct radv_device *device,
				 struct radv_shader_variant *variant)
{
	if (__sync_fetch_and_sub(&variant->ref_count, 1) != 1)
		return;

	device->ws->buffer_destroy(variant->bo);
	free(variant);
}
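
/* Derive the SPI_SHADER_PGM_RSRC1/RSRC2 register values from the compiled
 * binary and upload the shader code into a GPU buffer. */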
static void radv_fill_shader_variant(struct radv_device *device,
				     struct radv_shader_variant *variant,
				     struct ac_shader_binary *binary,
				     gl_shader_stage stage)
{
	bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
	unsigned vgpr_comp_cnt = 0;

	if (scratch_enabled && !device->llvm_supports_spill)
		radv_finishme("shader scratch support only available with LLVM 4.0");

	variant->code_size = binary->code_size;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_GEOMETRY:
		variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
			S_00B12C_SCRATCH_EN(scratch_enabled);
		vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
		break;
	case MESA_SHADER_FRAGMENT:
		variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
			S_00B12C_SCRATCH_EN(scratch_enabled);
		break;
	case MESA_SHADER_COMPUTE:
		variant->rsrc2 = S_00B84C_USER_SGPR(variant->info.num_user_sgprs) |
			S_00B84C_SCRATCH_EN(scratch_enabled) |
			S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
			S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
			S_00B84C_TG_SIZE_EN(1) |
			S_00B84C_LDS_SIZE(variant->config.lds_size);
		break;
	default:
		unreachable("unsupported shader type");
		break;
	}

	variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
		S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
		S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		S_00B848_DX10_CLAMP(1) |
		S_00B848_FLOAT_MODE(variant->config.float_mode);

	variant->bo = device->ws->buffer_create(device->ws, binary->code_size, 256,
						RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);

	void *ptr = device->ws->buffer_map(variant->bo);
	memcpy(ptr, binary->code, binary->code_size);
	device->ws->buffer_unmap(variant->bo);
}
static struct radv_shader_variant *radv_shader_variant_create(struct radv_device *device,
							       struct nir_shader *shader,
							       struct radv_pipeline_layout *layout,
							       const union ac_shader_variant_key *key,
							       void **code_out,
							       unsigned *code_size_out,
							       bool dump)
{
	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
	enum radeon_family chip_family = device->physical_device->rad_info.family;
	LLVMTargetMachineRef tm;
	if (!variant)
		return NULL;

	struct ac_nir_compiler_options options = {0};
	options.layout = layout;
	if (key)
		options.key = *key;

	struct ac_shader_binary binary;

	options.unsafe_math = !!(device->debug_flags & RADV_DEBUG_UNSAFE_MATH);
	options.family = chip_family;
	options.chip_class = device->physical_device->rad_info.chip_class;
	options.supports_spill = device->llvm_supports_spill;
	tm = ac_create_target_machine(chip_family, options.supports_spill);
	ac_compile_nir_shader(tm, &binary, &variant->config,
			      &variant->info, shader, &options, dump);
	LLVMDisposeTargetMachine(tm);

	radv_fill_shader_variant(device, variant, &binary, shader->stage);

	*code_out = binary.code;
	*code_size_out = binary.code_size;

	free(binary.global_symbol_offsets);
	free(binary.disasm_string);
	variant->ref_count = 1;
	return variant;
}
static struct radv_shader_variant *
radv_pipeline_create_gs_copy_shader(struct radv_pipeline *pipeline,
				    struct nir_shader *nir,
				    void **code_out,
				    unsigned *code_size_out,
				    bool dump_shader)
{
	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
	enum radeon_family chip_family = pipeline->device->physical_device->rad_info.family;
	LLVMTargetMachineRef tm;
	if (!variant)
		return NULL;

	struct ac_nir_compiler_options options = {0};
	struct ac_shader_binary binary;
	options.family = chip_family;
	options.chip_class = pipeline->device->physical_device->rad_info.chip_class;
	options.supports_spill = pipeline->device->llvm_supports_spill;
	tm = ac_create_target_machine(chip_family, options.supports_spill);
	ac_create_gs_copy_shader(tm, nir, &binary, &variant->config, &variant->info, &options, dump_shader);
	LLVMDisposeTargetMachine(tm);

	radv_fill_shader_variant(pipeline->device, variant, &binary, MESA_SHADER_VERTEX);

	*code_out = binary.code;
	*code_size_out = binary.code_size;

	free(binary.global_symbol_offsets);
	free(binary.disasm_string);
	variant->ref_count = 1;
	return variant;
}
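
/* Look the shader up in the pipeline cache by its SHA-1 first; on a miss,
 * compile it (and, for geometry shaders, the GS copy shader) and insert the
 * result back into the cache. */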
static struct radv_shader_variant *
radv_pipeline_compile(struct radv_pipeline *pipeline,
		      struct radv_pipeline_cache *cache,
		      struct radv_shader_module *module,
		      const char *entrypoint,
		      gl_shader_stage stage,
		      const VkSpecializationInfo *spec_info,
		      struct radv_pipeline_layout *layout,
		      const union ac_shader_variant_key *key)
{
	unsigned char sha1[20];
	unsigned char gs_copy_sha1[20];
	struct radv_shader_variant *variant;
	nir_shader *nir;
	void *code = NULL;
	unsigned code_size = 0;
	bool dump = (pipeline->device->debug_flags & RADV_DEBUG_DUMP_SHADERS);

	if (module->nir)
		_mesa_sha1_compute(module->nir->info->name,
				   strlen(module->nir->info->name),
				   module->sha1);

	radv_hash_shader(sha1, module, entrypoint, spec_info, layout, key, 0);
	if (stage == MESA_SHADER_GEOMETRY)
		radv_hash_shader(gs_copy_sha1, module, entrypoint, spec_info,
				 layout, key, 1);

	variant = radv_create_shader_variant_from_pipeline_cache(pipeline->device,
								 cache, sha1);

	if (stage == MESA_SHADER_GEOMETRY) {
		pipeline->gs_copy_shader =
			radv_create_shader_variant_from_pipeline_cache(
				pipeline->device, cache, gs_copy_sha1);
	}

	if (variant &&
	    (stage != MESA_SHADER_GEOMETRY || pipeline->gs_copy_shader))
		return variant;

	nir = radv_shader_compile_to_nir(pipeline->device,
					 module, entrypoint, stage,
					 spec_info, dump);
	if (nir == NULL)
		return NULL;

	variant = radv_shader_variant_create(pipeline->device, nir, layout, key,
					     &code, &code_size, dump);

	if (stage == MESA_SHADER_GEOMETRY) {
		void *gs_copy_code = NULL;
		unsigned gs_copy_code_size = 0;
		pipeline->gs_copy_shader = radv_pipeline_create_gs_copy_shader(
			pipeline, nir, &gs_copy_code, &gs_copy_code_size, dump);

		if (pipeline->gs_copy_shader && cache) {
			pipeline->gs_copy_shader =
				radv_pipeline_cache_insert_shader(cache,
								  gs_copy_sha1,
								  pipeline->gs_copy_shader,
								  gs_copy_code,
								  gs_copy_code_size);
		}
	}
	if (!module->nir)
		ralloc_free(nir);

	if (variant && cache)
		variant = radv_pipeline_cache_insert_shader(cache, sha1, variant,
							    code, code_size);

	if (code)
		free(code);
	return variant;
}
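
/* Compute the scratch (spill) size per wave and the wave limits for this
 * pipeline from the per-stage shader configs. */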
static VkResult
radv_pipeline_scratch_init(struct radv_device *device,
			   struct radv_pipeline *pipeline)
{
	unsigned scratch_bytes_per_wave = 0;
	unsigned max_waves = 0;
	unsigned min_waves = 1;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i]) {
			unsigned max_stage_waves = device->scratch_waves;

			scratch_bytes_per_wave = MAX2(scratch_bytes_per_wave,
						      pipeline->shaders[i]->config.scratch_bytes_per_wave);

			max_stage_waves = MIN2(max_stage_waves,
					       4 * device->physical_device->rad_info.num_good_compute_units *
					       (256 / pipeline->shaders[i]->config.num_vgprs));
			max_waves = MAX2(max_waves, max_stage_waves);
		}
	}

	if (pipeline->shaders[MESA_SHADER_COMPUTE]) {
		unsigned group_size = pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[0] *
				      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[1] *
				      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[2];
		min_waves = MAX2(min_waves, round_up_u32(group_size, 64));
	}

	if (scratch_bytes_per_wave)
		max_waves = MIN2(max_waves, 0xffffffffu / scratch_bytes_per_wave);

	if (scratch_bytes_per_wave && max_waves < min_waves) {
		/* Not really true at this moment, but will be true on first
		 * execution. Avoid having hanging shaders. */
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}
	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
	pipeline->max_waves = max_waves;
	return VK_SUCCESS;
}
static uint32_t si_translate_blend_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case VK_BLEND_OP_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case VK_BLEND_OP_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case VK_BLEND_OP_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		return 0;
	}
}
static uint32_t si_translate_blend_factor(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case VK_BLEND_FACTOR_ONE:
		return V_028780_BLEND_ONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case VK_BLEND_FACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case VK_BLEND_FACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case VK_BLEND_FACTOR_CONSTANT_COLOR:
		return V_028780_BLEND_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
		return V_028780_BLEND_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case VK_BLEND_FACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case VK_BLEND_FACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		return 0;
	}
}
static bool is_dual_src(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_SRC1_COLOR:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
	case VK_BLEND_FACTOR_SRC1_ALPHA:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return true;
	default:
		return false;
	}
}
static unsigned si_choose_spi_color_format(VkFormat vk_format,
					   bool blend_enable,
					   bool blend_need_alpha)
{
	const struct vk_format_description *desc = vk_format_description(vk_format);
	unsigned format, ntype, swap;

	/* Alpha is needed for alpha-to-coverage.
	 * Blending may be with or without alpha.
	 */
	unsigned normal = 0; /* most optimal, may not support blending or export alpha */
	unsigned alpha = 0; /* exports alpha, but may not support blending */
	unsigned blend = 0; /* supports blending, but may not export alpha */
	unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

	format = radv_translate_colorformat(vk_format);
	ntype = radv_translate_color_numformat(vk_format, desc,
					       vk_format_get_first_non_void_channel(vk_format));
	swap = radv_translate_colorswap(vk_format, false);

	/* Choose the SPI color formats. These are required values for Stoney/RB+.
	 * Other chips have multiple choices, though they are not necessarily better.
	 */
	switch (format) {
	case V_028C70_COLOR_5_6_5:
	case V_028C70_COLOR_1_5_5_5:
	case V_028C70_COLOR_5_5_5_1:
	case V_028C70_COLOR_4_4_4_4:
	case V_028C70_COLOR_10_11_11:
	case V_028C70_COLOR_11_11_10:
	case V_028C70_COLOR_8:
	case V_028C70_COLOR_8_8:
	case V_028C70_COLOR_8_8_8_8:
	case V_028C70_COLOR_10_10_10_2:
	case V_028C70_COLOR_2_10_10_10:
		if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_16:
	case V_028C70_COLOR_16_16:
	case V_028C70_COLOR_16_16_16_16:
		if (ntype == V_028C70_NUMBER_UNORM ||
		    ntype == V_028C70_NUMBER_SNORM) {
			/* UNORM16 and SNORM16 don't support blending */
			if (ntype == V_028C70_NUMBER_UNORM)
				normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
			else
				normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

			/* Use 32 bits per channel for blending. */
			if (format == V_028C70_COLOR_16) {
				if (swap == V_028C70_SWAP_STD) { /* R */
					blend = V_028714_SPI_SHADER_32_R;
					blend_alpha = V_028714_SPI_SHADER_32_AR;
				} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else if (format == V_028C70_COLOR_16_16) {
				if (swap == V_028C70_SWAP_STD) { /* RG */
					blend = V_028714_SPI_SHADER_32_GR;
					blend_alpha = V_028714_SPI_SHADER_32_ABGR;
				} else if (swap == V_028C70_SWAP_ALT) /* RA */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
				else
					assert(0);
			} else /* 16_16_16_16 */
				blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_FLOAT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32:
		if (swap == V_028C70_SWAP_STD) { /* R */
			blend = normal = V_028714_SPI_SHADER_32_R;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
		} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32:
		if (swap == V_028C70_SWAP_STD) { /* RG */
			blend = normal = V_028714_SPI_SHADER_32_GR;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (swap == V_028C70_SWAP_ALT) /* RA */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		else
			assert(0);
		break;

	case V_028C70_COLOR_32_32_32_32:
	case V_028C70_COLOR_8_24:
	case V_028C70_COLOR_24_8:
	case V_028C70_COLOR_X24_8_32_FLOAT:
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
		break;

	default:
		unreachable("unhandled blend format");
	}

	if (blend_enable && blend_need_alpha)
		return blend_alpha;
	else if(blend_need_alpha)
		return alpha;
	else if(blend_enable)
		return blend;
	else
		return normal;
}
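
/* Derive the CB shader mask (which channels each color export actually
 * writes) from the packed SPI_SHADER_COL_FORMAT value. */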
static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
{
	unsigned i, cb_shader_mask = 0;

	for (i = 0; i < 8; i++) {
		switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
		case V_028714_SPI_SHADER_ZERO:
			break;
		case V_028714_SPI_SHADER_32_R:
			cb_shader_mask |= 0x1 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_GR:
			cb_shader_mask |= 0x3 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_AR:
			cb_shader_mask |= 0x9 << (i * 4);
			break;
		case V_028714_SPI_SHADER_FP16_ABGR:
		case V_028714_SPI_SHADER_UNORM16_ABGR:
		case V_028714_SPI_SHADER_SNORM16_ABGR:
		case V_028714_SPI_SHADER_UINT16_ABGR:
		case V_028714_SPI_SHADER_SINT16_ABGR:
		case V_028714_SPI_SHADER_32_ABGR:
			cb_shader_mask |= 0xf << (i * 4);
			break;
		default:
			assert(0);
		}
	}
	return cb_shader_mask;
}
static void
radv_pipeline_compute_spi_color_formats(struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					uint32_t blend_enable,
					uint32_t blend_need_alpha,
					bool single_cb_enable,
					bool blend_mrt0_is_dual_src)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	unsigned col_format = 0;

	for (unsigned i = 0; i < (single_cb_enable ? 1 : subpass->color_count); ++i) {
		struct radv_render_pass_attachment *attachment;
		unsigned cf;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		cf = si_choose_spi_color_format(attachment->format,
						blend_enable & (1 << i),
						blend_need_alpha & (1 << i));

		col_format |= cf << (4 * i);
	}

	blend->cb_shader_mask = si_get_cb_shader_mask(col_format);

	if (blend_mrt0_is_dual_src)
		col_format |= (col_format & 0xf) << 4;
	if (!col_format)
		col_format |= V_028714_SPI_SHADER_32_R;
	blend->spi_shader_col_format = col_format;
}
static bool
format_is_int8(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);
	int channel = vk_format_get_first_non_void_channel(format);

	return channel >= 0 && desc->channel[channel].pure_integer &&
	       desc->channel[channel].size == 8;
}
unsigned radv_format_meta_fs_key(VkFormat format)
{
	unsigned col_format = si_choose_spi_color_format(format, false, false) - 1;
	bool is_int8 = format_is_int8(format);

	return col_format + (is_int8 ? 3 : 0);
}
static unsigned
radv_pipeline_compute_is_int8(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned is_int8 = 0;

	for (unsigned i = 0; i < subpass->color_count; ++i) {
		struct radv_render_pass_attachment *attachment;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		if (format_is_int8(attachment->format))
			is_int8 |= 1 << i;
	}

	return is_int8;
}
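
/* Translate VkPipelineColorBlendStateCreateInfo into the CB_BLEND_CONTROL /
 * CB_COLOR_CONTROL register values and the SPI color export formats. */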
static void
radv_pipeline_init_blend_state(struct radv_pipeline *pipeline,
			       const VkGraphicsPipelineCreateInfo *pCreateInfo,
			       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	unsigned mode = V_028808_CB_NORMAL;
	uint32_t blend_enable = 0, blend_need_alpha = 0;
	bool blend_mrt0_is_dual_src = false;
	int i;
	bool single_cb_enable = false;

	if (!vkblend)
		return;

	if (extra && extra->custom_blend_mode) {
		single_cb_enable = true;
		mode = extra->custom_blend_mode;
	}
	blend->cb_color_control = 0;
	if (vkblend->logicOpEnable)
		blend->cb_color_control |= S_028808_ROP3(vkblend->logicOp | (vkblend->logicOp << 4));
	else
		blend->cb_color_control |= S_028808_ROP3(0xcc);

	blend->db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(2) |
		S_028B70_ALPHA_TO_MASK_OFFSET1(2) |
		S_028B70_ALPHA_TO_MASK_OFFSET2(2) |
		S_028B70_ALPHA_TO_MASK_OFFSET3(2);

	blend->cb_target_mask = 0;
	for (i = 0; i < vkblend->attachmentCount; i++) {
		const VkPipelineColorBlendAttachmentState *att = &vkblend->pAttachments[i];
		unsigned blend_cntl = 0;
		VkBlendOp eqRGB = att->colorBlendOp;
		VkBlendFactor srcRGB = att->srcColorBlendFactor;
		VkBlendFactor dstRGB = att->dstColorBlendFactor;
		VkBlendOp eqA = att->alphaBlendOp;
		VkBlendFactor srcA = att->srcAlphaBlendFactor;
		VkBlendFactor dstA = att->dstAlphaBlendFactor;

		blend->sx_mrt0_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);

		if (!att->colorWriteMask)
			continue;

		blend->cb_target_mask |= (unsigned)att->colorWriteMask << (4 * i);
		if (!att->blendEnable) {
			blend->cb_blend_control[i] = blend_cntl;
			continue;
		}

		if (is_dual_src(srcRGB) || is_dual_src(dstRGB) || is_dual_src(srcA) || is_dual_src(dstA))
			if (i == 0)
				blend_mrt0_is_dual_src = true;

		if (eqRGB == VK_BLEND_OP_MIN || eqRGB == VK_BLEND_OP_MAX) {
			srcRGB = VK_BLEND_FACTOR_ONE;
			dstRGB = VK_BLEND_FACTOR_ONE;
		}
		if (eqA == VK_BLEND_OP_MIN || eqA == VK_BLEND_OP_MAX) {
			srcA = VK_BLEND_FACTOR_ONE;
			dstA = VK_BLEND_FACTOR_ONE;
		}

		blend_cntl |= S_028780_ENABLE(1);

		blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
		blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(srcRGB));
		blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(dstRGB));
		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
			blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
			blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
			blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(srcA));
			blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(dstA));
		}
		blend->cb_blend_control[i] = blend_cntl;

		blend_enable |= 1 << i;

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    srcRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
			blend_need_alpha |= 1 << i;
	}
	for (i = vkblend->attachmentCount; i < 8; i++)
		blend->cb_blend_control[i] = 0;

	if (blend->cb_target_mask)
		blend->cb_color_control |= S_028808_MODE(mode);
	else
		blend->cb_color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	radv_pipeline_compute_spi_color_formats(pipeline, pCreateInfo,
						blend_enable, blend_need_alpha, single_cb_enable, blend_mrt0_is_dual_src);
}
static uint32_t si_translate_stencil_op(enum VkStencilOp op)
{
	switch (op) {
	case VK_STENCIL_OP_KEEP:
		return V_02842C_STENCIL_KEEP;
	case VK_STENCIL_OP_ZERO:
		return V_02842C_STENCIL_ZERO;
	case VK_STENCIL_OP_REPLACE:
		return V_02842C_STENCIL_REPLACE_TEST;
	case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
		return V_02842C_STENCIL_ADD_CLAMP;
	case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
		return V_02842C_STENCIL_SUB_CLAMP;
	case VK_STENCIL_OP_INVERT:
		return V_02842C_STENCIL_INVERT;
	case VK_STENCIL_OP_INCREMENT_AND_WRAP:
		return V_02842C_STENCIL_ADD_WRAP;
	case VK_STENCIL_OP_DECREMENT_AND_WRAP:
		return V_02842C_STENCIL_SUB_WRAP;
	default:
		return 0;
	}
}
static void
radv_pipeline_init_depth_stencil_state(struct radv_pipeline *pipeline,
				       const VkGraphicsPipelineCreateInfo *pCreateInfo,
				       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineDepthStencilStateCreateInfo *vkds = pCreateInfo->pDepthStencilState;
	struct radv_depth_stencil_state *ds = &pipeline->graphics.ds;

	memset(ds, 0, sizeof(*ds));
	if (!vkds)
		return;
	ds->db_depth_control = S_028800_Z_ENABLE(vkds->depthTestEnable ? 1 : 0) |
		S_028800_Z_WRITE_ENABLE(vkds->depthWriteEnable ? 1 : 0) |
		S_028800_ZFUNC(vkds->depthCompareOp) |
		S_028800_DEPTH_BOUNDS_ENABLE(vkds->depthBoundsTestEnable ? 1 : 0);

	if (vkds->stencilTestEnable) {
		ds->db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
		ds->db_depth_control |= S_028800_STENCILFUNC(vkds->front.compareOp);
		ds->db_stencil_control |= S_02842C_STENCILFAIL(si_translate_stencil_op(vkds->front.failOp));
		ds->db_stencil_control |= S_02842C_STENCILZPASS(si_translate_stencil_op(vkds->front.passOp));
		ds->db_stencil_control |= S_02842C_STENCILZFAIL(si_translate_stencil_op(vkds->front.depthFailOp));

		ds->db_depth_control |= S_028800_STENCILFUNC_BF(vkds->back.compareOp);
		ds->db_stencil_control |= S_02842C_STENCILFAIL_BF(si_translate_stencil_op(vkds->back.failOp));
		ds->db_stencil_control |= S_02842C_STENCILZPASS_BF(si_translate_stencil_op(vkds->back.passOp));
		ds->db_stencil_control |= S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(vkds->back.depthFailOp));
	}

	if (extra) {
		ds->db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(extra->db_depth_clear);
		ds->db_render_control |= S_028000_STENCIL_CLEAR_ENABLE(extra->db_stencil_clear);

		ds->db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->db_resummarize);
		ds->db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->db_flush_depth_inplace);
		ds->db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->db_flush_stencil_inplace);
		ds->db_render_override2 |= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra->db_depth_disable_expclear);
		ds->db_render_override2 |= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra->db_stencil_disable_expclear);
	}
}
static uint32_t si_translate_fill(VkPolygonMode func)
{
	switch (func) {
	case VK_POLYGON_MODE_FILL:
		return V_028814_X_DRAW_TRIANGLES;
	case VK_POLYGON_MODE_LINE:
		return V_028814_X_DRAW_LINES;
	case VK_POLYGON_MODE_POINT:
		return V_028814_X_DRAW_POINTS;
	default:
		assert(0);
		return V_028814_X_DRAW_POINTS;
	}
}
static void
radv_pipeline_init_raster_state(struct radv_pipeline *pipeline,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationStateCreateInfo *vkraster = pCreateInfo->pRasterizationState;
	struct radv_raster_state *raster = &pipeline->graphics.raster;

	memset(raster, 0, sizeof(*raster));

	raster->spi_interp_control =
		S_0286D4_FLAT_SHADE_ENA(1) |
		S_0286D4_PNT_SPRITE_ENA(1) |
		S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
		S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
		S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
		S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
		S_0286D4_PNT_SPRITE_TOP_1(0); // vulkan is top to bottom - 1.0 at bottom

	raster->pa_cl_vs_out_cntl = S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(1);
	raster->pa_cl_clip_cntl = S_028810_PS_UCP_MODE(3) |
		S_028810_DX_CLIP_SPACE_DEF(1) | // vulkan uses DX conventions.
		S_028810_ZCLIP_NEAR_DISABLE(vkraster->depthClampEnable ? 1 : 0) |
		S_028810_ZCLIP_FAR_DISABLE(vkraster->depthClampEnable ? 1 : 0) |
		S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1);

	raster->pa_su_vtx_cntl =
		S_028BE4_PIX_CENTER(1) | // TODO verify
		S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
		S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH);

	raster->pa_su_sc_mode_cntl =
		S_028814_FACE(vkraster->frontFace) |
		S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
		S_028814_CULL_BACK(!!(vkraster->cullMode & VK_CULL_MODE_BACK_BIT)) |
		S_028814_POLY_MODE(vkraster->polygonMode != VK_POLYGON_MODE_FILL) |
		S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(vkraster->polygonMode)) |
		S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(vkraster->polygonMode)) |
		S_028814_POLY_OFFSET_FRONT_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
		S_028814_POLY_OFFSET_BACK_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
		S_028814_POLY_OFFSET_PARA_ENABLE(vkraster->depthBiasEnable ? 1 : 0);
}
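
/* Program the MSAA sample count, EQAA registers and the sample mask. */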
static void
radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	unsigned num_tile_pipes = pipeline->device->physical_device->rad_info.num_tile_pipes;
	int ps_iter_samples = 1;
	uint32_t mask = 0xffff;

	ms->num_samples = vkms->rasterizationSamples;

	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.fs.force_persample) {
		ps_iter_samples = vkms->rasterizationSamples;
	}

	ms->pa_sc_line_cntl = S_028BDC_DX10_DIAMOND_TEST_ENA(1);
	ms->pa_sc_aa_config = 0;
	ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
		S_028804_STATIC_ANCHOR_ASSOCIATIONS(1);
	ms->pa_sc_mode_cntl_1 =
		S_028A4C_WALK_FENCE_ENABLE(1) | //TODO linear dst fixes
		S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
		S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1) |
		S_028A4C_SUPERTILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_TILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE(1) |
		EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1);

	if (vkms->rasterizationSamples > 1) {
		unsigned log_samples = util_logbase2(vkms->rasterizationSamples);
		unsigned log_ps_iter_samples = util_logbase2(util_next_power_of_two(ps_iter_samples));
		ms->pa_sc_mode_cntl_0 = S_028A48_MSAA_ENABLE(1);
		ms->pa_sc_line_cntl |= S_028BDC_EXPAND_LINE_WIDTH(1); /* CM_R_028BDC_PA_SC_LINE_CNTL */
		ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_samples) |
			S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
			S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
			S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
		ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
			S_028BE0_MAX_SAMPLE_DIST(radv_cayman_get_maxdist(log_samples)) |
			S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples); /* CM_R_028BE0_PA_SC_AA_CONFIG */
		ms->pa_sc_mode_cntl_1 |= EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
	}

	if (vkms->alphaToCoverageEnable)
		blend->db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);

	if (vkms->pSampleMask) {
		mask = vkms->pSampleMask[0] & 0xffff;
	}

	ms->pa_sc_aa_mask[0] = mask | (mask << 16);
	ms->pa_sc_aa_mask[1] = mask | (mask << 16);
}
static uint32_t
si_translate_prim(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
		return V_008958_DI_PT_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
		return V_008958_DI_PT_LINELIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
		return V_008958_DI_PT_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
		return V_008958_DI_PT_TRILIST;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
		return V_008958_DI_PT_TRISTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
		return V_008958_DI_PT_TRIFAN;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_LINELIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_LINESTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_TRILIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_TRISTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_008958_DI_PT_PATCH;
	default:
		assert(0);
		return 0;
	}
}
static uint32_t
si_conv_gl_prim_to_gs_out(unsigned gl_prim)
{
	switch (gl_prim) {
	case 0: /* GL_POINTS */
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case 1: /* GL_LINES */
	case 3: /* GL_LINE_STRIP */
	case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
	case 0x8E7A: /* GL_ISOLINES */
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;

	case 4: /* GL_TRIANGLES */
	case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
	case 5: /* GL_TRIANGLE_STRIP */
	case 7: /* GL_QUADS */
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}
static uint32_t
si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}
static unsigned si_map_swizzle(unsigned swizzle)
{
	switch (swizzle) {
	case VK_SWIZZLE_Y:
		return V_008F0C_SQ_SEL_Y;
	case VK_SWIZZLE_Z:
		return V_008F0C_SQ_SEL_Z;
	case VK_SWIZZLE_W:
		return V_008F0C_SQ_SEL_W;
	case VK_SWIZZLE_0:
		return V_008F0C_SQ_SEL_0;
	case VK_SWIZZLE_1:
		return V_008F0C_SQ_SEL_1;
	default: /* VK_SWIZZLE_X */
		return V_008F0C_SQ_SEL_X;
	}
}
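
/* Copy the state that is not listed in pDynamicState from the create info
 * into pipeline->dynamic_state; dynamic states keep their defaults and are
 * set at command-record time instead. */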
static void
radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
				 const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	radv_cmd_dirty_mask_t states = RADV_CMD_DIRTY_DYNAMIC_ALL;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

	pipeline->dynamic_state = default_dynamic_state;

	if (pCreateInfo->pDynamicState) {
		/* Remove all of the states that are marked as dynamic */
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t s = 0; s < count; s++)
			states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
	}

	struct radv_dynamic_state *dynamic = &pipeline->dynamic_state;

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pViewportState is [...] NULL if the pipeline
	 *    has rasterization disabled.
	 */
	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
		assert(pCreateInfo->pViewportState);

		dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
		if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
			typed_memcpy(dynamic->viewport.viewports,
				     pCreateInfo->pViewportState->pViewports,
				     pCreateInfo->pViewportState->viewportCount);
		}

		dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
		if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
			typed_memcpy(dynamic->scissor.scissors,
				     pCreateInfo->pViewportState->pScissors,
				     pCreateInfo->pViewportState->scissorCount);
		}
	}

	if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
	}

	if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->depth_bias.bias =
			pCreateInfo->pRasterizationState->depthBiasConstantFactor;
		dynamic->depth_bias.clamp =
			pCreateInfo->pRasterizationState->depthBiasClamp;
		dynamic->depth_bias.slope =
			pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
	}

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pColorBlendState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is
	 *    created against does not use any color attachments.
	 */
	bool uses_color_att = false;
	for (unsigned i = 0; i < subpass->color_count; ++i) {
		if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
			uses_color_att = true;
			break;
		}
	}

	if (uses_color_att && states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
		assert(pCreateInfo->pColorBlendState);
		typed_memcpy(dynamic->blend_constants,
			     pCreateInfo->pColorBlendState->blendConstants, 4);
	}

	/* If there is no depthstencil attachment, then don't read
	 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
	 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
	 * no need to override the depthstencil defaults in
	 * radv_pipeline::dynamic_state when there is no depthstencil attachment.
	 *
	 * Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pDepthStencilState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is created
	 *    against does not use a depth/stencil attachment.
	 */
	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
	    subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
		assert(pCreateInfo->pDepthStencilState);

		if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
			dynamic->depth_bounds.min =
				pCreateInfo->pDepthStencilState->minDepthBounds;
			dynamic->depth_bounds.max =
				pCreateInfo->pDepthStencilState->maxDepthBounds;
		}

		if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
			dynamic->stencil_compare_mask.front =
				pCreateInfo->pDepthStencilState->front.compareMask;
			dynamic->stencil_compare_mask.back =
				pCreateInfo->pDepthStencilState->back.compareMask;
		}

		if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
			dynamic->stencil_write_mask.front =
				pCreateInfo->pDepthStencilState->front.writeMask;
			dynamic->stencil_write_mask.back =
				pCreateInfo->pDepthStencilState->back.writeMask;
		}

		if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
			dynamic->stencil_reference.front =
				pCreateInfo->pDepthStencilState->front.reference;
			dynamic->stencil_reference.back =
				pCreateInfo->pDepthStencilState->back.reference;
		}
	}

	pipeline->dynamic_state_mask = states;
}
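
/* Build the vertex shader variant key: whether the VS runs as an ES stage
 * (feeding a GS) and which vertex inputs are instance-rate. */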
static union ac_shader_variant_key
radv_compute_vs_key(const VkGraphicsPipelineCreateInfo *pCreateInfo, bool as_es)
{
	union ac_shader_variant_key key;
	const VkPipelineVertexInputStateCreateInfo *input_state =
		pCreateInfo->pVertexInputState;

	memset(&key, 0, sizeof(key));
	key.vs.instance_rate_inputs = 0;
	key.vs.as_es = as_es;

	for (unsigned i = 0; i < input_state->vertexAttributeDescriptionCount; ++i) {
		unsigned binding;
		binding = input_state->pVertexAttributeDescriptions[i].binding;
		if (input_state->pVertexBindingDescriptions[binding].inputRate)
			key.vs.instance_rate_inputs |= 1u << input_state->pVertexAttributeDescriptions[i].location;
	}
	return key;
}
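
/* Size the ESGS and GSVS rings from the ES output item size and the maximum
 * GS emit size, clamped to the per-SE limits. */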
static void
calculate_gs_ring_sizes(struct radv_pipeline *pipeline)
{
	struct radv_device *device = pipeline->device;
	unsigned num_se = device->physical_device->rad_info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	unsigned gs_vertex_reuse = 16 * num_se; /* GS_VERTEX_REUSE register (per SE) */
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;

	struct ac_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
	struct ac_shader_variant_info *es_info = &pipeline->shaders[MESA_SHADER_VERTEX]->info;
	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(es_info->vs.esgs_itemsize * gs_vertex_reuse *
					    wave_size, alignment);
	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
		es_info->vs.esgs_itemsize * gs_info->gs.vertices_in;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
		gs_info->gs.max_gsvs_emit_size * 1; // no streams in VK (gs->max_gs_stream + 1);

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	pipeline->graphics.esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
	pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}
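
/* Build the full graphics pipeline: compile the shader stages, then derive
 * the fixed-function register state from the create info. */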
static VkResult
radv_pipeline_init(struct radv_pipeline *pipeline,
		   struct radv_device *device,
		   struct radv_pipeline_cache *cache,
		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   const struct radv_graphics_pipeline_create_info *extra,
		   const VkAllocationCallbacks *alloc)
{
	struct radv_shader_module fs_m = {0};
	VkResult result;

	if (alloc == NULL)
		alloc = &device->alloc;

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);

	radv_pipeline_init_dynamic_state(pipeline, pCreateInfo);
	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
		pStages[stage] = &pCreateInfo->pStages[i];
		modules[stage] = radv_shader_module_from_handle(pStages[stage]->module);
	}

	radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);

	if (modules[MESA_SHADER_VERTEX]) {
		bool as_es = modules[MESA_SHADER_GEOMETRY] != NULL;
		union ac_shader_variant_key key = radv_compute_vs_key(pCreateInfo, as_es);

		pipeline->shaders[MESA_SHADER_VERTEX] =
			radv_pipeline_compile(pipeline, cache, modules[MESA_SHADER_VERTEX],
					      pStages[MESA_SHADER_VERTEX]->pName,
					      MESA_SHADER_VERTEX,
					      pStages[MESA_SHADER_VERTEX]->pSpecializationInfo,
					      pipeline->layout, &key);

		pipeline->active_stages |= mesa_to_vk_shader_stage(MESA_SHADER_VERTEX);
	}

	if (modules[MESA_SHADER_GEOMETRY]) {
		union ac_shader_variant_key key = radv_compute_vs_key(pCreateInfo, false);

		pipeline->shaders[MESA_SHADER_GEOMETRY] =
			radv_pipeline_compile(pipeline, cache, modules[MESA_SHADER_GEOMETRY],
					      pStages[MESA_SHADER_GEOMETRY]->pName,
					      MESA_SHADER_GEOMETRY,
					      pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo,
					      pipeline->layout, &key);

		pipeline->active_stages |= mesa_to_vk_shader_stage(MESA_SHADER_GEOMETRY);
		calculate_gs_ring_sizes(pipeline);
	}

	if (!modules[MESA_SHADER_FRAGMENT]) {
		nir_builder fs_b;
		nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
		fs_b.shader->info->name = ralloc_strdup(fs_b.shader, "noop_fs");
		fs_m.nir = fs_b.shader;
		modules[MESA_SHADER_FRAGMENT] = &fs_m;
	}

	if (modules[MESA_SHADER_FRAGMENT]) {
		union ac_shader_variant_key key;
		key.fs.col_format = pipeline->graphics.blend.spi_shader_col_format;
		key.fs.is_int8 = radv_pipeline_compute_is_int8(pCreateInfo);

		const VkPipelineShaderStageCreateInfo *stage = pStages[MESA_SHADER_FRAGMENT];

		pipeline->shaders[MESA_SHADER_FRAGMENT] =
			radv_pipeline_compile(pipeline, cache, modules[MESA_SHADER_FRAGMENT],
					      stage ? stage->pName : "main",
					      MESA_SHADER_FRAGMENT,
					      stage ? stage->pSpecializationInfo : NULL,
					      pipeline->layout, &key);
		pipeline->active_stages |= mesa_to_vk_shader_stage(MESA_SHADER_FRAGMENT);
	}

	if (fs_m.nir)
		ralloc_free(fs_m.nir);

	radv_pipeline_init_depth_stencil_state(pipeline, pCreateInfo, extra);
	radv_pipeline_init_raster_state(pipeline, pCreateInfo);
	radv_pipeline_init_multisample_state(pipeline, pCreateInfo);
	pipeline->graphics.prim = si_translate_prim(pCreateInfo->pInputAssemblyState->topology);
	if (radv_pipeline_has_gs(pipeline)) {
		pipeline->graphics.gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
	} else {
		pipeline->graphics.gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
	}
	if (extra && extra->use_rectlist) {
		pipeline->graphics.prim = V_008958_DI_PT_RECTLIST;
		pipeline->graphics.gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	}
	pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;

	const VkPipelineVertexInputStateCreateInfo *vi_info =
		pCreateInfo->pVertexInputState;
	for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
		const VkVertexInputAttributeDescription *desc =
			&vi_info->pVertexAttributeDescriptions[i];
		unsigned loc = desc->location;
		const struct vk_format_description *format_desc;
		int first_non_void;
		uint32_t num_format, data_format;
		format_desc = vk_format_description(desc->format);
		first_non_void = vk_format_get_first_non_void_channel(desc->format);

		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);

		pipeline->va_rsrc_word3[loc] = S_008F0C_DST_SEL_X(si_map_swizzle(format_desc->swizzle[0])) |
			S_008F0C_DST_SEL_Y(si_map_swizzle(format_desc->swizzle[1])) |
			S_008F0C_DST_SEL_Z(si_map_swizzle(format_desc->swizzle[2])) |
			S_008F0C_DST_SEL_W(si_map_swizzle(format_desc->swizzle[3])) |
			S_008F0C_NUM_FORMAT(num_format) |
			S_008F0C_DATA_FORMAT(data_format);
		pipeline->va_format_size[loc] = format_desc->block.bits / 8;
		pipeline->va_offset[loc] = desc->offset;
		pipeline->va_binding[loc] = desc->binding;
		pipeline->num_vertex_attribs = MAX2(pipeline->num_vertex_attribs, loc + 1);
	}

	for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *desc =
			&vi_info->pVertexBindingDescriptions[i];

		pipeline->binding_stride[desc->binding] = desc->stride;
	}

	if (device->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) {
		radv_dump_pipeline_stats(device, pipeline);
	}

	result = radv_pipeline_scratch_init(device, pipeline);
	return result;
}
VkResult
radv_graphics_pipeline_create(
	VkDevice _device,
	VkPipelineCache _cache,
	const VkGraphicsPipelineCreateInfo *pCreateInfo,
	const struct radv_graphics_pipeline_create_info *extra,
	const VkAllocationCallbacks *pAllocator,
	VkPipeline *pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pipeline, 0, sizeof(*pipeline));
	result = radv_pipeline_init(pipeline, device, cache,
				    pCreateInfo, extra, pAllocator);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}
VkResult radv_CreateGraphicsPipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkGraphicsPipelineCreateInfo*         pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_graphics_pipeline_create(_device,
						  pipelineCache,
						  &pCreateInfos[i],
						  NULL, pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}
static VkResult radv_compute_pipeline_create(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkComputePipelineCreateInfo*          pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	RADV_FROM_HANDLE(radv_shader_module, module, pCreateInfo->stage.module);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pipeline, 0, sizeof(*pipeline));
	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);

	pipeline->shaders[MESA_SHADER_COMPUTE] =
		radv_pipeline_compile(pipeline, cache, module,
				      pCreateInfo->stage.pName,
				      MESA_SHADER_COMPUTE,
				      pCreateInfo->stage.pSpecializationInfo,
				      pipeline->layout, NULL);

	result = radv_pipeline_scratch_init(device, pipeline);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	if (device->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) {
		radv_dump_pipeline_stats(device, pipeline);
	}
	return VK_SUCCESS;
}
VkResult radv_CreateComputePipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkComputePipelineCreateInfo*          pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;

	unsigned i = 0;
	for (; i < count; i++) {
		VkResult r;
		r = radv_compute_pipeline_create(_device, pipelineCache,
						 &pCreateInfos[i],
						 pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}