/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>

#include "r600d_common.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"

void radv_shader_variant_destroy(struct radv_device *device,
				 struct radv_shader_variant *variant);

static const struct nir_shader_compiler_options nir_options = {
	.vertex_id_zero_based = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
};

VkResult radv_CreateShaderModule(
	VkDevice                                    _device,
	const VkShaderModuleCreateInfo*             pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkShaderModule*                             pShaderModule)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_shader_module *module;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	module = vk_alloc2(&device->alloc, pAllocator,
			   sizeof(*module) + pCreateInfo->codeSize, 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (module == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	module->size = pCreateInfo->codeSize;
	memcpy(module->data, pCreateInfo->pCode, module->size);

	_mesa_sha1_compute(module->data, module->size, module->sha1);

	*pShaderModule = radv_shader_module_to_handle(module);

	return VK_SUCCESS;
}

void radv_DestroyShaderModule(
	VkDevice                                    _device,
	VkShaderModule                              _module,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_shader_module, module, _module);

	if (!module)
		return;

	vk_free2(&device->alloc, pAllocator, module);
}

static void
radv_pipeline_destroy(struct radv_device *device,
		      struct radv_pipeline *pipeline,
		      const VkAllocationCallbacks *allocator)
{
	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i)
		if (pipeline->shaders[i])
			radv_shader_variant_destroy(device, pipeline->shaders[i]);

	if (pipeline->gs_copy_shader)
		radv_shader_variant_destroy(device, pipeline->gs_copy_shader);

	vk_free2(&device->alloc, allocator, pipeline);
}

void radv_DestroyPipeline(
	VkDevice                                    _device,
	VkPipeline                                  _pipeline,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	if (!_pipeline)
		return;

	radv_pipeline_destroy(device, pipeline, pAllocator);
}

static void
radv_optimize_nir(struct nir_shader *shader)
{
	bool progress;

	do {
		progress = false;

		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
		NIR_PASS_V(shader, nir_lower_alu_to_scalar);
		NIR_PASS_V(shader, nir_lower_phis_to_scalar);

		NIR_PASS(progress, shader, nir_copy_prop);
		NIR_PASS(progress, shader, nir_opt_remove_phis);
		NIR_PASS(progress, shader, nir_opt_dce);
		NIR_PASS(progress, shader, nir_opt_dead_cf);
		NIR_PASS(progress, shader, nir_opt_cse);
		NIR_PASS(progress, shader, nir_opt_peephole_select, 8);
		NIR_PASS(progress, shader, nir_opt_algebraic);
		NIR_PASS(progress, shader, nir_opt_constant_folding);
		NIR_PASS(progress, shader, nir_opt_undef);
		NIR_PASS(progress, shader, nir_opt_conditional_discard);
	} while (progress);
}
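
/*
 * Note on the loop above: NIR_PASS records in `progress` whether a pass
 * changed anything, while NIR_PASS_V runs a pass unconditionally without
 * reporting. The cleanup passes are iterated to a fixed point because they
 * feed each other, e.g. dead-code elimination can expose new
 * copy-propagation opportunities on the next round.
 */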

static nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
			   struct radv_shader_module *module,
			   const char *entrypoint_name,
			   gl_shader_stage stage,
			   const VkSpecializationInfo *spec_info,
			   bool dump)
{
	if (strcmp(entrypoint_name, "main") != 0) {
		radv_finishme("Multiple shaders per module not really supported");
	}

	nir_shader *nir;
	nir_function *entry_point;
	if (module->nir) {
		/* Some things such as our meta clear/blit code will give us a NIR
		 * shader directly.  In that case, we just ignore the SPIR-V entirely
		 * and just use the NIR shader */
		nir = module->nir;
		nir->options = &nir_options;
		nir_validate_shader(nir);

		assert(exec_list_length(&nir->functions) == 1);
		struct exec_node *node = exec_list_get_head(&nir->functions);
		entry_point = exec_node_data(nir_function, node, node);
	} else {
		uint32_t *spirv = (uint32_t *) module->data;
		assert(module->size % 4 == 0);

		uint32_t num_spec_entries = 0;
		struct nir_spirv_specialization *spec_entries = NULL;
		if (spec_info && spec_info->mapEntryCount > 0) {
			num_spec_entries = spec_info->mapEntryCount;
			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
			for (uint32_t i = 0; i < num_spec_entries; i++) {
				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
				const void *data = spec_info->pData + entry.offset;
				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
				if (spec_info->dataSize == 8)
					spec_entries[i].data64 = *(const uint64_t *)data;
				else
					spec_entries[i].data32 = *(const uint32_t *)data;
			}
		}
		const struct nir_spirv_supported_extensions supported_ext = {
			.draw_parameters = true,
			.image_write_without_format = true,
		};
		entry_point = spirv_to_nir(spirv, module->size / 4,
					   spec_entries, num_spec_entries,
					   stage, entrypoint_name, &supported_ext, &nir_options);
		nir = entry_point->shader;
		assert(nir->stage == stage);
		nir_validate_shader(nir);

		free(spec_entries);

		/* We have to lower away local constant initializers right before we
		 * inline functions.  That way they get properly initialized at the top
		 * of the function and not at the top of its caller.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
		NIR_PASS_V(nir, nir_lower_returns);
		NIR_PASS_V(nir, nir_inline_functions);

		/* Pick off the single entrypoint that we want */
		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
			if (func != entry_point)
				exec_node_remove(&func->node);
		}
		assert(exec_list_length(&nir->functions) == 1);
		entry_point->name = ralloc_strdup(entry_point, "main");

		NIR_PASS_V(nir, nir_remove_dead_variables,
			   nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

		/* Now that we've deleted all but the main function, we can go ahead and
		 * lower the rest of the constant initializers.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
		NIR_PASS_V(nir, nir_lower_system_values);
	}

	/* Vulkan uses the separate-shader linking model */
	nir->info->separate_shader = true;

	// nir = brw_preprocess_nir(compiler, nir);

	nir_shader_gather_info(nir, entry_point->impl);

	nir_variable_mode indirect_mask = 0;
	//	if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
	indirect_mask |= nir_var_shader_in;
	//	if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
	indirect_mask |= nir_var_local;

	nir_lower_indirect_derefs(nir, indirect_mask);

	static const nir_lower_tex_options tex_options = {
		.lower_txp = ~0,
	};

	nir_lower_tex(nir, &tex_options);

	nir_lower_vars_to_ssa(nir);
	nir_lower_var_copies(nir);
	nir_lower_global_vars_to_local(nir);
	nir_remove_dead_variables(nir, nir_var_local);
	radv_optimize_nir(nir);

	if (dump)
		nir_print_shader(nir, stderr);

	return nir;
}
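
/*
 * Worked example for the specialization-constant loop above (illustrative
 * values): a VkSpecializationInfo with dataSize == 4, pData pointing at
 * the bytes of 0x00000010, and one map entry {constantID = 7, offset = 0,
 * size = 4} produces spec_entries[0] = {.id = 7, .data32 = 16}, which
 * spirv_to_nir substitutes for the OpSpecConstant decorated with SpecId 7.
 */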

static const char *radv_get_shader_name(struct radv_shader_variant *var,
					gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX: return var->info.vs.as_es ? "Vertex Shader as ES" : "Vertex Shader as VS";
	case MESA_SHADER_GEOMETRY: return "Geometry Shader";
	case MESA_SHADER_FRAGMENT: return "Pixel Shader";
	case MESA_SHADER_COMPUTE: return "Compute Shader";
	default:
		return "Unknown shader";
	}
}

static void radv_dump_pipeline_stats(struct radv_device *device, struct radv_pipeline *pipeline)
{
	unsigned lds_increment = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
	struct radv_shader_variant *var;
	struct ac_shader_config *conf;
	FILE *file = stderr;
	int i;
	unsigned max_simd_waves = 10;
	unsigned lds_per_wave = 0;

	for (i = 0; i < MESA_SHADER_STAGES; i++) {
		if (!pipeline->shaders[i])
			continue;
		var = pipeline->shaders[i];
		conf = &var->config;

		if (i == MESA_SHADER_FRAGMENT) {
			lds_per_wave = conf->lds_size * lds_increment +
				       align(var->info.fs.num_interp * 48, lds_increment);
		}

		if (conf->num_sgprs) {
			if (device->physical_device->rad_info.chip_class >= VI)
				max_simd_waves = MIN2(max_simd_waves, 800 / conf->num_sgprs);
			else
				max_simd_waves = MIN2(max_simd_waves, 512 / conf->num_sgprs);
		}

		if (conf->num_vgprs)
			max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);

		/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
		 * that PS can use.
		 */
		if (lds_per_wave)
			max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

		fprintf(file, "\n%s:\n",
			radv_get_shader_name(var, i));
		if (i == MESA_SHADER_FRAGMENT) {
			fprintf(file, "*** SHADER CONFIG ***\n"
				"SPI_PS_INPUT_ADDR = 0x%04x\n"
				"SPI_PS_INPUT_ENA  = 0x%04x\n",
				conf->spi_ps_input_addr, conf->spi_ps_input_ena);
		}
		fprintf(file, "*** SHADER STATS ***\n"
			"SGPRS: %d\n"
			"VGPRS: %d\n"
			"Spilled SGPRs: %d\n"
			"Spilled VGPRs: %d\n"
			"Code Size: %d bytes\n"
			"LDS: %d blocks\n"
			"Scratch: %d bytes per wave\n"
			"Max Waves: %d\n"
			"********************\n\n\n",
			conf->num_sgprs, conf->num_vgprs,
			conf->spilled_sgprs, conf->spilled_vgprs, var->code_size,
			conf->lds_size, conf->scratch_bytes_per_wave,
			max_simd_waves);
	}
}
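
/*
 * Worked example for the occupancy estimate above (illustrative numbers):
 * a fragment shader on VI using 40 SGPRs and 32 VGPRs gets
 * MIN2(10, 800 / 40) = 10 waves from the SGPR budget but
 * MIN2(10, 256 / 32) = 8 waves from the VGPR budget, so VGPR pressure
 * limits the SIMD to 8 waves; a large lds_per_wave could lower that
 * further.
 */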

void radv_shader_variant_destroy(struct radv_device *device,
				 struct radv_shader_variant *variant)
{
	if (__sync_fetch_and_sub(&variant->ref_count, 1) != 1)
		return;

	device->ws->buffer_destroy(variant->bo);
	free(variant);
}

static void radv_fill_shader_variant(struct radv_device *device,
				     struct radv_shader_variant *variant,
				     struct ac_shader_binary *binary,
				     gl_shader_stage stage)
{
	bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
	unsigned vgpr_comp_cnt = 0;

	if (scratch_enabled && !device->llvm_supports_spill)
		radv_finishme("shader scratch support only available with LLVM 4.0");

	variant->code_size = binary->code_size;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_GEOMETRY:
		variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
			S_00B12C_SCRATCH_EN(scratch_enabled);
		vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
		break;
	case MESA_SHADER_FRAGMENT:
		variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
			S_00B12C_SCRATCH_EN(scratch_enabled);
		break;
	case MESA_SHADER_COMPUTE:
		variant->rsrc2 = S_00B84C_USER_SGPR(variant->info.num_user_sgprs) |
			S_00B84C_SCRATCH_EN(scratch_enabled) |
			S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
			S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
			S_00B84C_TG_SIZE_EN(1) |
			S_00B84C_LDS_SIZE(variant->config.lds_size);
		break;
	default:
		unreachable("unsupported shader type");
		break;
	}

	variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
		S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
		S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		S_00B848_DX10_CLAMP(1) |
		S_00B848_FLOAT_MODE(variant->config.float_mode);

	variant->bo = device->ws->buffer_create(device->ws, binary->code_size, 256,
						RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);

	void *ptr = device->ws->buffer_map(variant->bo);
	memcpy(ptr, binary->code, binary->code_size);
	device->ws->buffer_unmap(variant->bo);
}
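
/*
 * Worked example for the RSRC1 encoding above (illustrative numbers): the
 * hardware allocates VGPRs in granules of 4 and SGPRs in granules of 8,
 * and the fields hold granule counts minus one. A shader with
 * num_vgprs = 24 and num_sgprs = 40 is therefore encoded as
 * S_00B848_VGPRS((24 - 1) / 4) = 5 and S_00B848_SGPRS((40 - 1) / 8) = 4.
 */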

static struct radv_shader_variant *radv_shader_variant_create(struct radv_device *device,
							       struct nir_shader *shader,
							       struct radv_pipeline_layout *layout,
							       const union ac_shader_variant_key *key,
							       void **code_out,
							       unsigned *code_size_out,
							       bool dump)
{
	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
	enum radeon_family chip_family = device->physical_device->rad_info.family;
	LLVMTargetMachineRef tm;
	if (!variant)
		return NULL;

	struct ac_nir_compiler_options options = {0};
	options.layout = layout;
	if (key)
		options.key = *key;

	struct ac_shader_binary binary;

	options.unsafe_math = !!(device->debug_flags & RADV_DEBUG_UNSAFE_MATH);
	options.family = chip_family;
	options.chip_class = device->physical_device->rad_info.chip_class;
	options.supports_spill = device->llvm_supports_spill;
	tm = ac_create_target_machine(chip_family, options.supports_spill);
	ac_compile_nir_shader(tm, &binary, &variant->config,
			      &variant->info, shader, &options, dump);
	LLVMDisposeTargetMachine(tm);

	radv_fill_shader_variant(device, variant, &binary, shader->stage);

	if (code_out) {
		*code_out = binary.code;
		*code_size_out = binary.code_size;
	} else
		free(binary.code);
	free(binary.global_symbol_offsets);
	free(binary.disasm_string);
	variant->ref_count = 1;
	return variant;
}

static struct radv_shader_variant *
radv_pipeline_create_gs_copy_shader(struct radv_pipeline *pipeline,
				    struct nir_shader *nir,
				    void **code_out,
				    unsigned *code_size_out,
				    bool dump_shader)
{
	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
	enum radeon_family chip_family = pipeline->device->physical_device->rad_info.family;
	LLVMTargetMachineRef tm;
	if (!variant)
		return NULL;

	struct ac_nir_compiler_options options = {0};
	struct ac_shader_binary binary;
	options.family = chip_family;
	options.chip_class = pipeline->device->physical_device->rad_info.chip_class;
	options.supports_spill = pipeline->device->llvm_supports_spill;
	tm = ac_create_target_machine(chip_family, options.supports_spill);
	ac_create_gs_copy_shader(tm, nir, &binary, &variant->config, &variant->info, &options, dump_shader);
	LLVMDisposeTargetMachine(tm);

	radv_fill_shader_variant(pipeline->device, variant, &binary, MESA_SHADER_VERTEX);

	if (code_out) {
		*code_out = binary.code;
		*code_size_out = binary.code_size;
	} else
		free(binary.code);
	free(binary.global_symbol_offsets);
	free(binary.disasm_string);
	variant->ref_count = 1;
	return variant;
}

static struct radv_shader_variant *
radv_pipeline_compile(struct radv_pipeline *pipeline,
		      struct radv_pipeline_cache *cache,
		      struct radv_shader_module *module,
		      const char *entrypoint,
		      gl_shader_stage stage,
		      const VkSpecializationInfo *spec_info,
		      struct radv_pipeline_layout *layout,
		      const union ac_shader_variant_key *key)
{
	unsigned char sha1[20];
	unsigned char gs_copy_sha1[20];
	struct radv_shader_variant *variant;
	nir_shader *nir;
	void *code = NULL;
	unsigned code_size = 0;
	bool dump = (pipeline->device->debug_flags & RADV_DEBUG_DUMP_SHADERS);

	if (module->nir)
		_mesa_sha1_compute(module->nir->info->name,
				   strlen(module->nir->info->name),
				   module->sha1);

	radv_hash_shader(sha1, module, entrypoint, spec_info, layout, key, 0);
	if (stage == MESA_SHADER_GEOMETRY)
		radv_hash_shader(gs_copy_sha1, module, entrypoint, spec_info,
				 layout, key, 1);

	variant = radv_create_shader_variant_from_pipeline_cache(pipeline->device,
								 cache, sha1);
	if (stage == MESA_SHADER_GEOMETRY) {
		pipeline->gs_copy_shader =
			radv_create_shader_variant_from_pipeline_cache(
				pipeline->device,
				cache,
				gs_copy_sha1);
	}

	if (!variant) {
		nir = radv_shader_compile_to_nir(pipeline->device,
						 module, entrypoint, stage,
						 spec_info, dump);
		if (nir == NULL)
			return NULL;

		variant = radv_shader_variant_create(pipeline->device, nir, layout, key,
						     &code, &code_size, dump);

		if (stage == MESA_SHADER_GEOMETRY) {
			void *gs_copy_code = NULL;
			unsigned gs_copy_code_size = 0;
			pipeline->gs_copy_shader = radv_pipeline_create_gs_copy_shader(
				pipeline, nir, &gs_copy_code, &gs_copy_code_size, dump);

			if (pipeline->gs_copy_shader && cache) {
				pipeline->gs_copy_shader =
					radv_pipeline_cache_insert_shader(cache,
									  gs_copy_sha1,
									  pipeline->gs_copy_shader,
									  gs_copy_code,
									  gs_copy_code_size);
			}
		}
		if (!module->nir)
			ralloc_free(nir);

		if (variant && cache)
			variant = radv_pipeline_cache_insert_shader(cache, sha1, variant,
								    code, code_size);

		if (code)
			free(code);
	}
	return variant;
}
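
/*
 * Summary of the caching flow above: a shader variant is keyed by a SHA-1
 * over the module, entrypoint, specialization data, layout and variant
 * key (a geometry stage hashes a second key for its copy shader). Only on
 * a cache miss is the module lowered to NIR and compiled; the fresh
 * variants are then inserted into the cache so later pipelines created
 * against the same VkPipelineCache can reuse them.
 */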

static VkResult
radv_pipeline_scratch_init(struct radv_device *device,
			   struct radv_pipeline *pipeline)
{
	unsigned scratch_bytes_per_wave = 0;
	unsigned max_waves = 0;
	unsigned min_waves = 1;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i]) {
			unsigned max_stage_waves = device->scratch_waves;

			scratch_bytes_per_wave = MAX2(scratch_bytes_per_wave,
						      pipeline->shaders[i]->config.scratch_bytes_per_wave);

			max_stage_waves = MIN2(max_stage_waves,
					       4 * device->physical_device->rad_info.num_good_compute_units *
					       (256 / pipeline->shaders[i]->config.num_vgprs));
			max_waves = MAX2(max_waves, max_stage_waves);
		}
	}

	if (pipeline->shaders[MESA_SHADER_COMPUTE]) {
		unsigned group_size = pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[0] *
				      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[1] *
				      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[2];
		min_waves = MAX2(min_waves, round_up_u32(group_size, 64));
	}

	if (scratch_bytes_per_wave)
		max_waves = MIN2(max_waves, 0xffffffffu / scratch_bytes_per_wave);

	if (scratch_bytes_per_wave && max_waves < min_waves) {
		/* Not really true at this moment, but will be true on first
		 * execution. Avoid having hanging shaders. */
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}
	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
	pipeline->max_waves = max_waves;
	return VK_SUCCESS;
}
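
/*
 * Worked example for the wave limits above (illustrative numbers): a
 * compute shader with an 8x8x2 workgroup has group_size = 128 threads, so
 * min_waves = round_up_u32(128, 64) = 2 waves must be resident together.
 * If scratch_bytes_per_wave were so large that max_waves fell below 2,
 * the workgroup could never launch, hence the
 * VK_ERROR_OUT_OF_DEVICE_MEMORY above.
 */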

static uint32_t si_translate_blend_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case VK_BLEND_OP_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case VK_BLEND_OP_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case VK_BLEND_OP_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		return 0;
	}
}

static uint32_t si_translate_blend_factor(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case VK_BLEND_FACTOR_ONE:
		return V_028780_BLEND_ONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case VK_BLEND_FACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case VK_BLEND_FACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case VK_BLEND_FACTOR_CONSTANT_COLOR:
		return V_028780_BLEND_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
		return V_028780_BLEND_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case VK_BLEND_FACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case VK_BLEND_FACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		return 0;
	}
}

static bool is_dual_src(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_SRC1_COLOR:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
	case VK_BLEND_FACTOR_SRC1_ALPHA:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return true;
	default:
		return false;
	}
}

static unsigned si_choose_spi_color_format(VkFormat vk_format,
					   bool blend_enable,
					   bool blend_need_alpha)
{
	const struct vk_format_description *desc = vk_format_description(vk_format);
	unsigned format, ntype, swap;

	/* Alpha is needed for alpha-to-coverage.
	 * Blending may be with or without alpha.
	 */
	unsigned normal = 0; /* most optimal, may not support blending or export alpha */
	unsigned alpha = 0; /* exports alpha, but may not support blending */
	unsigned blend = 0; /* supports blending, but may not export alpha */
	unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

	format = radv_translate_colorformat(vk_format);
	ntype = radv_translate_color_numformat(vk_format, desc,
					       vk_format_get_first_non_void_channel(vk_format));
	swap = radv_translate_colorswap(vk_format, false);

	/* Choose the SPI color formats. These are required values for Stoney/RB+.
	 * Other chips have multiple choices, though they are not necessarily better.
	 */
	switch (format) {
	case V_028C70_COLOR_5_6_5:
	case V_028C70_COLOR_1_5_5_5:
	case V_028C70_COLOR_5_5_5_1:
	case V_028C70_COLOR_4_4_4_4:
	case V_028C70_COLOR_10_11_11:
	case V_028C70_COLOR_11_11_10:
	case V_028C70_COLOR_8:
	case V_028C70_COLOR_8_8:
	case V_028C70_COLOR_8_8_8_8:
	case V_028C70_COLOR_10_10_10_2:
	case V_028C70_COLOR_2_10_10_10:
		if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_16:
	case V_028C70_COLOR_16_16:
	case V_028C70_COLOR_16_16_16_16:
		if (ntype == V_028C70_NUMBER_UNORM ||
		    ntype == V_028C70_NUMBER_SNORM) {
			/* UNORM16 and SNORM16 don't support blending */
			if (ntype == V_028C70_NUMBER_UNORM)
				normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
			else
				normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

			/* Use 32 bits per channel for blending. */
			if (format == V_028C70_COLOR_16) {
				if (swap == V_028C70_SWAP_STD) { /* R */
					blend = V_028714_SPI_SHADER_32_R;
					blend_alpha = V_028714_SPI_SHADER_32_AR;
				} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
			} else if (format == V_028C70_COLOR_16_16) {
				if (swap == V_028C70_SWAP_STD) { /* RG */
					blend = V_028714_SPI_SHADER_32_GR;
					blend_alpha = V_028714_SPI_SHADER_32_ABGR;
				} else if (swap == V_028C70_SWAP_ALT) /* RA */
					blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
			} else /* 16_16_16_16 */
				blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (ntype == V_028C70_NUMBER_UINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_SINT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
		else if (ntype == V_028C70_NUMBER_FLOAT)
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
		break;

	case V_028C70_COLOR_32:
		if (swap == V_028C70_SWAP_STD) { /* R */
			blend = normal = V_028714_SPI_SHADER_32_R;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
		} else if (swap == V_028C70_SWAP_ALT_REV) /* A */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		break;

	case V_028C70_COLOR_32_32:
		if (swap == V_028C70_SWAP_STD) { /* RG */
			blend = normal = V_028714_SPI_SHADER_32_GR;
			alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
		} else if (swap == V_028C70_SWAP_ALT) /* RA */
			alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
		break;

	case V_028C70_COLOR_32_32_32_32:
	case V_028C70_COLOR_8_24:
	case V_028C70_COLOR_24_8:
	case V_028C70_COLOR_X24_8_32_FLOAT:
		alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
		break;

	default:
		unreachable("unhandled blend format");
	}

	if (blend_enable && blend_need_alpha)
		return blend_alpha;
	else if (blend_need_alpha)
		return alpha;
	else if (blend_enable)
		return blend;
	else
		return normal;
}
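
/*
 * Worked example for the selection above (illustrative): for
 * VK_FORMAT_B8G8R8A8_UNORM the format is V_028C70_COLOR_8_8_8_8 with a
 * non-integer ntype, so all four candidates collapse to
 * V_028714_SPI_SHADER_FP16_ABGR and the blend flags do not change the
 * result. The distinction matters for e.g. 16-bit UNORM formats, where
 * enabling blending forces a fall back to a 32-bit-per-channel export.
 */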

static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
{
	unsigned i, cb_shader_mask = 0;

	for (i = 0; i < 8; i++) {
		switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
		case V_028714_SPI_SHADER_ZERO:
			break;
		case V_028714_SPI_SHADER_32_R:
			cb_shader_mask |= 0x1 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_GR:
			cb_shader_mask |= 0x3 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_AR:
			cb_shader_mask |= 0x9 << (i * 4);
			break;
		case V_028714_SPI_SHADER_FP16_ABGR:
		case V_028714_SPI_SHADER_UNORM16_ABGR:
		case V_028714_SPI_SHADER_SNORM16_ABGR:
		case V_028714_SPI_SHADER_UINT16_ABGR:
		case V_028714_SPI_SHADER_SINT16_ABGR:
		case V_028714_SPI_SHADER_32_ABGR:
			cb_shader_mask |= 0xf << (i * 4);
			break;
		}
	}
	return cb_shader_mask;
}
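
/*
 * Worked example for the mask above (illustrative): with MRT0 exporting
 * SPI_SHADER_32_R and MRT1 exporting SPI_SHADER_FP16_ABGR, the two low
 * nibbles of spi_shader_col_format yield
 * cb_shader_mask = (0x1 << 0) | (0xf << 4) = 0xf1: only the R channel is
 * consumed from export 0, all four channels from export 1.
 */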

static void
radv_pipeline_compute_spi_color_formats(struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					uint32_t blend_enable,
					uint32_t blend_need_alpha,
					bool single_cb_enable,
					bool blend_mrt0_is_dual_src)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	unsigned col_format = 0;

	for (unsigned i = 0; i < (single_cb_enable ? 1 : subpass->color_count); ++i) {
		struct radv_render_pass_attachment *attachment;
		unsigned cf;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		cf = si_choose_spi_color_format(attachment->format,
						blend_enable & (1 << i),
						blend_need_alpha & (1 << i));

		col_format |= cf << (4 * i);
	}

	blend->cb_shader_mask = si_get_cb_shader_mask(col_format);

	if (blend_mrt0_is_dual_src)
		col_format |= (col_format & 0xf) << 4;
	if (!col_format)
		col_format |= V_028714_SPI_SHADER_32_R;
	blend->spi_shader_col_format = col_format;
}
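
/*
 * Note on the dual-source case above: with dual-source blending only MRT0
 * is used, so its export format is replicated into the second nibble
 * ((col_format & 0xf) << 4) so that the SRC1 export uses the same layout;
 * the SPI_SHADER_32_R fallback keeps at least one export alive when no
 * color target is written.
 */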

static bool
format_is_int8(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);
	int channel = vk_format_get_first_non_void_channel(format);

	return channel >= 0 && desc->channel[channel].pure_integer &&
	       desc->channel[channel].size == 8;
}

unsigned radv_format_meta_fs_key(VkFormat format)
{
	unsigned col_format = si_choose_spi_color_format(format, false, false) - 1;
	bool is_int8 = format_is_int8(format);

	return col_format + (is_int8 ? 3 : 0);
}

static unsigned
radv_pipeline_compute_is_int8(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned is_int8 = 0;

	for (unsigned i = 0; i < subpass->color_count; ++i) {
		struct radv_render_pass_attachment *attachment;

		attachment = pass->attachments + subpass->color_attachments[i].attachment;

		if (format_is_int8(attachment->format))
			is_int8 |= 1 << i;
	}

	return is_int8;
}

static void
radv_pipeline_init_blend_state(struct radv_pipeline *pipeline,
			       const VkGraphicsPipelineCreateInfo *pCreateInfo,
			       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	unsigned mode = V_028808_CB_NORMAL;
	uint32_t blend_enable = 0, blend_need_alpha = 0;
	bool blend_mrt0_is_dual_src = false;
	int i;
	bool single_cb_enable = false;

	if (extra && extra->custom_blend_mode) {
		single_cb_enable = true;
		mode = extra->custom_blend_mode;
	}
	blend->cb_color_control = 0;
	if (vkblend->logicOpEnable)
		blend->cb_color_control |= S_028808_ROP3(vkblend->logicOp | (vkblend->logicOp << 4));
	else
		blend->cb_color_control |= S_028808_ROP3(0xcc);

	blend->db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(2) |
		S_028B70_ALPHA_TO_MASK_OFFSET1(2) |
		S_028B70_ALPHA_TO_MASK_OFFSET2(2) |
		S_028B70_ALPHA_TO_MASK_OFFSET3(2);

	blend->cb_target_mask = 0;
	for (i = 0; i < vkblend->attachmentCount; i++) {
		const VkPipelineColorBlendAttachmentState *att = &vkblend->pAttachments[i];
		unsigned blend_cntl = 0;
		VkBlendOp eqRGB = att->colorBlendOp;
		VkBlendFactor srcRGB = att->srcColorBlendFactor;
		VkBlendFactor dstRGB = att->dstColorBlendFactor;
		VkBlendOp eqA = att->alphaBlendOp;
		VkBlendFactor srcA = att->srcAlphaBlendFactor;
		VkBlendFactor dstA = att->dstAlphaBlendFactor;

		blend->sx_mrt0_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);

		if (!att->colorWriteMask)
			continue;

		blend->cb_target_mask |= (unsigned)att->colorWriteMask << (4 * i);
		if (!att->blendEnable) {
			blend->cb_blend_control[i] = blend_cntl;
			continue;
		}

		if (is_dual_src(srcRGB) || is_dual_src(dstRGB) || is_dual_src(srcA) || is_dual_src(dstA))
			if (i == 0)
				blend_mrt0_is_dual_src = true;

		if (eqRGB == VK_BLEND_OP_MIN || eqRGB == VK_BLEND_OP_MAX) {
			srcRGB = VK_BLEND_FACTOR_ONE;
			dstRGB = VK_BLEND_FACTOR_ONE;
		}
		if (eqA == VK_BLEND_OP_MIN || eqA == VK_BLEND_OP_MAX) {
			srcA = VK_BLEND_FACTOR_ONE;
			dstA = VK_BLEND_FACTOR_ONE;
		}

		blend_cntl |= S_028780_ENABLE(1);

		blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
		blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(srcRGB));
		blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(dstRGB));
		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
			blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
			blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
			blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(srcA));
			blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(dstA));
		}
		blend->cb_blend_control[i] = blend_cntl;

		blend_enable |= 1 << i;

		if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
		    srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		    srcRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ||
		    dstRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
			blend_need_alpha |= 1 << i;
	}
	for (i = vkblend->attachmentCount; i < 8; i++)
		blend->cb_blend_control[i] = 0;

	if (blend->cb_target_mask)
		blend->cb_color_control |= S_028808_MODE(mode);
	else
		blend->cb_color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	radv_pipeline_compute_spi_color_formats(pipeline, pCreateInfo,
						blend_enable, blend_need_alpha,
						single_cb_enable, blend_mrt0_is_dual_src);
}

static uint32_t si_translate_stencil_op(enum VkStencilOp op)
{
	switch (op) {
	case VK_STENCIL_OP_KEEP:
		return V_02842C_STENCIL_KEEP;
	case VK_STENCIL_OP_ZERO:
		return V_02842C_STENCIL_ZERO;
	case VK_STENCIL_OP_REPLACE:
		return V_02842C_STENCIL_REPLACE_TEST;
	case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
		return V_02842C_STENCIL_ADD_CLAMP;
	case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
		return V_02842C_STENCIL_SUB_CLAMP;
	case VK_STENCIL_OP_INVERT:
		return V_02842C_STENCIL_INVERT;
	case VK_STENCIL_OP_INCREMENT_AND_WRAP:
		return V_02842C_STENCIL_ADD_WRAP;
	case VK_STENCIL_OP_DECREMENT_AND_WRAP:
		return V_02842C_STENCIL_SUB_WRAP;
	default:
		return 0;
	}
}

static void
radv_pipeline_init_depth_stencil_state(struct radv_pipeline *pipeline,
				       const VkGraphicsPipelineCreateInfo *pCreateInfo,
				       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineDepthStencilStateCreateInfo *vkds = pCreateInfo->pDepthStencilState;
	struct radv_depth_stencil_state *ds = &pipeline->graphics.ds;

	memset(ds, 0, sizeof(*ds));
	if (!vkds)
		return;
	ds->db_depth_control = S_028800_Z_ENABLE(vkds->depthTestEnable ? 1 : 0) |
		S_028800_Z_WRITE_ENABLE(vkds->depthWriteEnable ? 1 : 0) |
		S_028800_ZFUNC(vkds->depthCompareOp) |
		S_028800_DEPTH_BOUNDS_ENABLE(vkds->depthBoundsTestEnable ? 1 : 0);

	if (vkds->stencilTestEnable) {
		ds->db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
		ds->db_depth_control |= S_028800_STENCILFUNC(vkds->front.compareOp);
		ds->db_stencil_control |= S_02842C_STENCILFAIL(si_translate_stencil_op(vkds->front.failOp));
		ds->db_stencil_control |= S_02842C_STENCILZPASS(si_translate_stencil_op(vkds->front.passOp));
		ds->db_stencil_control |= S_02842C_STENCILZFAIL(si_translate_stencil_op(vkds->front.depthFailOp));

		ds->db_depth_control |= S_028800_STENCILFUNC_BF(vkds->back.compareOp);
		ds->db_stencil_control |= S_02842C_STENCILFAIL_BF(si_translate_stencil_op(vkds->back.failOp));
		ds->db_stencil_control |= S_02842C_STENCILZPASS_BF(si_translate_stencil_op(vkds->back.passOp));
		ds->db_stencil_control |= S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(vkds->back.depthFailOp));
	}

	if (extra) {
		ds->db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(extra->db_depth_clear);
		ds->db_render_control |= S_028000_STENCIL_CLEAR_ENABLE(extra->db_stencil_clear);

		ds->db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->db_resummarize);
		ds->db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->db_flush_depth_inplace);
		ds->db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->db_flush_stencil_inplace);
		ds->db_render_override2 |= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra->db_depth_disable_expclear);
		ds->db_render_override2 |= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra->db_stencil_disable_expclear);
	}
}

static uint32_t si_translate_fill(VkPolygonMode func)
{
	switch (func) {
	case VK_POLYGON_MODE_FILL:
		return V_028814_X_DRAW_TRIANGLES;
	case VK_POLYGON_MODE_LINE:
		return V_028814_X_DRAW_LINES;
	case VK_POLYGON_MODE_POINT:
		return V_028814_X_DRAW_POINTS;
	default:
		assert(0);
		return V_028814_X_DRAW_POINTS;
	}
}

static void
radv_pipeline_init_raster_state(struct radv_pipeline *pipeline,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationStateCreateInfo *vkraster = pCreateInfo->pRasterizationState;
	struct radv_raster_state *raster = &pipeline->graphics.raster;

	memset(raster, 0, sizeof(*raster));

	raster->spi_interp_control =
		S_0286D4_FLAT_SHADE_ENA(1) |
		S_0286D4_PNT_SPRITE_ENA(1) |
		S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
		S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
		S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
		S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
		S_0286D4_PNT_SPRITE_TOP_1(0); // vulkan is top to bottom - 1.0 at bottom

	raster->pa_cl_vs_out_cntl = S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(1);
	raster->pa_cl_clip_cntl = S_028810_PS_UCP_MODE(3) |
		S_028810_DX_CLIP_SPACE_DEF(1) | // vulkan uses DX conventions.
		S_028810_ZCLIP_NEAR_DISABLE(vkraster->depthClampEnable ? 1 : 0) |
		S_028810_ZCLIP_FAR_DISABLE(vkraster->depthClampEnable ? 1 : 0) |
		S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1);

	raster->pa_su_vtx_cntl =
		S_028BE4_PIX_CENTER(1) | // TODO verify
		S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
		S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH);

	raster->pa_su_sc_mode_cntl =
		S_028814_FACE(vkraster->frontFace) |
		S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
		S_028814_CULL_BACK(!!(vkraster->cullMode & VK_CULL_MODE_BACK_BIT)) |
		S_028814_POLY_MODE(vkraster->polygonMode != VK_POLYGON_MODE_FILL) |
		S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(vkraster->polygonMode)) |
		S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(vkraster->polygonMode)) |
		S_028814_POLY_OFFSET_FRONT_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
		S_028814_POLY_OFFSET_BACK_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
		S_028814_POLY_OFFSET_PARA_ENABLE(vkraster->depthBiasEnable ? 1 : 0);
}

static void
radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	unsigned num_tile_pipes = pipeline->device->physical_device->rad_info.num_tile_pipes;
	int ps_iter_samples = 1;
	uint32_t mask = 0xffff;

	ms->num_samples = vkms->rasterizationSamples;

	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.fs.force_persample) {
		ps_iter_samples = vkms->rasterizationSamples;
	}

	ms->pa_sc_line_cntl = S_028BDC_DX10_DIAMOND_TEST_ENA(1);
	ms->pa_sc_aa_config = 0;
	ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
		S_028804_STATIC_ANCHOR_ASSOCIATIONS(1);
	ms->pa_sc_mode_cntl_1 =
		S_028A4C_WALK_FENCE_ENABLE(1) | //TODO linear dst fixes
		S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
		S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1) |
		S_028A4C_SUPERTILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_TILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE(1) |
		EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1);

	if (vkms->rasterizationSamples > 1) {
		unsigned log_samples = util_logbase2(vkms->rasterizationSamples);
		unsigned log_ps_iter_samples = util_logbase2(util_next_power_of_two(ps_iter_samples));
		ms->pa_sc_mode_cntl_0 = S_028A48_MSAA_ENABLE(1);
		ms->pa_sc_line_cntl |= S_028BDC_EXPAND_LINE_WIDTH(1); /* CM_R_028BDC_PA_SC_LINE_CNTL */
		ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_samples) |
			S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
			S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
			S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
		ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
			S_028BE0_MAX_SAMPLE_DIST(radv_cayman_get_maxdist(log_samples)) |
			S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples); /* CM_R_028BE0_PA_SC_AA_CONFIG */
		ms->pa_sc_mode_cntl_1 |= EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
	}

	if (vkms->alphaToCoverageEnable)
		blend->db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);

	if (vkms->pSampleMask) {
		mask = vkms->pSampleMask[0] & 0xffff;
	}

	ms->pa_sc_aa_mask[0] = mask | (mask << 16);
	ms->pa_sc_aa_mask[1] = mask | (mask << 16);
}
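
/*
 * Worked example for the MSAA fields above (illustrative): with
 * rasterizationSamples = 8 and no per-sample shading forced,
 * log_samples = 3 while ps_iter_samples stays 1, so
 * log_ps_iter_samples = 0: the anchor/export sample counts are programmed
 * for 8 samples but PS_ITER_SAMPLES leaves the fragment shader running
 * once per pixel.
 */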

static uint32_t
si_translate_prim(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
		return V_008958_DI_PT_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
		return V_008958_DI_PT_LINELIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
		return V_008958_DI_PT_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
		return V_008958_DI_PT_TRILIST;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
		return V_008958_DI_PT_TRISTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
		return V_008958_DI_PT_TRIFAN;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_LINELIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_LINESTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
		return V_008958_DI_PT_TRILIST_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_008958_DI_PT_TRISTRIP_ADJ;
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_008958_DI_PT_PATCH;
	default:
		assert(0);
		return 0;
	}
}

static uint32_t
si_conv_gl_prim_to_gs_out(unsigned gl_prim)
{
	switch (gl_prim) {
	case 0: /* GL_POINTS */
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case 1: /* GL_LINES */
	case 3: /* GL_LINE_STRIP */
	case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
	case 0x8E7A: /* GL_ISOLINES */
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;

	case 4: /* GL_TRIANGLES */
	case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
	case 5: /* GL_TRIANGLE_STRIP */
	case 7: /* GL_QUADS */
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}

static uint32_t
si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_028A6C_OUTPRIM_TYPE_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}

static unsigned si_map_swizzle(unsigned swizzle)
{
	switch (swizzle) {
	case VK_SWIZZLE_Y:
		return V_008F0C_SQ_SEL_Y;
	case VK_SWIZZLE_Z:
		return V_008F0C_SQ_SEL_Z;
	case VK_SWIZZLE_W:
		return V_008F0C_SQ_SEL_W;
	case VK_SWIZZLE_0:
		return V_008F0C_SQ_SEL_0;
	case VK_SWIZZLE_1:
		return V_008F0C_SQ_SEL_1;
	default: /* VK_SWIZZLE_X */
		return V_008F0C_SQ_SEL_X;
	}
}

static void
radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
				 const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	radv_cmd_dirty_mask_t states = RADV_CMD_DIRTY_DYNAMIC_ALL;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

	pipeline->dynamic_state = default_dynamic_state;

	if (pCreateInfo->pDynamicState) {
		/* Remove all of the states that are marked as dynamic */
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t s = 0; s < count; s++)
			states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
	}

	struct radv_dynamic_state *dynamic = &pipeline->dynamic_state;

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pViewportState is [...] NULL if the pipeline
	 *    has rasterization disabled.
	 */
	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
		assert(pCreateInfo->pViewportState);

		dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
		if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
			typed_memcpy(dynamic->viewport.viewports,
				     pCreateInfo->pViewportState->pViewports,
				     pCreateInfo->pViewportState->viewportCount);
		}

		dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
		if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
			typed_memcpy(dynamic->scissor.scissors,
				     pCreateInfo->pViewportState->pScissors,
				     pCreateInfo->pViewportState->scissorCount);
		}
	}

	if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
	}

	if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
		assert(pCreateInfo->pRasterizationState);
		dynamic->depth_bias.bias =
			pCreateInfo->pRasterizationState->depthBiasConstantFactor;
		dynamic->depth_bias.clamp =
			pCreateInfo->pRasterizationState->depthBiasClamp;
		dynamic->depth_bias.slope =
			pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
	}

	/* Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pColorBlendState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is
	 *    created against does not use any color attachments.
	 */
	bool uses_color_att = false;
	for (unsigned i = 0; i < subpass->color_count; ++i) {
		if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
			uses_color_att = true;
			break;
		}
	}

	if (uses_color_att && states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
		assert(pCreateInfo->pColorBlendState);
		typed_memcpy(dynamic->blend_constants,
			     pCreateInfo->pColorBlendState->blendConstants, 4);
	}

	/* If there is no depthstencil attachment, then don't read
	 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
	 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
	 * no need to override the depthstencil defaults in
	 * radv_pipeline::dynamic_state when there is no depthstencil attachment.
	 *
	 * Section 9.2 of the Vulkan 1.0.15 spec says:
	 *
	 *    pDepthStencilState is [...] NULL if the pipeline has rasterization
	 *    disabled or if the subpass of the render pass the pipeline is created
	 *    against does not use a depth/stencil attachment.
	 */
	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
	    subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
		assert(pCreateInfo->pDepthStencilState);

		if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
			dynamic->depth_bounds.min =
				pCreateInfo->pDepthStencilState->minDepthBounds;
			dynamic->depth_bounds.max =
				pCreateInfo->pDepthStencilState->maxDepthBounds;
		}

		if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
			dynamic->stencil_compare_mask.front =
				pCreateInfo->pDepthStencilState->front.compareMask;
			dynamic->stencil_compare_mask.back =
				pCreateInfo->pDepthStencilState->back.compareMask;
		}

		if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
			dynamic->stencil_write_mask.front =
				pCreateInfo->pDepthStencilState->front.writeMask;
			dynamic->stencil_write_mask.back =
				pCreateInfo->pDepthStencilState->back.writeMask;
		}

		if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
			dynamic->stencil_reference.front =
				pCreateInfo->pDepthStencilState->front.reference;
			dynamic->stencil_reference.back =
				pCreateInfo->pDepthStencilState->back.reference;
		}
	}

	pipeline->dynamic_state_mask = states;
}
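
/*
 * Note on the mask above: `states` starts with every dynamic-state bit
 * set and the loop clears the bits the application declared dynamic, so
 * values are only copied into pipeline->dynamic_state for state that is
 * baked into the pipeline; dynamic_state_mask records exactly that set.
 */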

static union ac_shader_variant_key
radv_compute_vs_key(const VkGraphicsPipelineCreateInfo *pCreateInfo, bool as_es)
{
	union ac_shader_variant_key key;
	const VkPipelineVertexInputStateCreateInfo *input_state =
		pCreateInfo->pVertexInputState;

	memset(&key, 0, sizeof(key));
	key.vs.instance_rate_inputs = 0;
	key.vs.as_es = as_es;

	for (unsigned i = 0; i < input_state->vertexAttributeDescriptionCount; ++i) {
		unsigned binding;
		binding = input_state->pVertexAttributeDescriptions[i].binding;
		if (input_state->pVertexBindingDescriptions[binding].inputRate)
			key.vs.instance_rate_inputs |= 1u << input_state->pVertexAttributeDescriptions[i].location;
	}
	return key;
}

static void
calculate_gs_ring_sizes(struct radv_pipeline *pipeline)
{
	struct radv_device *device = pipeline->device;
	unsigned num_se = device->physical_device->rad_info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	unsigned gs_vertex_reuse = 16 * num_se; /* GS_VERTEX_REUSE register (per SE) */
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;

	struct ac_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
	struct ac_shader_variant_info *es_info = &pipeline->shaders[MESA_SHADER_VERTEX]->info;
	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(es_info->vs.esgs_itemsize * gs_vertex_reuse *
					    wave_size, alignment);
	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
		es_info->vs.esgs_itemsize * gs_info->gs.vertices_in;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
		gs_info->gs.max_gsvs_emit_size * 1; // no streams in VK (gs->max_gs_stream + 1);

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	pipeline->graphics.esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
	pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}
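
/*
 * Worked example for the ring sizing above (illustrative numbers): on a
 * part with max_se = 4, max_gs_waves = 128, gs_vertex_reuse = 64 and
 * alignment = 1024; with an assumed esgs_itemsize of 16 bytes the minimum
 * ESGS ring is align(16 * 64 * 64, 1024) = 64 KiB, while the recommended
 * size additionally scales with 2 waves in flight and the GS input vertex
 * count before the CLAMP against the ~64 MB-per-SE ceiling.
 */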

static const struct radv_prim_vertex_count prim_size_table[] = {
	[V_008958_DI_PT_NONE] = {0, 0},
	[V_008958_DI_PT_POINTLIST] = {1, 1},
	[V_008958_DI_PT_LINELIST] = {2, 2},
	[V_008958_DI_PT_LINESTRIP] = {2, 1},
	[V_008958_DI_PT_TRILIST] = {3, 3},
	[V_008958_DI_PT_TRIFAN] = {3, 1},
	[V_008958_DI_PT_TRISTRIP] = {3, 1},
	[V_008958_DI_PT_LINELIST_ADJ] = {4, 4},
	[V_008958_DI_PT_LINESTRIP_ADJ] = {4, 1},
	[V_008958_DI_PT_TRILIST_ADJ] = {6, 6},
	[V_008958_DI_PT_TRISTRIP_ADJ] = {6, 2},
	[V_008958_DI_PT_RECTLIST] = {3, 3},
	[V_008958_DI_PT_LINELOOP] = {2, 1},
	[V_008958_DI_PT_POLYGON] = {3, 1},
	[V_008958_DI_PT_2D_TRI_STRIP] = {0, 0},
};
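
/*
 * Reading the table above (an interpretation consistent with the values):
 * each entry pairs the vertex count needed to form a primitive with the
 * vertex increment between primitives. Strips reuse vertices, so
 * V_008958_DI_PT_TRISTRIP is {3, 1}: three vertices make the first
 * triangle and each further vertex adds one more, while the list variant
 * is {3, 3}, needing three fresh vertices per triangle.
 */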

static VkResult
radv_pipeline_init(struct radv_pipeline *pipeline,
		   struct radv_device *device,
		   struct radv_pipeline_cache *cache,
		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
		   const struct radv_graphics_pipeline_create_info *extra,
		   const VkAllocationCallbacks *alloc)
{
	struct radv_shader_module fs_m = {0};
	VkResult result;

	if (alloc == NULL)
		alloc = &device->alloc;

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);

	radv_pipeline_init_dynamic_state(pipeline, pCreateInfo);
	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
		pStages[stage] = &pCreateInfo->pStages[i];
		modules[stage] = radv_shader_module_from_handle(pStages[stage]->module);
	}

	radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);

	if (modules[MESA_SHADER_VERTEX]) {
		bool as_es = modules[MESA_SHADER_GEOMETRY] != NULL;
		union ac_shader_variant_key key = radv_compute_vs_key(pCreateInfo, as_es);

		pipeline->shaders[MESA_SHADER_VERTEX] =
			radv_pipeline_compile(pipeline, cache, modules[MESA_SHADER_VERTEX],
					      pStages[MESA_SHADER_VERTEX]->pName,
					      MESA_SHADER_VERTEX,
					      pStages[MESA_SHADER_VERTEX]->pSpecializationInfo,
					      pipeline->layout, &key);

		pipeline->active_stages |= mesa_to_vk_shader_stage(MESA_SHADER_VERTEX);
	}

	if (modules[MESA_SHADER_GEOMETRY]) {
		union ac_shader_variant_key key = radv_compute_vs_key(pCreateInfo, false);

		pipeline->shaders[MESA_SHADER_GEOMETRY] =
			radv_pipeline_compile(pipeline, cache, modules[MESA_SHADER_GEOMETRY],
					      pStages[MESA_SHADER_GEOMETRY]->pName,
					      MESA_SHADER_GEOMETRY,
					      pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo,
					      pipeline->layout, &key);

		pipeline->active_stages |= mesa_to_vk_shader_stage(MESA_SHADER_GEOMETRY);
		calculate_gs_ring_sizes(pipeline);
	}

	if (!modules[MESA_SHADER_FRAGMENT]) {
		nir_builder fs_b;
		nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
		fs_b.shader->info->name = ralloc_strdup(fs_b.shader, "noop_fs");
		fs_m.nir = fs_b.shader;
		modules[MESA_SHADER_FRAGMENT] = &fs_m;
	}

	if (modules[MESA_SHADER_FRAGMENT]) {
		union ac_shader_variant_key key;
		key.fs.col_format = pipeline->graphics.blend.spi_shader_col_format;
		key.fs.is_int8 = radv_pipeline_compute_is_int8(pCreateInfo);

		const VkPipelineShaderStageCreateInfo *stage = pStages[MESA_SHADER_FRAGMENT];

		pipeline->shaders[MESA_SHADER_FRAGMENT] =
			radv_pipeline_compile(pipeline, cache, modules[MESA_SHADER_FRAGMENT],
					      stage ? stage->pName : "main",
					      MESA_SHADER_FRAGMENT,
					      stage ? stage->pSpecializationInfo : NULL,
					      pipeline->layout, &key);
		pipeline->active_stages |= mesa_to_vk_shader_stage(MESA_SHADER_FRAGMENT);
	}

	if (fs_m.nir)
		ralloc_free(fs_m.nir);

	radv_pipeline_init_depth_stencil_state(pipeline, pCreateInfo, extra);
	radv_pipeline_init_raster_state(pipeline, pCreateInfo);
	radv_pipeline_init_multisample_state(pipeline, pCreateInfo);
	pipeline->graphics.prim = si_translate_prim(pCreateInfo->pInputAssemblyState->topology);
	if (radv_pipeline_has_gs(pipeline)) {
		pipeline->graphics.gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
	} else {
		pipeline->graphics.gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
	}
	if (extra && extra->use_rectlist) {
		pipeline->graphics.prim = V_008958_DI_PT_RECTLIST;
		pipeline->graphics.gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
	}
	pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;
	/* prim vertex count will need TESS changes */
	pipeline->graphics.prim_vertex_count = prim_size_table[pipeline->graphics.prim];

	const VkPipelineVertexInputStateCreateInfo *vi_info =
		pCreateInfo->pVertexInputState;
	for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
		const VkVertexInputAttributeDescription *desc =
			&vi_info->pVertexAttributeDescriptions[i];
		unsigned loc = desc->location;
		const struct vk_format_description *format_desc;
		int first_non_void;
		uint32_t num_format, data_format;
		format_desc = vk_format_description(desc->format);
		first_non_void = vk_format_get_first_non_void_channel(desc->format);

		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);

		pipeline->va_rsrc_word3[loc] = S_008F0C_DST_SEL_X(si_map_swizzle(format_desc->swizzle[0])) |
			S_008F0C_DST_SEL_Y(si_map_swizzle(format_desc->swizzle[1])) |
			S_008F0C_DST_SEL_Z(si_map_swizzle(format_desc->swizzle[2])) |
			S_008F0C_DST_SEL_W(si_map_swizzle(format_desc->swizzle[3])) |
			S_008F0C_NUM_FORMAT(num_format) |
			S_008F0C_DATA_FORMAT(data_format);
		pipeline->va_format_size[loc] = format_desc->block.bits / 8;
		pipeline->va_offset[loc] = desc->offset;
		pipeline->va_binding[loc] = desc->binding;
		pipeline->num_vertex_attribs = MAX2(pipeline->num_vertex_attribs, loc + 1);
	}

	for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
		const VkVertexInputBindingDescription *desc =
			&vi_info->pVertexBindingDescriptions[i];

		pipeline->binding_stride[desc->binding] = desc->stride;
	}

	if (device->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) {
		radv_dump_pipeline_stats(device, pipeline);
	}

	result = radv_pipeline_scratch_init(device, pipeline);
	return result;
}

VkResult
radv_graphics_pipeline_create(
	VkDevice _device,
	VkPipelineCache _cache,
	const VkGraphicsPipelineCreateInfo *pCreateInfo,
	const struct radv_graphics_pipeline_create_info *extra,
	const VkAllocationCallbacks *pAllocator,
	VkPipeline *pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pipeline, 0, sizeof(*pipeline));
	result = radv_pipeline_init(pipeline, device, cache,
				    pCreateInfo, extra, pAllocator);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}

VkResult radv_CreateGraphicsPipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkGraphicsPipelineCreateInfo*         pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_graphics_pipeline_create(_device, pipelineCache,
						  &pCreateInfos[i],
						  NULL, pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}

static VkResult radv_compute_pipeline_create(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkComputePipelineCreateInfo*          pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	RADV_FROM_HANDLE(radv_shader_module, module, pCreateInfo->stage.module);
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
			     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pipeline, 0, sizeof(*pipeline));
	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);

	pipeline->shaders[MESA_SHADER_COMPUTE] =
		radv_pipeline_compile(pipeline, cache, module,
				      pCreateInfo->stage.pName,
				      MESA_SHADER_COMPUTE,
				      pCreateInfo->stage.pSpecializationInfo,
				      pipeline->layout, NULL);

	result = radv_pipeline_scratch_init(device, pipeline);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	*pPipeline = radv_pipeline_to_handle(pipeline);

	if (device->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) {
		radv_dump_pipeline_stats(device, pipeline);
	}
	return VK_SUCCESS;
}

VkResult radv_CreateComputePipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkComputePipelineCreateInfo*          pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;
	unsigned i = 0;

	for (; i < count; i++) {
		VkResult r;
		r = radv_compute_pipeline_create(_device, pipelineCache,
						 &pCreateInfos[i],
						 pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;
		}
	}

	return result;
}