/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"

#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Support.h>

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"

#include "util/string_buffer.h"
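/* NIR compiler options shared by every stage.  The pack/unpack and
 * extract lowering flags make NIR open-code these operations up front
 * instead of leaving them for the backend to handle. */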
static const struct nir_shader_compiler_options nir_options = {
	.vertex_id_zero_based = true,
	.lower_device_index_to_zero = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.max_unroll_iterations = 32
};
VkResult radv_CreateShaderModule(
	VkDevice                                    _device,
	const VkShaderModuleCreateInfo*             pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkShaderModule*                             pShaderModule)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_shader_module *module;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	module = vk_alloc2(&device->alloc, pAllocator,
			   sizeof(*module) + pCreateInfo->codeSize, 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (module == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	module->nir = NULL;
	module->size = pCreateInfo->codeSize;
	memcpy(module->data, pCreateInfo->pCode, module->size);

	_mesa_sha1_compute(module->data, module->size, module->sha1);

	*pShaderModule = radv_shader_module_to_handle(module);

	return VK_SUCCESS;
}
void radv_DestroyShaderModule(
	VkDevice                                    _device,
	VkShaderModule                              _module,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_shader_module, module, _module);

	if (!module)
		return;

	vk_free2(&device->alloc, pAllocator, module);
}
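/* Runs the standard NIR cleanup and optimization passes in a loop until
 * they stop making progress; when optimize_conservatively is set, only a
 * single iteration is done. */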
void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively)
{
	bool progress;

	do {
		progress = false;

		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
		NIR_PASS_V(shader, nir_lower_pack);
		NIR_PASS_V(shader, nir_lower_alu_to_scalar);
		NIR_PASS_V(shader, nir_lower_phis_to_scalar);

		NIR_PASS(progress, shader, nir_copy_prop);
		NIR_PASS(progress, shader, nir_opt_remove_phis);
		NIR_PASS(progress, shader, nir_opt_dce);
		if (nir_opt_trivial_continues(shader)) {
			progress = true;
			NIR_PASS(progress, shader, nir_copy_prop);
			NIR_PASS(progress, shader, nir_opt_remove_phis);
			NIR_PASS(progress, shader, nir_opt_dce);
		}
		NIR_PASS(progress, shader, nir_opt_if);
		NIR_PASS(progress, shader, nir_opt_dead_cf);
		NIR_PASS(progress, shader, nir_opt_cse);
		NIR_PASS(progress, shader, nir_opt_peephole_select, 8);
		NIR_PASS(progress, shader, nir_opt_algebraic);
		NIR_PASS(progress, shader, nir_opt_constant_folding);
		NIR_PASS(progress, shader, nir_opt_undef);
		NIR_PASS(progress, shader, nir_opt_conditional_discard);
		if (shader->options->max_unroll_iterations) {
			NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
		}
	} while (progress && !optimize_conservatively);

	NIR_PASS(progress, shader, nir_opt_shrink_load);
	NIR_PASS(progress, shader, nir_opt_move_load_ubo);
}
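/* Builds the NIR for one pipeline stage: either adopts the prebuilt NIR
 * attached to a meta shader module or translates the module's SPIR-V,
 * then runs the lowering passes common to all RADV shaders. */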
static nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
			   struct radv_shader_module *module,
			   const char *entrypoint_name,
			   gl_shader_stage stage,
			   const VkSpecializationInfo *spec_info,
			   const VkPipelineCreateFlags flags)
{
	nir_shader *nir;
	nir_function *entry_point;
	if (module->nir) {
		/* Some things such as our meta clear/blit code will give us a NIR
		 * shader directly.  In that case, we just ignore the SPIR-V entirely
		 * and just use the NIR shader. */
		nir = module->nir;
		nir->options = &nir_options;
		nir_validate_shader(nir);
		assert(exec_list_length(&nir->functions) == 1);
		struct exec_node *node = exec_list_get_head(&nir->functions);
		entry_point = exec_node_data(nir_function, node, node);
	} else {
		uint32_t *spirv = (uint32_t *) module->data;
		assert(module->size % 4 == 0);

		if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
			radv_print_spirv(spirv, module->size, stderr);
		uint32_t num_spec_entries = 0;
		struct nir_spirv_specialization *spec_entries = NULL;
		if (spec_info && spec_info->mapEntryCount > 0) {
			num_spec_entries = spec_info->mapEntryCount;
			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
			for (uint32_t i = 0; i < num_spec_entries; i++) {
				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
				const void *data = spec_info->pData + entry.offset;
				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
				if (spec_info->dataSize == 8)
					spec_entries[i].data64 = *(const uint64_t *)data;
				else
					spec_entries[i].data32 = *(const uint32_t *)data;
			}
		}
		const struct spirv_to_nir_options spirv_options = {
			.caps = {
				.device_group = true,
				.draw_parameters = true,
				.image_read_without_format = true,
				.image_write_without_format = true,
				.tessellation = true,
				.subgroup_ballot = true,
				.subgroup_basic = true,
				.subgroup_quad = true,
				.subgroup_shuffle = true,
				.subgroup_vote = true,
				.variable_pointers = true,
				.trinary_minmax = true,
				.shader_viewport_index_layer = true,
				.descriptor_array_dynamic_indexing = true,
				.runtime_descriptor_array = true,
				.stencil_export = true,
				.storage_16bit = true,
			},
		};
		entry_point = spirv_to_nir(spirv, module->size / 4,
					   spec_entries, num_spec_entries,
					   stage, entrypoint_name,
					   &spirv_options, &nir_options);
		nir = entry_point->shader;
		assert(nir->info.stage == stage);
		nir_validate_shader(nir);

		free(spec_entries);
	}
	/* We have to lower away local constant initializers right before we
	 * inline functions.  That way they get properly initialized at the top
	 * of the function and not at the top of its caller.
	 */
	NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
	NIR_PASS_V(nir, nir_lower_returns);
	NIR_PASS_V(nir, nir_inline_functions);
	NIR_PASS_V(nir, nir_copy_prop);
	/* Pick off the single entrypoint that we want */
	foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
		if (func != entry_point)
			exec_node_remove(&func->node);
	}
	assert(exec_list_length(&nir->functions) == 1);
	entry_point->name = ralloc_strdup(entry_point, "main");
	/* Make sure we lower constant initializers on output variables so that
	 * nir_remove_dead_variables below sees the corresponding stores.
	 */
	NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);

	/* Now that we've deleted all but the main function, we can go ahead and
	 * lower the rest of the constant initializers.
	 */
	NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);

	/* Split member structs.  We do this before lower_io_to_temporaries so that
	 * it doesn't lower system values to temporaries by accident.
	 */
	NIR_PASS_V(nir, nir_split_var_copies);
	NIR_PASS_V(nir, nir_split_per_member_structs);

	NIR_PASS_V(nir, nir_remove_dead_variables,
		   nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

	NIR_PASS_V(nir, nir_lower_system_values);
	NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
	/* Vulkan uses the separate-shader linking model */
	nir->info.separate_shader = true;

	nir_shader_gather_info(nir, entry_point->impl);

	static const nir_lower_tex_options tex_options = {
		.lower_txp = ~0,
	};

	nir_lower_tex(nir, &tex_options);

	nir_lower_vars_to_ssa(nir);
	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_GEOMETRY) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, true);
	} else if (nir->info.stage == MESA_SHADER_TESS_EVAL ||
		   nir->info.stage == MESA_SHADER_FRAGMENT) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, false);
	}

	nir_split_var_copies(nir);
	nir_lower_var_copies(nir);

	nir_lower_global_vars_to_local(nir);
	nir_remove_dead_variables(nir, nir_var_local);
	nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
			.subgroup_size = 64,
			.ballot_bit_size = 64,
			.lower_to_scalar = 1,
			.lower_subgroup_masks = 1,
			.lower_shuffle = 1,
			.lower_shuffle_to_32bit = 1,
			.lower_vote_eq_to_ballot = 1,
		});

	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_optimize_nir(nir, false);

	/* Indirect lowering must be called after the radv_optimize_nir() loop
	 * has been called at least once. Otherwise indirect lowering can
	 * bloat the instruction count of the loop and cause it to be
	 * considered too large for unrolling.
	 */
	ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
	radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT);

	return nir;
}
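/* Shader code is sub-allocated from 256 KiB slabs.  Walk the existing
 * slabs looking for a 256-byte-aligned hole big enough for the shader;
 * if none fits, create and map a fresh slab.  Returns a CPU pointer to
 * the shader's location within the slab. */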
void *
radv_alloc_shader_memory(struct radv_device *device,
			 struct radv_shader_variant *shader)
{
	mtx_lock(&device->shader_slab_mutex);
	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		uint64_t offset = 0;
		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
			if (s->bo_offset - offset >= shader->code_size) {
				shader->bo = slab->bo;
				shader->bo_offset = offset;
				list_addtail(&shader->slab_list, &s->slab_list);
				mtx_unlock(&device->shader_slab_mutex);
				return slab->ptr + offset;
			}
			offset = align_u64(s->bo_offset + s->code_size, 256);
		}
		if (slab->size - offset >= shader->code_size) {
			shader->bo = slab->bo;
			shader->bo_offset = offset;
			list_addtail(&shader->slab_list, &slab->shaders);
			mtx_unlock(&device->shader_slab_mutex);
			return slab->ptr + offset;
		}
	}

	mtx_unlock(&device->shader_slab_mutex);
	struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

	slab->size = 256 * 1024;
	slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
					     RADEON_DOMAIN_VRAM,
					     RADEON_FLAG_NO_INTERPROCESS_SHARING |
					     (device->physical_device->cpdma_prefetch_writes_memory ?
					      0 : RADEON_FLAG_READ_ONLY));
	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
	list_inithead(&slab->shaders);

	mtx_lock(&device->shader_slab_mutex);
	list_add(&slab->slabs, &device->shader_slabs);

	shader->bo = slab->bo;
	shader->bo_offset = 0;
	list_add(&shader->slab_list, &slab->shaders);
	mtx_unlock(&device->shader_slab_mutex);
	return slab->ptr;
}
void
radv_destroy_shader_slabs(struct radv_device *device)
{
	list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		device->ws->buffer_destroy(slab->bo);
		free(slab);
	}
	mtx_destroy(&device->shader_slab_mutex);
}
/* For the UMR disassembler. */
#define DEBUGGER_END_OF_CODE_MARKER    0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS           5

static unsigned
radv_get_shader_binary_size(struct ac_shader_binary *binary)
{
	return binary->code_size + DEBUGGER_NUM_MARKERS * 4;
}
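/* Uploads the compiled binary into GPU memory and derives the
 * SPI_SHADER_PGM_RSRC1/RSRC2 register values (VGPR/SGPR counts, scratch
 * enable, per-stage VGPR component counts) from the shader config. */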
static void
radv_fill_shader_variant(struct radv_device *device,
			 struct radv_shader_variant *variant,
			 struct ac_shader_binary *binary,
			 gl_shader_stage stage)
{
	bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
	struct radv_shader_info *info = &variant->info.info;
	unsigned vgpr_comp_cnt = 0;

	variant->code_size = radv_get_shader_binary_size(binary);
	variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
			 S_00B12C_USER_SGPR_MSB(variant->info.num_user_sgprs >> 5) |
			 S_00B12C_SCRATCH_EN(scratch_enabled);

	variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
			 S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
			 S_00B848_DX10_CLAMP(1) |
			 S_00B848_FLOAT_MODE(variant->config.float_mode);

	switch (stage) {
	case MESA_SHADER_TESS_EVAL:
		vgpr_comp_cnt = 3;
		variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (device->physical_device->rad_info.chip_class >= GFX9) {
			vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
		} else {
			variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		break;
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_GEOMETRY:
		vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	case MESA_SHADER_COMPUTE:
		variant->rsrc2 |=
			S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
			S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
			S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
			S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
						info->cs.uses_thread_id[1] ? 1 : 0) |
			S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
			S_00B84C_LDS_SIZE(variant->config.lds_size);
		break;
	default:
		unreachable("unsupported shader type");
		break;
	}
	if (device->physical_device->rad_info.chip_class >= GFX9 &&
	    stage == MESA_SHADER_GEOMETRY) {
		unsigned es_type = variant->info.gs.es_type;
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

		if (es_type == MESA_SHADER_VERTEX) {
			es_vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
		} else if (es_type == MESA_SHADER_TESS_EVAL) {
			es_vgpr_comp_cnt = 3;
		} else {
			unreachable("invalid shader ES type");
		}

		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
		 * VGPR[0:4] are always loaded.
		 */
		if (info->uses_invocation_id) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (variant->info.gs.vertices_in >= 3) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		variant->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
		variant->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				  S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
	} else if (device->physical_device->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_TESS_CTRL) {
		variant->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
	} else {
		variant->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
	}
	void *ptr = radv_alloc_shader_memory(device, variant);
	memcpy(ptr, binary->code, binary->code_size);

	/* Add end-of-code markers for the UMR disassembler. */
	uint32_t *ptr32 = (uint32_t *)ptr + binary->code_size / 4;
	for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
		ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
}
static void radv_init_llvm_target()
{
	LLVMInitializeAMDGPUTargetInfo();
	LLVMInitializeAMDGPUTarget();
	LLVMInitializeAMDGPUTargetMC();
	LLVMInitializeAMDGPUAsmPrinter();

	/* For inline assembly. */
	LLVMInitializeAMDGPUAsmParser();

	/* Workaround for bug in llvm 4.0 that causes image intrinsics
	 * to disappear.
	 * https://reviews.llvm.org/D26348
	 *
	 * Workaround for bug in llvm that causes the GPU to hang in presence
	 * of nested loops because there is an exec mask issue. The proper
	 * solution is to fix LLVM but this might require a bunch of work.
	 * https://bugs.llvm.org/show_bug.cgi?id=37744
	 *
	 * "mesa" is the prefix for error messages.
	 */
	const char *argv[3] = { "mesa", "-simplifycfg-sink-common=false",
				"-amdgpu-skip-threshold=1" };
	LLVMParseCommandLineOptions(3, argv, NULL);
}

static once_flag radv_init_llvm_target_once_flag = ONCE_FLAG_INIT;

static void radv_init_llvm_once(void)
{
	call_once(&radv_init_llvm_target_once_flag, radv_init_llvm_target);
}
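/* Common back end for radv_shader_variant_create() and
 * radv_create_gs_copy_shader(): fills in the compiler options, runs the
 * NIR -> LLVM -> ISA pipeline and uploads the resulting code. */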
static struct radv_shader_variant *
shader_variant_create(struct radv_device *device,
		      struct radv_shader_module *module,
		      struct nir_shader * const *shaders,
		      int shader_count,
		      gl_shader_stage stage,
		      struct radv_nir_compiler_options *options,
		      bool gs_copy_shader,
		      void **code_out,
		      unsigned *code_size_out)
{
	enum radeon_family chip_family = device->physical_device->rad_info.family;
	enum ac_target_machine_options tm_options = 0;
	struct radv_shader_variant *variant;
	struct ac_shader_binary binary;
	struct ac_llvm_compiler ac_llvm;
	bool thread_compiler;
	variant = calloc(1, sizeof(struct radv_shader_variant));
	if (!variant)
		return NULL;

	options->family = chip_family;
	options->chip_class = device->physical_device->rad_info.chip_class;
	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
	options->dump_preoptir = options->dump_shader &&
				 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
	options->record_llvm_ir = device->keep_shader_info;
	options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
	options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
	options->address32_hi = device->physical_device->rad_info.address32_hi;

	if (options->supports_spill)
		tm_options |= AC_TM_SUPPORTS_SPILL;
	if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
		tm_options |= AC_TM_SISCHED;
	if (options->check_ir)
		tm_options |= AC_TM_CHECK_IR;

	thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
	radv_init_llvm_once();
	radv_init_llvm_compiler(&ac_llvm, false,
				thread_compiler,
				chip_family, tm_options);
	if (gs_copy_shader) {
		assert(shader_count == 1);
		radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
					    &variant->config, &variant->info,
					    options);
	} else {
		radv_compile_nir_shader(&ac_llvm, &binary, &variant->config,
					&variant->info, shaders, shader_count,
					options);
	}

	radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);

	radv_fill_shader_variant(device, variant, &binary, stage);

	if (code_out) {
		*code_out = binary.code;
		*code_size_out = binary.code_size;
	} else
		free(binary.code);
	free(binary.config);
	free(binary.rodata);
	free(binary.global_symbol_offsets);
	free(binary.relocs);
	variant->ref_count = 1;

	if (device->keep_shader_info) {
		variant->disasm_string = binary.disasm_string;
		variant->llvm_ir_string = binary.llvm_ir_string;
		if (!gs_copy_shader && !module->nir) {
			variant->nir = *shaders;
			variant->spirv = (uint32_t *)module->data;
			variant->spirv_size = module->size;
		}
	} else {
		free(binary.disasm_string);
	}

	return variant;
}
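/* More than one NIR shader is passed when stages are merged; the variant
 * is created for the stage of the last shader in the list. */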
struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
			   struct radv_shader_module *module,
			   struct nir_shader *const *shaders,
			   int shader_count,
			   struct radv_pipeline_layout *layout,
			   const struct radv_shader_variant_key *key,
			   void **code_out,
			   unsigned *code_size_out)
{
	struct radv_nir_compiler_options options = {0};

	options.layout = layout;
	if (key)
		options.key = *key;

	options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
	options.supports_spill = true;

	return shader_variant_create(device, module, shaders, shader_count,
				     shaders[shader_count - 1]->info.stage,
				     &options, false, code_out, code_size_out);
}
struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
			   struct nir_shader *shader,
			   void **code_out,
			   unsigned *code_size_out,
			   bool multiview)
{
	struct radv_nir_compiler_options options = {0};

	options.key.has_multiview_view_index = multiview;

	return shader_variant_create(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
				     &options, true, code_out, code_size_out);
}
void
radv_shader_variant_destroy(struct radv_device *device,
			    struct radv_shader_variant *variant)
{
	if (!p_atomic_dec_zero(&variant->ref_count))
		return;

	mtx_lock(&device->shader_slab_mutex);
	list_del(&variant->slab_list);
	mtx_unlock(&device->shader_slab_mutex);

	ralloc_free(variant->nir);
	free(variant->disasm_string);
	free(variant->llvm_ir_string);
	free(variant);
}
const char *
radv_get_shader_name(struct radv_shader_variant *var, gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX: return var->info.vs.as_ls ? "Vertex Shader as LS" : var->info.vs.as_es ? "Vertex Shader as ES" : "Vertex Shader as VS";
	case MESA_SHADER_GEOMETRY: return "Geometry Shader";
	case MESA_SHADER_FRAGMENT: return "Pixel Shader";
	case MESA_SHADER_COMPUTE: return "Compute Shader";
	case MESA_SHADER_TESS_CTRL: return "Tessellation Control Shader";
	case MESA_SHADER_TESS_EVAL: return var->info.tes.as_es ? "Tessellation Evaluation Shader as ES" : "Tessellation Evaluation Shader as VS";
	default:
		return "Unknown shader";
	};
}
static void
generate_shader_stats(struct radv_device *device,
		      struct radv_shader_variant *variant,
		      gl_shader_stage stage,
		      struct _mesa_string_buffer *buf)
{
	unsigned lds_increment = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
	struct ac_shader_config *conf;
	unsigned max_simd_waves;
	unsigned lds_per_wave = 0;

	max_simd_waves = ac_get_max_simd_waves(device->physical_device->rad_info.family);

	conf = &variant->config;

	if (stage == MESA_SHADER_FRAGMENT) {
		lds_per_wave = conf->lds_size * lds_increment +
			       align(variant->info.fs.num_interp * 48,
				     lds_increment);
	}

	if (conf->num_sgprs)
		max_simd_waves =
			MIN2(max_simd_waves,
			     radv_get_num_physical_sgprs(device->physical_device) / conf->num_sgprs);

	if (conf->num_vgprs)
		max_simd_waves =
			MIN2(max_simd_waves,
			     RADV_NUM_PHYSICAL_VGPRS / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
	 * that PS can use.
	 */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (stage == MESA_SHADER_FRAGMENT) {
		_mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
					   "SPI_PS_INPUT_ADDR = 0x%04x\n"
					   "SPI_PS_INPUT_ENA  = 0x%04x\n",
					   conf->spi_ps_input_addr, conf->spi_ps_input_ena);
	}

	_mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
				   "SGPRS: %d\n"
				   "VGPRS: %d\n"
				   "Spilled SGPRs: %d\n"
				   "Spilled VGPRs: %d\n"
				   "PrivMem VGPRS: %d\n"
				   "Code Size: %d bytes\n"
				   "LDS: %d blocks\n"
				   "Scratch: %d bytes per wave\n"
				   "Max Waves: %d\n"
				   "********************\n\n\n",
				   conf->num_sgprs, conf->num_vgprs,
				   conf->spilled_sgprs, conf->spilled_vgprs,
				   variant->info.private_mem_vgprs, variant->code_size,
				   conf->lds_size, conf->scratch_bytes_per_wave,
				   max_simd_waves);
}
void
radv_shader_dump_stats(struct radv_device *device,
		       struct radv_shader_variant *variant,
		       gl_shader_stage stage,
		       FILE *file)
{
	struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

	generate_shader_stats(device, variant, stage, buf);

	fprintf(file, "\n%s:\n", radv_get_shader_name(variant, stage));
	fprintf(file, "%s", buf->buf);

	_mesa_string_buffer_destroy(buf);
}
VkResult
radv_GetShaderInfoAMD(VkDevice _device,
		      VkPipeline _pipeline,
		      VkShaderStageFlagBits shaderStage,
		      VkShaderInfoTypeAMD infoType,
		      size_t* pInfoSize,
		      void* pInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
	gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
	struct radv_shader_variant *variant = pipeline->shaders[stage];
	struct _mesa_string_buffer *buf;
	VkResult result = VK_SUCCESS;

	/* Spec doesn't indicate what to do if the stage is invalid, so just
	 * return no info for this. */
	if (!variant)
		return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

	switch (infoType) {
	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
		if (!pInfo) {
			*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
		} else {
			unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
			struct ac_shader_config *conf = &variant->config;

			VkShaderStatisticsInfoAMD statistics = {};
			statistics.shaderStageMask = shaderStage;
			statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
			statistics.numPhysicalSgprs = radv_get_num_physical_sgprs(device->physical_device);
			statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

			if (stage == MESA_SHADER_COMPUTE) {
				unsigned *local_size = variant->nir->info.cs.local_size;
				unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

				statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
							       ceil((double)workgroup_size / statistics.numPhysicalVgprs);

				statistics.computeWorkGroupSize[0] = local_size[0];
				statistics.computeWorkGroupSize[1] = local_size[1];
				statistics.computeWorkGroupSize[2] = local_size[2];
			} else {
				statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
			}

			statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
			statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
			statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
			statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
			statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

			size_t size = *pInfoSize;
			*pInfoSize = sizeof(statistics);

			memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

			if (size < *pInfoSize)
				result = VK_INCOMPLETE;
		}

		break;
	case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
		buf = _mesa_string_buffer_create(NULL, 1024);

		_mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(variant, stage));
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
		generate_shader_stats(device, variant, stage, buf);

		/* Need to include the null terminator. */
		size_t length = buf->length + 1;

		if (!pInfo) {
			*pInfoSize = length;
		} else {
			size_t size = *pInfoSize;
			*pInfoSize = length;

			memcpy(pInfo, buf->buf, MIN2(size, length));

			if (size < length)
				result = VK_INCOMPLETE;
		}

		_mesa_string_buffer_destroy(buf);
		break;
	default:
		/* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
		result = VK_ERROR_FEATURE_NOT_PRESENT;
		break;
	}

	return result;
}