/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Support.h>

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "ac_rtld.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"

#include "util/string_buffer.h"

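/* Compiler options handed to NIR for every stage; they request the ALU and
 * pack/unpack lowerings that the AMD backend expects to have been performed
 * in NIR before code generation. */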
static const struct nir_shader_compiler_options nir_options = {
	.vertex_id_zero_based = true,
	.lower_flrp16 = true,
	.lower_flrp32 = true,
	.lower_flrp64 = true,
	.lower_device_index_to_zero = true,
	.lower_bitfield_insert_to_bitfield_select = true,
	.lower_bitfield_extract = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_ffma = true,
	.lower_mul_2x32_64 = true,
	.max_unroll_iterations = 32,
	.use_interpolated_input_intrinsics = true,
};

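/* vkCreateShaderModule: the module simply stores a copy of the SPIR-V words
 * plus a SHA-1 of them; the hash is later used when hashing shaders for the
 * pipeline cache. */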
VkResult radv_CreateShaderModule(
	VkDevice                                    _device,
	const VkShaderModuleCreateInfo*             pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkShaderModule*                             pShaderModule)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_shader_module *module;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	module = vk_alloc2(&device->alloc, pAllocator,
			   sizeof(*module) + pCreateInfo->codeSize, 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (module == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	module->nir = NULL;
	module->size = pCreateInfo->codeSize;
	memcpy(module->data, pCreateInfo->pCode, module->size);

	_mesa_sha1_compute(module->data, module->size, module->sha1);

	*pShaderModule = radv_shader_module_to_handle(module);

	return VK_SUCCESS;
}

void radv_DestroyShaderModule(
	VkDevice                                    _device,
	VkShaderModule                              _module,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_shader_module, module, _module);

	if (!module)
		return;

	vk_free2(&device->alloc, pAllocator, module);
}

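/* Core NIR optimization loop. Iterates the passes below until none of them
 * reports progress (a fixed point), or runs them exactly once when
 * optimize_conservatively is set. */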
void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
		  bool allow_copies)
{
	bool progress;
	unsigned lower_flrp =
		(shader->options->lower_flrp16 ? 16 : 0) |
		(shader->options->lower_flrp32 ? 32 : 0) |
		(shader->options->lower_flrp64 ? 64 : 0);

	do {
		progress = false;

		NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
		NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);

		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
		NIR_PASS_V(shader, nir_lower_pack);

		if (allow_copies) {
			/* Only run this pass in the first call to
			 * radv_optimize_nir. Later calls assume that we've
			 * lowered away any copy_deref instructions and we
			 * don't want to introduce any more.
			 */
			NIR_PASS(progress, shader, nir_opt_find_array_copies);
		}

		NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
		NIR_PASS(progress, shader, nir_opt_dead_write_vars);

		NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL);
		NIR_PASS_V(shader, nir_lower_phis_to_scalar);

		NIR_PASS(progress, shader, nir_copy_prop);
		NIR_PASS(progress, shader, nir_opt_remove_phis);
		NIR_PASS(progress, shader, nir_opt_dce);
		if (nir_opt_trivial_continues(shader)) {
			progress = true;
			NIR_PASS(progress, shader, nir_copy_prop);
			NIR_PASS(progress, shader, nir_opt_remove_phis);
			NIR_PASS(progress, shader, nir_opt_dce);
		}
		NIR_PASS(progress, shader, nir_opt_if, true);
		NIR_PASS(progress, shader, nir_opt_dead_cf);
		NIR_PASS(progress, shader, nir_opt_cse);
		NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
		NIR_PASS(progress, shader, nir_opt_constant_folding);
		NIR_PASS(progress, shader, nir_opt_algebraic);

		if (lower_flrp != 0) {
			bool lower_flrp_progress = false;
			NIR_PASS(lower_flrp_progress,
				 shader,
				 nir_lower_flrp,
				 lower_flrp,
				 false /* always_precise */,
				 shader->options->lower_ffma);
			if (lower_flrp_progress) {
				NIR_PASS(progress, shader,
					 nir_opt_constant_folding);
				progress = true;
			}

			/* Nothing should rematerialize any flrps, so we only
			 * need to do this lowering once.
			 */
			lower_flrp = 0;
		}

		NIR_PASS(progress, shader, nir_opt_undef);
		NIR_PASS(progress, shader, nir_opt_conditional_discard);
		if (shader->options->max_unroll_iterations) {
			NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
		}
	} while (progress && !optimize_conservatively);

	NIR_PASS(progress, shader, nir_opt_shrink_load);
	NIR_PASS(progress, shader, nir_opt_move_load_ubo);
}

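/* Translates a shader module into lowered, optimized NIR. Internal meta
 * shaders already come in as NIR and skip the SPIR-V path entirely; for
 * everything else the module's SPIR-V is parsed with the capability set
 * advertised by the driver, then run through the common lowering passes. */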
nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
			   struct radv_shader_module *module,
			   const char *entrypoint_name,
			   gl_shader_stage stage,
			   const VkSpecializationInfo *spec_info,
			   const VkPipelineCreateFlags flags,
			   const struct radv_pipeline_layout *layout)
{
	nir_shader *nir;

	if (module->nir) {
		/* Some things such as our meta clear/blit code will give us a NIR
		 * shader directly. In that case, we just ignore the SPIR-V entirely
		 * and just use the NIR shader. */
		nir = module->nir;
		nir->options = &nir_options;
		nir_validate_shader(nir, "in internal shader");

		assert(exec_list_length(&nir->functions) == 1);
	} else {
		uint32_t *spirv = (uint32_t *) module->data;
		assert(module->size % 4 == 0);

		if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
			radv_print_spirv(spirv, module->size, stderr);

		uint32_t num_spec_entries = 0;
		struct nir_spirv_specialization *spec_entries = NULL;
		if (spec_info && spec_info->mapEntryCount > 0) {
			num_spec_entries = spec_info->mapEntryCount;
			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
			for (uint32_t i = 0; i < num_spec_entries; i++) {
				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
				const void *data = spec_info->pData + entry.offset;
				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
				if (spec_info->dataSize == 8)
					spec_entries[i].data64 = *(const uint64_t *)data;
				else
					spec_entries[i].data32 = *(const uint32_t *)data;
			}
		}
		const struct spirv_to_nir_options spirv_options = {
			.lower_ubo_ssbo_access_to_offsets = true,
			.caps = {
				.amd_gcn_shader = true,
				.amd_shader_ballot = device->instance->perftest_flags & RADV_PERFTEST_SHADER_BALLOT,
				.amd_trinary_minmax = true,
				.derivative_group = true,
				.descriptor_array_dynamic_indexing = true,
				.descriptor_array_non_uniform_indexing = true,
				.descriptor_indexing = true,
				.device_group = true,
				.draw_parameters = true,
				.float16 = true,
				.float64 = true,
				.geometry_streams = true,
				.image_read_without_format = true,
				.image_write_without_format = true,
				.int8 = true,
				.int16 = true,
				.int64 = true,
				.int64_atomics = true,
				.multiview = true,
				.physical_storage_buffer_address = true,
				.runtime_descriptor_array = true,
				.shader_viewport_index_layer = true,
				.stencil_export = true,
				.storage_8bit = true,
				.storage_16bit = true,
				.storage_image_ms = true,
				.subgroup_arithmetic = true,
				.subgroup_ballot = true,
				.subgroup_basic = true,
				.subgroup_quad = true,
				.subgroup_shuffle = true,
				.subgroup_vote = true,
				.tessellation = true,
				.transform_feedback = true,
				.variable_pointers = true,
			},
			.ubo_addr_format = nir_address_format_32bit_index_offset,
			.ssbo_addr_format = nir_address_format_32bit_index_offset,
			.phys_ssbo_addr_format = nir_address_format_64bit_global,
			.push_const_addr_format = nir_address_format_logical,
			.shared_addr_format = nir_address_format_32bit_offset,
			.frag_coord_is_sysval = true,
		};
		nir = spirv_to_nir(spirv, module->size / 4,
				   spec_entries, num_spec_entries,
				   stage, entrypoint_name,
				   &spirv_options, &nir_options);
		assert(nir->info.stage == stage);
		nir_validate_shader(nir, "after spirv_to_nir");

		free(spec_entries);

		/* We have to lower away local constant initializers right before we
		 * inline functions. That way they get properly initialized at the top
		 * of the function and not at the top of its caller.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
		NIR_PASS_V(nir, nir_lower_returns);
		NIR_PASS_V(nir, nir_inline_functions);
		NIR_PASS_V(nir, nir_opt_deref);

		/* Pick off the single entrypoint that we want */
		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
			if (func->is_entrypoint)
				func->name = ralloc_strdup(func, "main");
			else
				exec_node_remove(&func->node);
		}
		assert(exec_list_length(&nir->functions) == 1);

		/* Make sure we lower constant initializers on output variables so that
		 * nir_remove_dead_variables below sees the corresponding stores.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);

		/* Now that we've deleted all but the main function, we can go ahead and
		 * lower the rest of the constant initializers.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);

		/* Split member structs. We do this before lower_io_to_temporaries so that
		 * it doesn't lower system values to temporaries by accident.
		 */
		NIR_PASS_V(nir, nir_split_var_copies);
		NIR_PASS_V(nir, nir_split_per_member_structs);

		if (nir->info.stage == MESA_SHADER_FRAGMENT)
			NIR_PASS_V(nir, nir_lower_input_attachments, true);

		NIR_PASS_V(nir, nir_remove_dead_variables,
			   nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

		NIR_PASS_V(nir, nir_lower_system_values);
		NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
		NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
	}

	/* Vulkan uses the separate-shader linking model */
	nir->info.separate_shader = true;

	nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

	static const nir_lower_tex_options tex_options = {
		.lower_txp = ~0,
		.lower_tg4_offsets = true,
	};

	nir_lower_tex(nir, &tex_options);

	nir_lower_vars_to_ssa(nir);

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_GEOMETRY ||
	    nir->info.stage == MESA_SHADER_FRAGMENT) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, true);
	} else if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, false);
	}

	nir_split_var_copies(nir);

	nir_lower_global_vars_to_local(nir);
	nir_remove_dead_variables(nir, nir_var_function_temp);
	nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
			.subgroup_size = 64,
			.ballot_bit_size = 64,
			.lower_to_scalar = 1,
			.lower_subgroup_masks = 1,
			.lower_shuffle = 1,
			.lower_shuffle_to_32bit = 1,
			.lower_vote_eq_to_ballot = 1,
		});

	nir_lower_load_const_to_scalar(nir);

	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_optimize_nir(nir, false, true);

	/* We call nir_lower_var_copies() after the first radv_optimize_nir()
	 * to remove any copies introduced by nir_opt_find_array_copies().
	 */
	nir_lower_var_copies(nir);

	/* Indirect lowering must be called after the radv_optimize_nir() loop
	 * has been called at least once. Otherwise indirect lowering can
	 * bloat the instruction count of the loop and cause it to be
	 * considered too large for unrolling.
	 */
	ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
	radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);

	return nir;
}

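/* Records which fragment input slots hold 16-bit values by recursing through
 * arrays and structs down to their scalar/vector/matrix leaves; the resulting
 * float16_shaded_mask is used later when the pipeline programs the PS input
 * state. */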
static void mark_16bit_fs_input(struct radv_shader_variant_info *shader_info,
				const struct glsl_type *type,
				int location)
{
	if (glsl_type_is_scalar(type) || glsl_type_is_vector(type) || glsl_type_is_matrix(type)) {
		unsigned attrib_count = glsl_count_attribute_slots(type, false);
		if (glsl_type_is_16bit(type)) {
			shader_info->fs.float16_shaded_mask |= ((1ull << attrib_count) - 1) << location;
		}
	} else if (glsl_type_is_array(type)) {
		unsigned stride = glsl_count_attribute_slots(glsl_get_array_element(type), false);
		for (unsigned i = 0; i < glsl_get_length(type); ++i) {
			mark_16bit_fs_input(shader_info, glsl_get_array_element(type), location + i * stride);
		}
	} else {
		assert(glsl_type_is_struct_or_ifc(type));
		for (unsigned i = 0; i < glsl_get_length(type); i++) {
			mark_16bit_fs_input(shader_info, glsl_get_struct_field(type, i), location);
			location += glsl_count_attribute_slots(glsl_get_struct_field(type, i), false);
		}
	}
}

static void
handle_fs_input_decl(struct radv_shader_variant_info *shader_info,
		     struct nir_variable *variable)
{
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);

	if (variable->data.compact) {
		unsigned component_count = variable->data.location_frac +
					   glsl_get_length(variable->type);
		attrib_count = (component_count + 3) / 4;
	} else {
		mark_16bit_fs_input(shader_info, variable->type,
				    variable->data.driver_location);
	}

	uint64_t mask = ((1ull << attrib_count) - 1);

	if (variable->data.interpolation == INTERP_MODE_FLAT)
		shader_info->fs.flat_shaded_mask |= mask << variable->data.driver_location;

	if (variable->data.location >= VARYING_SLOT_VAR0)
		shader_info->fs.input_mask |= mask << (variable->data.location - VARYING_SLOT_VAR0);
}

static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
	return glsl_count_attribute_slots(type, false);
}

static nir_variable *
find_layer_in_var(nir_shader *nir)
{
	nir_foreach_variable(var, &nir->inputs) {
		if (var->data.location == VARYING_SLOT_LAYER) {
			return var;
		}
	}

	nir_variable *var =
		nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
	var->data.location = VARYING_SLOT_LAYER;
	var->data.interpolation = INTERP_MODE_FLAT;
	return var;
}

/* We use layered rendering to implement multiview, which means we need to map
 * view_index to gl_Layer. The attachment lowering also needs to know the
 * layer so that it can sample from the correct layer. The code generates a
 * load from the layer_id sysval, but since we don't have a way to get at this
 * information from the fragment shader, we also need to lower this to the
 * gl_Layer varying. This pass lowers both to a varying load from the LAYER
 * slot, before lowering io, so that nir_assign_var_locations() will give the
 * LAYER varying the correct driver_location.
 */
static bool
lower_view_index(nir_shader *nir)
{
	bool progress = false;
	nir_function_impl *entry = nir_shader_get_entrypoint(nir);
	nir_builder b;
	nir_builder_init(&b, entry);

	nir_variable *layer = NULL;
	nir_foreach_block(block, entry) {
		nir_foreach_instr_safe(instr, block) {
			if (instr->type != nir_instr_type_intrinsic)
				continue;

			nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
			if (load->intrinsic != nir_intrinsic_load_view_index &&
			    load->intrinsic != nir_intrinsic_load_layer_id)
				continue;

			if (!layer)
				layer = find_layer_in_var(nir);

			b.cursor = nir_before_instr(instr);
			nir_ssa_def *def = nir_load_var(&b, layer);
			nir_ssa_def_rewrite_uses(&load->dest.ssa,
						 nir_src_for_ssa(def));

			nir_instr_remove(instr);
			progress = true;
		}
	}

	return progress;
}

/* Gather information needed to setup the vs<->ps linking registers in
 * radv_pipeline_generate_ps_inputs().
 */
static void
handle_fs_inputs(nir_shader *nir, struct radv_shader_variant_info *shader_info)
{
	shader_info->fs.num_interp = nir->num_inputs;

	nir_foreach_variable(variable, &nir->inputs)
		handle_fs_input_decl(shader_info, variable);
}

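/* Fragment-shader i/o lowering: resolve view_index/layer_id first, then
 * assign driver locations and gather the linking info, before nir_lower_io
 * bakes the locations into explicit offsets. */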
static void
lower_fs_io(nir_shader *nir, struct radv_shader_variant_info *shader_info)
{
	NIR_PASS_V(nir, lower_view_index);
	nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
				    MESA_SHADER_FRAGMENT);

	handle_fs_inputs(nir, shader_info);

	NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);

	/* This pass needs actual constants */
	nir_opt_constant_folding(nir);

	NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
	radv_optimize_nir(nir, false, false);
}

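/* Shader code lives in shared slab BOs. Allocation is first-fit: scan the
 * 256-byte-aligned gaps between shaders already placed in each slab, then
 * the slab tail, and only create a new 256 KiB slab when nothing fits. */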
void *
radv_alloc_shader_memory(struct radv_device *device,
			 struct radv_shader_variant *shader)
{
	mtx_lock(&device->shader_slab_mutex);
	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		uint64_t offset = 0;
		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
			if (s->bo_offset - offset >= shader->code_size) {
				shader->bo = slab->bo;
				shader->bo_offset = offset;
				list_addtail(&shader->slab_list, &s->slab_list);
				mtx_unlock(&device->shader_slab_mutex);
				return slab->ptr + offset;
			}
			offset = align_u64(s->bo_offset + s->code_size, 256);
		}
		if (slab->size - offset >= shader->code_size) {
			shader->bo = slab->bo;
			shader->bo_offset = offset;
			list_addtail(&shader->slab_list, &slab->shaders);
			mtx_unlock(&device->shader_slab_mutex);
			return slab->ptr + offset;
		}
	}

	mtx_unlock(&device->shader_slab_mutex);
	struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

	slab->size = 256 * 1024;
	slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
					     RADEON_DOMAIN_VRAM,
					     RADEON_FLAG_NO_INTERPROCESS_SHARING |
					     (device->physical_device->cpdma_prefetch_writes_memory ?
						     0 : RADEON_FLAG_READ_ONLY),
					     RADV_BO_PRIORITY_SHADER);
	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
	list_inithead(&slab->shaders);

	mtx_lock(&device->shader_slab_mutex);
	list_add(&slab->slabs, &device->shader_slabs);

	shader->bo = slab->bo;
	shader->bo_offset = 0;
	list_add(&shader->slab_list, &slab->shaders);
	mtx_unlock(&device->shader_slab_mutex);
	return slab->ptr;
}

void
radv_destroy_shader_slabs(struct radv_device *device)
{
	list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		device->ws->buffer_destroy(slab->bo);
		free(slab);
	}
	mtx_destroy(&device->shader_slab_mutex);
}

/* For the UMR disassembler. */
#define DEBUGGER_END_OF_CODE_MARKER	0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS		5

static size_t
radv_get_shader_binary_size(size_t code_size)
{
	return code_size + DEBUGGER_NUM_MARKERS * 4;
}

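/* Derives the final RSRC1/RSRC2 register dwords for a shader. For fragment
 * shaders the input VGPR count is recomputed from SPI_PS_INPUT_ADDR, since
 * each enabled interpolant or position channel consumes a fixed number of
 * input VGPRs. */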
static void radv_postprocess_config(const struct radv_physical_device *pdevice,
				    const struct ac_shader_config *config_in,
				    const struct radv_shader_variant_info *info,
				    gl_shader_stage stage,
				    struct ac_shader_config *config_out)
{
	bool scratch_enabled = config_in->scratch_bytes_per_wave > 0;
	unsigned vgpr_comp_cnt = 0;
	unsigned num_input_vgprs = info->num_input_vgprs;

	if (stage == MESA_SHADER_FRAGMENT) {
		num_input_vgprs = 0;
		if (G_0286CC_PERSP_SAMPLE_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_ANCILLARY_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_SAMPLE_COVERAGE_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
	}

	unsigned num_vgprs = MAX2(config_in->num_vgprs, num_input_vgprs);
	/* +3 for scratch wave offset and VCC */
	unsigned num_sgprs = MAX2(config_in->num_sgprs, info->num_input_sgprs + 3);

	*config_out = *config_in;
	config_out->num_vgprs = num_vgprs;
	config_out->num_sgprs = num_sgprs;

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - GFX6 & GFX7 would be very slow.
	 */
	config_out->float_mode |= V_00B028_FP_64_DENORMS;

	config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
			    S_00B12C_SCRATCH_EN(scratch_enabled);

	config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) / 4) |
			    S_00B848_DX10_CLAMP(1) |
			    S_00B848_FLOAT_MODE(config_out->float_mode);

	if (pdevice->rad_info.chip_class >= GFX10) {
		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(info->num_user_sgprs >> 5);
	} else {
		config_out->rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5) |
				     S_00B12C_SO_BASE0_EN(!!info->info.so.strides[0]) |
				     S_00B12C_SO_BASE1_EN(!!info->info.so.strides[1]) |
				     S_00B12C_SO_BASE2_EN(!!info->info.so.strides[2]) |
				     S_00B12C_SO_BASE3_EN(!!info->info.so.strides[3]) |
				     S_00B12C_SO_EN(!!info->info.so.num_outputs);
	}

	switch (stage) {
	case MESA_SHADER_TESS_EVAL:
		if (info->is_ngg) {
			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1);
		} else if (info->tes.as_es) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			vgpr_comp_cnt = info->info.uses_prim_id ? 3 : 2;

			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		} else {
			bool enable_prim_id = info->tes.export_prim_id || info->info.uses_prim_id;
			vgpr_comp_cnt = enable_prim_id ? 3 : 2;

			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		break;
	case MESA_SHADER_TESS_CTRL:
		if (pdevice->rad_info.chip_class >= GFX9) {
			/* We need at least 2 components for LS.
			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
			 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
			 */
			if (pdevice->rad_info.chip_class >= GFX10) {
				vgpr_comp_cnt = info->info.vs.needs_instance_id ? 3 : 1;
			} else {
				vgpr_comp_cnt = info->info.vs.needs_instance_id ? 2 : 1;
			}
		} else {
			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		config_out->rsrc1 |= S_00B428_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		break;
	case MESA_SHADER_VERTEX:
		if (info->is_ngg) {
			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		} else if (info->vs.as_ls) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			/* We need at least 2 components for LS.
			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
			 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
			 */
			vgpr_comp_cnt = info->info.vs.needs_instance_id ? 2 : 1;
		} else if (info->vs.as_es) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			vgpr_comp_cnt = info->info.vs.needs_instance_id ? 1 : 0;
		} else {
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID).
			 * If PrimID is disabled, InstanceID / StepRate1 is loaded instead.
			 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
			 */
			if (info->vs.export_prim_id) {
				vgpr_comp_cnt = 2;
			} else if (info->info.vs.needs_instance_id) {
				vgpr_comp_cnt = 1;
			} else {
				vgpr_comp_cnt = 0;
			}
		}
		config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		break;
	case MESA_SHADER_FRAGMENT:
		config_out->rsrc1 |= S_00B028_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		break;
	case MESA_SHADER_GEOMETRY:
		config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		break;
	case MESA_SHADER_COMPUTE:
		config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |=
			S_00B84C_TGID_X_EN(info->info.cs.uses_block_id[0]) |
			S_00B84C_TGID_Y_EN(info->info.cs.uses_block_id[1]) |
			S_00B84C_TGID_Z_EN(info->info.cs.uses_block_id[2]) |
			S_00B84C_TIDIG_COMP_CNT(info->info.cs.uses_thread_id[2] ? 2 :
						info->info.cs.uses_thread_id[1] ? 1 : 0) |
			S_00B84C_TG_SIZE_EN(info->info.cs.uses_local_invocation_idx) |
			S_00B84C_LDS_SIZE(config_in->lds_size);
		break;
	default:
		unreachable("unsupported shader type");
		break;
	}

	if (pdevice->rad_info.chip_class >= GFX10 &&
	    (stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL)) {
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

		/* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
		if (stage == MESA_SHADER_VERTEX) {
			es_vgpr_comp_cnt = info->info.vs.needs_instance_id ? 3 : 0;
		} else if (stage == MESA_SHADER_TESS_EVAL) {
			bool enable_prim_id = info->tes.export_prim_id || info->info.uses_prim_id;
			es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
		}

		bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
			info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
		if (info->info.uses_invocation_id || stage == MESA_SHADER_VERTEX) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->info.uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (info->gs.vertices_in >= 3 || tes_triangles) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				     S_00B22C_LDS_SIZE(config_in->lds_size);
	} else if (pdevice->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_GEOMETRY) {
		unsigned es_type = info->gs.es_type;
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

		if (es_type == MESA_SHADER_VERTEX) {
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			es_vgpr_comp_cnt = info->info.vs.needs_instance_id ? 1 : 0;
		} else if (es_type == MESA_SHADER_TESS_EVAL) {
			es_vgpr_comp_cnt = info->info.uses_prim_id ? 3 : 2;
		} else {
			unreachable("invalid shader ES type");
		}

		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
		 * VGPR[0:4] are always loaded.
		 */
		if (info->info.uses_invocation_id) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->info.uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (info->gs.vertices_in >= 3) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				     S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
	} else if (pdevice->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_TESS_CTRL) {
		config_out->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
	} else {
		config_out->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
	}
}

static void radv_init_llvm_target()
{
	LLVMInitializeAMDGPUTargetInfo();
	LLVMInitializeAMDGPUTarget();
	LLVMInitializeAMDGPUTargetMC();
	LLVMInitializeAMDGPUAsmPrinter();

	/* For inline assembly. */
	LLVMInitializeAMDGPUAsmParser();

	/* Workaround for bug in llvm 4.0 that causes image intrinsics
	 * to disappear.
	 * https://reviews.llvm.org/D26348
	 *
	 * Workaround for bug in llvm that causes the GPU to hang in presence
	 * of nested loops because there is an exec mask issue. The proper
	 * solution is to fix LLVM but this might require a bunch of work.
	 * https://bugs.llvm.org/show_bug.cgi?id=37744
	 *
	 * "mesa" is the prefix for error messages.
	 */
	if (HAVE_LLVM >= 0x0800) {
		const char *argv[2] = { "mesa", "-simplifycfg-sink-common=false" };
		LLVMParseCommandLineOptions(2, argv, NULL);
	} else {
		const char *argv[3] = { "mesa", "-simplifycfg-sink-common=false",
					"-amdgpu-skip-threshold=1" };
		LLVMParseCommandLineOptions(3, argv, NULL);
	}
}

static once_flag radv_init_llvm_target_once_flag = ONCE_FLAG_INIT;

static void radv_init_llvm_once(void)
{
	call_once(&radv_init_llvm_target_once_flag, radv_init_llvm_target);
}

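/* Wraps a compiled binary in a radv_shader_variant: the register config is
 * read either through ac_rtld (ELF binaries) or straight out of the legacy
 * binary, the machine code is uploaded into slab memory, and any recorded
 * disassembly / LLVM IR strings are kept for debugging. */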
struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
			   const struct radv_shader_binary *binary)
{
	struct ac_shader_config config = {0};
	struct ac_rtld_binary rtld_binary = {0};
	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
	if (!variant)
		return NULL;

	variant->ref_count = 1;

	if (binary->type == RADV_BINARY_TYPE_RTLD) {
		struct ac_rtld_symbol lds_symbols[1];
		unsigned num_lds_symbols = 0;
		const char *elf_data = (const char *)((struct radv_shader_binary_rtld *)binary)->data;
		size_t elf_size = ((struct radv_shader_binary_rtld *)binary)->elf_size;

		if (device->physical_device->rad_info.chip_class >= GFX9 &&
		    binary->stage == MESA_SHADER_GEOMETRY && !binary->is_gs_copy_shader) {
			/* We add this symbol even on LLVM <= 8 to ensure that
			 * shader->config.lds_size is set correctly below.
			 */
			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
			sym->name = "esgs_ring";
			sym->size = 32 * 1024;
			sym->align = 64 * 1024;
		}

		struct ac_rtld_open_info open_info = {
			.info = &device->physical_device->rad_info,
			.shader_type = binary->stage,
			.num_parts = 1,
			.elf_ptrs = &elf_data,
			.elf_sizes = &elf_size,
			.num_shared_lds_symbols = num_lds_symbols,
			.shared_lds_symbols = lds_symbols,
		};

		if (!ac_rtld_open(&rtld_binary, open_info)) {
			free(variant);
			return NULL;
		}

		if (!ac_rtld_read_config(&rtld_binary, &config)) {
			ac_rtld_close(&rtld_binary);
			free(variant);
			return NULL;
		}

		if (rtld_binary.lds_size > 0) {
			unsigned alloc_granularity = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			config.lds_size = align(rtld_binary.lds_size, alloc_granularity) / alloc_granularity;
		}

		variant->code_size = rtld_binary.rx_size;
	} else {
		assert(binary->type == RADV_BINARY_TYPE_LEGACY);
		config = ((struct radv_shader_binary_legacy *)binary)->config;
		variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
	}

	variant->info = binary->variant_info;
	radv_postprocess_config(device->physical_device, &config, &binary->variant_info,
				binary->stage, &variant->config);

	void *dest_ptr = radv_alloc_shader_memory(device, variant);

	if (binary->type == RADV_BINARY_TYPE_RTLD) {
		struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
		struct ac_rtld_upload_info info = {
			.binary = &rtld_binary,
			.rx_va = radv_buffer_get_va(variant->bo) + variant->bo_offset,
			.rx_ptr = dest_ptr,
		};

		if (!ac_rtld_upload(&info)) {
			radv_shader_variant_destroy(device, variant);
			ac_rtld_close(&rtld_binary);
			return NULL;
		}

		const char *disasm_data;
		size_t disasm_size;
		if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
			radv_shader_variant_destroy(device, variant);
			ac_rtld_close(&rtld_binary);
			return NULL;
		}

		variant->llvm_ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
		variant->disasm_string = malloc(disasm_size + 1);
		memcpy(variant->disasm_string, disasm_data, disasm_size);
		variant->disasm_string[disasm_size] = 0;

		ac_rtld_close(&rtld_binary);
	} else {
		struct radv_shader_binary_legacy* bin = (struct radv_shader_binary_legacy *)binary;
		memcpy(dest_ptr, bin->data, bin->code_size);

		/* Add end-of-code markers for the UMR disassembler. */
		uint32_t *ptr32 = (uint32_t *)dest_ptr + bin->code_size / 4;
		for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
			ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;

		variant->llvm_ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->code_size)) : NULL;
		variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->code_size + bin->llvm_ir_size)) : NULL;
	}

	return variant;
}

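/* Common driver-side compilation path for both regular shaders and the GS
 * copy shader: fills the compiler options from device state and debug flags,
 * invokes the LLVM backend, and turns the result into a variant. */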
static struct radv_shader_variant *
shader_variant_compile(struct radv_device *device,
		       struct radv_shader_module *module,
		       struct nir_shader * const *shaders,
		       int shader_count,
		       gl_shader_stage stage,
		       struct radv_nir_compiler_options *options,
		       bool gs_copy_shader,
		       struct radv_shader_binary **binary_out)
{
	enum radeon_family chip_family = device->physical_device->rad_info.family;
	enum ac_target_machine_options tm_options = 0;
	struct ac_llvm_compiler ac_llvm;
	struct radv_shader_binary *binary = NULL;
	struct radv_shader_variant_info variant_info = {0};
	bool thread_compiler;

	if (shaders[0]->info.stage == MESA_SHADER_FRAGMENT)
		lower_fs_io(shaders[0], &variant_info);

	options->family = chip_family;
	options->chip_class = device->physical_device->rad_info.chip_class;
	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
	options->dump_preoptir = options->dump_shader &&
				 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
	options->record_llvm_ir = device->keep_shader_info;
	options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
	options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
	options->address32_hi = device->physical_device->rad_info.address32_hi;

	if (options->supports_spill)
		tm_options |= AC_TM_SUPPORTS_SPILL;
	if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
		tm_options |= AC_TM_SISCHED;
	if (options->check_ir)
		tm_options |= AC_TM_CHECK_IR;
	if (device->instance->debug_flags & RADV_DEBUG_NO_LOAD_STORE_OPT)
		tm_options |= AC_TM_NO_LOAD_STORE_OPT;

	thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
	radv_init_llvm_once();
	radv_init_llvm_compiler(&ac_llvm,
				thread_compiler,
				chip_family, tm_options);
	if (gs_copy_shader) {
		assert(shader_count == 1);
		radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
					    &variant_info, options);
	} else {
		radv_compile_nir_shader(&ac_llvm, &binary, &variant_info,
					shaders, shader_count, options);
	}
	binary->variant_info = variant_info;

	radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);

	struct radv_shader_variant *variant = radv_shader_variant_create(device, binary);
	if (!variant) {
		free(binary);
		return NULL;
	}

	if (options->dump_shader) {
		fprintf(stderr, "disasm:\n%s\n", variant->disasm_string);
	}

	if (device->keep_shader_info) {
		if (!gs_copy_shader && !module->nir) {
			variant->nir = *shaders;
			variant->spirv = (uint32_t *)module->data;
			variant->spirv_size = module->size;
		}
	}

	*binary_out = binary;

	return variant;
}

struct radv_shader_variant *
radv_shader_variant_compile(struct radv_device *device,
			    struct radv_shader_module *module,
			    struct nir_shader *const *shaders,
			    int shader_count,
			    struct radv_pipeline_layout *layout,
			    const struct radv_shader_variant_key *key,
			    struct radv_shader_binary **binary_out)
{
	struct radv_nir_compiler_options options = {0};

	options.layout = layout;
	if (key)
		options.key = *key;

	options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
	options.supports_spill = true;

	return shader_variant_compile(device, module, shaders, shader_count,
				      shaders[shader_count - 1]->info.stage,
				      &options, false, binary_out);
}

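/* The GS copy shader is an internal shader, compiled as a vertex shader,
 * that reads the GS output ring and performs the actual vertex exports on
 * the hardware VS stage. */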
struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
			   struct nir_shader *shader,
			   struct radv_shader_binary **binary_out,
			   bool multiview)
{
	struct radv_nir_compiler_options options = {0};

	options.key.has_multiview_view_index = multiview;

	return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
				      &options, true, binary_out);
}

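/* Variants are reference-counted; the slab entry and the attached debug
 * strings are only released when the last reference is dropped. */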
void
radv_shader_variant_destroy(struct radv_device *device,
			    struct radv_shader_variant *variant)
{
	if (!p_atomic_dec_zero(&variant->ref_count))
		return;

	mtx_lock(&device->shader_slab_mutex);
	list_del(&variant->slab_list);
	mtx_unlock(&device->shader_slab_mutex);

	ralloc_free(variant->nir);
	free(variant->disasm_string);
	free(variant->llvm_ir_string);
	free(variant);
}

const char *
radv_get_shader_name(struct radv_shader_variant *var, gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX: return var->info.vs.as_ls ? "Vertex Shader as LS" : var->info.vs.as_es ? "Vertex Shader as ES" : "Vertex Shader as VS";
	case MESA_SHADER_GEOMETRY: return "Geometry Shader";
	case MESA_SHADER_FRAGMENT: return "Pixel Shader";
	case MESA_SHADER_COMPUTE: return "Compute Shader";
	case MESA_SHADER_TESS_CTRL: return "Tessellation Control Shader";
	case MESA_SHADER_TESS_EVAL: return var->info.tes.as_es ? "Tessellation Evaluation Shader as ES" : "Tessellation Evaluation Shader as VS";
	default:
		return "Unknown shader";
	};
}

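/* Builds the human-readable stats block. The max-waves estimate bounds waves
 * per SIMD by SGPR and VGPR usage and, for PS/CS, by LDS consumption. */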
static void
generate_shader_stats(struct radv_device *device,
		      struct radv_shader_variant *variant,
		      gl_shader_stage stage,
		      struct _mesa_string_buffer *buf)
{
	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
	struct ac_shader_config *conf;
	unsigned max_simd_waves;
	unsigned lds_per_wave = 0;

	max_simd_waves = ac_get_max_simd_waves(device->physical_device->rad_info.family);

	conf = &variant->config;

	if (stage == MESA_SHADER_FRAGMENT) {
		lds_per_wave = conf->lds_size * lds_increment +
			       align(variant->info.fs.num_interp * 48,
				     lds_increment);
	} else if (stage == MESA_SHADER_COMPUTE) {
		unsigned max_workgroup_size =
			radv_nir_get_max_workgroup_size(chip_class, variant->nir);
		lds_per_wave = (conf->lds_size * lds_increment) /
			       DIV_ROUND_UP(max_workgroup_size, 64);
	}

	if (conf->num_sgprs)
		max_simd_waves =
			MIN2(max_simd_waves,
			     ac_get_num_physical_sgprs(chip_class) / conf->num_sgprs);

	if (conf->num_vgprs)
		max_simd_waves =
			MIN2(max_simd_waves,
			     RADV_NUM_PHYSICAL_VGPRS / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
	 * that PS can use.
	 */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (stage == MESA_SHADER_FRAGMENT) {
		_mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
					   "SPI_PS_INPUT_ADDR = 0x%04x\n"
					   "SPI_PS_INPUT_ENA = 0x%04x\n",
					   conf->spi_ps_input_addr, conf->spi_ps_input_ena);
	}

	_mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
				   "SGPRS: %d\n"
				   "VGPRS: %d\n"
				   "Spilled SGPRs: %d\n"
				   "Spilled VGPRs: %d\n"
				   "PrivMem VGPRS: %d\n"
				   "Code Size: %d bytes\n"
				   "LDS: %d blocks\n"
				   "Scratch: %d bytes per wave\n"
				   "Max Waves: %d\n"
				   "********************\n\n\n",
				   conf->num_sgprs, conf->num_vgprs,
				   conf->spilled_sgprs, conf->spilled_vgprs,
				   variant->info.private_mem_vgprs, variant->code_size,
				   conf->lds_size, conf->scratch_bytes_per_wave,
				   max_simd_waves);
}

void
radv_shader_dump_stats(struct radv_device *device,
		       struct radv_shader_variant *variant,
		       gl_shader_stage stage,
		       FILE *file)
{
	struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

	generate_shader_stats(device, variant, stage, buf);

	fprintf(file, "\n%s:\n", radv_get_shader_name(variant, stage));
	fprintf(file, "%s", buf->buf);

	_mesa_string_buffer_destroy(buf);
}

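/* VK_AMD_shader_info entry point. Uses the usual Vulkan two-call idiom: with
 * pInfo == NULL only the required size is written to *pInfoSize; otherwise up
 * to *pInfoSize bytes are copied and VK_INCOMPLETE is returned when the
 * buffer was too small. */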
VkResult
radv_GetShaderInfoAMD(VkDevice _device,
		      VkPipeline _pipeline,
		      VkShaderStageFlagBits shaderStage,
		      VkShaderInfoTypeAMD infoType,
		      size_t* pInfoSize,
		      void* pInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
	gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
	struct radv_shader_variant *variant = pipeline->shaders[stage];
	struct _mesa_string_buffer *buf;
	VkResult result = VK_SUCCESS;

	/* Spec doesn't indicate what to do if the stage is invalid, so just
	 * return no info for this. */
	if (!variant)
		return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

	switch (infoType) {
	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
		if (!pInfo) {
			*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
		} else {
			unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			struct ac_shader_config *conf = &variant->config;

			VkShaderStatisticsInfoAMD statistics = {};
			statistics.shaderStageMask = shaderStage;
			statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
			statistics.numPhysicalSgprs = ac_get_num_physical_sgprs(device->physical_device->rad_info.chip_class);
			statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

			if (stage == MESA_SHADER_COMPUTE) {
				unsigned *local_size = variant->nir->info.cs.local_size;
				unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

				statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
							       ceil((double)workgroup_size / statistics.numPhysicalVgprs);

				statistics.computeWorkGroupSize[0] = local_size[0];
				statistics.computeWorkGroupSize[1] = local_size[1];
				statistics.computeWorkGroupSize[2] = local_size[2];
			} else {
				statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
			}

			statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
			statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
			statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
			statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
			statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

			size_t size = *pInfoSize;
			*pInfoSize = sizeof(statistics);

			memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

			if (size < *pInfoSize)
				result = VK_INCOMPLETE;
		}
		break;
	case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
		buf = _mesa_string_buffer_create(NULL, 1024);

		_mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(variant, stage));
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->llvm_ir_string);
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
		generate_shader_stats(device, variant, stage, buf);

		/* Need to include the null terminator. */
		size_t length = buf->length + 1;

		if (!pInfo) {
			*pInfoSize = length;
		} else {
			size_t size = *pInfoSize;
			*pInfoSize = length;

			memcpy(pInfo, buf->buf, MIN2(size, length));

			if (length > size)
				result = VK_INCOMPLETE;
		}

		_mesa_string_buffer_destroy(buf);
		break;
	default:
		/* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
		result = VK_ERROR_FEATURE_NOT_PRESENT;
		break;
	}

	return result;
}