/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"

#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Support.h>

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "ac_rtld.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"

#include "aco_interface.h"

#include "util/string_buffer.h"
static const struct nir_shader_compiler_options nir_options_llvm = {
	.vertex_id_zero_based = true,
	.lower_device_index_to_zero = true,
	.lower_bitfield_insert_to_bitfield_select = true,
	.lower_bitfield_extract = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_mul_2x32_64 = true,
	.max_unroll_iterations = 32,
	.use_interpolated_input_intrinsics = true,
};
static const struct nir_shader_compiler_options nir_options_aco = {
	.vertex_id_zero_based = true,
	.lower_device_index_to_zero = true,
	.lower_bitfield_insert_to_bitfield_select = true,
	.lower_bitfield_extract = true,
	.lower_sub = true, /* TODO: set this to false once !1236 is merged */
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_unpack_half_2x16 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_mul_2x32_64 = true,
	.lower_rotate = true,
	.max_unroll_iterations = 32,
	.use_interpolated_input_intrinsics = true,
};
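/* As written here, the ACO option set differs from the LLVM one mainly in a
 * few extra ALU lowerings (lower_sub, lower_unpack_half_2x16, lower_rotate)
 * that ACO wants done in NIR, while the LLVM backend handles them itself.
 */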
bool
radv_can_dump_shader(struct radv_device *device,
		     struct radv_shader_module *module,
		     bool is_gs_copy_shader)
{
	if (!(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS))
		return false;

	/* Only dump non-meta shaders, useful for debugging purposes. */
	return (module && !module->nir) || is_gs_copy_shader;
}
bool
radv_can_dump_shader_stats(struct radv_device *device,
			   struct radv_shader_module *module)
{
	/* Only dump non-meta shader stats. */
	return device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS &&
	       module && !module->nir;
}
unsigned shader_io_get_unique_index(gl_varying_slot slot)
{
	/* handle patch indices separate */
	if (slot == VARYING_SLOT_TESS_LEVEL_OUTER)
		return 0;
	if (slot == VARYING_SLOT_TESS_LEVEL_INNER)
		return 1;
	if (slot >= VARYING_SLOT_PATCH0 && slot <= VARYING_SLOT_TESS_MAX)
		return 2 + (slot - VARYING_SLOT_PATCH0);
	if (slot == VARYING_SLOT_POS)
		return 0;
	if (slot == VARYING_SLOT_PSIZ)
		return 1;
	if (slot == VARYING_SLOT_CLIP_DIST0)
		return 2;
	if (slot == VARYING_SLOT_CLIP_DIST1)
		return 3;
	/* 3 is reserved for clip dist as well */
	if (slot >= VARYING_SLOT_VAR0 && slot <= VARYING_SLOT_VAR31)
		return 4 + (slot - VARYING_SLOT_VAR0);
	unreachable("illegal slot in get unique index\n");
}
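/* Worked example of the mapping above: VARYING_SLOT_POS -> 0,
 * VARYING_SLOT_CLIP_DIST1 -> 3, VARYING_SLOT_VAR2 -> 4 + 2 = 6. Patch
 * varyings use a separate index namespace, so VARYING_SLOT_TESS_LEVEL_OUTER
 * also maps to 0 without clashing.
 */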
VkResult radv_CreateShaderModule(
	VkDevice                                    _device,
	const VkShaderModuleCreateInfo*             pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkShaderModule*                             pShaderModule)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_shader_module *module;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	module = vk_alloc2(&device->alloc, pAllocator,
			   sizeof(*module) + pCreateInfo->codeSize, 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (module == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	module->nir = NULL;
	module->size = pCreateInfo->codeSize;
	memcpy(module->data, pCreateInfo->pCode, module->size);

	_mesa_sha1_compute(module->data, module->size, module->sha1);

	*pShaderModule = radv_shader_module_to_handle(module);

	return VK_SUCCESS;
}
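/* Note: the raw SPIR-V words live inline after the module struct, and the
 * SHA-1 computed here is what the rest of the driver uses to identify the
 * module, e.g. as part of pipeline cache keys.
 */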
void radv_DestroyShaderModule(
	VkDevice                                    _device,
	VkShaderModule                              _module,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_shader_module, module, _module);

	if (!module)
		return;

	vk_free2(&device->alloc, pAllocator, module);
}
void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
		  bool allow_copies)
{
	bool progress;
	unsigned lower_flrp =
		(shader->options->lower_flrp16 ? 16 : 0) |
		(shader->options->lower_flrp32 ? 32 : 0) |
		(shader->options->lower_flrp64 ? 64 : 0);

	do {
		progress = false;

		NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
		NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);

		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
		NIR_PASS_V(shader, nir_lower_pack);

		if (allow_copies) {
			/* Only run this pass in the first call to
			 * radv_optimize_nir. Later calls assume that we've
			 * lowered away any copy_deref instructions and we
			 * don't want to introduce any more.
			 */
			NIR_PASS(progress, shader, nir_opt_find_array_copies);
		}

		NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
		NIR_PASS(progress, shader, nir_opt_dead_write_vars);
		NIR_PASS(progress, shader, nir_remove_dead_variables,
			 nir_var_function_temp);

		NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
		NIR_PASS_V(shader, nir_lower_phis_to_scalar);

		NIR_PASS(progress, shader, nir_copy_prop);
		NIR_PASS(progress, shader, nir_opt_remove_phis);
		NIR_PASS(progress, shader, nir_opt_dce);
		if (nir_opt_trivial_continues(shader)) {
			progress = true;
			NIR_PASS(progress, shader, nir_copy_prop);
			NIR_PASS(progress, shader, nir_opt_remove_phis);
			NIR_PASS(progress, shader, nir_opt_dce);
		}
		NIR_PASS(progress, shader, nir_opt_if, true);
		NIR_PASS(progress, shader, nir_opt_dead_cf);
		NIR_PASS(progress, shader, nir_opt_cse);
		NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
		NIR_PASS(progress, shader, nir_opt_constant_folding);
		NIR_PASS(progress, shader, nir_opt_algebraic);

		if (lower_flrp != 0) {
			bool lower_flrp_progress = false;
			NIR_PASS(lower_flrp_progress,
				 shader,
				 nir_lower_flrp,
				 lower_flrp,
				 false /* always_precise */,
				 shader->options->lower_ffma);
			if (lower_flrp_progress) {
				NIR_PASS(progress, shader,
					 nir_opt_constant_folding);
				progress = true;
			}

			/* Nothing should rematerialize any flrps, so we only
			 * need to do this lowering once.
			 */
			lower_flrp = 0;
		}

		NIR_PASS(progress, shader, nir_opt_undef);
		if (shader->options->max_unroll_iterations) {
			NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
		}
	} while (progress && !optimize_conservatively);

	NIR_PASS(progress, shader, nir_opt_conditional_discard);
	NIR_PASS(progress, shader, nir_opt_shrink_load);
	NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
}
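/* The do/while above runs the pass list to a fixed point: it repeats until
 * no pass reports progress, unless optimize_conservatively is set (as it is
 * when VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT was given), in which case
 * a single iteration suffices.
 */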
static nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
			   struct radv_shader_module *module,
			   const char *entrypoint_name,
			   gl_shader_stage stage,
			   const VkSpecializationInfo *spec_info,
			   const VkPipelineCreateFlags flags,
			   const struct radv_pipeline_layout *layout,
			   bool use_aco)
{
	nir_shader *nir;
	const nir_shader_compiler_options *nir_options = use_aco ? &nir_options_aco :
								   &nir_options_llvm;

	if (module->nir) {
		/* Some things such as our meta clear/blit code will give us a NIR
		 * shader directly. In that case, we just ignore the SPIR-V entirely
		 * and just use the NIR shader */
		nir = module->nir;
		nir->options = nir_options;
		nir_validate_shader(nir, "in internal shader");

		assert(exec_list_length(&nir->functions) == 1);
	} else {
		uint32_t *spirv = (uint32_t *) module->data;
		assert(module->size % 4 == 0);

		if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
			radv_print_spirv(spirv, module->size, stderr);

		uint32_t num_spec_entries = 0;
		struct nir_spirv_specialization *spec_entries = NULL;
		if (spec_info && spec_info->mapEntryCount > 0) {
			num_spec_entries = spec_info->mapEntryCount;
			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
			for (uint32_t i = 0; i < num_spec_entries; i++) {
				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
				const void *data = spec_info->pData + entry.offset;
				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
				if (spec_info->dataSize == 8)
					spec_entries[i].data64 = *(const uint64_t *)data;
				else
					spec_entries[i].data32 = *(const uint32_t *)data;
			}
		}
		const struct spirv_to_nir_options spirv_options = {
			.lower_ubo_ssbo_access_to_offsets = true,
			.caps = {
				.amd_gcn_shader = true,
				.amd_shader_ballot = device->physical_device->use_shader_ballot,
				.amd_trinary_minmax = true,
				.demote_to_helper_invocation = device->physical_device->use_aco,
				.derivative_group = true,
				.descriptor_array_dynamic_indexing = true,
				.descriptor_array_non_uniform_indexing = true,
				.descriptor_indexing = true,
				.device_group = true,
				.draw_parameters = true,
				.float16 = !device->physical_device->use_aco,
				.float64 = true,
				.geometry_streams = true,
				.image_read_without_format = true,
				.image_write_without_format = true,
				.int8 = !device->physical_device->use_aco,
				.int16 = !device->physical_device->use_aco,
				.int64 = true,
				.int64_atomics = true,
				.multiview = true,
				.physical_storage_buffer_address = true,
				.post_depth_coverage = true,
				.runtime_descriptor_array = true,
				.shader_viewport_index_layer = true,
				.stencil_export = true,
				.storage_8bit = !device->physical_device->use_aco,
				.storage_16bit = !device->physical_device->use_aco,
				.storage_image_ms = true,
				.subgroup_arithmetic = true,
				.subgroup_ballot = true,
				.subgroup_basic = true,
				.subgroup_quad = true,
				.subgroup_shuffle = true,
				.subgroup_vote = true,
				.tessellation = true,
				.transform_feedback = true,
				.variable_pointers = true,
			},
			.ubo_addr_format = nir_address_format_32bit_index_offset,
			.ssbo_addr_format = nir_address_format_32bit_index_offset,
			.phys_ssbo_addr_format = nir_address_format_64bit_global,
			.push_const_addr_format = nir_address_format_logical,
			.shared_addr_format = nir_address_format_32bit_offset,
			.frag_coord_is_sysval = true,
		};
		nir = spirv_to_nir(spirv, module->size / 4,
				   spec_entries, num_spec_entries,
				   stage, entrypoint_name,
				   &spirv_options, nir_options);
		assert(nir->info.stage == stage);
		nir_validate_shader(nir, "after spirv_to_nir");

		free(spec_entries);

		/* We have to lower away local constant initializers right before we
		 * inline functions. That way they get properly initialized at the top
		 * of the function and not at the top of its caller.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
		NIR_PASS_V(nir, nir_lower_returns);
		NIR_PASS_V(nir, nir_inline_functions);
		NIR_PASS_V(nir, nir_opt_deref);

		/* Pick off the single entrypoint that we want */
		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
			if (func->is_entrypoint)
				func->name = ralloc_strdup(func, "main");
			else
				exec_node_remove(&func->node);
		}
		assert(exec_list_length(&nir->functions) == 1);

		/* Make sure we lower constant initializers on output variables so that
		 * nir_remove_dead_variables below sees the corresponding stores
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);

		/* Now that we've deleted all but the main function, we can go ahead and
		 * lower the rest of the constant initializers.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);

		/* Split member structs. We do this before lower_io_to_temporaries so that
		 * it doesn't lower system values to temporaries by accident.
		 */
		NIR_PASS_V(nir, nir_split_var_copies);
		NIR_PASS_V(nir, nir_split_per_member_structs);

		if (nir->info.stage == MESA_SHADER_FRAGMENT && use_aco)
			NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
		if (nir->info.stage == MESA_SHADER_FRAGMENT)
			NIR_PASS_V(nir, nir_lower_input_attachments, true);

		NIR_PASS_V(nir, nir_remove_dead_variables,
			   nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

		NIR_PASS_V(nir, nir_propagate_invariant);

		NIR_PASS_V(nir, nir_lower_system_values);
		NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
		NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
	}

	/* Vulkan uses the separate-shader linking model */
	nir->info.separate_shader = true;

	nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

	static const nir_lower_tex_options tex_options = {
		.lower_txp = ~0,
		.lower_tg4_offsets = true,
	};

	nir_lower_tex(nir, &tex_options);

	nir_lower_vars_to_ssa(nir);

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_GEOMETRY ||
	    nir->info.stage == MESA_SHADER_FRAGMENT) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, true);
	} else if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, false);
	}

	nir_split_var_copies(nir);

	nir_lower_global_vars_to_local(nir);
	nir_remove_dead_variables(nir, nir_var_function_temp);
	nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
			.subgroup_size = 64,
			.ballot_bit_size = 64,
			.lower_to_scalar = 1,
			.lower_subgroup_masks = 1,
			.lower_shuffle = 1,
			.lower_shuffle_to_32bit = 1,
			.lower_vote_eq_to_ballot = 1,
		});

	nir_lower_load_const_to_scalar(nir);

	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_optimize_nir(nir, false, true);

	/* We call nir_lower_var_copies() after the first radv_optimize_nir()
	 * to remove any copies introduced by nir_opt_find_array_copies().
	 */
	nir_lower_var_copies(nir);

	/* Lower large variables that are always constant with load_constant
	 * intrinsics, which get turned into PC-relative loads from a data
	 * section next to the shader.
	 */
	NIR_PASS_V(nir, nir_opt_large_constants,
		   glsl_get_natural_size_align_bytes, 16);

	/* Indirect lowering must be called after the radv_optimize_nir() loop
	 * has been called at least once. Otherwise indirect lowering can
	 * bloat the instruction count of the loop and cause it to be
	 * considered too large for unrolling.
	 */
	ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
	radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);

	return nir;
}
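/* Summary of the function above: meta shaders hand us NIR directly, all
 * other shaders go through spirv_to_nir with the device's capability set.
 * Both paths then share the same lowering pipeline: entrypoint pruning,
 * constant-initializer lowering, scalarization, subgroup lowering, and two
 * radv_optimize_nir rounds bracketing indirect-deref lowering.
 */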
static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
	return glsl_count_attribute_slots(type, false);
}
static nir_variable *
find_layer_in_var(nir_shader *nir)
{
	nir_foreach_variable(var, &nir->inputs) {
		if (var->data.location == VARYING_SLOT_LAYER) {
			return var;
		}
	}

	nir_variable *var =
		nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
	var->data.location = VARYING_SLOT_LAYER;
	var->data.interpolation = INTERP_MODE_FLAT;
	return var;
}
/* We use layered rendering to implement multiview, which means we need to map
 * view_index to gl_Layer. The attachment lowering also needs to know the
 * layer so that it can sample from the correct layer. The code generates a
 * load from the layer_id sysval, but since we don't have a way to get at this
 * information from the fragment shader, we also need to lower this to the
 * gl_Layer varying. This pass lowers both to a varying load from the LAYER
 * slot, before lowering io, so that nir_assign_var_locations() will give the
 * LAYER varying the correct driver_location.
 */
static bool
lower_view_index(nir_shader *nir)
{
	bool progress = false;
	nir_function_impl *entry = nir_shader_get_entrypoint(nir);
	nir_builder b;
	nir_builder_init(&b, entry);

	nir_variable *layer = NULL;
	nir_foreach_block(block, entry) {
		nir_foreach_instr_safe(instr, block) {
			if (instr->type != nir_instr_type_intrinsic)
				continue;

			nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
			if (load->intrinsic != nir_intrinsic_load_view_index &&
			    load->intrinsic != nir_intrinsic_load_layer_id)
				continue;

			if (!layer)
				layer = find_layer_in_var(nir);

			b.cursor = nir_before_instr(instr);
			nir_ssa_def *def = nir_load_var(&b, layer);
			nir_ssa_def_rewrite_uses(&load->dest.ssa,
						 nir_src_for_ssa(def));

			nir_instr_remove(instr);
			progress = true;
		}
	}

	return progress;
}
void
radv_lower_fs_io(nir_shader *nir)
{
	NIR_PASS_V(nir, lower_view_index);
	nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
				    MESA_SHADER_FRAGMENT);

	NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);

	/* This pass needs actual constants */
	nir_opt_constant_folding(nir);

	NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
}
void *
radv_alloc_shader_memory(struct radv_device *device,
			 struct radv_shader_variant *shader)
{
	mtx_lock(&device->shader_slab_mutex);
	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		uint64_t offset = 0;
		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
			if (s->bo_offset - offset >= shader->code_size) {
				shader->bo = slab->bo;
				shader->bo_offset = offset;
				list_addtail(&shader->slab_list, &s->slab_list);
				mtx_unlock(&device->shader_slab_mutex);
				return slab->ptr + offset;
			}
			offset = align_u64(s->bo_offset + s->code_size, 256);
		}
		if (slab->size - offset >= shader->code_size) {
			shader->bo = slab->bo;
			shader->bo_offset = offset;
			list_addtail(&shader->slab_list, &slab->shaders);
			mtx_unlock(&device->shader_slab_mutex);
			return slab->ptr + offset;
		}
	}

	mtx_unlock(&device->shader_slab_mutex);
	struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

	slab->size = 256 * 1024;
	slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
					     RADEON_DOMAIN_VRAM,
					     RADEON_FLAG_NO_INTERPROCESS_SHARING |
					     (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
					             0 : RADEON_FLAG_READ_ONLY),
					     RADV_BO_PRIORITY_SHADER);
	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
	list_inithead(&slab->shaders);

	mtx_lock(&device->shader_slab_mutex);
	list_add(&slab->slabs, &device->shader_slabs);

	shader->bo = slab->bo;
	shader->bo_offset = 0;
	list_add(&shader->slab_list, &slab->shaders);
	mtx_unlock(&device->shader_slab_mutex);
	return slab->ptr;
}
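/* Allocation strategy above, in short: a first-fit walk over 256 KiB slabs.
 * Variants in a slab stay sorted by bo_offset, holes of at least code_size
 * bytes are reused, offsets are kept 256-byte aligned, and a new slab (with
 * the shader placed at offset 0) is created only when nothing fits.
 */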
void
radv_destroy_shader_slabs(struct radv_device *device)
{
	list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		device->ws->buffer_destroy(slab->bo);
		free(slab);
	}
	mtx_destroy(&device->shader_slab_mutex);
}
/* For the UMR disassembler. */
#define DEBUGGER_END_OF_CODE_MARKER    0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS           5

static size_t
radv_get_shader_binary_size(size_t code_size)
{
	return code_size + DEBUGGER_NUM_MARKERS * 4;
}
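/* Example: a 1024-byte shader reserves 1024 + 5 * 4 = 1044 bytes so the five
 * end-of-code marker dwords can be appended after the code; UMR uses them to
 * find where the shader ends.
 */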
static void radv_postprocess_config(const struct radv_physical_device *pdevice,
				    const struct ac_shader_config *config_in,
				    const struct radv_shader_info *info,
				    gl_shader_stage stage,
				    struct ac_shader_config *config_out)
{
	bool scratch_enabled = config_in->scratch_bytes_per_wave > 0;
	unsigned vgpr_comp_cnt = 0;
	unsigned num_input_vgprs = info->num_input_vgprs;

	if (stage == MESA_SHADER_FRAGMENT) {
		num_input_vgprs = 0;
		if (G_0286CC_PERSP_SAMPLE_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_ANCILLARY_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_SAMPLE_COVERAGE_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(config_in->spi_ps_input_addr))
			num_input_vgprs += 1;
	}

	unsigned num_vgprs = MAX2(config_in->num_vgprs, num_input_vgprs);
	/* +3 for scratch wave offset and VCC */
	unsigned num_sgprs = MAX2(config_in->num_sgprs, info->num_input_sgprs + 3);

	*config_out = *config_in;
	config_out->num_vgprs = num_vgprs;
	config_out->num_sgprs = num_sgprs;

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - GFX6 & GFX7 would be very slow.
	 */
	config_out->float_mode |= V_00B028_FP_64_DENORMS;

	config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
			    S_00B12C_SCRATCH_EN(scratch_enabled);

	if (!pdevice->use_ngg_streamout) {
		config_out->rsrc2 |= S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
				     S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
				     S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
				     S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
				     S_00B12C_SO_EN(!!info->so.num_outputs);
	}

	config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) /
					   (info->wave_size == 32 ? 8 : 4)) |
			    S_00B848_DX10_CLAMP(1) |
			    S_00B848_FLOAT_MODE(config_out->float_mode);

	if (pdevice->rad_info.chip_class >= GFX10) {
		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(info->num_user_sgprs >> 5);
	} else {
		config_out->rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5);
	}

	switch (stage) {
	case MESA_SHADER_TESS_EVAL:
		if (info->is_ngg) {
			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1);
		} else if (info->tes.as_es) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;

			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		} else {
			bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
			vgpr_comp_cnt = enable_prim_id ? 3 : 2;

			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		break;
	case MESA_SHADER_TESS_CTRL:
		if (pdevice->rad_info.chip_class >= GFX9) {
			/* We need at least 2 components for LS.
			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
			 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
			 */
			if (pdevice->rad_info.chip_class >= GFX10) {
				vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
			} else {
				vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
			}
		} else {
			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		config_out->rsrc1 |= S_00B428_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		break;
	case MESA_SHADER_VERTEX:
		if (info->is_ngg) {
			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		} else if (info->vs.as_ls) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			/* We need at least 2 components for LS.
			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
			 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
			 */
			vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
		} else if (info->vs.as_es) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
		} else {
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
			 * If PrimID is disabled. InstanceID / StepRate1 is loaded instead.
			 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
			 */
			if (info->vs.needs_instance_id && pdevice->rad_info.chip_class >= GFX10) {
				vgpr_comp_cnt = 3;
			} else if (info->vs.export_prim_id) {
				vgpr_comp_cnt = 2;
			} else if (info->vs.needs_instance_id) {
				vgpr_comp_cnt = 1;
			} else {
				vgpr_comp_cnt = 0;
			}

			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		config_out->rsrc1 |= S_00B028_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		break;
	case MESA_SHADER_GEOMETRY:
		config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		break;
	case MESA_SHADER_COMPUTE:
		config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |=
			S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
			S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
			S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
			S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
						info->cs.uses_thread_id[1] ? 1 : 0) |
			S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
			S_00B84C_LDS_SIZE(config_in->lds_size);
		break;
	default:
		unreachable("unsupported shader type");
		break;
	}

	if (pdevice->rad_info.chip_class >= GFX10 && info->is_ngg &&
	    (stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_GEOMETRY)) {
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
		gl_shader_stage es_stage = stage;
		if (stage == MESA_SHADER_GEOMETRY)
			es_stage = info->gs.es_type;

		/* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
		if (es_stage == MESA_SHADER_VERTEX) {
			es_vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 0;
		} else if (es_stage == MESA_SHADER_TESS_EVAL) {
			bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
			es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
		} else
			unreachable("Unexpected ES shader stage");

		bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
			info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
		if (info->uses_invocation_id || stage == MESA_SHADER_VERTEX) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (info->gs.vertices_in >= 3 || tes_triangles) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt) |
				     S_00B228_WGP_MODE(1);
		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				     S_00B22C_LDS_SIZE(config_in->lds_size) |
				     S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL);
	} else if (pdevice->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_GEOMETRY) {
		unsigned es_type = info->gs.es_type;
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

		if (es_type == MESA_SHADER_VERTEX) {
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			if (info->vs.needs_instance_id) {
				es_vgpr_comp_cnt = pdevice->rad_info.chip_class >= GFX10 ? 3 : 1;
			} else {
				es_vgpr_comp_cnt = 0;
			}
		} else if (es_type == MESA_SHADER_TESS_EVAL) {
			es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
		} else {
			unreachable("invalid shader ES type");
		}

		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
		 * VGPR[0:4] are always loaded.
		 */
		if (info->uses_invocation_id) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (info->gs.vertices_in >= 3) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				     S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
	} else if (pdevice->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_TESS_CTRL) {
		config_out->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
	} else {
		config_out->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
	}
}
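/* rsrc1/rsrc2 computed above land in the SPI_SHADER_PGM_RSRC1/RSRC2
 * registers. Worked example for the VGPR field: num_vgprs = 41 with wave64
 * encodes as (41 - 1) / 4 = 10, since VGPRs are granted in groups of four
 * per wave64 (eight per wave32).
 */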
struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
			   const struct radv_shader_binary *binary,
			   bool keep_shader_info)
{
	struct ac_shader_config config = {0};
	struct ac_rtld_binary rtld_binary = {0};
	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
	if (!variant)
		return NULL;

	variant->ref_count = 1;

	if (binary->type == RADV_BINARY_TYPE_RTLD) {
		struct ac_rtld_symbol lds_symbols[2];
		unsigned num_lds_symbols = 0;
		const char *elf_data = (const char *)((struct radv_shader_binary_rtld *)binary)->data;
		size_t elf_size = ((struct radv_shader_binary_rtld *)binary)->elf_size;

		if (device->physical_device->rad_info.chip_class >= GFX9 &&
		    (binary->stage == MESA_SHADER_GEOMETRY || binary->info.is_ngg) &&
		    !binary->is_gs_copy_shader) {
			/* We add this symbol even on LLVM <= 8 to ensure that
			 * shader->config.lds_size is set correctly below.
			 */
			/* TODO: For some reasons, using the computed ESGS ring
			 * size randomly hangs with CTS. Just use the maximum
			 * possible LDS size for now.
			 */
			unsigned ngg_scratch_size = 8 * 4;
			if (binary->info.so.num_outputs) {
				/* Memory layout of NGG streamout scratch:
				 * [0-3]: number of generated primitives
				 * [4-7]: number of emitted primitives
				 * [8-11]: streamout offsets
				 * [12:19]: primitive offsets for stream 0
				 * [20:27]: primitive offsets for stream 1
				 * [28:35]: primitive offsets for stream 2
				 * [36:43]: primitive offsets for stream 3
				 */
				ngg_scratch_size = 44 * 4;
			}

			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
			sym->name = "esgs_ring";
			sym->size = (32 * 1024) - (binary->info.ngg_info.ngg_emit_size * 4) - ngg_scratch_size;
			sym->align = 64 * 1024;
		}

		if (binary->info.is_ngg &&
		    binary->stage == MESA_SHADER_GEOMETRY) {
			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
			sym->name = "ngg_emit";
			sym->size = binary->info.ngg_info.ngg_emit_size * 4;
			sym->align = 4;
		}

		struct ac_rtld_open_info open_info = {
			.info = &device->physical_device->rad_info,
			.shader_type = binary->stage,
			.wave_size = binary->info.wave_size,
			.num_parts = 1,
			.elf_ptrs = &elf_data,
			.elf_sizes = &elf_size,
			.num_shared_lds_symbols = num_lds_symbols,
			.shared_lds_symbols = lds_symbols,
		};

		if (!ac_rtld_open(&rtld_binary, open_info)) {
			free(variant);
			return NULL;
		}

		if (!ac_rtld_read_config(&rtld_binary, &config)) {
			ac_rtld_close(&rtld_binary);
			free(variant);
			return NULL;
		}

		if (rtld_binary.lds_size > 0) {
			unsigned alloc_granularity = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			config.lds_size = align(rtld_binary.lds_size, alloc_granularity) / alloc_granularity;
		}

		variant->code_size = rtld_binary.rx_size;
		variant->exec_size = rtld_binary.exec_size;
	} else {
		assert(binary->type == RADV_BINARY_TYPE_LEGACY);
		config = ((struct radv_shader_binary_legacy *)binary)->config;
		variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
		variant->exec_size = ((struct radv_shader_binary_legacy *)binary)->exec_size;
	}

	variant->info = binary->info;
	radv_postprocess_config(device->physical_device, &config, &binary->info,
				binary->stage, &variant->config);

	void *dest_ptr = radv_alloc_shader_memory(device, variant);

	if (binary->type == RADV_BINARY_TYPE_RTLD) {
		struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
		struct ac_rtld_upload_info info = {
			.binary = &rtld_binary,
			.rx_va = radv_buffer_get_va(variant->bo) + variant->bo_offset,
			.rx_ptr = dest_ptr,
		};

		if (!ac_rtld_upload(&info)) {
			radv_shader_variant_destroy(device, variant);
			ac_rtld_close(&rtld_binary);
			return NULL;
		}

		if (keep_shader_info ||
		    (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) {
			const char *disasm_data;
			size_t disasm_size;
			if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
				radv_shader_variant_destroy(device, variant);
				ac_rtld_close(&rtld_binary);
				return NULL;
			}

			variant->llvm_ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
			variant->disasm_string = malloc(disasm_size + 1);
			memcpy(variant->disasm_string, disasm_data, disasm_size);
			variant->disasm_string[disasm_size] = 0;
		}

		ac_rtld_close(&rtld_binary);
	} else {
		struct radv_shader_binary_legacy* bin = (struct radv_shader_binary_legacy *)binary;
		memcpy(dest_ptr, bin->data, bin->code_size);

		/* Add end-of-code markers for the UMR disassembler. */
		uint32_t *ptr32 = (uint32_t *)dest_ptr + bin->code_size / 4;
		for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
			ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;

		variant->llvm_ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->code_size)) : NULL;
		variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->code_size + bin->llvm_ir_size)) : NULL;
	}

	return variant;
}
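/* Two binary flavors reach the function above: RADV_BINARY_TYPE_RTLD wraps
 * an ELF that ac_rtld relocates at upload time (needed e.g. for shared LDS
 * symbols), while RADV_BINARY_TYPE_LEGACY is raw machine code copied
 * verbatim with the UMR end-of-code markers appended.
 */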
static char *
radv_dump_nir_shaders(struct nir_shader * const *shaders,
		      int shader_count)
{
	char *data = NULL;
	char *ret = NULL;
	size_t size = 0;
	FILE *f = open_memstream(&data, &size);
	if (f) {
		for (int i = 0; i < shader_count; ++i)
			nir_print_shader(shaders[i], f);
		fclose(f);
	}

	ret = malloc(size + 1);
	if (ret) {
		memcpy(ret, data, size);
		ret[size] = 0;
	}
	free(data);
	return ret;
}
static struct radv_shader_variant *
shader_variant_compile(struct radv_device *device,
		       struct radv_shader_module *module,
		       struct nir_shader * const *shaders,
		       int shader_count,
		       gl_shader_stage stage,
		       struct radv_shader_info *info,
		       struct radv_nir_compiler_options *options,
		       bool gs_copy_shader,
		       bool keep_shader_info,
		       bool use_aco,
		       struct radv_shader_binary **binary_out)
{
	enum radeon_family chip_family = device->physical_device->rad_info.family;
	struct radv_shader_binary *binary = NULL;

	options->family = chip_family;
	options->chip_class = device->physical_device->rad_info.chip_class;
	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
	options->dump_preoptir = options->dump_shader &&
				 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
	options->record_llvm_ir = keep_shader_info;
	options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
	options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
	options->address32_hi = device->physical_device->rad_info.address32_hi;
	options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
	options->use_ngg_streamout = device->physical_device->use_ngg_streamout;

	if ((stage == MESA_SHADER_GEOMETRY && !options->key.vs_common_out.as_ngg) ||
	    gs_copy_shader)
		options->wave_size = 64;
	else if (stage == MESA_SHADER_COMPUTE)
		options->wave_size = device->physical_device->cs_wave_size;
	else if (stage == MESA_SHADER_FRAGMENT)
		options->wave_size = device->physical_device->ps_wave_size;
	else
		options->wave_size = device->physical_device->ge_wave_size;

	if (!use_aco || options->dump_shader || options->record_llvm_ir)
		ac_init_llvm_once();

	if (use_aco) {
		aco_compile_shader(shader_count, shaders, &binary, info, options);
		binary->info = *info;
	} else {
		enum ac_target_machine_options tm_options = 0;
		struct ac_llvm_compiler ac_llvm;
		bool thread_compiler;

		if (options->supports_spill)
			tm_options |= AC_TM_SUPPORTS_SPILL;
		if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
			tm_options |= AC_TM_SISCHED;
		if (options->check_ir)
			tm_options |= AC_TM_CHECK_IR;
		if (device->instance->debug_flags & RADV_DEBUG_NO_LOAD_STORE_OPT)
			tm_options |= AC_TM_NO_LOAD_STORE_OPT;

		thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
		radv_init_llvm_compiler(&ac_llvm,
					thread_compiler,
					chip_family, tm_options,
					options->wave_size);

		if (gs_copy_shader) {
			assert(shader_count == 1);
			radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
						    info, options);
		} else {
			radv_compile_nir_shader(&ac_llvm, &binary, info,
						shaders, shader_count, options);
		}

		binary->info = *info;
		radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
	}

	struct radv_shader_variant *variant = radv_shader_variant_create(device, binary,
									  keep_shader_info);
	if (!variant) {
		free(binary);
		return NULL;
	}

	if (options->dump_shader) {
		fprintf(stderr, "disasm:\n%s\n", variant->disasm_string);
	}

	if (keep_shader_info) {
		variant->nir_string = radv_dump_nir_shaders(shaders, shader_count);
		if (!gs_copy_shader && !module->nir) {
			variant->spirv = (uint32_t *)module->data;
			variant->spirv_size = module->size;
		}
	}

	*binary_out = binary;

	return variant;
}
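/* Wave size policy above: GS copy shaders and non-NGG geometry shaders are
 * pinned to wave64; compute, fragment and the remaining geometry-engine
 * stages take the per-device defaults (cs_wave_size, ps_wave_size,
 * ge_wave_size), which are presumably configured from debug/perftest options
 * elsewhere in the driver.
 */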
struct radv_shader_variant *
radv_shader_variant_compile(struct radv_device *device,
			    struct radv_shader_module *module,
			    struct nir_shader *const *shaders,
			    int shader_count,
			    struct radv_pipeline_layout *layout,
			    const struct radv_shader_variant_key *key,
			    struct radv_shader_info *info,
			    bool keep_shader_info,
			    bool use_aco,
			    struct radv_shader_binary **binary_out)
{
	struct radv_nir_compiler_options options = {0};

	options.layout = layout;
	if (key)
		options.key = *key;

	options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
	options.supports_spill = true;
	options.robust_buffer_access = device->robust_buffer_access;

	return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage, info,
				      &options, false, keep_shader_info, use_aco, binary_out);
}
struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
			   struct nir_shader *shader,
			   struct radv_shader_info *info,
			   struct radv_shader_binary **binary_out,
			   bool keep_shader_info,
			   bool multiview)
{
	struct radv_nir_compiler_options options = {0};

	options.key.has_multiview_view_index = multiview;

	return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
				      info, &options, true, keep_shader_info, false, binary_out);
}
void
radv_shader_variant_destroy(struct radv_device *device,
			    struct radv_shader_variant *variant)
{
	if (!p_atomic_dec_zero(&variant->ref_count))
		return;

	mtx_lock(&device->shader_slab_mutex);
	list_del(&variant->slab_list);
	mtx_unlock(&device->shader_slab_mutex);

	free(variant->nir_string);
	free(variant->disasm_string);
	free(variant->llvm_ir_string);
	free(variant);
}
const char *
radv_get_shader_name(struct radv_shader_info *info,
		     gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (info->vs.as_ls)
			return "Vertex Shader as LS";
		else if (info->vs.as_es)
			return "Vertex Shader as ES";
		else if (info->is_ngg)
			return "Vertex Shader as ESGS";
		else
			return "Vertex Shader as VS";
	case MESA_SHADER_TESS_CTRL:
		return "Tessellation Control Shader";
	case MESA_SHADER_TESS_EVAL:
		if (info->tes.as_es)
			return "Tessellation Evaluation Shader as ES";
		else if (info->is_ngg)
			return "Tessellation Evaluation Shader as ESGS";
		else
			return "Tessellation Evaluation Shader as VS";
	case MESA_SHADER_GEOMETRY:
		return "Geometry Shader";
	case MESA_SHADER_FRAGMENT:
		return "Pixel Shader";
	case MESA_SHADER_COMPUTE:
		return "Compute Shader";
	default:
		return "Unknown shader";
	}
}
unsigned
radv_get_max_workgroup_size(enum chip_class chip_class,
			    gl_shader_stage stage,
			    const unsigned *sizes)
{
	switch (stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= GFX7 ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = sizes[0] * sizes[1] * sizes[2];
	return max_workgroup_size;
}
unsigned
radv_get_max_waves(struct radv_device *device,
		   struct radv_shader_variant *variant,
		   gl_shader_stage stage)
{
	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
	uint8_t wave_size = variant->info.wave_size;
	struct ac_shader_config *conf = &variant->config;
	unsigned max_simd_waves;
	unsigned lds_per_wave = 0;

	max_simd_waves = device->physical_device->rad_info.max_wave64_per_simd;

	if (stage == MESA_SHADER_FRAGMENT) {
		lds_per_wave = conf->lds_size * lds_increment +
			       align(variant->info.ps.num_interp * 48,
				     lds_increment);
	} else if (stage == MESA_SHADER_COMPUTE) {
		unsigned max_workgroup_size =
			radv_get_max_workgroup_size(chip_class, stage, variant->info.cs.block_size);
		lds_per_wave = (conf->lds_size * lds_increment) /
			       DIV_ROUND_UP(max_workgroup_size, wave_size);
	}

	if (conf->num_sgprs)
		max_simd_waves =
			MIN2(max_simd_waves,
			     device->physical_device->rad_info.num_physical_sgprs_per_simd /
			     conf->num_sgprs);

	if (conf->num_vgprs)
		max_simd_waves =
			MIN2(max_simd_waves,
			     RADV_NUM_PHYSICAL_VGPRS / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
	 * that PS can use.
	 */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	return max_simd_waves;
}
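/* Worked occupancy example: with wave64 and 128 VGPRs used, the VGPR limit
 * alone allows RADV_NUM_PHYSICAL_VGPRS / 128 = 2 waves per SIMD (assuming
 * the usual 256 physical VGPRs); the returned value is the minimum across
 * the SGPR, VGPR and LDS limits.
 */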
static void
generate_shader_stats(struct radv_device *device,
		      struct radv_shader_variant *variant,
		      gl_shader_stage stage,
		      struct _mesa_string_buffer *buf)
{
	struct ac_shader_config *conf = &variant->config;
	unsigned max_simd_waves = radv_get_max_waves(device, variant, stage);

	if (stage == MESA_SHADER_FRAGMENT) {
		_mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
					   "SPI_PS_INPUT_ADDR = 0x%04x\n"
					   "SPI_PS_INPUT_ENA  = 0x%04x\n",
					   conf->spi_ps_input_addr, conf->spi_ps_input_ena);
	}

	_mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
				   "SGPRS: %d\n"
				   "VGPRS: %d\n"
				   "Spilled SGPRs: %d\n"
				   "Spilled VGPRs: %d\n"
				   "PrivMem VGPRS: %d\n"
				   "Code Size: %d bytes\n"
				   "LDS: %d blocks\n"
				   "Scratch: %d bytes per wave\n"
				   "Max Waves: %d\n"
				   "********************\n\n\n",
				   conf->num_sgprs, conf->num_vgprs,
				   conf->spilled_sgprs, conf->spilled_vgprs,
				   variant->info.private_mem_vgprs, variant->exec_size,
				   conf->lds_size, conf->scratch_bytes_per_wave,
				   max_simd_waves);
}
void
radv_shader_dump_stats(struct radv_device *device,
		       struct radv_shader_variant *variant,
		       gl_shader_stage stage,
		       FILE *file)
{
	struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

	generate_shader_stats(device, variant, stage, buf);

	fprintf(file, "\n%s:\n", radv_get_shader_name(&variant->info, stage));
	fprintf(file, "%s", buf->buf);

	_mesa_string_buffer_destroy(buf);
}
VkResult
radv_GetShaderInfoAMD(VkDevice _device,
		      VkPipeline _pipeline,
		      VkShaderStageFlagBits shaderStage,
		      VkShaderInfoTypeAMD infoType,
		      size_t* pInfoSize,
		      void* pInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
	gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
	struct radv_shader_variant *variant = pipeline->shaders[stage];
	struct _mesa_string_buffer *buf;
	VkResult result = VK_SUCCESS;

	/* Spec doesn't indicate what to do if the stage is invalid, so just
	 * return no info for this. */
	if (!variant)
		return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

	switch (infoType) {
	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
		if (!pInfo) {
			*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
		} else {
			unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			struct ac_shader_config *conf = &variant->config;

			VkShaderStatisticsInfoAMD statistics = {};
			statistics.shaderStageMask = shaderStage;
			statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
			statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
			statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

			if (stage == MESA_SHADER_COMPUTE) {
				unsigned *local_size = variant->info.cs.block_size;
				unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

				statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
							       ceil((double)workgroup_size / statistics.numPhysicalVgprs);

				statistics.computeWorkGroupSize[0] = local_size[0];
				statistics.computeWorkGroupSize[1] = local_size[1];
				statistics.computeWorkGroupSize[2] = local_size[2];
			} else {
				statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
			}

			statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
			statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
			statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
			statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
			statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

			size_t size = *pInfoSize;
			*pInfoSize = sizeof(statistics);

			memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

			if (size < *pInfoSize)
				result = VK_INCOMPLETE;
		}
		break;
	case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
		buf = _mesa_string_buffer_create(NULL, 1024);

		_mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(&variant->info, stage));
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->llvm_ir_string);
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
		generate_shader_stats(device, variant, stage, buf);

		/* Need to include the null terminator. */
		size_t length = buf->length + 1;

		if (!pInfo) {
			*pInfoSize = length;
		} else {
			size_t size = *pInfoSize;
			*pInfoSize = length;

			memcpy(pInfo, buf->buf, MIN2(size, length));

			if (size < length)
				result = VK_INCOMPLETE;
		}

		_mesa_string_buffer_destroy(buf);
		break;
	default:
		/* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
		result = VK_ERROR_FEATURE_NOT_PRESENT;
		break;
	}

	return result;
}