/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "radv_shader_args.h"

#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"

#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"

#include "aco_interface.h"

#include "util/string_buffer.h"
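
/* NIR compiler options, one table per backend. The two sets mostly agree;
 * the ACO table additionally asks NIR to lower flrp, rotates and
 * unpack_half_2x16, which the ACO backend does not handle itself. */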
static const struct nir_shader_compiler_options nir_options_llvm = {
	.vertex_id_zero_based = true,
	.lower_device_index_to_zero = true,
	.lower_bitfield_insert_to_bitfield_select = true,
	.lower_bitfield_extract = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_mul_2x32_64 = true,
	.use_scoped_barrier = true,
	.max_unroll_iterations = 32,
	.use_interpolated_input_intrinsics = true,
	/* nir_lower_int64() isn't actually called for the LLVM backend, but
	 * this helps the loop unrolling heuristics. */
	.lower_int64_options = nir_lower_imul64 |
			       nir_lower_imul_high64 |
			       nir_lower_imul_2x32_64,
	.lower_doubles_options = nir_lower_drcp,
};
static const struct nir_shader_compiler_options nir_options_aco = {
	.vertex_id_zero_based = true,
	.lower_flrp32 = true,
	.lower_flrp64 = true,
	.lower_device_index_to_zero = true,
	.lower_bitfield_insert_to_bitfield_select = true,
	.lower_bitfield_extract = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_unpack_half_2x16 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_mul_2x32_64 = true,
	.lower_rotate = true,
	.use_scoped_barrier = true,
	.max_unroll_iterations = 32,
	.use_interpolated_input_intrinsics = true,
	.lower_int64_options = nir_lower_imul64 |
			       nir_lower_imul_high64 |
			       nir_lower_imul_2x32_64,
	.lower_doubles_options = nir_lower_drcp,
};
bool
radv_can_dump_shader(struct radv_device *device,
		     struct radv_shader_module *module,
		     bool is_gs_copy_shader)
{
	if (!(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS))
		return false;
	if (module)
		return !module->nir ||
		       (device->instance->debug_flags & RADV_DEBUG_DUMP_META_SHADERS);

	return is_gs_copy_shader;
}
bool
radv_can_dump_shader_stats(struct radv_device *device,
			   struct radv_shader_module *module)
{
	/* Only dump non-meta shader stats. */
	return device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS &&
	       module && !module->nir;
}
VkResult radv_CreateShaderModule(
	VkDevice                                    _device,
	const VkShaderModuleCreateInfo*             pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkShaderModule*                             pShaderModule)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_shader_module *module;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	module = vk_alloc2(&device->vk.alloc, pAllocator,
			   sizeof(*module) + pCreateInfo->codeSize, 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (module == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	vk_object_base_init(&device->vk, &module->base,
			    VK_OBJECT_TYPE_SHADER_MODULE);

	module->nir = NULL;
	module->size = pCreateInfo->codeSize;
	memcpy(module->data, pCreateInfo->pCode, module->size);

	_mesa_sha1_compute(module->data, module->size, module->sha1);

	*pShaderModule = radv_shader_module_to_handle(module);

	return VK_SUCCESS;
}
void radv_DestroyShaderModule(
	VkDevice                                    _device,
	VkShaderModule                              _module,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_shader_module, module, _module);

	if (!module)
		return;

	vk_object_base_finish(&module->base);
	vk_free2(&device->vk.alloc, pAllocator, module);
}
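
/* Run the main NIR optimization loop to a fixed point: every pass records
 * whether it made progress and the loop repeats until nothing changes (or
 * stops after a single iteration when optimizing conservatively). A few
 * cleanup passes then run once at the end. */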
void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
		  bool allow_copies)
{
	bool progress;
	unsigned lower_flrp =
		(shader->options->lower_flrp16 ? 16 : 0) |
		(shader->options->lower_flrp32 ? 32 : 0) |
		(shader->options->lower_flrp64 ? 64 : 0);

	do {
		progress = false;

		NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
		NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);

		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
		NIR_PASS_V(shader, nir_lower_pack);

		if (allow_copies) {
			/* Only run this pass in the first call to
			 * radv_optimize_nir. Later calls assume that we've
			 * lowered away any copy_deref instructions and we
			 * don't want to introduce any more.
			 */
			NIR_PASS(progress, shader, nir_opt_find_array_copies);
		}

		NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
		NIR_PASS(progress, shader, nir_opt_dead_write_vars);
		NIR_PASS(progress, shader, nir_remove_dead_variables,
			 nir_var_function_temp | nir_var_shader_in | nir_var_shader_out,
			 NULL);

		NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
		NIR_PASS_V(shader, nir_lower_phis_to_scalar);

		NIR_PASS(progress, shader, nir_copy_prop);
		NIR_PASS(progress, shader, nir_opt_remove_phis);
		NIR_PASS(progress, shader, nir_opt_dce);
		if (nir_opt_trivial_continues(shader)) {
			progress = true;
			NIR_PASS(progress, shader, nir_copy_prop);
			NIR_PASS(progress, shader, nir_opt_remove_phis);
			NIR_PASS(progress, shader, nir_opt_dce);
		}
		NIR_PASS(progress, shader, nir_opt_if, true);
		NIR_PASS(progress, shader, nir_opt_dead_cf);
		NIR_PASS(progress, shader, nir_opt_cse);
		NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
		NIR_PASS(progress, shader, nir_opt_constant_folding);
		NIR_PASS(progress, shader, nir_opt_algebraic);

		if (lower_flrp != 0) {
			bool lower_flrp_progress = false;
			NIR_PASS(lower_flrp_progress,
				 shader,
				 nir_lower_flrp,
				 lower_flrp,
				 false /* always_precise */,
				 shader->options->lower_ffma);
			if (lower_flrp_progress) {
				NIR_PASS(progress, shader,
					 nir_opt_constant_folding);
				progress = true;
			}

			/* Nothing should rematerialize any flrps, so we only
			 * need to do this lowering once.
			 */
			lower_flrp = 0;
		}

		NIR_PASS(progress, shader, nir_opt_undef);
		if (shader->options->max_unroll_iterations) {
			NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
		}
	} while (progress && !optimize_conservatively);

	NIR_PASS(progress, shader, nir_opt_conditional_discard);
	NIR_PASS(progress, shader, nir_opt_shrink_vectors);
	NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
}
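
/* Size/alignment callback used when lowering shared-memory variables to
 * explicit offsets. Booleans occupy a full 32-bit slot; a vec3 of 32-bit
 * components, for example, reports size 12 with alignment 4. */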
static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
	assert(glsl_type_is_vector_or_scalar(type));

	uint32_t comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
	unsigned length = glsl_get_vector_elements(type);
	*size = comp_size * length;
	*align = comp_size;
}
struct radv_shader_debug_data {
	struct radv_device *device;
	const struct radv_shader_module *module;
};
static void radv_spirv_nir_debug(void *private_data,
				 enum nir_spirv_debug_level level,
				 size_t spirv_offset,
				 const char *message)
{
	struct radv_shader_debug_data *debug_data = private_data;
	struct radv_instance *instance = debug_data->device->instance;

	static const VkDebugReportFlagsEXT vk_flags[] = {
		[NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
		[NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
		[NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
	};
	char buffer[256];

	snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s",
		 (unsigned long)spirv_offset, message);

	vk_debug_report(&instance->debug_report_callbacks,
			vk_flags[level],
			VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
			(uint64_t)(uintptr_t)debug_data->module,
			0, 0, "radv", buffer);
}
static void radv_compiler_debug(void *private_data,
				enum radv_compiler_debug_level level,
				const char *message)
{
	struct radv_shader_debug_data *debug_data = private_data;
	struct radv_instance *instance = debug_data->device->instance;

	static const VkDebugReportFlagsEXT vk_flags[] = {
		[RADV_COMPILER_DEBUG_LEVEL_PERFWARN] = VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
		[RADV_COMPILER_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
	};

	/* VK_DEBUG_REPORT_DEBUG_BIT_EXT specifies diagnostic information
	 * from the implementation and layers.
	 */
	vk_debug_report(&instance->debug_report_callbacks,
			vk_flags[level] | VK_DEBUG_REPORT_DEBUG_BIT_EXT,
			VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
			(uint64_t)(uintptr_t)debug_data->module,
			0, 0, "radv", message);
}
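
/* Build the NIR for one pipeline stage: either adopt the NIR a meta shader
 * module already carries, or run spirv_to_nir() with the capability set
 * advertised by the driver, then inline functions, resolve the entrypoint
 * and apply the long tail of lowering passes below. */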
nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
			   struct radv_shader_module *module,
			   const char *entrypoint_name,
			   gl_shader_stage stage,
			   const VkSpecializationInfo *spec_info,
			   const VkPipelineCreateFlags flags,
			   const struct radv_pipeline_layout *layout,
			   unsigned subgroup_size, unsigned ballot_bit_size)
{
	nir_shader *nir;
	const nir_shader_compiler_options *nir_options =
		radv_use_llvm_for_stage(device, stage) ? &nir_options_llvm : &nir_options_aco;

	if (module->nir) {
		/* Some things such as our meta clear/blit code will give us a NIR
		 * shader directly. In that case, we just ignore the SPIR-V entirely
		 * and just use the NIR shader. */
		nir = module->nir;
		nir->options = nir_options;
		nir_validate_shader(nir, "in internal shader");

		assert(exec_list_length(&nir->functions) == 1);
	} else {
		uint32_t *spirv = (uint32_t *) module->data;
		assert(module->size % 4 == 0);

		if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
			radv_print_spirv(module->data, module->size, stderr);
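
		/* Resolve VkSpecializationInfo before translation: each map
		 * entry is matched to its SPIR-V specialization constant by
		 * ID, and the raw bytes are copied into the union member
		 * selected by the entry size. */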
		uint32_t num_spec_entries = 0;
		struct nir_spirv_specialization *spec_entries = NULL;
		if (spec_info && spec_info->mapEntryCount > 0) {
			num_spec_entries = spec_info->mapEntryCount;
			spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
			for (uint32_t i = 0; i < num_spec_entries; i++) {
				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
				const void *data = spec_info->pData + entry.offset;
				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
				switch (entry.size) {
				case 8:
					spec_entries[i].value.u64 = *(const uint64_t *)data;
					break;
				case 4:
					spec_entries[i].value.u32 = *(const uint32_t *)data;
					break;
				case 2:
					spec_entries[i].value.u16 = *(const uint16_t *)data;
					break;
				case 1:
					spec_entries[i].value.u8 = *(const uint8_t *)data;
					break;
				default:
					assert(!"Invalid spec constant size");
					break;
				}
			}
		}
		struct radv_shader_debug_data spirv_debug_data = {
			.device = device,
			.module = module,
		};
		const struct spirv_to_nir_options spirv_options = {
			.lower_ubo_ssbo_access_to_offsets = true,
			.caps = {
				.amd_fragment_mask = true,
				.amd_gcn_shader = true,
				.amd_image_gather_bias_lod = true,
				.amd_image_read_write_lod = true,
				.amd_shader_ballot = true,
				.amd_shader_explicit_vertex_parameter = true,
				.amd_trinary_minmax = true,
				.demote_to_helper_invocation = true,
				.derivative_group = true,
				.descriptor_array_dynamic_indexing = true,
				.descriptor_array_non_uniform_indexing = true,
				.descriptor_indexing = true,
				.device_group = true,
				.draw_parameters = true,
				.float_controls = true,
				.float16 = device->physical_device->rad_info.has_packed_math_16bit,
				.float32_atomic_add = true,
				.geometry_streams = true,
				.image_ms_array = true,
				.image_read_without_format = true,
				.image_write_without_format = true,
				.int64_atomics = true,
				.physical_storage_buffer_address = true,
				.post_depth_coverage = true,
				.runtime_descriptor_array = true,
				.shader_clock = true,
				.shader_viewport_index_layer = true,
				.stencil_export = true,
				.storage_8bit = true,
				.storage_16bit = true,
				.storage_image_ms = true,
				.subgroup_arithmetic = true,
				.subgroup_ballot = true,
				.subgroup_basic = true,
				.subgroup_quad = true,
				.subgroup_shuffle = true,
				.subgroup_vote = true,
				.tessellation = true,
				.transform_feedback = true,
				.variable_pointers = true,
				.vk_memory_model = true,
				.vk_memory_model_device_scope = true,
			},
			.ubo_addr_format = nir_address_format_32bit_index_offset,
			.ssbo_addr_format = nir_address_format_32bit_index_offset,
			.phys_ssbo_addr_format = nir_address_format_64bit_global,
			.push_const_addr_format = nir_address_format_logical,
			.shared_addr_format = nir_address_format_32bit_offset,
			.frag_coord_is_sysval = true,
			.debug = {
				.func = radv_spirv_nir_debug,
				.private_data = &spirv_debug_data,
			},
		};
		nir = spirv_to_nir(spirv, module->size / 4,
				   spec_entries, num_spec_entries,
				   stage, entrypoint_name,
				   &spirv_options, nir_options);
		assert(nir->info.stage == stage);
		nir_validate_shader(nir, "after spirv_to_nir");

		free(spec_entries);
		/* We have to lower away local constant initializers right before we
		 * inline functions. That way they get properly initialized at the top
		 * of the function and not at the top of its caller.
		 */
		NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
		NIR_PASS_V(nir, nir_lower_returns);
		NIR_PASS_V(nir, nir_inline_functions);
		NIR_PASS_V(nir, nir_copy_prop);
		NIR_PASS_V(nir, nir_opt_deref);

		/* Pick off the single entrypoint that we want */
		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
			if (func->is_entrypoint)
				func->name = ralloc_strdup(func, "main");
			else
				exec_node_remove(&func->node);
		}
		assert(exec_list_length(&nir->functions) == 1);
	}
	/* Make sure we lower constant initializers on output variables so that
	 * nir_remove_dead_variables below sees the corresponding stores
	 */
	NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_shader_out);

	/* Now that we've deleted all but the main function, we can go ahead and
	 * lower the rest of the constant initializers.
	 */
	NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);

	/* Split member structs. We do this before lower_io_to_temporaries so that
	 * it doesn't lower system values to temporaries by accident.
	 */
	NIR_PASS_V(nir, nir_split_var_copies);
	NIR_PASS_V(nir, nir_split_per_member_structs);

	if (nir->info.stage == MESA_SHADER_FRAGMENT &&
	    !radv_use_llvm_for_stage(device, nir->info.stage))
		NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
	if (nir->info.stage == MESA_SHADER_FRAGMENT)
		NIR_PASS_V(nir, nir_lower_input_attachments,
			   &(nir_input_attachment_options) {
				.use_fragcoord_sysval = true,
				.use_layer_id_sysval = false,
			   });
	NIR_PASS_V(nir, nir_remove_dead_variables,
		   nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared,
		   NULL);

	NIR_PASS_V(nir, nir_propagate_invariant);

	NIR_PASS_V(nir, nir_lower_system_values);
	NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);

	if (device->instance->debug_flags & RADV_DEBUG_DISCARD_TO_DEMOTE)
		NIR_PASS_V(nir, nir_lower_discard_to_demote);
	nir_lower_doubles_options lower_doubles =
		nir->options->lower_doubles_options;

	if (device->physical_device->rad_info.chip_class == GFX6) {
		/* GFX6 doesn't support v_floor_f64 and the precision
		 * of v_fract_f64 which is used to implement 64-bit
		 * floor is less than what Vulkan requires.
		 */
		lower_doubles |= nir_lower_dfloor;
	}

	NIR_PASS_V(nir, nir_lower_doubles, NULL, lower_doubles);
	/* Vulkan uses the separate-shader linking model */
	nir->info.separate_shader = true;

	nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

	if (nir->info.stage == MESA_SHADER_GEOMETRY)
		nir_lower_gs_intrinsics(nir, true);

	static const nir_lower_tex_options tex_options = {
		.lower_tg4_offsets = true,
	};

	nir_lower_tex(nir, &tex_options);

	nir_lower_vars_to_ssa(nir);
	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_GEOMETRY ||
	    nir->info.stage == MESA_SHADER_FRAGMENT) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, true);
	} else if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, false);
	}
	nir_split_var_copies(nir);

	nir_lower_global_vars_to_local(nir);
	nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
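
	/* Lower subgroup operations to what the hardware actually supports:
	 * scalarized where needed, shuffles via the 32-bit AMD swizzle path,
	 * and dynamic quad broadcasts rewritten as constant broadcasts on
	 * GFX7 and older where the dynamic form isn't available. */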
	bool gfx7minus = device->physical_device->rad_info.chip_class <= GFX7;
	nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
			.subgroup_size = subgroup_size,
			.ballot_bit_size = ballot_bit_size,
			.lower_to_scalar = 1,
			.lower_subgroup_masks = 1,
			.lower_shuffle_to_32bit = 1,
			.lower_vote_eq_to_ballot = 1,
			.lower_quad_broadcast_dynamic = 1,
			.lower_quad_broadcast_dynamic_to_const = gfx7minus,
			.lower_shuffle_to_swizzle_amd = 1,
		});

	nir_lower_load_const_to_scalar(nir);
	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_optimize_nir(nir, false, true);

	/* call radv_nir_lower_ycbcr_textures() late as there might still be
	 * tex with undef texture/sampler before first optimization */
	NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);

	/* We call nir_lower_var_copies() after the first radv_optimize_nir()
	 * to remove any copies introduced by nir_opt_find_array_copies().
	 */
	nir_lower_var_copies(nir);

	/* Lower deref operations for compute shared memory. */
	if (nir->info.stage == MESA_SHADER_COMPUTE) {
		NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
			   nir_var_mem_shared, shared_var_info);
		NIR_PASS_V(nir, nir_lower_explicit_io,
			   nir_var_mem_shared, nir_address_format_32bit_offset);
	}

	/* Lower large variables that are always constant with load_constant
	 * intrinsics, which get turned into PC-relative loads from a data
	 * section next to the shader.
	 */
	NIR_PASS_V(nir, nir_opt_large_constants,
		   glsl_get_natural_size_align_bytes, 16);

	/* Indirect lowering must be called after the radv_optimize_nir() loop
	 * has been called at least once. Otherwise indirect lowering can
	 * bloat the instruction count of the loop and cause it to be
	 * considered too large for unrolling.
	 */
	ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
	radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);

	return nir;
}
static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
	return glsl_count_attribute_slots(type, false);
}
static nir_variable *
find_layer_in_var(nir_shader *nir)
{
	nir_variable *var =
		nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_LAYER);
	if (var != NULL)
		return var;

	var = nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
	var->data.location = VARYING_SLOT_LAYER;
	var->data.interpolation = INTERP_MODE_FLAT;
	return var;
}
/* We use layered rendering to implement multiview, which means we need to map
 * view_index to gl_Layer. The code generates a load from the layer_id sysval,
 * but since we don't have a way to get at this information from the fragment
 * shader, we also need to lower this to the gl_Layer varying. This pass
 * lowers both to a varying load from the LAYER slot, before lowering io, so
 * that nir_assign_var_locations() will give the LAYER varying the correct
 * driver_location.
 */
static bool
lower_view_index(nir_shader *nir)
{
	bool progress = false;
	nir_function_impl *entry = nir_shader_get_entrypoint(nir);
	nir_builder b;
	nir_builder_init(&b, entry);

	nir_variable *layer = NULL;
	nir_foreach_block(block, entry) {
		nir_foreach_instr_safe(instr, block) {
			if (instr->type != nir_instr_type_intrinsic)
				continue;

			nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
			if (load->intrinsic != nir_intrinsic_load_view_index)
				continue;

			if (!layer)
				layer = find_layer_in_var(nir);

			b.cursor = nir_before_instr(instr);
			nir_ssa_def *def = nir_load_var(&b, layer);
			nir_ssa_def_rewrite_uses(&load->dest.ssa,
						 nir_src_for_ssa(def));

			nir_instr_remove(instr);
			progress = true;
		}
	}

	return progress;
}
void
radv_lower_fs_io(nir_shader *nir)
{
	NIR_PASS_V(nir, lower_view_index);
	nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs,
				    MESA_SHADER_FRAGMENT);

	NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);

	/* This pass needs actual constants */
	nir_opt_constant_folding(nir);

	NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
}
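
/* First-fit sub-allocation of shader code from slab buffer objects: under
 * the slab mutex, scan each slab for a 256-byte-aligned gap between already
 * placed shaders (or at the tail) that is large enough, and only create a
 * new slab of at least 256 KiB when nothing fits. */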
void *
radv_alloc_shader_memory(struct radv_device *device,
			 struct radv_shader_variant *shader)
{
	mtx_lock(&device->shader_slab_mutex);
	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		uint64_t offset = 0;

		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
			if (s->bo_offset - offset >= shader->code_size) {
				shader->bo = slab->bo;
				shader->bo_offset = offset;
				list_addtail(&shader->slab_list, &s->slab_list);
				mtx_unlock(&device->shader_slab_mutex);
				return slab->ptr + offset;
			}
			offset = align_u64(s->bo_offset + s->code_size, 256);
		}
		if (offset <= slab->size && slab->size - offset >= shader->code_size) {
			shader->bo = slab->bo;
			shader->bo_offset = offset;
			list_addtail(&shader->slab_list, &slab->shaders);
			mtx_unlock(&device->shader_slab_mutex);
			return slab->ptr + offset;
		}
	}

	mtx_unlock(&device->shader_slab_mutex);
	struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));
	if (!slab)
		return NULL;

	slab->size = MAX2(256 * 1024, shader->code_size);
	slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
					     RADEON_DOMAIN_VRAM,
					     RADEON_FLAG_NO_INTERPROCESS_SHARING |
					     (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
					      0 : RADEON_FLAG_READ_ONLY),
					     RADV_BO_PRIORITY_SHADER);
	if (!slab->bo) {
		free(slab);
		return NULL;
	}

	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
	if (!slab->ptr) {
		device->ws->buffer_destroy(slab->bo);
		free(slab);
		return NULL;
	}

	list_inithead(&slab->shaders);

	mtx_lock(&device->shader_slab_mutex);
	list_add(&slab->slabs, &device->shader_slabs);

	shader->bo = slab->bo;
	shader->bo_offset = 0;
	list_add(&shader->slab_list, &slab->shaders);
	mtx_unlock(&device->shader_slab_mutex);
	return slab->ptr;
}
void
radv_destroy_shader_slabs(struct radv_device *device)
{
	list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		device->ws->buffer_destroy(slab->bo);
		free(slab);
	}
	mtx_destroy(&device->shader_slab_mutex);
}
/* For the UMR disassembler. */
#define DEBUGGER_END_OF_CODE_MARKER    0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS           5

static size_t
radv_get_shader_binary_size(size_t code_size)
{
	return code_size + DEBUGGER_NUM_MARKERS * 4;
}
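
/* Compute the final RSRC1/RSRC2/RSRC3 register values from the config the
 * backend produced: clamp SGPR/VGPR counts against the shader's declared
 * inputs, then OR in the per-stage bits (scratch/streamout enables, OC_LDS,
 * LDS size, and the ES/GS/LS VGPR component counts) for the hardware stage
 * this shader runs as. */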
static void radv_postprocess_config(const struct radv_physical_device *pdevice,
				    const struct ac_shader_config *config_in,
				    const struct radv_shader_info *info,
				    gl_shader_stage stage,
				    struct ac_shader_config *config_out)
{
	bool scratch_enabled = config_in->scratch_bytes_per_wave > 0;
	unsigned vgpr_comp_cnt = 0;
	unsigned num_input_vgprs = info->num_input_vgprs;

	if (stage == MESA_SHADER_FRAGMENT) {
		num_input_vgprs = ac_get_fs_input_vgpr_cnt(config_in, NULL, NULL);
	}

	unsigned num_vgprs = MAX2(config_in->num_vgprs, num_input_vgprs);
	/* +3 for scratch wave offset and VCC */
	unsigned num_sgprs = MAX2(config_in->num_sgprs, info->num_input_sgprs + 3);
	unsigned num_shared_vgprs = config_in->num_shared_vgprs;
	/* shared VGPRs are introduced in Navi and are allocated in blocks of 8 (RDNA ref 3.6.5) */
	assert((pdevice->rad_info.chip_class >= GFX10 && num_shared_vgprs % 8 == 0)
	       || (pdevice->rad_info.chip_class < GFX10 && num_shared_vgprs == 0));
	unsigned num_shared_vgpr_blocks = num_shared_vgprs / 8;

	*config_out = *config_in;
	config_out->num_vgprs = num_vgprs;
	config_out->num_sgprs = num_sgprs;
	config_out->num_shared_vgprs = num_shared_vgprs;

	config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
			    S_00B12C_SCRATCH_EN(scratch_enabled);

	if (!pdevice->use_ngg_streamout) {
		config_out->rsrc2 |= S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
				     S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
				     S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
				     S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
				     S_00B12C_SO_EN(!!info->so.num_outputs);
	}

	config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) /
					   (info->wave_size == 32 ? 8 : 4)) |
			    S_00B848_DX10_CLAMP(1) |
			    S_00B848_FLOAT_MODE(config_out->float_mode);

	if (pdevice->rad_info.chip_class >= GFX10) {
		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(info->num_user_sgprs >> 5);
	} else {
		config_out->rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5);
	}

	switch (stage) {
	case MESA_SHADER_TESS_EVAL:
		if (info->is_ngg) {
			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1);
		} else if (info->tes.as_es) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;

			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		} else {
			bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
			vgpr_comp_cnt = enable_prim_id ? 3 : 2;

			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (pdevice->rad_info.chip_class >= GFX9) {
			/* We need at least 2 components for LS.
			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
			 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
			 */
			if (pdevice->rad_info.chip_class >= GFX10) {
				vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
				config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX10(info->tcs.num_lds_blocks);
			} else {
				vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
				config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX9(info->tcs.num_lds_blocks);
			}
		} else {
			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		config_out->rsrc1 |= S_00B428_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |= S_00B42C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_VERTEX:
		if (info->is_ngg) {
			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		} else if (info->vs.as_ls) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			/* We need at least 2 components for LS.
			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
			 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
			 */
			vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
		} else if (info->vs.as_es) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
		} else {
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
			 * If PrimID is disabled. InstanceID / StepRate1 is loaded instead.
			 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
			 */
			if (info->vs.needs_instance_id && pdevice->rad_info.chip_class >= GFX10) {
				vgpr_comp_cnt = 3;
			} else if (info->vs.export_prim_id) {
				vgpr_comp_cnt = 2;
			} else if (info->vs.needs_instance_id) {
				vgpr_comp_cnt = 1;
			} else {
				vgpr_comp_cnt = 0;
			}

			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		}
		config_out->rsrc2 |= S_00B12C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_FRAGMENT:
		config_out->rsrc1 |= S_00B028_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |= S_00B02C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_GEOMETRY:
		config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_COMPUTE:
		config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |= S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
				     S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
				     S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
				     S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
							     info->cs.uses_thread_id[1] ? 1 : 0) |
				     S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
				     S_00B84C_LDS_SIZE(config_in->lds_size);
		config_out->rsrc3 |= S_00B8A0_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	default:
		unreachable("unsupported shader type");
		break;
	}
	if (pdevice->rad_info.chip_class >= GFX10 && info->is_ngg &&
	    (stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_GEOMETRY)) {
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
		gl_shader_stage es_stage = stage;
		if (stage == MESA_SHADER_GEOMETRY)
			es_stage = info->gs.es_type;

		/* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
		if (es_stage == MESA_SHADER_VERTEX) {
			es_vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 0;
		} else if (es_stage == MESA_SHADER_TESS_EVAL) {
			bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
			es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
		} else
			unreachable("Unexpected ES shader stage");

		bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
			info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
		if (info->uses_invocation_id || stage == MESA_SHADER_VERTEX) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (info->gs.vertices_in >= 3 || tes_triangles) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt) |
				     S_00B228_WGP_MODE(1);
		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				     S_00B22C_LDS_SIZE(config_in->lds_size) |
				     S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL);
	} else if (pdevice->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_GEOMETRY) {
		unsigned es_type = info->gs.es_type;
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

		if (es_type == MESA_SHADER_VERTEX) {
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			if (info->vs.needs_instance_id) {
				es_vgpr_comp_cnt = pdevice->rad_info.chip_class >= GFX10 ? 3 : 1;
			} else {
				es_vgpr_comp_cnt = 0;
			}
		} else if (es_type == MESA_SHADER_TESS_EVAL) {
			es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
		} else
			unreachable("invalid shader ES type");

		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
		 * VGPR[0:4] are always loaded.
		 */
		if (info->uses_invocation_id) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (info->gs.vertices_in >= 3) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				     S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
	} else if (pdevice->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_TESS_CTRL) {
		config_out->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
	} else {
		config_out->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
	}
}
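
/* Wrap a compiled binary in a radv_shader_variant and upload it. RTLD
 * binaries are relocatable ELF objects whose LDS symbols (esgs_ring,
 * ngg_emit) are resolved here before upload; legacy binaries are raw
 * machine code copied directly into the shader slab. */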
struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
			   const struct radv_shader_binary *binary,
			   bool keep_shader_info)
{
	struct ac_shader_config config = {0};
	struct ac_rtld_binary rtld_binary = {0};
	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
	if (!variant)
		return NULL;

	variant->ref_count = 1;

	if (binary->type == RADV_BINARY_TYPE_RTLD) {
		struct ac_rtld_symbol lds_symbols[2];
		unsigned num_lds_symbols = 0;
		const char *elf_data = (const char *)((struct radv_shader_binary_rtld *)binary)->data;
		size_t elf_size = ((struct radv_shader_binary_rtld *)binary)->elf_size;

		if (device->physical_device->rad_info.chip_class >= GFX9 &&
		    (binary->stage == MESA_SHADER_GEOMETRY || binary->info.is_ngg) &&
		    !binary->is_gs_copy_shader) {
			/* We add this symbol even on LLVM <= 8 to ensure that
			 * shader->config.lds_size is set correctly below.
			 */
			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
			sym->name = "esgs_ring";
			sym->size = binary->info.ngg_info.esgs_ring_size;
			sym->align = 64 * 1024;
		}

		if (binary->info.is_ngg &&
		    binary->stage == MESA_SHADER_GEOMETRY) {
			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
			sym->name = "ngg_emit";
			sym->size = binary->info.ngg_info.ngg_emit_size * 4;
			sym->align = 4;
		}
		struct ac_rtld_open_info open_info = {
			.info = &device->physical_device->rad_info,
			.shader_type = binary->stage,
			.wave_size = binary->info.wave_size,
			.num_parts = 1,
			.elf_ptrs = &elf_data,
			.elf_sizes = &elf_size,
			.num_shared_lds_symbols = num_lds_symbols,
			.shared_lds_symbols = lds_symbols,
		};

		if (!ac_rtld_open(&rtld_binary, open_info)) {
			free(variant);
			return NULL;
		}

		if (!ac_rtld_read_config(&device->physical_device->rad_info,
					 &rtld_binary, &config)) {
			ac_rtld_close(&rtld_binary);
			free(variant);
			return NULL;
		}

		if (rtld_binary.lds_size > 0) {
			unsigned alloc_granularity = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			config.lds_size = align(rtld_binary.lds_size, alloc_granularity) / alloc_granularity;
		}

		variant->code_size = rtld_binary.rx_size;
		variant->exec_size = rtld_binary.exec_size;
	} else {
		assert(binary->type == RADV_BINARY_TYPE_LEGACY);
		config = ((struct radv_shader_binary_legacy *)binary)->config;
		variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
		variant->exec_size = ((struct radv_shader_binary_legacy *)binary)->exec_size;
	}

	variant->info = binary->info;
	radv_postprocess_config(device->physical_device, &config, &binary->info,
				binary->stage, &variant->config);
	void *dest_ptr = radv_alloc_shader_memory(device, variant);
	if (!dest_ptr) {
		if (binary->type == RADV_BINARY_TYPE_RTLD)
			ac_rtld_close(&rtld_binary);
		free(variant);
		return NULL;
	}
	if (binary->type == RADV_BINARY_TYPE_RTLD) {
		struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
		struct ac_rtld_upload_info info = {
			.binary = &rtld_binary,
			.rx_va = radv_buffer_get_va(variant->bo) + variant->bo_offset,
			.rx_ptr = dest_ptr,
		};

		if (!ac_rtld_upload(&info)) {
			radv_shader_variant_destroy(device, variant);
			ac_rtld_close(&rtld_binary);
			return NULL;
		}

		if (keep_shader_info ||
		    (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) {
			const char *disasm_data;
			size_t disasm_size;
			if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
				radv_shader_variant_destroy(device, variant);
				ac_rtld_close(&rtld_binary);
				return NULL;
			}

			variant->ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
			variant->disasm_string = malloc(disasm_size + 1);
			memcpy(variant->disasm_string, disasm_data, disasm_size);
			variant->disasm_string[disasm_size] = 0;
		}

		ac_rtld_close(&rtld_binary);
	} else {
		struct radv_shader_binary_legacy* bin = (struct radv_shader_binary_legacy *)binary;
		memcpy(dest_ptr, bin->data + bin->stats_size, bin->code_size);

		/* Add end-of-code markers for the UMR disassembler. */
		uint32_t *ptr32 = (uint32_t *)dest_ptr + bin->code_size / 4;
		for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
			ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;

		variant->ir_string = bin->ir_size ? strdup((const char*)(bin->data + bin->stats_size + bin->code_size)) : NULL;
		variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->stats_size + bin->code_size + bin->ir_size)) : NULL;

		if (bin->stats_size) {
			variant->statistics = calloc(bin->stats_size, 1);
			memcpy(variant->statistics, bin->data, bin->stats_size);
		}
	}
	return variant;
}
static char *
radv_dump_nir_shaders(struct nir_shader * const *shaders,
		      int shader_count)
{
	char *data = NULL;
	char *ret = NULL;
	size_t size = 0;
	FILE *f = open_memstream(&data, &size);
	if (f) {
		for (int i = 0; i < shader_count; ++i)
			nir_print_shader(shaders[i], f);
		fclose(f);
	}

	ret = malloc(size + 1);
	if (ret) {
		memcpy(ret, data, size);
		ret[size] = 0;
	}
	free(data);
	return ret;
}
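
/* Common back end of shader compilation: fill in the per-device compiler
 * options, declare the shader arguments, invoke the LLVM or ACO backend,
 * and turn the resulting binary into a radv_shader_variant. */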
static struct radv_shader_variant *
shader_variant_compile(struct radv_device *device,
		       struct radv_shader_module *module,
		       struct nir_shader * const *shaders,
		       int shader_count,
		       gl_shader_stage stage,
		       struct radv_shader_info *info,
		       struct radv_nir_compiler_options *options,
		       bool gs_copy_shader,
		       bool keep_shader_info,
		       bool keep_statistic_info,
		       struct radv_shader_binary **binary_out)
{
	enum radeon_family chip_family = device->physical_device->rad_info.family;
	struct radv_shader_binary *binary = NULL;

	struct radv_shader_debug_data debug_data = {
		.device = device,
		.module = module,
	};

	options->family = chip_family;
	options->chip_class = device->physical_device->rad_info.chip_class;
	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
	options->dump_preoptir = options->dump_shader &&
				 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
	options->record_ir = keep_shader_info;
	options->record_stats = keep_statistic_info;
	options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
	options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
	options->address32_hi = device->physical_device->rad_info.address32_hi;
	options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
	options->use_ngg_streamout = device->physical_device->use_ngg_streamout;
	options->enable_mrt_output_nan_fixup = device->instance->enable_mrt_output_nan_fixup;
	options->debug.func = radv_compiler_debug;
	options->debug.private_data = &debug_data;

	struct radv_shader_args args = {};
	args.options = options;
	args.shader_info = info;
	args.is_gs_copy_shader = gs_copy_shader;
	radv_declare_shader_args(&args,
				 gs_copy_shader ? MESA_SHADER_VERTEX
						: shaders[shader_count - 1]->info.stage,
				 shader_count >= 2,
				 shader_count >= 2 ? shaders[shader_count - 2]->info.stage
						   : MESA_SHADER_VERTEX);
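
	/* LLVM is initialized lazily: the LLVM backend needs it, and the dump
	 * and record-IR paths rely on it for disassembly as well. */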
	if (radv_use_llvm_for_stage(device, stage) ||
	    options->dump_shader || options->record_ir)
		ac_init_llvm_once();

	if (radv_use_llvm_for_stage(device, stage)) {
		llvm_compile_shader(device, shader_count, shaders, &binary, &args);
	} else {
		aco_compile_shader(shader_count, shaders, &binary, &args);
	}
	binary->info = *info;

	struct radv_shader_variant *variant = radv_shader_variant_create(device, binary,
									  keep_shader_info);
	if (!variant) {
		free(binary);
		return NULL;
	}

	if (options->dump_shader) {
		fprintf(stderr, "%s", radv_get_shader_name(info, shaders[0]->info.stage));
		for (int i = 1; i < shader_count; ++i)
			fprintf(stderr, " + %s", radv_get_shader_name(info, shaders[i]->info.stage));

		fprintf(stderr, "\ndisasm:\n%s\n", variant->disasm_string);
	}

	if (keep_shader_info) {
		variant->nir_string = radv_dump_nir_shaders(shaders, shader_count);
		if (!gs_copy_shader && !module->nir) {
			variant->spirv = malloc(module->size);
			if (!variant->spirv) {
				free(variant);
				free(binary);
				return NULL;
			}
			memcpy(variant->spirv, module->data, module->size);
			variant->spirv_size = module->size;
		}
	}

	*binary_out = binary;
	return variant;
}
struct radv_shader_variant *
radv_shader_variant_compile(struct radv_device *device,
			    struct radv_shader_module *module,
			    struct nir_shader *const *shaders,
			    int shader_count,
			    struct radv_pipeline_layout *layout,
			    const struct radv_shader_variant_key *key,
			    struct radv_shader_info *info,
			    bool keep_shader_info, bool keep_statistic_info,
			    struct radv_shader_binary **binary_out)
{
	gl_shader_stage stage = shaders[shader_count - 1]->info.stage;
	struct radv_nir_compiler_options options = {0};

	options.layout = layout;
	if (key)
		options.key = *key;

	options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
	options.robust_buffer_access = device->robust_buffer_access;

	return shader_variant_compile(device, module, shaders, shader_count, stage, info,
				      &options, false, keep_shader_info, keep_statistic_info, binary_out);
}
struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
			   struct nir_shader *shader,
			   struct radv_shader_info *info,
			   struct radv_shader_binary **binary_out,
			   bool keep_shader_info, bool keep_statistic_info,
			   bool multiview)
{
	struct radv_nir_compiler_options options = {0};
	gl_shader_stage stage = MESA_SHADER_VERTEX;

	options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
	options.key.has_multiview_view_index = multiview;

	return shader_variant_compile(device, NULL, &shader, 1, stage,
				      info, &options, true, keep_shader_info, keep_statistic_info, binary_out);
}
void
radv_shader_variant_destroy(struct radv_device *device,
			    struct radv_shader_variant *variant)
{
	if (!p_atomic_dec_zero(&variant->ref_count))
		return;

	mtx_lock(&device->shader_slab_mutex);
	list_del(&variant->slab_list);
	mtx_unlock(&device->shader_slab_mutex);

	free(variant->spirv);
	free(variant->nir_string);
	free(variant->disasm_string);
	free(variant->ir_string);
	free(variant->statistics);
	free(variant);
}
const char *
radv_get_shader_name(struct radv_shader_info *info,
		     gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (info->vs.as_ls)
			return "Vertex Shader as LS";
		else if (info->vs.as_es)
			return "Vertex Shader as ES";
		else if (info->is_ngg)
			return "Vertex Shader as ESGS";
		else
			return "Vertex Shader as VS";
	case MESA_SHADER_TESS_CTRL:
		return "Tessellation Control Shader";
	case MESA_SHADER_TESS_EVAL:
		if (info->tes.as_es)
			return "Tessellation Evaluation Shader as ES";
		else if (info->is_ngg)
			return "Tessellation Evaluation Shader as ESGS";
		else
			return "Tessellation Evaluation Shader as VS";
	case MESA_SHADER_GEOMETRY:
		return "Geometry Shader";
	case MESA_SHADER_FRAGMENT:
		return "Pixel Shader";
	case MESA_SHADER_COMPUTE:
		return "Compute Shader";
	default:
		return "Unknown shader";
	}
}
unsigned
radv_get_max_workgroup_size(enum chip_class chip_class,
			    gl_shader_stage stage,
			    const unsigned *sizes)
{
	switch (stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= GFX7 ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = sizes[0] * sizes[1] * sizes[2];
	return max_workgroup_size;
}
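
/* Estimate occupancy for a compiled shader: start from the hardware limit
 * of wave64 slots per SIMD and clamp it by SGPR, VGPR and LDS pressure.
 * For instance, on a part with 800 physical SGPRs per SIMD, a shader using
 * 100 SGPRs (aligned up to 112 on GFX8+) is capped at 800 / 112 = 7 waves
 * by SGPRs alone. */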
unsigned
radv_get_max_waves(struct radv_device *device,
		   struct radv_shader_variant *variant,
		   gl_shader_stage stage)
{
	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
	uint8_t wave_size = variant->info.wave_size;
	struct ac_shader_config *conf = &variant->config;
	unsigned max_simd_waves;
	unsigned lds_per_wave = 0;

	max_simd_waves = device->physical_device->rad_info.max_wave64_per_simd;

	if (stage == MESA_SHADER_FRAGMENT) {
		lds_per_wave = conf->lds_size * lds_increment +
			       align(variant->info.ps.num_interp * 48,
				     lds_increment);
	} else if (stage == MESA_SHADER_COMPUTE) {
		unsigned max_workgroup_size =
			radv_get_max_workgroup_size(chip_class, stage, variant->info.cs.block_size);
		lds_per_wave = (conf->lds_size * lds_increment) /
			       DIV_ROUND_UP(max_workgroup_size, wave_size);
	}

	if (conf->num_sgprs) {
		unsigned sgprs = align(conf->num_sgprs, chip_class >= GFX8 ? 16 : 8);
		max_simd_waves =
			MIN2(max_simd_waves,
			     device->physical_device->rad_info.num_physical_sgprs_per_simd /
			     sgprs);
	}

	if (conf->num_vgprs) {
		unsigned vgprs = align(conf->num_vgprs, wave_size == 32 ? 8 : 4);
		max_simd_waves =
			MIN2(max_simd_waves,
			     device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd / vgprs);
	}

	unsigned max_lds_per_simd = device->physical_device->rad_info.lds_size_per_workgroup / device->physical_device->rad_info.num_simd_per_compute_unit;
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);

	return max_simd_waves;
}
static void
generate_shader_stats(struct radv_device *device,
		      struct radv_shader_variant *variant,
		      gl_shader_stage stage,
		      struct _mesa_string_buffer *buf)
{
	struct ac_shader_config *conf = &variant->config;
	unsigned max_simd_waves = radv_get_max_waves(device, variant, stage);

	if (stage == MESA_SHADER_FRAGMENT) {
		_mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
					   "SPI_PS_INPUT_ADDR = 0x%04x\n"
					   "SPI_PS_INPUT_ENA = 0x%04x\n",
					   conf->spi_ps_input_addr, conf->spi_ps_input_ena);
	}

	_mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
				   "SGPRS: %d\n"
				   "VGPRS: %d\n"
				   "Spilled SGPRs: %d\n"
				   "Spilled VGPRs: %d\n"
				   "PrivMem VGPRS: %d\n"
				   "Code Size: %d bytes\n"
				   "LDS: %d blocks\n"
				   "Scratch: %d bytes per wave\n"
				   "Max Waves: %d\n",
				   conf->num_sgprs, conf->num_vgprs,
				   conf->spilled_sgprs, conf->spilled_vgprs,
				   variant->info.private_mem_vgprs, variant->exec_size,
				   conf->lds_size, conf->scratch_bytes_per_wave,
				   max_simd_waves);

	if (variant->statistics) {
		_mesa_string_buffer_printf(buf, "*** COMPILER STATS ***\n");
		for (unsigned i = 0; i < variant->statistics->count; i++) {
			struct radv_compiler_statistic_info *info = &variant->statistics->infos[i];
			uint32_t value = variant->statistics->values[i];
			_mesa_string_buffer_printf(buf, "%s: %u\n", info->name, value);
		}
	}

	_mesa_string_buffer_printf(buf, "********************\n\n\n");
}
void
radv_shader_dump_stats(struct radv_device *device,
		       struct radv_shader_variant *variant,
		       gl_shader_stage stage,
		       FILE *file)
{
	struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

	generate_shader_stats(device, variant, stage, buf);

	fprintf(file, "\n%s:\n", radv_get_shader_name(&variant->info, stage));
	fprintf(file, "%s", buf->buf);

	_mesa_string_buffer_destroy(buf);
}
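
/* Implementation of VK_AMD_shader_info. It follows the usual Vulkan
 * two-call idiom: with pInfo == NULL only the required size is written to
 * pInfoSize; otherwise up to *pInfoSize bytes are copied and VK_INCOMPLETE
 * is returned when the supplied buffer is too small. */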
VkResult
radv_GetShaderInfoAMD(VkDevice _device,
		      VkPipeline _pipeline,
		      VkShaderStageFlagBits shaderStage,
		      VkShaderInfoTypeAMD infoType,
		      size_t* pInfoSize,
		      void* pInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
	gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
	struct radv_shader_variant *variant = pipeline->shaders[stage];
	struct _mesa_string_buffer *buf;
	VkResult result = VK_SUCCESS;

	/* Spec doesn't indicate what to do if the stage is invalid, so just
	 * return no info for this. */
	if (!variant)
		return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

	switch (infoType) {
	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
		if (!pInfo) {
			*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
		} else {
			unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			struct ac_shader_config *conf = &variant->config;

			VkShaderStatisticsInfoAMD statistics = {};
			statistics.shaderStageMask = shaderStage;
			statistics.numPhysicalVgprs = device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd;
			statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
			statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

			if (stage == MESA_SHADER_COMPUTE) {
				unsigned *local_size = variant->info.cs.block_size;
				unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

				statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
							       ceil((double)workgroup_size / statistics.numPhysicalVgprs);

				statistics.computeWorkGroupSize[0] = local_size[0];
				statistics.computeWorkGroupSize[1] = local_size[1];
				statistics.computeWorkGroupSize[2] = local_size[2];
			} else {
				statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
			}

			statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
			statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
			statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
			statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
			statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

			size_t size = *pInfoSize;
			*pInfoSize = sizeof(statistics);

			memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

			if (size < *pInfoSize)
				result = VK_INCOMPLETE;
		}
		break;
	case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
		buf = _mesa_string_buffer_create(NULL, 1024);

		_mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(&variant->info, stage));
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->ir_string);
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
		generate_shader_stats(device, variant, stage, buf);

		/* Need to include the null terminator. */
		size_t length = buf->length + 1;

		if (!pInfo) {
			*pInfoSize = length;
		} else {
			size_t size = *pInfoSize;
			*pInfoSize = length;

			memcpy(pInfo, buf->buf, MIN2(size, length));

			if (size < length)
				result = VK_INCOMPLETE;
		}

		_mesa_string_buffer_destroy(buf);
		break;
	default:
		/* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
		result = VK_ERROR_FEATURE_NOT_PRESENT;
		break;
	}

	return result;
}