#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
+#include "radv_shader_args.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"
.lower_device_index_to_zero = true,
.lower_fsat = true,
.lower_fdiv = true,
+ .lower_fmod = true,
.lower_bitfield_insert_to_bitfield_select = true,
.lower_bitfield_extract = true,
.lower_sub = true,
.lower_rotate = true,
.max_unroll_iterations = 32,
.use_interpolated_input_intrinsics = true,
+ /* nir_lower_int64() isn't actually called for the LLVM backend, but
+ * this helps the loop unrolling heuristics. */
+ .lower_int64_options = nir_lower_imul64 |
+ nir_lower_imul_high64 |
+ nir_lower_imul_2x32_64 |
+ nir_lower_divmod64 |
+ nir_lower_minmax64 |
+ nir_lower_iabs64,
};
static const struct nir_shader_compiler_options nir_options_aco = {
.lower_flrp64 = true,
.lower_device_index_to_zero = true,
.lower_fdiv = true,
+ .lower_fmod = true,
.lower_bitfield_insert_to_bitfield_select = true,
.lower_bitfield_extract = true,
.lower_pack_snorm_2x16 = true,
.lower_rotate = true,
.max_unroll_iterations = 32,
.use_interpolated_input_intrinsics = true,
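+	/* Unlike the LLVM backend, ACO does run nir_lower_int64() with these
+	 * options; note the extra nir_lower_logic64, which LLVM handles
+	 * natively. */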
+ .lower_int64_options = nir_lower_imul64 |
+ nir_lower_imul_high64 |
+ nir_lower_imul_2x32_64 |
+ nir_lower_divmod64 |
+ nir_lower_logic64 |
+ nir_lower_minmax64 |
+ nir_lower_iabs64,
};
bool
module && !module->nir;
}
-unsigned shader_io_get_unique_index(gl_varying_slot slot)
-{
- /* handle patch indices separate */
- if (slot == VARYING_SLOT_TESS_LEVEL_OUTER)
- return 0;
- if (slot == VARYING_SLOT_TESS_LEVEL_INNER)
- return 1;
- if (slot >= VARYING_SLOT_PATCH0 && slot <= VARYING_SLOT_TESS_MAX)
- return 2 + (slot - VARYING_SLOT_PATCH0);
- if (slot == VARYING_SLOT_POS)
- return 0;
- if (slot == VARYING_SLOT_PSIZ)
- return 1;
- if (slot == VARYING_SLOT_CLIP_DIST0)
- return 2;
- if (slot == VARYING_SLOT_CLIP_DIST1)
- return 3;
- /* 3 is reserved for clip dist as well */
- if (slot >= VARYING_SLOT_VAR0 && slot <= VARYING_SLOT_VAR31)
- return 4 + (slot - VARYING_SLOT_VAR0);
- unreachable("illegal slot in get unique index\n");
-}
-
VkResult radv_CreateShaderModule(
VkDevice _device,
const VkShaderModuleCreateInfo* pCreateInfo,
NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
NIR_PASS(progress, shader, nir_opt_dead_write_vars);
NIR_PASS(progress, shader, nir_remove_dead_variables,
- nir_var_function_temp);
+ nir_var_function_temp | nir_var_shader_in | nir_var_shader_out);
NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS_V(shader, nir_lower_phis_to_scalar);
NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
}
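+/* Size/alignment callback for nir_lower_vars_to_explicit_types() below:
+ * reports the size and alignment of a vector or scalar shared variable. */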
+static void
+shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
+{
+ assert(glsl_type_is_vector_or_scalar(type));
+
+ uint32_t comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
+ unsigned length = glsl_get_vector_elements(type);
+	*size = comp_size * length;
+ *align = comp_size;
+}
+
nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
struct radv_shader_module *module,
assert(module->size % 4 == 0);
if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
- radv_print_spirv(spirv, module->size, stderr);
+ radv_print_spirv(module->data, module->size, stderr);
uint32_t num_spec_entries = 0;
struct nir_spirv_specialization *spec_entries = NULL;
const struct spirv_to_nir_options spirv_options = {
.lower_ubo_ssbo_access_to_offsets = true,
.caps = {
+ .amd_fragment_mask = true,
.amd_gcn_shader = true,
+ .amd_image_read_write_lod = true,
.amd_shader_ballot = device->physical_device->use_shader_ballot,
+ .amd_shader_explicit_vertex_parameter = true,
.amd_trinary_minmax = true,
.demote_to_helper_invocation = device->physical_device->use_aco,
.derivative_group = true,
.descriptor_indexing = true,
.device_group = true,
.draw_parameters = true,
+ .float_controls = true,
.float16 = !device->physical_device->use_aco,
.float64 = true,
.geometry_streams = true,
+ .image_ms_array = true,
.image_read_without_format = true,
.image_write_without_format = true,
.int8 = !device->physical_device->use_aco,
.physical_storage_buffer_address = true,
.post_depth_coverage = true,
.runtime_descriptor_array = true,
+ .shader_clock = true,
.shader_viewport_index_layer = true,
.stencil_export = true,
.storage_8bit = !device->physical_device->use_aco,
* inline functions. That way they get properly initialized at the top
* of the function and not at the top of its caller.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
+ NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
NIR_PASS_V(nir, nir_opt_deref);
/* Make sure we lower constant initializers on output variables so that
* nir_remove_dead_variables below sees the corresponding stores
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);
+ NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_shader_out);
/* Now that we've deleted all but the main function, we can go ahead and
* lower the rest of the constant initializers.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
+ NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
/* Split member structs. We do this before lower_io_to_temporaries so that
* it doesn't lower system values to temporaries by accident.
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
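+	/* Debug workaround for applications that rely on helper invocations
+	 * (e.g. for derivatives) after a discard. */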
+ if (device->instance->debug_flags & RADV_DEBUG_DISCARD_TO_DEMOTE)
+ NIR_PASS_V(nir, nir_lower_discard_to_demote);
}
/* Vulkan uses the separate-shader linking model */
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
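+	/* ACO handles GS via the counter-based intrinsics this pass produces. */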
+ if (nir->info.stage == MESA_SHADER_GEOMETRY && use_aco)
+ nir_lower_gs_intrinsics(nir, true);
+
static const nir_lower_tex_options tex_options = {
.lower_txp = ~0,
.lower_tg4_offsets = true,
nir_lower_global_vars_to_local(nir);
nir_remove_dead_variables(nir, nir_var_function_temp);
+ bool gfx7minus = device->physical_device->rad_info.chip_class <= GFX7;
nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
.subgroup_size = 64,
.ballot_bit_size = 64,
.lower_shuffle = 1,
.lower_shuffle_to_32bit = 1,
.lower_vote_eq_to_ballot = 1,
+ .lower_quad_broadcast_dynamic = 1,
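+		/* GFX6-7 lack ds_bpermute, so dynamic quad broadcasts cannot
+		 * become shuffles and are lowered to constant ones instead. */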
+ .lower_quad_broadcast_dynamic_to_const = gfx7minus,
});
nir_lower_load_const_to_scalar(nir);
*/
nir_lower_var_copies(nir);
+ /* Lower deref operations for compute shared memory. */
+ if (nir->info.stage == MESA_SHADER_COMPUTE) {
+ NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
+ nir_var_mem_shared, shared_var_info);
+ NIR_PASS_V(nir, nir_lower_explicit_io,
+ nir_var_mem_shared, nir_address_format_32bit_offset);
+ }
+
/* Lower large variables that are always constant with load_constant
* intrinsics, which get turned into PC-relative loads from a data
* section next to the shader.
config_out->num_sgprs = num_sgprs;
config_out->num_shared_vgprs = num_shared_vgprs;
- /* Enable 64-bit and 16-bit denormals, because there is no performance
- * cost.
- *
- * If denormals are enabled, all floating-point output modifiers are
- * ignored.
- *
- * Don't enable denormals for 32-bit floats, because:
- * - Floating-point output modifiers would be ignored by the hw.
- * - Some opcodes don't support denormals, such as v_mad_f32. We would
- * have to stop using those.
- * - GFX6 & GFX7 would be very slow.
- */
- config_out->float_mode |= V_00B028_FP_64_DENORMS;
-
config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
S_00B12C_SCRATCH_EN(scratch_enabled);
return NULL;
}
+ /* Enable 64-bit and 16-bit denormals, because there is no performance
+ * cost.
+ *
+ * If denormals are enabled, all floating-point output modifiers are
+ * ignored.
+ *
+ * Don't enable denormals for 32-bit floats, because:
+ * - Floating-point output modifiers would be ignored by the hw.
+ * - Some opcodes don't support denormals, such as v_mad_f32. We would
+ * have to stop using those.
+ * - GFX6 & GFX7 would be very slow.
+ */
+ config.float_mode |= V_00B028_FP_64_DENORMS;
+
if (rtld_binary.lds_size > 0) {
unsigned alloc_granularity = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
config.lds_size = align(rtld_binary.lds_size, alloc_granularity) / alloc_granularity;
variant->info = binary->info;
radv_postprocess_config(device->physical_device, &config, &binary->info,
binary->stage, &variant->config);
-
+
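+	/* A secure compile process must not allocate or touch GPU memory;
+	 * leave the code in the binary for the main process to upload. */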
+ if (radv_device_use_secure_compile(device->instance)) {
+ if (binary->type == RADV_BINARY_TYPE_RTLD)
+ ac_rtld_close(&rtld_binary);
+
+ return variant;
+ }
+
void *dest_ptr = radv_alloc_shader_memory(device, variant);
if (binary->type == RADV_BINARY_TYPE_RTLD) {
options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
options->use_ngg_streamout = device->physical_device->use_ngg_streamout;
- if ((stage == MESA_SHADER_GEOMETRY && !options->key.vs_common_out.as_ngg) ||
- gs_copy_shader)
- options->wave_size = 64;
- else if (stage == MESA_SHADER_COMPUTE)
- options->wave_size = device->physical_device->cs_wave_size;
- else if (stage == MESA_SHADER_FRAGMENT)
- options->wave_size = device->physical_device->ps_wave_size;
- else
- options->wave_size = device->physical_device->ge_wave_size;
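+	/* The wave size now comes from radv_shader_info (see below), and the
+	 * shader arguments are declared once for both the ACO and LLVM
+	 * backends. */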
+ struct radv_shader_args args = {};
+ args.options = options;
+ args.shader_info = info;
+ args.is_gs_copy_shader = gs_copy_shader;
+ radv_declare_shader_args(&args,
+ gs_copy_shader ? MESA_SHADER_VERTEX
+ : shaders[shader_count - 1]->info.stage,
+ shader_count >= 2,
+ shader_count >= 2 ? shaders[shader_count - 2]->info.stage
+ : MESA_SHADER_VERTEX);
if (!use_aco || options->dump_shader || options->record_ir)
ac_init_llvm_once();
if (use_aco) {
- aco_compile_shader(shader_count, shaders, &binary, info, options);
+ aco_compile_shader(shader_count, shaders, &binary, &args);
binary->info = *info;
} else {
enum ac_target_machine_options tm_options = 0;
struct ac_llvm_compiler ac_llvm;
bool thread_compiler;
- if (options->supports_spill)
- tm_options |= AC_TM_SUPPORTS_SPILL;
- if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
- tm_options |= AC_TM_SISCHED;
+ tm_options |= AC_TM_SUPPORTS_SPILL;
if (options->check_ir)
tm_options |= AC_TM_CHECK_IR;
if (device->instance->debug_flags & RADV_DEBUG_NO_LOAD_STORE_OPT)
radv_init_llvm_compiler(&ac_llvm,
thread_compiler,
chip_family, tm_options,
- options->wave_size);
+ info->wave_size);
if (gs_copy_shader) {
assert(shader_count == 1);
radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
- info, options);
+ &args);
} else {
- radv_compile_nir_shader(&ac_llvm, &binary, info,
- shaders, shader_count, options);
+ radv_compile_nir_shader(&ac_llvm, &binary, &args,
+ shaders, shader_count);
}
binary->info = *info;
if (keep_shader_info) {
variant->nir_string = radv_dump_nir_shaders(shaders, shader_count);
if (!gs_copy_shader && !module->nir) {
- variant->spirv = (uint32_t *)module->data;
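+			/* Keep a private copy of the SPIR-V: the application is
+			 * free to destroy the shader module once pipeline
+			 * creation returns. */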
+ variant->spirv = malloc(module->size);
+ if (!variant->spirv) {
+ free(variant);
+ free(binary);
+ return NULL;
+ }
+
+ memcpy(variant->spirv, module->data, module->size);
variant->spirv_size = module->size;
}
}
if (key)
options.key = *key;
- options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
- options.supports_spill = true;
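+	/* explicit_scratch_args makes the scratch offset an explicit shader
+	 * argument, as the ACO backend expects. */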
+ options.explicit_scratch_args = use_aco;
options.robust_buffer_access = device->robust_buffer_access;
return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage, info,
struct radv_shader_info *info,
struct radv_shader_binary **binary_out,
bool keep_shader_info,
- bool multiview)
+ bool multiview, bool use_aco)
{
struct radv_nir_compiler_options options = {0};
+ options.explicit_scratch_args = use_aco;
options.key.has_multiview_view_index = multiview;
return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
- info, &options, true, keep_shader_info, false, binary_out);
+ info, &options, true, keep_shader_info, use_aco, binary_out);
}
void
list_del(&variant->slab_list);
mtx_unlock(&device->shader_slab_mutex);
+ free(variant->spirv);
free(variant->nir_string);
free(variant->disasm_string);
free(variant->ir_string);
DIV_ROUND_UP(max_workgroup_size, wave_size);
}
- if (conf->num_sgprs)
+ if (conf->num_sgprs) {
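+		/* SGPRs are allocated in groups of 16 on GFX8+ (8 on older chips). */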
+ unsigned sgprs = align(conf->num_sgprs, chip_class >= GFX8 ? 16 : 8);
max_simd_waves =
MIN2(max_simd_waves,
device->physical_device->rad_info.num_physical_sgprs_per_simd /
- conf->num_sgprs);
+ sgprs);
+ }
- if (conf->num_vgprs)
+ if (conf->num_vgprs) {
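+		/* VGPRs are allocated in groups of 4 for wave64 (8 for wave32). */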
+ unsigned vgprs = align(conf->num_vgprs, wave_size == 32 ? 8 : 4);
max_simd_waves =
MIN2(max_simd_waves,
- RADV_NUM_PHYSICAL_VGPRS / conf->num_vgprs);
+ device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd / vgprs);
+ }
- /* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
- * that PS can use.
- */
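+	/* Derive the per-SIMD LDS limit from the chip info instead of
+	 * hardcoding 16KB. */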
+	unsigned max_lds_per_simd =
+		device->physical_device->rad_info.lds_size_per_workgroup /
+		device->physical_device->rad_info.num_simd_per_compute_unit;
if (lds_per_wave)
- max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);
+ max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
return max_simd_waves;
}
VkShaderStatisticsInfoAMD statistics = {};
statistics.shaderStageMask = shaderStage;
- statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
+ statistics.numPhysicalVgprs = device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd;
statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
statistics.numAvailableSgprs = statistics.numPhysicalSgprs;