#include <llvm-c/Support.h>
#include "sid.h"
-#include "gfx9d.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
static const struct nir_shader_compiler_options nir_options = {
.vertex_id_zero_based = true,
.lower_scmp = true,
+ .lower_flrp16 = true,
.lower_flrp32 = true,
.lower_flrp64 = true,
.lower_device_index_to_zero = true,
.lower_fsat = true,
.lower_fdiv = true,
+ .lower_bitfield_insert_to_bitfield_select = true,
+ .lower_bitfield_extract = true,
.lower_sub = true,
.lower_pack_snorm_2x16 = true,
.lower_pack_snorm_4x8 = true,
.lower_extract_word = true,
.lower_ffma = true,
.lower_fpow = true,
+ .lower_mul_2x32_64 = true,
+ .lower_rotate = true,
.max_unroll_iterations = 32
};
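/* A minimal standalone sketch of what the new .lower_rotate flag requests:
 * NIR's algebraic pass expands rotate ALU ops into shift/or sequences,
 * roughly equivalent to the helper below (the helper name is hypothetical;
 * the shift-amount masking is an assumption that keeps the C well defined
 * when n is a multiple of 32).
 */
#include <stdint.h>

static uint32_t rotl32_lowered(uint32_t x, uint32_t n)
{
	n &= 31;                              /* avoid shifting by 32 */
	return n ? (x << n) | (x >> (32 - n)) : x;
}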
bool allow_copies)
{
bool progress;
+ unsigned lower_flrp =
+ (shader->options->lower_flrp16 ? 16 : 0) |
+ (shader->options->lower_flrp32 ? 32 : 0) |
+ (shader->options->lower_flrp64 ? 64 : 0);
do {
progress = false;
- NIR_PASS(progress, shader, nir_split_array_vars, nir_var_local);
- NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_local);
+ NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
+ NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);
NIR_PASS_V(shader, nir_lower_vars_to_ssa);
NIR_PASS_V(shader, nir_lower_pack);
NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
NIR_PASS(progress, shader, nir_opt_dead_write_vars);
- NIR_PASS_V(shader, nir_lower_alu_to_scalar);
+ NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL);
NIR_PASS_V(shader, nir_lower_phis_to_scalar);
NIR_PASS(progress, shader, nir_copy_prop);
NIR_PASS(progress, shader, nir_opt_remove_phis);
NIR_PASS(progress, shader, nir_opt_dce);
}
- NIR_PASS(progress, shader, nir_opt_if);
+ NIR_PASS(progress, shader, nir_opt_if, true);
NIR_PASS(progress, shader, nir_opt_dead_cf);
NIR_PASS(progress, shader, nir_opt_cse);
NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
- NIR_PASS(progress, shader, nir_opt_algebraic);
NIR_PASS(progress, shader, nir_opt_constant_folding);
+ NIR_PASS(progress, shader, nir_opt_algebraic);
+
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+ NIR_PASS(lower_flrp_progress,
+ shader,
+ nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ shader->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, shader,
+ nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
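+ /* A minimal standalone sketch of the lowering nir_lower_flrp performs,
+  * assuming the identity flrp(a, b, t) = a * (1 - t) + b * t; which
+  * expansion is picked depends on the always_precise flag passed above
+  * and on whether the backend has ffma (helper names are hypothetical).
+  *
+  *   static float flrp_fast(float a, float b, float t)
+  *   {
+  *           return a + t * (b - a);        // one multiply, exact only at t == 0
+  *   }
+  *
+  *   static float flrp_precise(float a, float b, float t)
+  *   {
+  *           return a * (1.0f - t) + b * t; // exact at both t == 0 and t == 1
+  *   }
+  */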
+
NIR_PASS(progress, shader, nir_opt_undef);
NIR_PASS(progress, shader, nir_opt_conditional_discard);
if (shader->options->max_unroll_iterations) {
const char *entrypoint_name,
gl_shader_stage stage,
const VkSpecializationInfo *spec_info,
- const VkPipelineCreateFlags flags)
+ const VkPipelineCreateFlags flags,
+ const struct radv_pipeline_layout *layout)
{
nir_shader *nir;
- nir_function *entry_point;
if (module->nir) {
/* Some things such as our meta clear/blit code will give us a NIR
* shader directly. In that case, we just ignore the SPIR-V entirely
nir_validate_shader(nir, "in internal shader");
assert(exec_list_length(&nir->functions) == 1);
- struct exec_node *node = exec_list_get_head(&nir->functions);
- entry_point = exec_node_data(nir_function, node, node);
} else {
uint32_t *spirv = (uint32_t *) module->data;
assert(module->size % 4 == 0);
const struct spirv_to_nir_options spirv_options = {
.lower_ubo_ssbo_access_to_offsets = true,
.caps = {
+ .amd_gcn_shader = true,
+ .amd_shader_ballot = device->instance->perftest_flags & RADV_PERFTEST_SHADER_BALLOT,
+ .amd_trinary_minmax = true,
+ .derivative_group = true,
.descriptor_array_dynamic_indexing = true,
+ .descriptor_array_non_uniform_indexing = true,
+ .descriptor_indexing = true,
.device_group = true,
.draw_parameters = true,
+ .float16 = true,
.float64 = true,
- .gcn_shader = true,
.geometry_streams = true,
.image_read_without_format = true,
.image_write_without_format = true,
+ .int8 = true,
.int16 = true,
.int64 = true,
+ .int64_atomics = true,
.multiview = true,
+ .physical_storage_buffer_address = true,
.runtime_descriptor_array = true,
.shader_viewport_index_layer = true,
.stencil_export = true,
+ .storage_8bit = true,
.storage_16bit = true,
.storage_image_ms = true,
.subgroup_arithmetic = true,
.subgroup_vote = true,
.tessellation = true,
.transform_feedback = true,
- .trinary_minmax = true,
.variable_pointers = true,
},
- .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
- .ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
- .push_const_ptr_type = glsl_uint_type(),
- .shared_ptr_type = glsl_uint_type(),
+ .ubo_addr_format = nir_address_format_32bit_index_offset,
+ .ssbo_addr_format = nir_address_format_32bit_index_offset,
+ .phys_ssbo_addr_format = nir_address_format_64bit_global,
+ .push_const_addr_format = nir_address_format_logical,
+ .shared_addr_format = nir_address_format_32bit_offset,
};
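/* A minimal sketch of the pointer layouts the new address formats select,
 * assuming the representations documented for nir_address_format in nir.h
 * (the type names are hypothetical). Push constants stay
 * nir_address_format_logical, i.e. their derefs are never lowered to
 * explicit address arithmetic.
 */
struct index_offset_ptr {    /* nir_address_format_32bit_index_offset */
	uint32_t index;      /* descriptor (binding-table) index */
	uint32_t offset;     /* byte offset within that buffer */
};
typedef uint64_t global_ptr; /* nir_address_format_64bit_global: raw GPU VA */
typedef uint32_t shared_ptr; /* nir_address_format_32bit_offset: LDS offset */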
- entry_point = spirv_to_nir(spirv, module->size / 4,
- spec_entries, num_spec_entries,
- stage, entrypoint_name,
- &spirv_options, &nir_options);
- nir = entry_point->shader;
+ nir = spirv_to_nir(spirv, module->size / 4,
+ spec_entries, num_spec_entries,
+ stage, entrypoint_name,
+ &spirv_options, &nir_options);
assert(nir->info.stage == stage);
nir_validate_shader(nir, "after spirv_to_nir");
* inline functions. That way they get properly initialized at the top
* of the function and not at the top of its caller.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
+ NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
NIR_PASS_V(nir, nir_opt_deref);
/* Pick off the single entrypoint that we want */
foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
- if (func != entry_point)
+ if (func->is_entrypoint)
+ func->name = ralloc_strdup(func, "main");
+ else
exec_node_remove(&func->node);
}
assert(exec_list_length(&nir->functions) == 1);
- entry_point->name = ralloc_strdup(entry_point, "main");
/* Make sure we lower constant initializers on output variables so that
* nir_remove_dead_variables below sees the corresponding stores
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
+ NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
}
/* Vulkan uses the separate-shader linking model */
nir->info.separate_shader = true;
- nir_shader_gather_info(nir, entry_point->impl);
+ nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
static const nir_lower_tex_options tex_options = {
.lower_txp = ~0,
+ .lower_tg4_offsets = true,
};
nir_lower_tex(nir, &tex_options);
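/* A minimal standalone sketch of the projector lowering that
 * .lower_txp = ~0 requests for every sampler dimension: the coordinates
 * are divided through by the projector and a plain tex is emitted
 * (the helper name is hypothetical).
 */
static void lower_txp_2d(float coord[3]) /* (s, t, q) */
{
	coord[0] /= coord[2];
	coord[1] /= coord[2];
	/* q is dropped; the instruction becomes a non-projective tex */
}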
nir_split_var_copies(nir);
nir_lower_global_vars_to_local(nir);
- nir_remove_dead_variables(nir, nir_var_local);
+ nir_remove_dead_variables(nir, nir_var_function_temp);
nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
.subgroup_size = 64,
.ballot_bit_size = 64,
RADEON_DOMAIN_VRAM,
RADEON_FLAG_NO_INTERPROCESS_SHARING |
(device->physical_device->cpdma_prefetch_writes_memory ?
- 0 : RADEON_FLAG_READ_ONLY));
+ 0 : RADEON_FLAG_READ_ONLY),
+ RADV_BO_PRIORITY_SHADER);
slab->ptr = (char*)device->ws->buffer_map(slab->bo);
list_inithead(&slab->shaders);
static void
radv_fill_shader_variant(struct radv_device *device,
struct radv_shader_variant *variant,
+ struct radv_nir_compiler_options *options,
struct ac_shader_binary *binary,
gl_shader_stage stage)
{
switch (stage) {
case MESA_SHADER_TESS_EVAL:
- vgpr_comp_cnt = 3;
+ if (options->key.tes.as_es) {
+ assert(device->physical_device->rad_info.chip_class <= GFX8);
+ vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
+ } else {
+ bool enable_prim_id = options->key.tes.export_prim_id || info->uses_prim_id;
+ vgpr_comp_cnt = enable_prim_id ? 3 : 2;
+ }
variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
break;
case MESA_SHADER_TESS_CTRL:
if (device->physical_device->rad_info.chip_class >= GFX9) {
- vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
+ /* We need at least 2 components for LS.
+ * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
+ * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
+ */
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
} else {
variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
}
break;
case MESA_SHADER_VERTEX:
- case MESA_SHADER_GEOMETRY:
- vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
+ if (variant->info.vs.as_ls) {
+ assert(device->physical_device->rad_info.chip_class <= GFX8);
+ /* We need at least 2 components for LS.
+ * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
+ * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
+ */
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
+ } else if (variant->info.vs.as_es) {
+ assert(device->physical_device->rad_info.chip_class <= GFX8);
+ /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
+ } else {
+ /* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
+ * If PrimID is disabled, InstanceID / StepRate1 is loaded instead.
+ * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
+ */
+ if (options->key.vs.export_prim_id) {
+ vgpr_comp_cnt = 2;
+ } else if (info->vs.needs_instance_id) {
+ vgpr_comp_cnt = 1;
+ } else {
+ vgpr_comp_cnt = 0;
+ }
+ }
break;
case MESA_SHADER_FRAGMENT:
+ case MESA_SHADER_GEOMETRY:
break;
case MESA_SHADER_COMPUTE:
variant->rsrc2 |=
unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
if (es_type == MESA_SHADER_VERTEX) {
- es_vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
+ /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
+ es_vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
} else if (es_type == MESA_SHADER_TESS_EVAL) {
- es_vgpr_comp_cnt = 3;
+ es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
} else {
unreachable("invalid shader ES type");
}
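/* VGPR_COMP_CNT tells the hardware how many fixed-function input VGPRs to
 * load: a value of N enables VGPR0..VGPRN. A standalone restatement of the
 * vertex-shader selection above, as a sketch (the helper name is
 * hypothetical):
 */
#include <stdbool.h>

static unsigned vs_vgpr_comp_cnt(bool as_ls, bool as_es,
                                 bool needs_instance_id, bool export_prim_id)
{
	if (as_ls)
		return needs_instance_id ? 2 : 1; /* LS needs at least 2 */
	if (as_es)
		return needs_instance_id ? 1 : 0;
	if (export_prim_id)
		return 2;                         /* PrimID sits in VGPR2 */
	return needs_instance_id ? 1 : 0;
}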
tm_options |= AC_TM_SISCHED;
if (options->check_ir)
tm_options |= AC_TM_CHECK_IR;
+ if (device->instance->debug_flags & RADV_DEBUG_NO_LOAD_STORE_OPT)
+ tm_options |= AC_TM_NO_LOAD_STORE_OPT;
thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
radv_init_llvm_once();
radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
- radv_fill_shader_variant(device, variant, &binary, stage);
+ radv_fill_shader_variant(device, variant, options, &binary, stage);
if (code_out) {
*code_out = binary.code;
gl_shader_stage stage,
struct _mesa_string_buffer *buf)
{
- unsigned lds_increment = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
+ enum chip_class chip_class = device->physical_device->rad_info.chip_class;
+ unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
struct ac_shader_config *conf;
unsigned max_simd_waves;
unsigned lds_per_wave = 0;
lds_per_wave = conf->lds_size * lds_increment +
align(variant->info.fs.num_interp * 48,
lds_increment);
+ } else if (stage == MESA_SHADER_COMPUTE) {
+ unsigned max_workgroup_size =
+ radv_nir_get_max_workgroup_size(chip_class, variant->nir);
+ lds_per_wave = (conf->lds_size * lds_increment) /
+ DIV_ROUND_UP(max_workgroup_size, 64);
}
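/* A worked example of the compute occupancy math above, assuming a GFX7+
 * part (lds_increment = 512) and a hypothetical helper: a shader with
 * conf->lds_size = 4 and a 256-thread workgroup runs
 * DIV_ROUND_UP(256, 64) = 4 waves per group, so each wave is charged
 * (4 * 512) / 4 = 512 bytes of LDS.
 */
static unsigned compute_lds_per_wave(unsigned lds_size_blocks,
                                     unsigned lds_increment,
                                     unsigned max_workgroup_size)
{
	unsigned waves_per_group = (max_workgroup_size + 63) / 64;
	return (lds_size_blocks * lds_increment) / waves_per_group;
}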
if (conf->num_sgprs)
max_simd_waves =
MIN2(max_simd_waves,
- radv_get_num_physical_sgprs(device->physical_device) / conf->num_sgprs);
+ ac_get_num_physical_sgprs(chip_class) / conf->num_sgprs);
if (conf->num_vgprs)
max_simd_waves =
if (!pInfo) {
*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
} else {
- unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
+ unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
struct ac_shader_config *conf = &variant->config;
VkShaderStatisticsInfoAMD statistics = {};
statistics.shaderStageMask = shaderStage;
statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
- statistics.numPhysicalSgprs = radv_get_num_physical_sgprs(device->physical_device);
+ statistics.numPhysicalSgprs = ac_get_num_physical_sgprs(device->physical_device->rad_info.chip_class);
statistics.numAvailableSgprs = statistics.numPhysicalSgprs;
if (stage == MESA_SHADER_COMPUTE) {