.use_interpolated_input_intrinsics = true,
};
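+/* Decide whether a shader's disassembly should be dumped: requires
+ * RADV_DEBUG=shaders and filters out meta shaders (which are built directly
+ * from NIR, so module->nir is non-NULL); the GS copy shader is always
+ * eligible.
+ */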
+bool
+radv_can_dump_shader(struct radv_device *device,
+ struct radv_shader_module *module,
+ bool is_gs_copy_shader)
+{
+ if (!(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS))
+ return false;
+
+ /* Only dump non-meta shaders, useful for debugging purposes. */
+ return (module && !module->nir) || is_gs_copy_shader;
+}
+
+bool
+radv_can_dump_shader_stats(struct radv_device *device,
+ struct radv_shader_module *module)
+{
+ /* Only dump non-meta shader stats. */
+ return device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS &&
+ module && !module->nir;
+}
+
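+/* Map a varying slot to a compact per-stage index used for I/O layout.
+ * Tess factors and per-patch varyings get their own numbering because they
+ * live in a separate patch-output area; per-vertex slots share a second
+ * numbering starting at POS.
+ */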
+unsigned shader_io_get_unique_index(gl_varying_slot slot)
+{
+ /* handle patch indices separately */
+ if (slot == VARYING_SLOT_TESS_LEVEL_OUTER)
+ return 0;
+ if (slot == VARYING_SLOT_TESS_LEVEL_INNER)
+ return 1;
+ if (slot >= VARYING_SLOT_PATCH0 && slot <= VARYING_SLOT_TESS_MAX)
+ return 2 + (slot - VARYING_SLOT_PATCH0);
+ if (slot == VARYING_SLOT_POS)
+ return 0;
+ if (slot == VARYING_SLOT_PSIZ)
+ return 1;
+ if (slot == VARYING_SLOT_CLIP_DIST0)
+ return 2;
+ if (slot == VARYING_SLOT_CLIP_DIST1)
+ return 3;
+ /* 3 is reserved for clip dist as well */
+ if (slot >= VARYING_SLOT_VAR0 && slot <= VARYING_SLOT_VAR31)
+ return 4 + (slot - VARYING_SLOT_VAR0);
+ unreachable("illegal slot in get unique index\n");
+}
+
VkResult radv_CreateShaderModule(
VkDevice _device,
const VkShaderModuleCreateInfo* pCreateInfo,
NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
NIR_PASS(progress, shader, nir_opt_dead_write_vars);
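+ /* Drop function-temp variables that no longer have any users, e.g.
+  * after the dead-write elimination above.
+  */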
+ NIR_PASS(progress, shader, nir_remove_dead_variables,
+ nir_var_function_temp);
- NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL);
+ NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS_V(shader, nir_lower_phis_to_scalar);
NIR_PASS(progress, shader, nir_copy_prop);
}
NIR_PASS(progress, shader, nir_opt_undef);
- NIR_PASS(progress, shader, nir_opt_conditional_discard);
if (shader->options->max_unroll_iterations) {
NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
}
} while (progress && !optimize_conservatively);
+ NIR_PASS(progress, shader, nir_opt_conditional_discard);
NIR_PASS(progress, shader, nir_opt_shrink_load);
- NIR_PASS(progress, shader, nir_opt_move_load_ubo);
+ NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
}
nir_shader *
.lower_ubo_ssbo_access_to_offsets = true,
.caps = {
.amd_gcn_shader = true,
- .amd_shader_ballot = device->instance->perftest_flags & RADV_PERFTEST_SHADER_BALLOT,
+ .amd_shader_ballot = device->physical_device->use_shader_ballot,
.amd_trinary_minmax = true,
.derivative_group = true,
.descriptor_array_dynamic_indexing = true,
.int64_atomics = true,
.multiview = true,
.physical_storage_buffer_address = true,
+ .post_depth_coverage = true,
.runtime_descriptor_array = true,
.shader_viewport_index_layer = true,
.stencil_export = true,
NIR_PASS_V(nir, nir_remove_dead_variables,
nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
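+ /* Mark the instructions that feed invariant outputs so later
+  * optimizations keep those computations exact.
+  */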
+ NIR_PASS_V(nir, nir_propagate_invariant);
+
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
*/
nir_lower_var_copies(nir);
+ /* Lower large variables that are always constant to load_constant
+ * intrinsics, which get turned into PC-relative loads from a data
+ * section next to the shader.
+ */
+ NIR_PASS_V(nir, nir_opt_large_constants,
+ glsl_get_natural_size_align_bytes, 16);
+
/* Indirect lowering must be called after the radv_optimize_nir() loop
* has been called at least once. Otherwise indirect lowering can
* bloat the instruction count of the loop and cause it to be
return nir;
}
-static void mark_16bit_fs_input(struct radv_shader_variant_info *shader_info,
- const struct glsl_type *type,
- int location)
-{
- if (glsl_type_is_scalar(type) || glsl_type_is_vector(type) || glsl_type_is_matrix(type)) {
- unsigned attrib_count = glsl_count_attribute_slots(type, false);
- if (glsl_type_is_16bit(type)) {
- shader_info->fs.float16_shaded_mask |= ((1ull << attrib_count) - 1) << location;
- }
- } else if (glsl_type_is_array(type)) {
- unsigned stride = glsl_count_attribute_slots(glsl_get_array_element(type), false);
- for (unsigned i = 0; i < glsl_get_length(type); ++i) {
- mark_16bit_fs_input(shader_info, glsl_get_array_element(type), location + i * stride);
- }
- } else {
- assert(glsl_type_is_struct_or_ifc(type));
- for (unsigned i = 0; i < glsl_get_length(type); i++) {
- mark_16bit_fs_input(shader_info, glsl_get_struct_field(type, i), location);
- location += glsl_count_attribute_slots(glsl_get_struct_field(type, i), false);
- }
- }
-}
-
-static void
-handle_fs_input_decl(struct radv_shader_variant_info *shader_info,
- struct nir_variable *variable)
-{
- unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
-
- if (variable->data.compact) {
- unsigned component_count = variable->data.location_frac +
- glsl_get_length(variable->type);
- attrib_count = (component_count + 3) / 4;
- } else {
- mark_16bit_fs_input(shader_info, variable->type,
- variable->data.driver_location);
- }
-
- uint64_t mask = ((1ull << attrib_count) - 1);
-
- if (variable->data.interpolation == INTERP_MODE_FLAT)
- shader_info->fs.flat_shaded_mask |= mask << variable->data.driver_location;
-
- if (variable->data.location >= VARYING_SLOT_VAR0)
- shader_info->fs.input_mask |= mask << (variable->data.location - VARYING_SLOT_VAR0);
-}
-
static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
return progress;
}
-/* Gather information needed to setup the vs<->ps linking registers in
- * radv_pipeline_generate_ps_inputs().
- */
-
-static void
-handle_fs_inputs(nir_shader *nir, struct radv_shader_variant_info *shader_info)
-{
- shader_info->fs.num_interp = nir->num_inputs;
-
- nir_foreach_variable(variable, &nir->inputs)
- handle_fs_input_decl(shader_info, variable);
-}
-
-static void
-lower_fs_io(nir_shader *nir, struct radv_shader_variant_info *shader_info)
+void
+radv_lower_fs_io(nir_shader *nir)
{
NIR_PASS_V(nir, lower_view_index);
nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
MESA_SHADER_FRAGMENT);
- handle_fs_inputs(nir, shader_info);
-
NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);
/* This pass needs actual constants */
slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
RADEON_DOMAIN_VRAM,
RADEON_FLAG_NO_INTERPROCESS_SHARING |
- (device->physical_device->cpdma_prefetch_writes_memory ?
+ (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
0 : RADEON_FLAG_READ_ONLY),
RADV_BO_PRIORITY_SHADER);
slab->ptr = (char*)device->ws->buffer_map(slab->bo);
static void radv_postprocess_config(const struct radv_physical_device *pdevice,
const struct ac_shader_config *config_in,
- const struct radv_shader_variant_info *info,
+ const struct radv_shader_info *info,
gl_shader_stage stage,
struct ac_shader_config *config_out)
{
config_out->float_mode |= V_00B028_FP_64_DENORMS;
config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
- S_00B12C_SCRATCH_EN(scratch_enabled);
-
- config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) / 4) |
+ S_00B12C_SCRATCH_EN(scratch_enabled) |
+ S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
+ S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
+ S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
+ S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
+ S_00B12C_SO_EN(!!info->so.num_outputs);
+
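+ /* VGPRs are allocated in granules of 8 for Wave32 and 4 for Wave64. */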
+ config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) /
+ (info->wave_size == 32 ? 8 : 4)) |
S_00B848_DX10_CLAMP(1) |
S_00B848_FLOAT_MODE(config_out->float_mode);
config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(info->num_user_sgprs >> 5);
} else {
config_out->rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
- config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5) |
- S_00B12C_SO_BASE0_EN(!!info->info.so.strides[0]) |
- S_00B12C_SO_BASE1_EN(!!info->info.so.strides[1]) |
- S_00B12C_SO_BASE2_EN(!!info->info.so.strides[2]) |
- S_00B12C_SO_BASE3_EN(!!info->info.so.strides[3]) |
- S_00B12C_SO_EN(!!info->info.so.num_outputs);
+ config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5);
}
switch (stage) {
config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1);
} else if (info->tes.as_es) {
assert(pdevice->rad_info.chip_class <= GFX8);
- vgpr_comp_cnt = info->info.uses_prim_id ? 3 : 2;
+ vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
} else {
- bool enable_prim_id = info->tes.export_prim_id || info->info.uses_prim_id;
+ bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
vgpr_comp_cnt = enable_prim_id ? 3 : 2;
config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
* StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
*/
if (pdevice->rad_info.chip_class >= GFX10) {
- vgpr_comp_cnt = info->info.vs.needs_instance_id ? 3 : 1;
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
} else {
- vgpr_comp_cnt = info->info.vs.needs_instance_id ? 2 : 1;
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
}
} else {
config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
* VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
* StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
*/
- vgpr_comp_cnt = info->info.vs.needs_instance_id ? 2 : 1;
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
} else if (info->vs.as_es) {
assert(pdevice->rad_info.chip_class <= GFX8);
/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
- vgpr_comp_cnt = info->info.vs.needs_instance_id ? 1 : 0;
+ vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
} else {
/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
* If PrimID is disabled. InstanceID / StepRate1 is loaded instead.
* StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
*/
- if (info->vs.export_prim_id) {
+ if (info->vs.needs_instance_id && pdevice->rad_info.chip_class >= GFX10) {
+ vgpr_comp_cnt = 3;
+ } else if (info->vs.export_prim_id) {
vgpr_comp_cnt = 2;
- } else if (info->info.vs.needs_instance_id) {
+ } else if (info->vs.needs_instance_id) {
vgpr_comp_cnt = 1;
} else {
vgpr_comp_cnt = 0;
config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
config_out->rsrc2 |=
- S_00B84C_TGID_X_EN(info->info.cs.uses_block_id[0]) |
- S_00B84C_TGID_Y_EN(info->info.cs.uses_block_id[1]) |
- S_00B84C_TGID_Z_EN(info->info.cs.uses_block_id[2]) |
- S_00B84C_TIDIG_COMP_CNT(info->info.cs.uses_thread_id[2] ? 2 :
- info->info.cs.uses_thread_id[1] ? 1 : 0) |
- S_00B84C_TG_SIZE_EN(info->info.cs.uses_local_invocation_idx) |
+ S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
+ S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
+ S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
+ S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
+ info->cs.uses_thread_id[1] ? 1 : 0) |
+ S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
S_00B84C_LDS_SIZE(config_in->lds_size);
break;
default:
break;
}
- if (pdevice->rad_info.chip_class >= GFX10 &&
+ if (pdevice->rad_info.chip_class >= GFX10 && info->is_ngg &&
(stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_GEOMETRY)) {
unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
gl_shader_stage es_stage = stage;
/* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
if (es_stage == MESA_SHADER_VERTEX) {
- es_vgpr_comp_cnt = info->info.vs.needs_instance_id ? 3 : 0;
+ es_vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 0;
} else if (es_stage == MESA_SHADER_TESS_EVAL) {
- bool enable_prim_id = info->tes.export_prim_id || info->info.uses_prim_id;
+ bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
- }
+ } else
+ unreachable("Unexpected ES shader stage");
bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
- if (info->info.uses_invocation_id || stage == MESA_SHADER_VERTEX) {
+ if (info->uses_invocation_id || stage == MESA_SHADER_VERTEX) {
gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
- } else if (info->info.uses_prim_id) {
+ } else if (info->uses_prim_id) {
gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
} else if (info->gs.vertices_in >= 3 || tes_triangles) {
gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt) |
S_00B228_WGP_MODE(1);
config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
- S_00B22C_LDS_SIZE(config_in->lds_size);
+ S_00B22C_LDS_SIZE(config_in->lds_size) |
+ S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL);
} else if (pdevice->rad_info.chip_class >= GFX9 &&
stage == MESA_SHADER_GEOMETRY) {
unsigned es_type = info->gs.es_type;
if (es_type == MESA_SHADER_VERTEX) {
/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
- es_vgpr_comp_cnt = info->info.vs.needs_instance_id ? 1 : 0;
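+ /* On GFX10, InstanceID lands in the fourth ES input VGPR, so all
+  * four components must be enabled.
+  */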
+ if (info->vs.needs_instance_id) {
+ es_vgpr_comp_cnt = pdevice->rad_info.chip_class >= GFX10 ? 3 : 1;
+ } else {
+ es_vgpr_comp_cnt = 0;
+ }
} else if (es_type == MESA_SHADER_TESS_EVAL) {
- es_vgpr_comp_cnt = info->info.uses_prim_id ? 3 : 2;
+ es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
} else {
unreachable("invalid shader ES type");
}
/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
* VGPR[0:4] are always loaded.
*/
- if (info->info.uses_invocation_id) {
+ if (info->uses_invocation_id) {
gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
- } else if (info->info.uses_prim_id) {
+ } else if (info->uses_prim_id) {
gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
} else if (info->gs.vertices_in >= 3) {
gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
}
}
-static void radv_init_llvm_target()
-{
- LLVMInitializeAMDGPUTargetInfo();
- LLVMInitializeAMDGPUTarget();
- LLVMInitializeAMDGPUTargetMC();
- LLVMInitializeAMDGPUAsmPrinter();
-
- /* For inline assembly. */
- LLVMInitializeAMDGPUAsmParser();
-
- /* Workaround for bug in llvm 4.0 that causes image intrinsics
- * to disappear.
- * https://reviews.llvm.org/D26348
- *
- * Workaround for bug in llvm that causes the GPU to hang in presence
- * of nested loops because there is an exec mask issue. The proper
- * solution is to fix LLVM but this might require a bunch of work.
- * https://bugs.llvm.org/show_bug.cgi?id=37744
- *
- * "mesa" is the prefix for error messages.
- */
- if (HAVE_LLVM >= 0x0800) {
- const char *argv[2] = { "mesa", "-simplifycfg-sink-common=false" };
- LLVMParseCommandLineOptions(2, argv, NULL);
-
- } else {
- const char *argv[3] = { "mesa", "-simplifycfg-sink-common=false",
- "-amdgpu-skip-threshold=1" };
- LLVMParseCommandLineOptions(3, argv, NULL);
- }
-}
-
-static once_flag radv_init_llvm_target_once_flag = ONCE_FLAG_INIT;
-
-static void radv_init_llvm_once(void)
-{
- call_once(&radv_init_llvm_target_once_flag, radv_init_llvm_target);
-}
-
struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
- const struct radv_shader_binary *binary)
+ const struct radv_shader_binary *binary,
+ bool keep_shader_info)
{
struct ac_shader_config config = {0};
struct ac_rtld_binary rtld_binary = {0};
variant->ref_count = 1;
if (binary->type == RADV_BINARY_TYPE_RTLD) {
- struct ac_rtld_symbol lds_symbols[1];
+ struct ac_rtld_symbol lds_symbols[2];
unsigned num_lds_symbols = 0;
const char *elf_data = (const char *)((struct radv_shader_binary_rtld *)binary)->data;
size_t elf_size = ((struct radv_shader_binary_rtld *)binary)->elf_size;
if (device->physical_device->rad_info.chip_class >= GFX9 &&
- binary->stage == MESA_SHADER_GEOMETRY && !binary->is_gs_copy_shader) {
+ (binary->stage == MESA_SHADER_GEOMETRY || binary->info.is_ngg) &&
+ !binary->is_gs_copy_shader) {
/* We add this symbol even on LLVM <= 8 to ensure that
* shader->config.lds_size is set correctly below.
*/
+ /* TODO: For some reason, using the computed ESGS ring
+ * size randomly hangs with CTS. Just use the maximum
+ * possible LDS size for now.
+ */
struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
sym->name = "esgs_ring";
- sym->size = 32 * 1024;
+ sym->size = (32 * 1024) - (binary->info.ngg_info.ngg_emit_size * 4) - 32; /* 32 is NGG scratch */
sym->align = 64 * 1024;
+ }
- /* Make sure to have LDS space for NGG scratch. */
- /* TODO: Compute this correctly somehow? */
- if (binary->variant_info.is_ngg)
- sym->size -= 32;
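+ /* Reserve the LDS area where an NGG GS stores emitted vertex and
+  * primitive data before the export phase.
+  */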
+ if (binary->info.is_ngg &&
+ binary->stage == MESA_SHADER_GEOMETRY) {
+ struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
+ sym->name = "ngg_emit";
+ sym->size = binary->info.ngg_info.ngg_emit_size * 4;
+ sym->align = 4;
}
+
struct ac_rtld_open_info open_info = {
.info = &device->physical_device->rad_info,
.shader_type = binary->stage,
+ .wave_size = binary->info.wave_size,
.num_parts = 1,
.elf_ptrs = &elf_data,
.elf_sizes = &elf_size,
}
variant->code_size = rtld_binary.rx_size;
+ variant->exec_size = rtld_binary.exec_size;
} else {
assert(binary->type == RADV_BINARY_TYPE_LEGACY);
config = ((struct radv_shader_binary_legacy *)binary)->config;
- variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
+ variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
+ variant->exec_size = variant->code_size;
}
- variant->info = binary->variant_info;
- radv_postprocess_config(device->physical_device, &config, &binary->variant_info,
+ variant->info = binary->info;
+ radv_postprocess_config(device->physical_device, &config, &binary->info,
binary->stage, &variant->config);
void *dest_ptr = radv_alloc_shader_memory(device, variant);
return NULL;
}
- const char *disasm_data;
- size_t disasm_size;
- if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
- radv_shader_variant_destroy(device, variant);
- ac_rtld_close(&rtld_binary);
- return NULL;
- }
+ if (keep_shader_info ||
+ (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) {
+ const char *disasm_data;
+ size_t disasm_size;
+ if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
+ radv_shader_variant_destroy(device, variant);
+ ac_rtld_close(&rtld_binary);
+ return NULL;
+ }
- variant->llvm_ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
- variant->disasm_string = malloc(disasm_size + 1);
- memcpy(variant->disasm_string, disasm_data, disasm_size);
- variant->disasm_string[disasm_size] = 0;
+ variant->llvm_ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
+ variant->disasm_string = malloc(disasm_size + 1);
+ memcpy(variant->disasm_string, disasm_data, disasm_size);
+ variant->disasm_string[disasm_size] = 0;
+ }
ac_rtld_close(&rtld_binary);
} else {
return variant;
}
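+/* Print every NIR shader of the pipeline into a single heap-allocated
+ * string, kept around for debugging when shader info is retained.
+ */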
+static char *
+radv_dump_nir_shaders(struct nir_shader * const *shaders,
+ int shader_count)
+{
+ char *data = NULL;
+ char *ret = NULL;
+ size_t size = 0;
+ FILE *f = open_memstream(&data, &size);
+ if (f) {
+ for (int i = 0; i < shader_count; ++i)
+ nir_print_shader(shaders[i], f);
+ fclose(f);
+ }
+
+ ret = malloc(size + 1);
+ if (ret) {
+ memcpy(ret, data, size);
+ ret[size] = 0;
+ }
+ free(data);
+ return ret;
+}
+
static struct radv_shader_variant *
shader_variant_compile(struct radv_device *device,
struct radv_shader_module *module,
struct nir_shader * const *shaders,
int shader_count,
gl_shader_stage stage,
+ struct radv_shader_info *info,
struct radv_nir_compiler_options *options,
bool gs_copy_shader,
+ bool keep_shader_info,
struct radv_shader_binary **binary_out)
{
enum radeon_family chip_family = device->physical_device->rad_info.family;
enum ac_target_machine_options tm_options = 0;
struct ac_llvm_compiler ac_llvm;
struct radv_shader_binary *binary = NULL;
- struct radv_shader_variant_info variant_info = {0};
bool thread_compiler;
- if (shaders[0]->info.stage == MESA_SHADER_FRAGMENT)
- lower_fs_io(shaders[0], &variant_info);
-
options->family = chip_family;
options->chip_class = device->physical_device->rad_info.chip_class;
options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
options->dump_preoptir = options->dump_shader &&
device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
- options->record_llvm_ir = device->keep_shader_info;
+ options->record_llvm_ir = keep_shader_info;
options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
options->address32_hi = device->physical_device->rad_info.address32_hi;
+ options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
+
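+ /* The GS copy shader and legacy (non-NGG) GS always run in Wave64;
+  * the other stages use the wave sizes selected at device creation.
+  */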
+ if ((stage == MESA_SHADER_GEOMETRY && !options->key.vs_common_out.as_ngg) ||
+ gs_copy_shader)
+ options->wave_size = 64;
+ else if (stage == MESA_SHADER_COMPUTE)
+ options->wave_size = device->physical_device->cs_wave_size;
+ else if (stage == MESA_SHADER_FRAGMENT)
+ options->wave_size = device->physical_device->ps_wave_size;
+ else
+ options->wave_size = device->physical_device->ge_wave_size;
if (options->supports_spill)
tm_options |= AC_TM_SUPPORTS_SPILL;
tm_options |= AC_TM_NO_LOAD_STORE_OPT;
thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
- radv_init_llvm_once();
+ ac_init_llvm_once();
radv_init_llvm_compiler(&ac_llvm,
thread_compiler,
- chip_family, tm_options);
+ chip_family, tm_options,
+ options->wave_size);
if (gs_copy_shader) {
assert(shader_count == 1);
radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
- &variant_info, options);
+ info, options);
} else {
- radv_compile_nir_shader(&ac_llvm, &binary, &variant_info,
+ radv_compile_nir_shader(&ac_llvm, &binary, info,
shaders, shader_count, options);
}
- binary->variant_info = variant_info;
+ binary->info = *info;
radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
- struct radv_shader_variant *variant = radv_shader_variant_create(device, binary);
+ struct radv_shader_variant *variant = radv_shader_variant_create(device, binary,
+ keep_shader_info);
if (!variant) {
free(binary);
return NULL;
}
- if (device->keep_shader_info) {
+ if (keep_shader_info) {
+ variant->nir_string = radv_dump_nir_shaders(shaders, shader_count);
if (!gs_copy_shader && !module->nir) {
- variant->nir = *shaders;
variant->spirv = (uint32_t *)module->data;
variant->spirv_size = module->size;
}
int shader_count,
struct radv_pipeline_layout *layout,
const struct radv_shader_variant_key *key,
+ struct radv_shader_info *info,
+ bool keep_shader_info,
struct radv_shader_binary **binary_out)
{
struct radv_nir_compiler_options options = {0};
options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
options.supports_spill = true;
+ options.robust_buffer_access = device->robust_buffer_access;
- return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage,
- &options, false, binary_out);
+ return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage, info,
+ &options, false, keep_shader_info, binary_out);
}
struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
struct nir_shader *shader,
+ struct radv_shader_info *info,
struct radv_shader_binary **binary_out,
+ bool keep_shader_info,
bool multiview)
{
struct radv_nir_compiler_options options = {0};
options.key.has_multiview_view_index = multiview;
return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
- &options, true, binary_out);
+ info, &options, true, keep_shader_info, binary_out);
}
void
list_del(&variant->slab_list);
mtx_unlock(&device->shader_slab_mutex);
- ralloc_free(variant->nir);
+ free(variant->nir_string);
free(variant->disasm_string);
free(variant->llvm_ir_string);
free(variant);
}
const char *
-radv_get_shader_name(struct radv_shader_variant_info *info,
+radv_get_shader_name(struct radv_shader_info *info,
gl_shader_stage stage)
{
switch (stage) {
};
}
-static void
-generate_shader_stats(struct radv_device *device,
- struct radv_shader_variant *variant,
- gl_shader_stage stage,
- struct _mesa_string_buffer *buf)
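+/* Largest workgroup size (in threads) a stage can launch: 128 for HS on
+ * GFX7+ and for GS on GFX9+, 64 on older chips; compute shaders use their
+ * declared local size.
+ */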
+unsigned
+radv_get_max_workgroup_size(enum chip_class chip_class,
+ gl_shader_stage stage,
+ const unsigned *sizes)
+{
+ switch (stage) {
+ case MESA_SHADER_TESS_CTRL:
+ return chip_class >= GFX7 ? 128 : 64;
+ case MESA_SHADER_GEOMETRY:
+ return chip_class >= GFX9 ? 128 : 64;
+ case MESA_SHADER_COMPUTE:
+ break;
+ default:
+ return 0;
+ }
+
+ unsigned max_workgroup_size = sizes[0] * sizes[1] * sizes[2];
+ return max_workgroup_size;
+}
+
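+/* Compute how many waves of this variant can be resident on a single SIMD,
+ * limited by its SGPR, VGPR and LDS usage.
+ */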
+unsigned
+radv_get_max_waves(struct radv_device *device,
+ struct radv_shader_variant *variant,
+ gl_shader_stage stage)
{
enum chip_class chip_class = device->physical_device->rad_info.chip_class;
unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
- struct ac_shader_config *conf;
+ uint8_t wave_size = variant->info.wave_size;
+ struct ac_shader_config *conf = &variant->config;
unsigned max_simd_waves;
unsigned lds_per_wave = 0;
- max_simd_waves = ac_get_max_simd_waves(device->physical_device->rad_info.family);
-
- conf = &variant->config;
+ max_simd_waves = ac_get_max_wave64_per_simd(device->physical_device->rad_info.family);
if (stage == MESA_SHADER_FRAGMENT) {
lds_per_wave = conf->lds_size * lds_increment +
- align(variant->info.fs.num_interp * 48,
+ align(variant->info.ps.num_interp * 48,
lds_increment);
} else if (stage == MESA_SHADER_COMPUTE) {
unsigned max_workgroup_size =
- radv_nir_get_max_workgroup_size(chip_class, variant->nir);
+ radv_get_max_workgroup_size(chip_class, stage, variant->info.cs.block_size);
lds_per_wave = (conf->lds_size * lds_increment) /
- DIV_ROUND_UP(max_workgroup_size, 64);
+ DIV_ROUND_UP(max_workgroup_size, wave_size);
}
if (conf->num_sgprs)
max_simd_waves =
MIN2(max_simd_waves,
- ac_get_num_physical_sgprs(chip_class) / conf->num_sgprs);
+ ac_get_num_physical_sgprs(&device->physical_device->rad_info) /
+ conf->num_sgprs);
if (conf->num_vgprs)
max_simd_waves =
if (lds_per_wave)
max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);
+ return max_simd_waves;
+}
+
+static void
+generate_shader_stats(struct radv_device *device,
+ struct radv_shader_variant *variant,
+ gl_shader_stage stage,
+ struct _mesa_string_buffer *buf)
+{
+ struct ac_shader_config *conf = &variant->config;
+ unsigned max_simd_waves = radv_get_max_waves(device, variant, stage);
+
if (stage == MESA_SHADER_FRAGMENT) {
_mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
"SPI_PS_INPUT_ADDR = 0x%04x\n"
"********************\n\n\n",
conf->num_sgprs, conf->num_vgprs,
conf->spilled_sgprs, conf->spilled_vgprs,
- variant->info.private_mem_vgprs, variant->code_size,
+ variant->info.private_mem_vgprs, variant->exec_size,
conf->lds_size, conf->scratch_bytes_per_wave,
max_simd_waves);
}
VkShaderStatisticsInfoAMD statistics = {};
statistics.shaderStageMask = shaderStage;
statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
- statistics.numPhysicalSgprs = ac_get_num_physical_sgprs(device->physical_device->rad_info.chip_class);
+ statistics.numPhysicalSgprs = ac_get_num_physical_sgprs(&device->physical_device->rad_info);
statistics.numAvailableSgprs = statistics.numPhysicalSgprs;
if (stage == MESA_SHADER_COMPUTE) {
- unsigned *local_size = variant->nir->info.cs.local_size;
+ unsigned *local_size = variant->info.cs.block_size;
unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];
statistics.numAvailableVgprs = statistics.numPhysicalVgprs /