#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
+#include "radv_shader_helper.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"
#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
+#include <llvm-c/Support.h>
#include "sid.h"
#include "gfx9d.h"
static const struct nir_shader_compiler_options nir_options = {
.vertex_id_zero_based = true,
.lower_scmp = true,
+ .lower_flrp16 = true,
.lower_flrp32 = true,
.lower_flrp64 = true,
.lower_device_index_to_zero = true,
.lower_extract_word = true,
.lower_ffma = true,
.lower_fpow = true,
- .vs_inputs_dual_locations = true,
+ .lower_mul_2x32_64 = true,
.max_unroll_iterations = 32
};
sizeof(*module) + pCreateInfo->codeSize, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (module == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
module->nir = NULL;
module->size = pCreateInfo->codeSize;
}
void
-radv_optimize_nir(struct nir_shader *shader)
+radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
+ bool allow_copies)
{
bool progress;
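/* Run the cleanup passes to a fixed point: NIR_PASS sets `progress`
 * whenever a pass changes the shader, so the loop repeats until a full
 * sweep is a no-op (or after a single sweep when compiling
 * conservatively).
 */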
do {
progress = false;
+ NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
+ NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);
+
NIR_PASS_V(shader, nir_lower_vars_to_ssa);
NIR_PASS_V(shader, nir_lower_pack);
+
+ if (allow_copies) {
+ /* Only run this pass in the first call to
+ * radv_optimize_nir. Later calls assume that we've
+ * lowered away any copy_deref instructions and we
+ * don't want to introduce any more.
+ */
+ NIR_PASS(progress, shader, nir_opt_find_array_copies);
+ }
+
+ NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
+ NIR_PASS(progress, shader, nir_opt_dead_write_vars);
+
NIR_PASS_V(shader, nir_lower_alu_to_scalar);
NIR_PASS_V(shader, nir_lower_phis_to_scalar);
NIR_PASS(progress, shader, nir_opt_remove_phis);
NIR_PASS(progress, shader, nir_opt_dce);
}
- NIR_PASS(progress, shader, nir_opt_if);
+ NIR_PASS(progress, shader, nir_opt_if, true);
NIR_PASS(progress, shader, nir_opt_dead_cf);
NIR_PASS(progress, shader, nir_opt_cse);
- NIR_PASS(progress, shader, nir_opt_peephole_select, 8);
+ NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
NIR_PASS(progress, shader, nir_opt_algebraic);
NIR_PASS(progress, shader, nir_opt_constant_folding);
NIR_PASS(progress, shader, nir_opt_undef);
if (shader->options->max_unroll_iterations) {
NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
}
- } while (progress);
+ } while (progress && !optimize_conservatively);
NIR_PASS(progress, shader, nir_opt_shrink_load);
NIR_PASS(progress, shader, nir_opt_move_load_ubo);
struct radv_shader_module *module,
const char *entrypoint_name,
gl_shader_stage stage,
- const VkSpecializationInfo *spec_info)
+ const VkSpecializationInfo *spec_info,
+ const VkPipelineCreateFlags flags)
{
- if (strcmp(entrypoint_name, "main") != 0) {
- radv_finishme("Multiple shaders per module not really supported");
- }
-
nir_shader *nir;
nir_function *entry_point;
if (module->nir) {
* and just use the NIR shader */
nir = module->nir;
nir->options = &nir_options;
- nir_validate_shader(nir);
+ nir_validate_shader(nir, "in internal shader");
assert(exec_list_length(&nir->functions) == 1);
struct exec_node *node = exec_list_get_head(&nir->functions);
}
}
const struct spirv_to_nir_options spirv_options = {
+ .lower_ubo_ssbo_access_to_offsets = true,
.caps = {
+ .descriptor_array_dynamic_indexing = true,
.device_group = true,
.draw_parameters = true,
.float64 = true,
+ .gcn_shader = true,
+ .geometry_streams = true,
.image_read_without_format = true,
.image_write_without_format = true,
- .tessellation = true,
+ .int16 = true,
.int64 = true,
.multiview = true,
+ .physical_storage_buffer_address = true,
+ .runtime_descriptor_array = true,
+ .shader_viewport_index_layer = true,
+ .stencil_export = true,
+ .storage_16bit = true,
+ .storage_image_ms = true,
+ .subgroup_arithmetic = true,
.subgroup_ballot = true,
.subgroup_basic = true,
.subgroup_quad = true,
.subgroup_shuffle = true,
.subgroup_vote = true,
- .variable_pointers = true,
- .gcn_shader = true,
+ .tessellation = true,
+ .transform_feedback = true,
.trinary_minmax = true,
- .shader_viewport_index_layer = true,
- .descriptor_array_dynamic_indexing = true,
- .runtime_descriptor_array = true,
+ .variable_pointers = true,
+ .storage_8bit = true,
+ .int8 = true,
},
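+ /* With lower_ubo_ssbo_access_to_offsets, UBO/SSBO pointers are plain
+ * data: a uvec2 that effectively encodes (buffer index, byte offset).
+ * Physical storage buffer pointers remain raw 64-bit GPU addresses.
+ */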
+ .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
+ .ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
+ .phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
+ .push_const_ptr_type = glsl_uint_type(),
+ .shared_ptr_type = glsl_uint_type(),
};
entry_point = spirv_to_nir(spirv, module->size / 4,
spec_entries, num_spec_entries,
stage, entrypoint_name,
&spirv_options, &nir_options);
nir = entry_point->shader;
assert(nir->info.stage == stage);
- nir_validate_shader(nir);
+ nir_validate_shader(nir, "after spirv_to_nir");
free(spec_entries);
* inline functions. That way they get properly initialized at the top
* of the function and not at the top of its caller.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
+ NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
+ NIR_PASS_V(nir, nir_opt_deref);
/* Pick off the single entrypoint that we want */
foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
*/
NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);
- NIR_PASS_V(nir, nir_remove_dead_variables,
- nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
-
/* Now that we've deleted all but the main function, we can go ahead and
* lower the rest of the constant initializers.
*/
NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
+
+ /* Split member structs. We do this before lower_io_to_temporaries so that
+ * it doesn't lower system values to temporaries by accident.
+ */
+ NIR_PASS_V(nir, nir_split_var_copies);
+ NIR_PASS_V(nir, nir_split_per_member_structs);
+
+ NIR_PASS_V(nir, nir_remove_dead_variables,
+ nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
+
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
}
static const nir_lower_tex_options tex_options = {
.lower_txp = ~0,
+ .lower_tg4_offsets = true,
};
nir_lower_tex(nir, &tex_options);
nir_lower_vars_to_ssa(nir);
- nir_lower_var_copies(nir);
+
+ if (nir->info.stage == MESA_SHADER_VERTEX ||
+ nir->info.stage == MESA_SHADER_GEOMETRY) {
+ NIR_PASS_V(nir, nir_lower_io_to_temporaries,
+ nir_shader_get_entrypoint(nir), true, true);
+ } else if (nir->info.stage == MESA_SHADER_TESS_EVAL ||
+ nir->info.stage == MESA_SHADER_FRAGMENT) {
+ NIR_PASS_V(nir, nir_lower_io_to_temporaries,
+ nir_shader_get_entrypoint(nir), true, false);
+ }
+
+ nir_split_var_copies(nir);
+
nir_lower_global_vars_to_local(nir);
- nir_remove_dead_variables(nir, nir_var_local);
+ nir_remove_dead_variables(nir, nir_var_function_temp);
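/* GCN wavefronts are 64 lanes wide, so subgroups are fixed at size 64
 * and ballots use 64-bit masks.
 */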
nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
.subgroup_size = 64,
.ballot_bit_size = 64,
.lower_vote_eq_to_ballot = 1,
});
- radv_optimize_nir(nir);
+ nir_lower_load_const_to_scalar(nir);
+
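+ /* Applications can opt out of the expensive fixpoint loop with
+ * VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT.
+ */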
+ if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
+ radv_optimize_nir(nir, false, true);
+
+ /* We call nir_lower_var_copies() after the first radv_optimize_nir()
+ * to remove any copies introduced by nir_opt_find_array_copies().
+ */
+ nir_lower_var_copies(nir);
/* Indirect lowering must be called after the radv_optimize_nir() loop
 * has been called at least once. Otherwise indirect lowering can
 * bloat the instruction count of the loop and cause it to be
 * considered too large for unrolling.
 */
ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
- radv_optimize_nir(nir);
+ radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);
return nir;
}
slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
RADEON_DOMAIN_VRAM,
RADEON_FLAG_NO_INTERPROCESS_SHARING |
- device->physical_device->cpdma_prefetch_writes_memory ?
- 0 : RADEON_FLAG_READ_ONLY);
+ (device->physical_device->cpdma_prefetch_writes_memory ?
+ 0 : RADEON_FLAG_READ_ONLY),
+ RADV_BO_PRIORITY_SHADER);
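+ /* The added parentheses matter: `|` binds tighter than `?:`, so the
+ * old expression parsed as (flag | cond) ? 0 : READ_ONLY and always
+ * passed 0 for the flags.
+ */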
slab->ptr = (char*)device->ws->buffer_map(slab->bo);
list_inithead(&slab->shaders);
mtx_destroy(&device->shader_slab_mutex);
}
+/* For the UMR disassembler. */
+#define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
+#define DEBUGGER_NUM_MARKERS 5
+
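+/* Code size plus room for the trailing end-of-code markers (one
+ * dword each). */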
+static unsigned
+radv_get_shader_binary_size(struct ac_shader_binary *binary)
+{
+ return binary->code_size + DEBUGGER_NUM_MARKERS * 4;
+}
+
static void
radv_fill_shader_variant(struct radv_device *device,
struct radv_shader_variant *variant,
gl_shader_stage stage)
{
bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
+ struct radv_shader_info *info = &variant->info.info;
unsigned vgpr_comp_cnt = 0;
- if (scratch_enabled && !device->llvm_supports_spill)
- radv_finishme("shader scratch support only available with LLVM 4.0");
-
- variant->code_size = binary->code_size;
+ variant->code_size = radv_get_shader_binary_size(binary);
variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
- S_00B12C_SCRATCH_EN(scratch_enabled);
-
- variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
+ S_00B12C_USER_SGPR_MSB(variant->info.num_user_sgprs >> 5) |
+ S_00B12C_SCRATCH_EN(scratch_enabled) |
+ S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
+ S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
+ S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
+ S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
+ S_00B12C_SO_EN(!!info->so.num_outputs);
+
+ variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
S_00B848_DX10_CLAMP(1) |
S_00B848_FLOAT_MODE(variant->config.float_mode);
variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
break;
case MESA_SHADER_TESS_CTRL:
- if (device->physical_device->rad_info.chip_class >= GFX9)
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
- else
+ } else {
variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
+ }
break;
case MESA_SHADER_VERTEX:
case MESA_SHADER_GEOMETRY:
break;
case MESA_SHADER_FRAGMENT:
break;
- case MESA_SHADER_COMPUTE: {
- struct radv_shader_info *info = &variant->info.info;
+ case MESA_SHADER_COMPUTE:
variant->rsrc2 |=
S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
S_00B84C_LDS_SIZE(variant->config.lds_size);
break;
- }
default:
unreachable("unsupported shader type");
break;
if (device->physical_device->rad_info.chip_class >= GFX9 &&
stage == MESA_SHADER_GEOMETRY) {
- struct radv_shader_info *info = &variant->info.info;
unsigned es_type = variant->info.gs.es_type;
unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
* VGPR[0:4] are always loaded.
*/
- if (info->uses_invocation_id)
+ if (info->uses_invocation_id) {
gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
- else if (info->uses_prim_id)
+ } else if (info->uses_prim_id) {
gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
- else if (variant->info.gs.vertices_in >= 3)
+ } else if (variant->info.gs.vertices_in >= 3) {
gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
- else
+ } else {
gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
+ }
variant->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
variant->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
} else if (device->physical_device->rad_info.chip_class >= GFX9 &&
- stage == MESA_SHADER_TESS_CTRL)
+ stage == MESA_SHADER_TESS_CTRL) {
variant->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
- else
+ } else {
variant->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
+ }
void *ptr = radv_alloc_shader_memory(device, variant);
memcpy(ptr, binary->code, binary->code_size);
+
+ /* Add end-of-code markers for the UMR disassembler. */
+ uint32_t *ptr32 = (uint32_t *)ptr + binary->code_size / 4;
+ for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
+ ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
+}
+
+static void radv_init_llvm_target(void)
+{
+ LLVMInitializeAMDGPUTargetInfo();
+ LLVMInitializeAMDGPUTarget();
+ LLVMInitializeAMDGPUTargetMC();
+ LLVMInitializeAMDGPUAsmPrinter();
+
+ /* For inline assembly. */
+ LLVMInitializeAMDGPUAsmParser();
+
+ /* Workaround for bug in llvm 4.0 that causes image intrinsics
+ * to disappear.
+ * https://reviews.llvm.org/D26348
+ *
+ * Workaround for bug in llvm that causes the GPU to hang in presence
+ * of nested loops because there is an exec mask issue. The proper
+ * solution is to fix LLVM but this might require a bunch of work.
+ * https://bugs.llvm.org/show_bug.cgi?id=37744
+ *
+ * "mesa" is the prefix for error messages.
+ */
+ if (HAVE_LLVM >= 0x0800) {
+ const char *argv[2] = { "mesa", "-simplifycfg-sink-common=false" };
+ LLVMParseCommandLineOptions(2, argv, NULL);
+ } else {
+ const char *argv[3] = { "mesa", "-simplifycfg-sink-common=false",
+ "-amdgpu-skip-threshold=1" };
+ LLVMParseCommandLineOptions(3, argv, NULL);
+ }
+}
+
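+/* LLVM target initialization mutates global state and is not
+ * thread-safe, so every compile funnels through call_once().
+ */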
+static once_flag radv_init_llvm_target_once_flag = ONCE_FLAG_INIT;
+
+static void radv_init_llvm_once(void)
+{
+ call_once(&radv_init_llvm_target_once_flag, radv_init_llvm_target);
}
static struct radv_shader_variant *
enum ac_target_machine_options tm_options = 0;
struct radv_shader_variant *variant;
struct ac_shader_binary binary;
- LLVMTargetMachineRef tm;
-
+ struct ac_llvm_compiler ac_llvm;
+ bool thread_compiler;
variant = calloc(1, sizeof(struct radv_shader_variant));
if (!variant)
return NULL;
options->family = chip_family;
options->chip_class = device->physical_device->rad_info.chip_class;
- options->dump_shader = radv_can_dump_shader(device, module);
+ options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
options->dump_preoptir = options->dump_shader &&
device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
options->record_llvm_ir = device->keep_shader_info;
+ options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
+ options->address32_hi = device->physical_device->rad_info.address32_hi;
if (options->supports_spill)
tm_options |= AC_TM_SUPPORTS_SPILL;
if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
tm_options |= AC_TM_SISCHED;
- tm = ac_create_target_machine(chip_family, tm_options, NULL);
-
+ if (options->check_ir)
+ tm_options |= AC_TM_CHECK_IR;
+
+ thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
+ radv_init_llvm_once();
+ radv_init_llvm_compiler(&ac_llvm,
+ thread_compiler,
+ chip_family, tm_options);
if (gs_copy_shader) {
assert(shader_count == 1);
- radv_compile_gs_copy_shader(tm, *shaders, &binary,
+ radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
&variant->config, &variant->info,
options);
} else {
- radv_compile_nir_shader(tm, &binary, &variant->config,
+ radv_compile_nir_shader(&ac_llvm, &binary, &variant->config,
&variant->info, shaders, shader_count,
options);
}
- LLVMDisposeTargetMachine(tm);
+ radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
radv_fill_shader_variant(device, variant, &binary, stage);
options.key = *key;
options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
- options.supports_spill = device->llvm_supports_spill;
+ options.supports_spill = true;
return shader_variant_create(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage,
&options, false, code_out, code_size_out);
gl_shader_stage stage,
struct _mesa_string_buffer *buf)
{
- unsigned lds_increment = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
+ enum chip_class chip_class = device->physical_device->rad_info.chip_class;
+ unsigned lds_increment = chip_class >= CIK ? 512 : 256;
struct ac_shader_config *conf;
unsigned max_simd_waves;
unsigned lds_per_wave = 0;
lds_per_wave = conf->lds_size * lds_increment +
align(variant->info.fs.num_interp * 48,
lds_increment);
+ } else if (stage == MESA_SHADER_COMPUTE) {
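+ /* LDS is allocated per workgroup, so charge each of the
+ * workgroup's 64-lane waves an equal share.
+ */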
+ unsigned max_workgroup_size =
+ radv_nir_get_max_workgroup_size(chip_class, variant->nir);
+ lds_per_wave = (conf->lds_size * lds_increment) /
+ DIV_ROUND_UP(max_workgroup_size, 64);
}
if (conf->num_sgprs)
max_simd_waves =
MIN2(max_simd_waves,
- radv_get_num_physical_sgprs(device->physical_device) / conf->num_sgprs);
+ ac_get_num_physical_sgprs(chip_class) / conf->num_sgprs);
if (conf->num_vgprs)
max_simd_waves =
/* Spec doesn't indicate what to do if the stage is invalid, so just
* return no info for this. */
if (!variant)
- return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
switch (infoType) {
case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
VkShaderStatisticsInfoAMD statistics = {};
statistics.shaderStageMask = shaderStage;
statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
- statistics.numPhysicalSgprs = radv_get_num_physical_sgprs(device->physical_device);
+ statistics.numPhysicalSgprs = ac_get_num_physical_sgprs(device->physical_device->rad_info.chip_class);
statistics.numAvailableSgprs = statistics.numPhysicalSgprs;
if (stage == MESA_SHADER_COMPUTE) {
unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];
statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
- ceil(workgroup_size / statistics.numPhysicalVgprs);
+ ceil((double)workgroup_size / statistics.numPhysicalVgprs);
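+ /* The double cast makes ceil() meaningful: with integer
+ * division the quotient truncated to 0 for workgroups smaller
+ * than the VGPR count, so ceil() returned 0 and the outer
+ * division divided by zero.
+ */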
statistics.computeWorkGroupSize[0] = local_size[0];
statistics.computeWorkGroupSize[1] = local_size[1];
buf = _mesa_string_buffer_create(NULL, 1024);
_mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(variant, stage));
+ _mesa_string_buffer_printf(buf, "%s\n\n", variant->llvm_ir_string);
_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
generate_shader_stats(device, variant, stage, buf);