#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
+#include "ac_rtld.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"
.lower_fpow = true,
.lower_mul_2x32_64 = true,
.lower_rotate = true,
- .max_unroll_iterations = 32
+ .max_unroll_iterations = 32,
+ .use_interpolated_input_intrinsics = true,
};
VkResult radv_CreateShaderModule(
.int64_atomics = true,
.multiview = true,
.physical_storage_buffer_address = true,
+ .post_depth_coverage = true,
.runtime_descriptor_array = true,
.shader_viewport_index_layer = true,
.stencil_export = true,
.phys_ssbo_addr_format = nir_address_format_64bit_global,
.push_const_addr_format = nir_address_format_logical,
.shared_addr_format = nir_address_format_32bit_offset,
+ .frag_coord_is_sysval = true,
};
nir = spirv_to_nir(spirv, module->size / 4,
spec_entries, num_spec_entries,
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_split_per_member_structs);
+ if (nir->info.stage == MESA_SHADER_FRAGMENT)
+ NIR_PASS_V(nir, nir_lower_input_attachments, true);
+
NIR_PASS_V(nir, nir_remove_dead_variables,
nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
nir_lower_vars_to_ssa(nir);
if (nir->info.stage == MESA_SHADER_VERTEX ||
- nir->info.stage == MESA_SHADER_GEOMETRY) {
+ nir->info.stage == MESA_SHADER_GEOMETRY ||
+ nir->info.stage == MESA_SHADER_FRAGMENT) {
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
nir_shader_get_entrypoint(nir), true, true);
- } else if (nir->info.stage == MESA_SHADER_TESS_EVAL||
- nir->info.stage == MESA_SHADER_FRAGMENT) {
+ } else if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
nir_shader_get_entrypoint(nir), true, false);
}
return nir;
}
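+/* Recursively walk a fragment shader input type and mark every attribute slot
+ * that holds 16-bit data in fs.float16_shaded_mask; arrays and structs are
+ * walked element by element.
+ */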
+static void mark_16bit_fs_input(struct radv_shader_variant_info *shader_info,
+ const struct glsl_type *type,
+ int location)
+{
+ if (glsl_type_is_scalar(type) || glsl_type_is_vector(type) || glsl_type_is_matrix(type)) {
+ unsigned attrib_count = glsl_count_attribute_slots(type, false);
+ if (glsl_type_is_16bit(type)) {
+ shader_info->fs.float16_shaded_mask |= ((1ull << attrib_count) - 1) << location;
+ }
+ } else if (glsl_type_is_array(type)) {
+ unsigned stride = glsl_count_attribute_slots(glsl_get_array_element(type), false);
+ for (unsigned i = 0; i < glsl_get_length(type); ++i) {
+ mark_16bit_fs_input(shader_info, glsl_get_array_element(type), location + i * stride);
+ }
+ } else {
+ assert(glsl_type_is_struct_or_ifc(type));
+ for (unsigned i = 0; i < glsl_get_length(type); i++) {
+ mark_16bit_fs_input(shader_info, glsl_get_struct_field(type, i), location);
+ location += glsl_count_attribute_slots(glsl_get_struct_field(type, i), false);
+ }
+ }
+}
+
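+/* Record per-input information for a fragment shader input variable: the
+ * 16-bit and flat-shaded slot masks and, for generic varyings
+ * (VARYING_SLOT_VAR0 and up), the PS input mask.
+ */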
+static void
+handle_fs_input_decl(struct radv_shader_variant_info *shader_info,
+ struct nir_variable *variable)
+{
+ unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
+
+ if (variable->data.compact) {
+ unsigned component_count = variable->data.location_frac +
+ glsl_get_length(variable->type);
+ attrib_count = (component_count + 3) / 4;
+ } else {
+ mark_16bit_fs_input(shader_info, variable->type,
+ variable->data.driver_location);
+ }
+
+ uint64_t mask = ((1ull << attrib_count) - 1);
+
+ if (variable->data.interpolation == INTERP_MODE_FLAT)
+ shader_info->fs.flat_shaded_mask |= mask << variable->data.driver_location;
+
+ if (variable->data.location >= VARYING_SLOT_VAR0)
+ shader_info->fs.input_mask |= mask << (variable->data.location - VARYING_SLOT_VAR0);
+}
+
+static int
+type_size_vec4(const struct glsl_type *type, bool bindless)
+{
+ return glsl_count_attribute_slots(type, false);
+}
+
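+/* Return the shader's VARYING_SLOT_LAYER input, creating a flat-interpolated
+ * "layer id" variable if the shader doesn't already read gl_Layer.
+ */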
+static nir_variable *
+find_layer_in_var(nir_shader *nir)
+{
+ nir_foreach_variable(var, &nir->inputs) {
+ if (var->data.location == VARYING_SLOT_LAYER) {
+ return var;
+ }
+ }
+
+ nir_variable *var =
+ nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
+ var->data.location = VARYING_SLOT_LAYER;
+ var->data.interpolation = INTERP_MODE_FLAT;
+ return var;
+}
+
+/* We use layered rendering to implement multiview, which means we need to map
+ * view_index to gl_Layer. The attachment lowering also needs to know the
+ * layer so that it can sample from the correct layer. The code generates a
+ * load from the layer_id sysval, but since we don't have a way to get at this
+ * information from the fragment shader, we also need to lower this to the
+ * gl_Layer varying. This pass lowers both to a varying load from the LAYER
+ * slot, before lowering io, so that nir_assign_io_var_locations() will give
+ * the LAYER varying the correct driver_location.
+ */
+
+static bool
+lower_view_index(nir_shader *nir)
+{
+ bool progress = false;
+ nir_function_impl *entry = nir_shader_get_entrypoint(nir);
+ nir_builder b;
+ nir_builder_init(&b, entry);
+
+ nir_variable *layer = NULL;
+ nir_foreach_block(block, entry) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
+ if (load->intrinsic != nir_intrinsic_load_view_index &&
+ load->intrinsic != nir_intrinsic_load_layer_id)
+ continue;
+
+ if (!layer)
+ layer = find_layer_in_var(nir);
+
+ b.cursor = nir_before_instr(instr);
+ nir_ssa_def *def = nir_load_var(&b, layer);
+ nir_ssa_def_rewrite_uses(&load->dest.ssa,
+ nir_src_for_ssa(def));
+
+ nir_instr_remove(instr);
+ progress = true;
+ }
+ }
+
+ return progress;
+}
+
+/* Gather information needed to set up the vs<->ps linking registers in
+ * radv_pipeline_generate_ps_inputs().
+ */
+
+static void
+handle_fs_inputs(nir_shader *nir, struct radv_shader_variant_info *shader_info)
+{
+ shader_info->fs.num_interp = nir->num_inputs;
+
+ nir_foreach_variable(variable, &nir->inputs)
+ handle_fs_input_decl(shader_info, variable);
+}
+
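+/* Fragment shader IO lowering: turn view index / layer id loads into a LAYER
+ * varying, assign driver locations, gather the PS input info above and lower
+ * input variables to explicit IO intrinsics with constant base offsets.
+ */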
+static void
+lower_fs_io(nir_shader *nir, struct radv_shader_variant_info *shader_info)
+{
+ NIR_PASS_V(nir, lower_view_index);
+ nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
+ MESA_SHADER_FRAGMENT);
+
+ handle_fs_inputs(nir, shader_info);
+
+ NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);
+
+ /* This pass needs actual constants */
+ nir_opt_constant_folding(nir);
+
+ NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
+}
+
+
void *
radv_alloc_shader_memory(struct radv_device *device,
struct radv_shader_variant *shader)
#define DEBUGGER_NUM_MARKERS 5
static unsigned
-radv_get_shader_binary_size(struct ac_shader_binary *binary)
+radv_get_shader_binary_size(size_t code_size)
{
- return binary->code_size + DEBUGGER_NUM_MARKERS * 4;
+ return code_size + DEBUGGER_NUM_MARKERS * 4;
}
-static void
-radv_fill_shader_variant(struct radv_device *device,
- struct radv_shader_variant *variant,
- struct radv_nir_compiler_options *options,
- struct ac_shader_binary *binary,
- gl_shader_stage stage)
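+/* Derive the final hardware shader config (VGPR/SGPR counts, float mode,
+ * RSRC1/RSRC2) for a stage from the config produced by the compiler.
+ */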
+static void radv_postprocess_config(const struct radv_physical_device *pdevice,
+ const struct ac_shader_config *config_in,
+ const struct radv_shader_variant_info *info,
+ gl_shader_stage stage,
+ struct ac_shader_config *config_out)
{
- bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
- struct radv_shader_info *info = &variant->info.info;
+ bool scratch_enabled = config_in->scratch_bytes_per_wave > 0;
unsigned vgpr_comp_cnt = 0;
+ unsigned num_input_vgprs = info->num_input_vgprs;
- variant->code_size = radv_get_shader_binary_size(binary);
- variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
- S_00B12C_USER_SGPR_MSB(variant->info.num_user_sgprs >> 5) |
- S_00B12C_SCRATCH_EN(scratch_enabled) |
- S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
- S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
- S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
- S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
- S_00B12C_SO_EN(!!info->so.num_outputs);
-
- variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
- S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
- S_00B848_DX10_CLAMP(1) |
- S_00B848_FLOAT_MODE(variant->config.float_mode);
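+ /* For fragment shaders, recount the input VGPRs from the enabled
+ * SPI_PS_INPUT_ADDR bits: each barycentric pair takes two VGPRs, the
+ * pull-model barycentrics take three, and the remaining bits take one each.
+ */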
+ if (stage == MESA_SHADER_FRAGMENT) {
+ num_input_vgprs = 0;
+ if (G_0286CC_PERSP_SAMPLE_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 2;
+ if (G_0286CC_PERSP_CENTER_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 2;
+ if (G_0286CC_PERSP_CENTROID_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 2;
+ if (G_0286CC_PERSP_PULL_MODEL_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 3;
+ if (G_0286CC_LINEAR_SAMPLE_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 2;
+ if (G_0286CC_LINEAR_CENTER_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 2;
+ if (G_0286CC_LINEAR_CENTROID_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 2;
+ if (G_0286CC_LINE_STIPPLE_TEX_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 1;
+ if (G_0286CC_POS_X_FLOAT_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 1;
+ if (G_0286CC_POS_Y_FLOAT_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 1;
+ if (G_0286CC_POS_Z_FLOAT_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 1;
+ if (G_0286CC_POS_W_FLOAT_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 1;
+ if (G_0286CC_FRONT_FACE_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 1;
+ if (G_0286CC_ANCILLARY_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 1;
+ if (G_0286CC_SAMPLE_COVERAGE_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 1;
+ if (G_0286CC_POS_FIXED_PT_ENA(config_in->spi_ps_input_addr))
+ num_input_vgprs += 1;
+ }
+
+ unsigned num_vgprs = MAX2(config_in->num_vgprs, num_input_vgprs);
+ /* +3 for scratch wave offset and VCC */
+ unsigned num_sgprs = MAX2(config_in->num_sgprs, info->num_input_sgprs + 3);
+
+ *config_out = *config_in;
+ config_out->num_vgprs = num_vgprs;
+ config_out->num_sgprs = num_sgprs;
+
+ /* Enable 64-bit and 16-bit denormals, because there is no performance
+ * cost.
+ *
+ * If denormals are enabled, all floating-point output modifiers are
+ * ignored.
+ *
+ * Don't enable denormals for 32-bit floats, because:
+ * - Floating-point output modifiers would be ignored by the hw.
+ * - Some opcodes don't support denormals, such as v_mad_f32. We would
+ * have to stop using those.
+ * - GFX6 & GFX7 would be very slow.
+ */
+ config_out->float_mode |= V_00B028_FP_64_DENORMS;
+
+ config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
+ S_00B12C_SCRATCH_EN(scratch_enabled);
+
+ config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) / 4) |
+ S_00B848_DX10_CLAMP(1) |
+ S_00B848_FLOAT_MODE(config_out->float_mode);
+
+ if (pdevice->rad_info.chip_class >= GFX10) {
+ config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(info->num_user_sgprs >> 5);
+ } else {
+ config_out->rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
+ config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5) |
+ S_00B12C_SO_BASE0_EN(!!info->info.so.strides[0]) |
+ S_00B12C_SO_BASE1_EN(!!info->info.so.strides[1]) |
+ S_00B12C_SO_BASE2_EN(!!info->info.so.strides[2]) |
+ S_00B12C_SO_BASE3_EN(!!info->info.so.strides[3]) |
+ S_00B12C_SO_EN(!!info->info.so.num_outputs);
+ }
switch (stage) {
case MESA_SHADER_TESS_EVAL:
- if (options->key.tes.as_es) {
- assert(device->physical_device->rad_info.chip_class <= GFX8);
- vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
+ if (info->is_ngg) {
+ config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
+ config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1);
+ } else if (info->tes.as_es) {
+ assert(pdevice->rad_info.chip_class <= GFX8);
+ vgpr_comp_cnt = info->info.uses_prim_id ? 3 : 2;
+
+ config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
} else {
- bool enable_prim_id = options->key.tes.export_prim_id || info->uses_prim_id;
+ bool enable_prim_id = info->tes.export_prim_id || info->info.uses_prim_id;
vgpr_comp_cnt = enable_prim_id ? 3 : 2;
+
+ config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
+ config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
}
- variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
break;
case MESA_SHADER_TESS_CTRL:
- if (device->physical_device->rad_info.chip_class >= GFX9) {
+ if (pdevice->rad_info.chip_class >= GFX9) {
/* We need at least 2 components for LS.
* VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
* StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
*/
- vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
+ if (pdevice->rad_info.chip_class >= GFX10) {
+ vgpr_comp_cnt = info->info.vs.needs_instance_id ? 3 : 1;
+ } else {
+ vgpr_comp_cnt = info->info.vs.needs_instance_id ? 2 : 1;
+ }
} else {
- variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
+ config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
}
+ config_out->rsrc1 |= S_00B428_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
+ S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
break;
case MESA_SHADER_VERTEX:
- if (variant->info.vs.as_ls) {
- assert(device->physical_device->rad_info.chip_class <= GFX8);
+ if (info->is_ngg) {
+ config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
+ } else if (info->vs.as_ls) {
+ assert(pdevice->rad_info.chip_class <= GFX8);
/* We need at least 2 components for LS.
* VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
* StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
*/
- vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
- } else if (variant->info.vs.as_es) {
- assert(device->physical_device->rad_info.chip_class <= GFX8);
+ vgpr_comp_cnt = info->info.vs.needs_instance_id ? 2 : 1;
+ } else if (info->vs.as_es) {
+ assert(pdevice->rad_info.chip_class <= GFX8);
/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
- vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
+ vgpr_comp_cnt = info->info.vs.needs_instance_id ? 1 : 0;
} else {
/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
* If PrimID is disabled. InstanceID / StepRate1 is loaded instead.
* StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
*/
- if (options->key.vs.export_prim_id) {
+ if (info->vs.export_prim_id) {
vgpr_comp_cnt = 2;
- } else if (info->vs.needs_instance_id) {
+ } else if (info->info.vs.needs_instance_id) {
vgpr_comp_cnt = 1;
} else {
vgpr_comp_cnt = 0;
}
+
+ config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
}
break;
case MESA_SHADER_FRAGMENT:
+ config_out->rsrc1 |= S_00B028_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
+ break;
case MESA_SHADER_GEOMETRY:
+ config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
+ S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
break;
case MESA_SHADER_COMPUTE:
- variant->rsrc2 |=
- S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
- S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
- S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
- S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
- info->cs.uses_thread_id[1] ? 1 : 0) |
- S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
- S_00B84C_LDS_SIZE(variant->config.lds_size);
+ config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
+ S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
+ config_out->rsrc2 |=
+ S_00B84C_TGID_X_EN(info->info.cs.uses_block_id[0]) |
+ S_00B84C_TGID_Y_EN(info->info.cs.uses_block_id[1]) |
+ S_00B84C_TGID_Z_EN(info->info.cs.uses_block_id[2]) |
+ S_00B84C_TIDIG_COMP_CNT(info->info.cs.uses_thread_id[2] ? 2 :
+ info->info.cs.uses_thread_id[1] ? 1 : 0) |
+ S_00B84C_TG_SIZE_EN(info->info.cs.uses_local_invocation_idx) |
+ S_00B84C_LDS_SIZE(config_in->lds_size);
break;
default:
unreachable("unsupported shader type");
break;
}
- if (device->physical_device->rad_info.chip_class >= GFX9 &&
- stage == MESA_SHADER_GEOMETRY) {
- unsigned es_type = variant->info.gs.es_type;
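+ /* On GFX10, NGG shaders run the ES and GS work as one merged HW stage, so
+ * program both the GS and ES input VGPR counts here.
+ */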
+ if (pdevice->rad_info.chip_class >= GFX10 && info->is_ngg &&
+ (stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_GEOMETRY)) {
+ unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
+ gl_shader_stage es_stage = stage;
+ if (stage == MESA_SHADER_GEOMETRY)
+ es_stage = info->gs.es_type;
+
+ /* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
+ if (es_stage == MESA_SHADER_VERTEX) {
+ es_vgpr_comp_cnt = info->info.vs.needs_instance_id ? 3 : 0;
+ } else if (es_stage == MESA_SHADER_TESS_EVAL) {
+ bool enable_prim_id = info->tes.export_prim_id || info->info.uses_prim_id;
+ es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
+ } else
+ unreachable("Unexpected ES shader stage");
+
+ bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
+ info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
+ if (info->info.uses_invocation_id || stage == MESA_SHADER_VERTEX) {
+ gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
+ } else if (info->info.uses_prim_id) {
+ gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
+ } else if (info->gs.vertices_in >= 3 || tes_triangles) {
+ gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
+ } else {
+ gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
+ }
+
+ config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt) |
+ S_00B228_WGP_MODE(1);
+ config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
+ S_00B22C_LDS_SIZE(config_in->lds_size) |
+ S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL);
+ } else if (pdevice->rad_info.chip_class >= GFX9 &&
+ stage == MESA_SHADER_GEOMETRY) {
+ unsigned es_type = info->gs.es_type;
unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
if (es_type == MESA_SHADER_VERTEX) {
/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
- es_vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
+ es_vgpr_comp_cnt = info->info.vs.needs_instance_id ? 1 : 0;
} else if (es_type == MESA_SHADER_TESS_EVAL) {
- es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
+ es_vgpr_comp_cnt = info->info.uses_prim_id ? 3 : 2;
} else {
unreachable("invalid shader ES type");
}
/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
* VGPR[0:4] are always loaded.
*/
- if (info->uses_invocation_id) {
+ if (info->info.uses_invocation_id) {
gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
- } else if (info->uses_prim_id) {
+ } else if (info->info.uses_prim_id) {
gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
- } else if (variant->info.gs.vertices_in >= 3) {
+ } else if (info->gs.vertices_in >= 3) {
gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
} else {
gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
}
- variant->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
- variant->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
- S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
- } else if (device->physical_device->rad_info.chip_class >= GFX9 &&
+ config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
+ config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
+ S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
+ } else if (pdevice->rad_info.chip_class >= GFX9 &&
stage == MESA_SHADER_TESS_CTRL) {
- variant->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
+ config_out->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
} else {
- variant->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
+ config_out->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
}
-
- void *ptr = radv_alloc_shader_memory(device, variant);
- memcpy(ptr, binary->code, binary->code_size);
-
- /* Add end-of-code markers for the UMR disassembler. */
- uint32_t *ptr32 = (uint32_t *)ptr + binary->code_size / 4;
- for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
- ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
-
}
static void radv_init_llvm_target()
call_once(&radv_init_llvm_target_once_flag, radv_init_llvm_target);
}
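+/* Build a radv_shader_variant from a compiled binary: either link the ELF
+ * with ac_rtld (RADV_BINARY_TYPE_RTLD), resolving shared LDS symbols such as
+ * the esgs ring, or copy the raw code of a legacy binary, then post-process
+ * the hardware config and upload the machine code.
+ */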
+struct radv_shader_variant *
+radv_shader_variant_create(struct radv_device *device,
+ const struct radv_shader_binary *binary)
+{
+ struct ac_shader_config config = {0};
+ struct ac_rtld_binary rtld_binary = {0};
+ struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
+ if (!variant)
+ return NULL;
+
+ variant->ref_count = 1;
+
+ if (binary->type == RADV_BINARY_TYPE_RTLD) {
+ struct ac_rtld_symbol lds_symbols[1];
+ unsigned num_lds_symbols = 0;
+ const char *elf_data = (const char *)((struct radv_shader_binary_rtld *)binary)->data;
+ size_t elf_size = ((struct radv_shader_binary_rtld *)binary)->elf_size;
+ unsigned esgs_ring_size = 0;
+
+ if (device->physical_device->rad_info.chip_class >= GFX9 &&
+ binary->stage == MESA_SHADER_GEOMETRY && !binary->is_gs_copy_shader) {
+ /* TODO: Do not hardcode this value */
+ esgs_ring_size = 32 * 1024;
+ }
+
+ if (binary->variant_info.is_ngg) {
+ /* GS stores Primitive IDs into LDS at the address
+ * corresponding to the ES thread of the provoking
+ * vertex. All ES threads load and export PrimitiveID
+ * for their thread.
+ */
+ if (binary->stage == MESA_SHADER_VERTEX &&
+ binary->variant_info.vs.export_prim_id) {
+ /* TODO: Do not hardcode this value */
+ esgs_ring_size = 256 /* max_out_verts */ * 4;
+ }
+ }
+
+ if (esgs_ring_size) {
+ /* We add this symbol even on LLVM <= 8 to ensure that
+ * shader->config.lds_size is set correctly below.
+ */
+ struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
+ sym->name = "esgs_ring";
+ sym->size = esgs_ring_size;
+ sym->align = 64 * 1024;
+
+ /* Make sure to have LDS space for NGG scratch. */
+ /* TODO: Compute this correctly somehow? */
+ if (binary->variant_info.is_ngg)
+ sym->size -= 32;
+ }
+ struct ac_rtld_open_info open_info = {
+ .info = &device->physical_device->rad_info,
+ .shader_type = binary->stage,
+ .wave_size = 64,
+ .num_parts = 1,
+ .elf_ptrs = &elf_data,
+ .elf_sizes = &elf_size,
+ .num_shared_lds_symbols = num_lds_symbols,
+ .shared_lds_symbols = lds_symbols,
+ };
+
+ if (!ac_rtld_open(&rtld_binary, open_info)) {
+ free(variant);
+ return NULL;
+ }
+
+ if (!ac_rtld_read_config(&rtld_binary, &config)) {
+ ac_rtld_close(&rtld_binary);
+ free(variant);
+ return NULL;
+ }
+
+ if (rtld_binary.lds_size > 0) {
+ unsigned alloc_granularity = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
+ config.lds_size = align(rtld_binary.lds_size, alloc_granularity) / alloc_granularity;
+ }
+
+ variant->code_size = rtld_binary.rx_size;
+ } else {
+ assert(binary->type == RADV_BINARY_TYPE_LEGACY);
+ config = ((struct radv_shader_binary_legacy *)binary)->config;
+ variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
+ }
+
+ variant->info = binary->variant_info;
+ radv_postprocess_config(device->physical_device, &config, &binary->variant_info,
+ binary->stage, &variant->config);
+
+ void *dest_ptr = radv_alloc_shader_memory(device, variant);
+
+ if (binary->type == RADV_BINARY_TYPE_RTLD) {
+ struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
+ struct ac_rtld_upload_info info = {
+ .binary = &rtld_binary,
+ .rx_va = radv_buffer_get_va(variant->bo) + variant->bo_offset,
+ .rx_ptr = dest_ptr,
+ };
+
+ if (!ac_rtld_upload(&info)) {
+ radv_shader_variant_destroy(device, variant);
+ ac_rtld_close(&rtld_binary);
+ return NULL;
+ }
+
+ if (device->keep_shader_info) {
+ const char *disasm_data;
+ size_t disasm_size;
+ if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
+ radv_shader_variant_destroy(device, variant);
+ ac_rtld_close(&rtld_binary);
+ return NULL;
+ }
+
+ variant->llvm_ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
+ variant->disasm_string = malloc(disasm_size + 1);
+ memcpy(variant->disasm_string, disasm_data, disasm_size);
+ variant->disasm_string[disasm_size] = 0;
+ }
+
+ ac_rtld_close(&rtld_binary);
+ } else {
+ struct radv_shader_binary_legacy* bin = (struct radv_shader_binary_legacy *)binary;
+ memcpy(dest_ptr, bin->data, bin->code_size);
+
+ /* Add end-of-code markers for the UMR disassembler. */
+ uint32_t *ptr32 = (uint32_t *)dest_ptr + bin->code_size / 4;
+ for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
+ ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
+
+ variant->llvm_ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->code_size)) : NULL;
+ variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->code_size + bin->llvm_ir_size)) : NULL;
+ }
+ return variant;
+}
+
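+/* Run the LLVM compiler on the given NIR shaders and wrap the resulting
+ * binary in a radv_shader_variant.
+ */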
static struct radv_shader_variant *
-shader_variant_create(struct radv_device *device,
- struct radv_shader_module *module,
- struct nir_shader * const *shaders,
- int shader_count,
- gl_shader_stage stage,
- struct radv_nir_compiler_options *options,
- bool gs_copy_shader,
- void **code_out,
- unsigned *code_size_out)
+shader_variant_compile(struct radv_device *device,
+ struct radv_shader_module *module,
+ struct nir_shader * const *shaders,
+ int shader_count,
+ gl_shader_stage stage,
+ struct radv_nir_compiler_options *options,
+ bool gs_copy_shader,
+ struct radv_shader_binary **binary_out)
{
enum radeon_family chip_family = device->physical_device->rad_info.family;
enum ac_target_machine_options tm_options = 0;
- struct radv_shader_variant *variant;
- struct ac_shader_binary binary;
struct ac_llvm_compiler ac_llvm;
+ struct radv_shader_binary *binary = NULL;
+ struct radv_shader_variant_info variant_info = {0};
bool thread_compiler;
- variant = calloc(1, sizeof(struct radv_shader_variant));
- if (!variant)
- return NULL;
+
+ if (shaders[0]->info.stage == MESA_SHADER_FRAGMENT)
+ lower_fs_io(shaders[0], &variant_info);
options->family = chip_family;
options->chip_class = device->physical_device->rad_info.chip_class;
if (gs_copy_shader) {
assert(shader_count == 1);
radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
- &variant->config, &variant->info,
- options);
+ &variant_info, options);
} else {
- radv_compile_nir_shader(&ac_llvm, &binary, &variant->config,
- &variant->info, shaders, shader_count,
- options);
+ radv_compile_nir_shader(&ac_llvm, &binary, &variant_info,
+ shaders, shader_count, options);
}
+ binary->variant_info = variant_info;
radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
- radv_fill_shader_variant(device, variant, options, &binary, stage);
-
- if (code_out) {
- *code_out = binary.code;
- *code_size_out = binary.code_size;
- } else
- free(binary.code);
- free(binary.config);
- free(binary.rodata);
- free(binary.global_symbol_offsets);
- free(binary.relocs);
- variant->ref_count = 1;
+ struct radv_shader_variant *variant = radv_shader_variant_create(device, binary);
+ if (!variant) {
+ free(binary);
+ return NULL;
+ }
+
+ if (options->dump_shader) {
+ fprintf(stderr, "disasm:\n%s\n", variant->disasm_string);
+ }
+
if (device->keep_shader_info) {
- variant->disasm_string = binary.disasm_string;
- variant->llvm_ir_string = binary.llvm_ir_string;
if (!gs_copy_shader && !module->nir) {
variant->nir = *shaders;
variant->spirv = (uint32_t *)module->data;
variant->spirv_size = module->size;
}
- } else {
- free(binary.disasm_string);
}
+ if (binary_out)
+ *binary_out = binary;
+ else
+ free(binary);
+
return variant;
}
struct radv_shader_variant *
-radv_shader_variant_create(struct radv_device *device,
+radv_shader_variant_compile(struct radv_device *device,
struct radv_shader_module *module,
struct nir_shader *const *shaders,
int shader_count,
struct radv_pipeline_layout *layout,
const struct radv_shader_variant_key *key,
- void **code_out,
- unsigned *code_size_out)
+ struct radv_shader_binary **binary_out)
{
struct radv_nir_compiler_options options = {0};
options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
options.supports_spill = true;
- return shader_variant_create(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage,
- &options, false, code_out, code_size_out);
+ return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage,
+ &options, false, binary_out);
}
struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
struct nir_shader *shader,
- void **code_out,
- unsigned *code_size_out,
+ struct radv_shader_binary **binary_out,
bool multiview)
{
struct radv_nir_compiler_options options = {0};
options.key.has_multiview_view_index = multiview;
- return shader_variant_create(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
- &options, true, code_out, code_size_out);
+ return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
+ &options, true, binary_out);
}
void
}
const char *
-radv_get_shader_name(struct radv_shader_variant *var, gl_shader_stage stage)
+radv_get_shader_name(struct radv_shader_variant_info *info,
+ gl_shader_stage stage)
{
switch (stage) {
- case MESA_SHADER_VERTEX: return var->info.vs.as_ls ? "Vertex Shader as LS" : var->info.vs.as_es ? "Vertex Shader as ES" : "Vertex Shader as VS";
- case MESA_SHADER_GEOMETRY: return "Geometry Shader";
- case MESA_SHADER_FRAGMENT: return "Pixel Shader";
- case MESA_SHADER_COMPUTE: return "Compute Shader";
- case MESA_SHADER_TESS_CTRL: return "Tessellation Control Shader";
- case MESA_SHADER_TESS_EVAL: return var->info.tes.as_es ? "Tessellation Evaluation Shader as ES" : "Tessellation Evaluation Shader as VS";
+ case MESA_SHADER_VERTEX:
+ if (info->vs.as_ls)
+ return "Vertex Shader as LS";
+ else if (info->vs.as_es)
+ return "Vertex Shader as ES";
+ else if (info->is_ngg)
+ return "Vertex Shader as ESGS";
+ else
+ return "Vertex Shader as VS";
+ case MESA_SHADER_TESS_CTRL:
+ return "Tessellation Control Shader";
+ case MESA_SHADER_TESS_EVAL:
+ if (info->tes.as_es)
+ return "Tessellation Evaluation Shader as ES";
+ else if (info->is_ngg)
+ return "Tessellation Evaluation Shader as ESGS";
+ else
+ return "Tessellation Evaluation Shader as VS";
+ case MESA_SHADER_GEOMETRY:
+ return "Geometry Shader";
+ case MESA_SHADER_FRAGMENT:
+ return "Pixel Shader";
+ case MESA_SHADER_COMPUTE:
+ return "Compute Shader";
default:
return "Unknown shader";
};
lds_increment);
} else if (stage == MESA_SHADER_COMPUTE) {
unsigned max_workgroup_size =
- radv_nir_get_max_workgroup_size(chip_class, variant->nir);
+ radv_nir_get_max_workgroup_size(chip_class, stage, variant->nir);
lds_per_wave = (conf->lds_size * lds_increment) /
DIV_ROUND_UP(max_workgroup_size, 64);
}
generate_shader_stats(device, variant, stage, buf);
- fprintf(file, "\n%s:\n", radv_get_shader_name(variant, stage));
+ fprintf(file, "\n%s:\n", radv_get_shader_name(&variant->info, stage));
fprintf(file, "%s", buf->buf);
_mesa_string_buffer_destroy(buf);
case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
buf = _mesa_string_buffer_create(NULL, 1024);
- _mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(variant, stage));
+ _mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(&variant->info, stage));
_mesa_string_buffer_printf(buf, "%s\n\n", variant->llvm_ir_string);
_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
generate_shader_stats(device, variant, stage, buf);