#include "radv_shader.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
+#include "nir/nir_xfb_info.h"
#include "spirv/nir_spirv.h"
#include "vk_util.h"
uint32_t lds_size;
};
+struct radv_ngg_state {
+ uint16_t ngg_emit_size; /* in dwords */
+ uint32_t hw_max_esverts;
+ uint32_t max_gsprims;
+ uint32_t max_out_verts;
+ uint32_t prim_amp_factor;
+ uint32_t vgt_esgs_ring_itemsize;
+ bool max_vert_out_per_gs_instance;
+};
+
+bool radv_pipeline_has_ngg(const struct radv_pipeline *pipeline)
+{
+ struct radv_shader_variant *variant = NULL;
+ if (pipeline->shaders[MESA_SHADER_GEOMETRY])
+ variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
+ else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
+ variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
+ else if (pipeline->shaders[MESA_SHADER_VERTEX])
+ variant = pipeline->shaders[MESA_SHADER_VERTEX];
+ else
+ return false;
+ return variant->info.is_ngg;
+}
+
+bool radv_pipeline_has_gs_copy_shader(const struct radv_pipeline *pipeline)
+{
+ if (!radv_pipeline_has_gs(pipeline))
+ return false;
+
+ /* The GS copy shader is required if the pipeline has GS on GFX6-GFX9.
+ * On GFX10, it might be required in rare cases if it's not possible to
+ * enable NGG.
+ */
+ if (radv_pipeline_has_ngg(pipeline))
+ return false;
+
+ assert(pipeline->gs_copy_shader);
+ return true;
+}
+
static void
radv_pipeline_destroy(struct radv_device *device,
struct radv_pipeline *pipeline,
if (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH)
hash_flags |= RADV_HASH_SHADER_UNSAFE_MATH;
+ if (device->instance->debug_flags & RADV_DEBUG_NO_NGG)
+ hash_flags |= RADV_HASH_SHADER_NO_NGG;
if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
hash_flags |= RADV_HASH_SHADER_SISCHED;
+ if (device->physical_device->cs_wave_size == 32)
+ hash_flags |= RADV_HASH_SHADER_CS_WAVE32;
+ if (device->physical_device->ps_wave_size == 32)
+ hash_flags |= RADV_HASH_SHADER_PS_WAVE32;
+ if (device->physical_device->ge_wave_size == 32)
+ hash_flags |= RADV_HASH_SHADER_GE_WAVE32;
return hash_flags;
}
blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);
}
- if (pipeline->device->physical_device->has_rbplus) {
+ if (pipeline->device->physical_device->rad_info.has_rbplus) {
/* Disable RB+ blend optimizations for dual source blending. */
if (blend.mrt0_is_dual_src) {
for (i = 0; i < 8; i++) {
return gs;
}
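+/* Clamp the GS prim count to what the ES vertex budget can feed. The
+ * model here: the first primitive consumes min_verts_per_prim vertices
+ * and each subsequent one presumably reuses all but one of them (all
+ * but two with adjacency). E.g. max_esverts = 100, min_verts_per_prim
+ * = 3: max_reuse = 97, so at most 98 prims (49 with adjacency).
+ */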
+static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
+ unsigned min_verts_per_prim, bool use_adjacency)
+{
+ unsigned max_reuse = max_esverts - min_verts_per_prim;
+ if (use_adjacency)
+ max_reuse /= 2;
+ *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
+}
+
+static unsigned
+radv_get_num_input_vertices(struct radv_pipeline *pipeline)
+{
+ if (radv_pipeline_has_gs(pipeline)) {
+ struct radv_shader_variant *gs =
+ radv_get_shader(pipeline, MESA_SHADER_GEOMETRY);
+
+ return gs->info.gs.vertices_in;
+ }
+
+ if (radv_pipeline_has_tess(pipeline)) {
+ struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);
+
+ if (tes->info.tes.point_mode)
+ return 1;
+ if (tes->info.tes.primitive_mode == GL_ISOLINES)
+ return 2;
+ return 3;
+ }
+
+ return 3;
+}
+
+static struct radv_ngg_state
+calculate_ngg_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ struct radv_pipeline *pipeline)
+{
+ struct radv_ngg_state ngg = {0};
+ struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
+ struct radv_es_output_info *es_info =
+ radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
+ unsigned gs_type = radv_pipeline_has_gs(pipeline) ? MESA_SHADER_GEOMETRY : MESA_SHADER_VERTEX;
+ unsigned max_verts_per_prim = radv_get_num_input_vertices(pipeline);
+ unsigned min_verts_per_prim =
+ gs_type == MESA_SHADER_GEOMETRY ? max_verts_per_prim : 1;
+ unsigned gs_num_invocations = radv_pipeline_has_gs(pipeline) ? MAX2(gs_info->gs.invocations, 1) : 1;
+ bool uses_adjacency;
+ switch(pCreateInfo->pInputAssemblyState->topology) {
+ case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
+ case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
+ uses_adjacency = true;
+ break;
+ default:
+ uses_adjacency = false;
+ break;
+ }
+
+ /* All these are in dwords: */
+ /* We can't allow using the whole LDS, because GS waves compete with
+ * other shader stages for LDS space.
+ *
+ * Streamout can increase the ESGS buffer size later on, so be more
+ * conservative with streamout and use 4K dwords. This may be suboptimal.
+ *
+ * Otherwise, use the limit of 7K dwords. The reason is that we need
+ * to leave some headroom for the max_esverts increase at the end.
+ *
+ * TODO: We should really take the shader's internal LDS use into
+ * account. The linker will fail if the size is greater than
+ * 8K dwords.
+ */
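+ /* With the streamout term hardcoded to 0 below, this evaluates to
+ * 7 * 1024 - 128 = 7040 dwords, leaving 1152 dwords of headroom
+ * below the 8K-dword limit. */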
+ const unsigned max_lds_size = (0 /*gs_info->info.so.num_outputs*/ ? 4 : 7) * 1024 - 128;
+ const unsigned target_lds_size = max_lds_size;
+ unsigned esvert_lds_size = 0;
+ unsigned gsprim_lds_size = 0;
+
+ /* All these are per subgroup: */
+ bool max_vert_out_per_gs_instance = false;
+ unsigned max_esverts_base = 256;
+ unsigned max_gsprims_base = 128; /* default prim group size clamp */
+
+ /* Hardware has the following non-natural restrictions on the value
+ * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
+ * the draw:
+ * - at most 252 for any line input primitive type
+ * - at most 251 for any quad input primitive type
+ * - at most 251 for triangle strips with adjacency (this happens to
+ * be the natural limit for triangle *lists* with adjacency)
+ */
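+ /* E.g. triangle lists with adjacency (6 input vertices) clamp this to
+ * 251 + 6 - 1 = 256; combined with the full-primitive adjustment at
+ * the end (hw_max_esverts = max_esverts - max_verts_per_prim + 1), the
+ * VERT_GRP_SIZE the hardware sees never exceeds 251.
+ */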
+ max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);
+
+ if (gs_type == MESA_SHADER_GEOMETRY) {
+ unsigned max_out_verts_per_gsprim =
+ gs_info->gs.vertices_out * gs_num_invocations;
+
+ if (max_out_verts_per_gsprim <= 256) {
+ if (max_out_verts_per_gsprim) {
+ max_gsprims_base = MIN2(max_gsprims_base,
+ 256 / max_out_verts_per_gsprim);
+ }
+ } else {
+ /* Use special multi-cycling mode in which each GS
+ * instance gets its own subgroup. Does not work with
+ * tessellation. */
+ max_vert_out_per_gs_instance = true;
+ max_gsprims_base = 1;
+ max_out_verts_per_gsprim = gs_info->gs.vertices_out;
+ }
+
+ esvert_lds_size = es_info->esgs_itemsize / 4;
+ gsprim_lds_size = (gs_info->gs.gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
+ } else {
+ /* TODO: This needs to be adjusted once LDS use for compaction
+ * after culling is implemented. */
+ /*
+ if (es_info->info.so.num_outputs)
+ esvert_lds_size = 4 * es_info->info.so.num_outputs + 1;
+ */
+
+ /* LDS size for passing data from GS to ES.
+ * GS stores Primitive IDs (one DWORD) into LDS at the address
+ * corresponding to the ES thread of the provoking vertex. All
+ * ES threads load and export PrimitiveID for their thread.
+ */
+ if (!radv_pipeline_has_tess(pipeline) &&
+ pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.export_prim_id)
+ esvert_lds_size = MAX2(esvert_lds_size, 1);
+ }
+
+ unsigned max_gsprims = max_gsprims_base;
+ unsigned max_esverts = max_esverts_base;
+
+ if (esvert_lds_size)
+ max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
+ if (gsprim_lds_size)
+ max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);
+
+ max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
+ clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
+ assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
+
+ if (esvert_lds_size || gsprim_lds_size) {
+ /* Now that we have a rough proportionality between esverts
+ * and gsprims based on the primitive type, scale both of them
+ * down simultaneously based on required LDS space.
+ *
+ * We could be smarter about this if we knew how much vertex
+ * reuse to expect.
+ */
+ unsigned lds_total = max_esverts * esvert_lds_size +
+ max_gsprims * gsprim_lds_size;
+ if (lds_total > target_lds_size) {
+ max_esverts = max_esverts * target_lds_size / lds_total;
+ max_gsprims = max_gsprims * target_lds_size / lds_total;
+
+ max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
+ clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
+ min_verts_per_prim, uses_adjacency);
+ assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
+ }
+ }
+
+ /* Round up towards full wave sizes for better ALU utilization. */
+ if (!max_vert_out_per_gs_instance) {
+ const unsigned wavesize = pipeline->device->physical_device->ge_wave_size;
+ unsigned orig_max_esverts;
+ unsigned orig_max_gsprims;
+ do {
+ orig_max_esverts = max_esverts;
+ orig_max_gsprims = max_gsprims;
+
+ max_esverts = align(max_esverts, wavesize);
+ max_esverts = MIN2(max_esverts, max_esverts_base);
+ if (esvert_lds_size)
+ max_esverts = MIN2(max_esverts,
+ (max_lds_size - max_gsprims * gsprim_lds_size) /
+ esvert_lds_size);
+ max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
+
+ max_gsprims = align(max_gsprims, wavesize);
+ max_gsprims = MIN2(max_gsprims, max_gsprims_base);
+ if (gsprim_lds_size)
+ max_gsprims = MIN2(max_gsprims,
+ (max_lds_size - max_esverts * esvert_lds_size) /
+ gsprim_lds_size);
+ clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
+ min_verts_per_prim, uses_adjacency);
+ assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
+ } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
+ }
+
+ /* Hardware restriction: minimum value of max_esverts */
+ max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);
+
+ unsigned max_out_vertices =
+ max_vert_out_per_gs_instance ? gs_info->gs.vertices_out :
+ gs_type == MESA_SHADER_GEOMETRY ?
+ max_gsprims * gs_num_invocations * gs_info->gs.vertices_out :
+ max_esverts;
+ assert(max_out_vertices <= 256);
+
+ unsigned prim_amp_factor = 1;
+ if (gs_type == MESA_SHADER_GEOMETRY) {
+ /* Number of output primitives per GS input primitive after
+ * GS instancing. */
+ prim_amp_factor = gs_info->gs.vertices_out;
+ }
+
+ /* The GE only checks against the maximum number of ES verts after
+ * allocating a full GS primitive. So we need to ensure that whenever
+ * this check passes, there is enough space for a full primitive without
+ * vertex reuse.
+ */
+ ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
+ ngg.max_gsprims = max_gsprims;
+ ngg.max_out_verts = max_out_vertices;
+ ngg.prim_amp_factor = prim_amp_factor;
+ ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
+ ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;
+
+ if (gs_type == MESA_SHADER_GEOMETRY) {
+ ngg.vgt_esgs_ring_itemsize = es_info->esgs_itemsize / 4;
+ } else {
+ ngg.vgt_esgs_ring_itemsize = 1;
+ }
+
+ pipeline->graphics.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
+
+ assert(ngg.hw_max_esverts >= 24); /* HW limitation */
+
+ return ngg;
+}
+
static void
calculate_gs_ring_sizes(struct radv_pipeline *pipeline, const struct radv_gs_state *gs)
{
else
topology = V_028B6C_OUTPUT_TRIANGLE_CW;
- if (pipeline->device->has_distributed_tess) {
+ if (pipeline->device->physical_device->rad_info.has_distributed_tess) {
if (pipeline->device->physical_device->rad_info.family == CHIP_FIJI ||
pipeline->device->physical_device->rad_info.family >= CHIP_POLARIS10)
distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
static const struct radv_vs_output_info *get_vs_output_info(const struct radv_pipeline *pipeline)
{
if (radv_pipeline_has_gs(pipeline))
- return &pipeline->gs_copy_shader->info.vs.outinfo;
+ if (radv_pipeline_has_ngg(pipeline))
+ return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.vs.outinfo;
+ else
+ return &pipeline->gs_copy_shader->info.vs.outinfo;
else if (radv_pipeline_has_tess(pipeline))
return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.outinfo;
else
return key;
}
+static bool
+radv_nir_stage_uses_xfb(const nir_shader *nir)
+{
+ nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
+ bool uses_xfb = !!xfb;
+
+ ralloc_free(xfb);
+ return uses_xfb;
+}
+
static void
-radv_fill_shader_keys(struct radv_shader_variant_key *keys,
+radv_fill_shader_keys(struct radv_device *device,
+ struct radv_shader_variant_key *keys,
const struct radv_pipeline_key *key,
nir_shader **nir)
{
}
if (nir[MESA_SHADER_TESS_CTRL]) {
- keys[MESA_SHADER_VERTEX].vs.as_ls = true;
+ keys[MESA_SHADER_VERTEX].vs_common_out.as_ls = true;
keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = 0;
keys[MESA_SHADER_TESS_CTRL].tcs.input_vertices = key->tess_input_vertices;
keys[MESA_SHADER_TESS_CTRL].tcs.primitive_mode = nir[MESA_SHADER_TESS_EVAL]->info.tess.primitive_mode;
if (nir[MESA_SHADER_GEOMETRY]) {
if (nir[MESA_SHADER_TESS_CTRL])
- keys[MESA_SHADER_TESS_EVAL].tes.as_es = true;
+ keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_es = true;
else
- keys[MESA_SHADER_VERTEX].vs.as_es = true;
+ keys[MESA_SHADER_VERTEX].vs_common_out.as_es = true;
+ }
+
+ if (device->physical_device->rad_info.chip_class >= GFX10 &&
+ device->physical_device->rad_info.family != CHIP_NAVI14 &&
+ !(device->instance->debug_flags & RADV_DEBUG_NO_NGG)) {
+ if (nir[MESA_SHADER_TESS_CTRL]) {
+ keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = true;
+ } else {
+ keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = true;
+ }
+
+ if (nir[MESA_SHADER_TESS_CTRL] &&
+ nir[MESA_SHADER_GEOMETRY] &&
+ nir[MESA_SHADER_GEOMETRY]->info.gs.invocations *
+ nir[MESA_SHADER_GEOMETRY]->info.gs.vertices_out > 256) {
+ /* Fallback to the legacy path if tessellation is
+ * enabled with extreme geometry because
+ * EN_MAX_VERT_OUT_PER_GS_INSTANCE doesn't work and it
+ * might hang.
+ */
+ keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
+ }
+
+ /*
+ * Disable NGG with geometry shaders. There are a bunch of
+ * issues still:
+ * * GS primitives in pipeline statistic queries do not get
+ * updates. See dEQP-VK.query_pool.statistics_query.geometry_shader_primitives
+ * * dEQP-VK.clipping.user_defined.clip_cull_distance_dynamic_index.*geom* failures
+ * * Interactions with tessellation failing:
+ * dEQP-VK.tessellation.geometry_interaction.passthrough.tessellate_isolines_passthrough_geometry_no_change
+ * * General issues with the last primitive missing/corrupt:
+ * https://bugs.freedesktop.org/show_bug.cgi?id=111248
+ *
+ * Furthermore, XGL/AMDVLK also disables this as of 9b632ef.
+ */
+ if (nir[MESA_SHADER_GEOMETRY]) {
+ if (nir[MESA_SHADER_TESS_CTRL])
+ keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
+ else
+ keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = false;
+ }
+
+ /* TODO: Implement streamout support for NGG. */
+ gl_shader_stage last_xfb_stage = MESA_SHADER_VERTEX;
+
+ for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
+ if (nir[i])
+ last_xfb_stage = i;
+ }
+
+ if (nir[last_xfb_stage] &&
+ radv_nir_stage_uses_xfb(nir[last_xfb_stage])) {
+ if (nir[MESA_SHADER_TESS_CTRL])
+ keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
+ else
+ keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = false;
+ }
}
for(int i = 0; i < MESA_SHADER_STAGES; ++i)
struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
nir_shader *nir[MESA_SHADER_STAGES] = {0};
struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
- struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{0}}}};
+ struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{{0}}}}};
unsigned char hash[20], gs_copy_hash[20];
+ bool keep_executable_info = (flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) || device->keep_shader_info;
radv_start_feedback(pipeline_feedback);
gs_copy_hash[0] ^= 1;
bool found_in_application_cache = true;
- if (modules[MESA_SHADER_GEOMETRY]) {
+ if (modules[MESA_SHADER_GEOMETRY] && !keep_executable_info) {
struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants,
&found_in_application_cache);
pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
}
- if (radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
+ if (!keep_executable_info &&
+ radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
&found_in_application_cache) &&
(!modules[MESA_SHADER_GEOMETRY] || pipeline->gs_copy_shader)) {
radv_stop_feedback(pipeline_feedback, found_in_application_cache);
for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
if (nir[i]) {
- NIR_PASS_V(nir[i], nir_lower_bool_to_int32);
NIR_PASS_V(nir[i], nir_lower_non_uniform_access,
nir_lower_non_uniform_ubo_access |
nir_lower_non_uniform_ssbo_access |
nir_lower_non_uniform_texture_access |
nir_lower_non_uniform_image_access);
+ NIR_PASS_V(nir[i], nir_lower_bool_to_int32);
}
if (radv_can_dump_shader(device, modules[i], false))
nir_print_shader(nir[i], stderr);
}
- radv_fill_shader_keys(keys, key, nir);
+ radv_fill_shader_keys(device, keys, key, nir);
if (nir[MESA_SHADER_FRAGMENT]) {
if (!pipeline->shaders[MESA_SHADER_FRAGMENT]) {
pipeline->shaders[MESA_SHADER_FRAGMENT] =
radv_shader_variant_compile(device, modules[MESA_SHADER_FRAGMENT], &nir[MESA_SHADER_FRAGMENT], 1,
pipeline->layout, keys + MESA_SHADER_FRAGMENT,
- &binaries[MESA_SHADER_FRAGMENT]);
+ keep_executable_info, &binaries[MESA_SHADER_FRAGMENT]);
radv_stop_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT], false);
}
/* TODO: These are no longer used as keys we should refactor this */
- keys[MESA_SHADER_VERTEX].vs.export_prim_id =
+ keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id =
pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input;
- keys[MESA_SHADER_VERTEX].vs.export_layer_id =
+ keys[MESA_SHADER_VERTEX].vs_common_out.export_layer_id =
pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.layer_input;
- keys[MESA_SHADER_VERTEX].vs.export_clip_dists =
+ keys[MESA_SHADER_VERTEX].vs_common_out.export_clip_dists =
!!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.num_input_clips_culls;
- keys[MESA_SHADER_TESS_EVAL].tes.export_prim_id =
+ keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_prim_id =
pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input;
- keys[MESA_SHADER_TESS_EVAL].tes.export_layer_id =
+ keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_layer_id =
pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.layer_input;
- keys[MESA_SHADER_TESS_EVAL].tes.export_clip_dists =
+ keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_clip_dists =
!!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.num_input_clips_culls;
}
pipeline->shaders[MESA_SHADER_TESS_CTRL] = radv_shader_variant_compile(device, modules[MESA_SHADER_TESS_CTRL], combined_nir, 2,
pipeline->layout,
- &key, &binaries[MESA_SHADER_TESS_CTRL]);
+ &key, keep_executable_info,
+ &binaries[MESA_SHADER_TESS_CTRL]);
radv_stop_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL], false);
}
pipeline->shaders[MESA_SHADER_GEOMETRY] = radv_shader_variant_compile(device, modules[MESA_SHADER_GEOMETRY], combined_nir, 2,
pipeline->layout,
- &keys[pre_stage] , &binaries[MESA_SHADER_GEOMETRY]);
+ &keys[pre_stage], keep_executable_info,
+ &binaries[MESA_SHADER_GEOMETRY]);
radv_stop_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY], false);
}
pipeline->shaders[i] = radv_shader_variant_compile(device, modules[i], &nir[i], 1,
pipeline->layout,
- keys + i, &binaries[i]);
+ keys + i, keep_executable_info,
+ &binaries[i]);
radv_stop_feedback(stage_feedbacks[i], false);
}
if(modules[MESA_SHADER_GEOMETRY]) {
struct radv_shader_binary *gs_copy_binary = NULL;
- if (!pipeline->gs_copy_shader) {
+ if (!pipeline->gs_copy_shader &&
+ !radv_pipeline_has_ngg(pipeline)) {
pipeline->gs_copy_shader = radv_create_gs_copy_shader(
device, nir[MESA_SHADER_GEOMETRY], &gs_copy_binary,
+ keep_executable_info,
keys[MESA_SHADER_GEOMETRY].has_multiview_view_index);
}
- if (pipeline->gs_copy_shader) {
+ if (!keep_executable_info && pipeline->gs_copy_shader) {
struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
free(gs_copy_binary);
}
- radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
- binaries);
+ if (!keep_executable_info) {
+ radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
+ binaries);
+ }
for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
free(binaries[i]);
if (nir[i]) {
- if (!pipeline->device->keep_shader_info)
- ralloc_free(nir[i]);
+ ralloc_free(nir[i]);
if (radv_can_dump_shader_stats(device, modules[i]))
radv_shader_dump_stats(device,
{
bool has_gs = radv_pipeline_has_gs(pipeline);
bool has_tess = radv_pipeline_has_tess(pipeline);
+ bool has_ngg = radv_pipeline_has_ngg(pipeline);
+
switch (stage) {
case MESA_SHADER_FRAGMENT:
return R_00B030_SPI_SHADER_USER_DATA_PS_0;
}
}
+ if (has_ngg)
+ return R_00B230_SPI_SHADER_USER_DATA_GS_0;
+
return R_00B130_SPI_SHADER_USER_DATA_VS_0;
case MESA_SHADER_GEOMETRY:
return chip_class == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
if (has_gs) {
return chip_class >= GFX10 ? R_00B230_SPI_SHADER_USER_DATA_GS_0 :
R_00B330_SPI_SHADER_USER_DATA_ES_0;
+ } else if (has_ngg) {
+ return R_00B230_SPI_SHADER_USER_DATA_GS_0;
} else {
return R_00B130_SPI_SHADER_USER_DATA_VS_0;
}
};
static VkExtent2D
-radv_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
+radv_gfx9_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
static const struct radv_bin_size_entry color_size_table[][3][9] = {
{
while(ds_entry[1].bpp <= ds_bytes_per_pixel)
++ds_entry;
- extent.width = MIN2(extent.width, ds_entry->extent.width);
- extent.height = MIN2(extent.height, ds_entry->extent.height);
+ if (ds_entry->extent.width * ds_entry->extent.height < extent.width * extent.height)
+ extent = ds_entry->extent;
+ }
+
+ return extent;
+}
+
+static VkExtent2D
+radv_gfx10_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
+{
+ RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
+ struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
+ VkExtent2D extent = {512, 512};
+
+ unsigned sdp_interface_count;
+
+ switch(pipeline->device->physical_device->rad_info.family) {
+ case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ sdp_interface_count = 16;
+ break;
+ case CHIP_NAVI14:
+ sdp_interface_count = 8;
+ break;
+ default:
+ unreachable("Unhandled GFX10 chip");
+ }
+
+ const unsigned db_tag_size = 64;
+ const unsigned db_tag_count = 312;
+ const unsigned color_tag_size = 1024;
+ const unsigned color_tag_count = 31;
+ const unsigned fmask_tag_size = 256;
+ const unsigned fmask_tag_count = 44;
+
+ const unsigned rb_count = pipeline->device->physical_device->rad_info.num_render_backends;
+ const unsigned pipe_count = MAX2(rb_count, sdp_interface_count);
+
+ const unsigned db_tag_part = (db_tag_count * rb_count / pipe_count) * db_tag_size * pipe_count;
+ const unsigned color_tag_part = (color_tag_count * rb_count / pipe_count) * color_tag_size * pipe_count;
+ const unsigned fmask_tag_part = (fmask_tag_count * rb_count / pipe_count) * fmask_tag_size * pipe_count;
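+
+ /* Example, assuming NAVI10 with 16 render backends: pipe_count = 16,
+ * so color_tag_part = 31 * 1024 * 16 = 507904 bytes; one single-sampled
+ * 32-bit color target then gives color_pixel_count_log =
+ * util_logbase2(507904 / 4) = 16 and a 256x256 starting bin below.
+ */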
+
+ const unsigned total_samples = 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline->graphics.ms.pa_sc_aa_config);
+ const unsigned samples_log = util_logbase2_ceil(total_samples);
+
+ unsigned color_bytes_per_pixel = 0;
+ unsigned fmask_bytes_per_pixel = 0;
+
+ const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
+ if (vkblend) {
+ for (unsigned i = 0; i < subpass->color_count; i++) {
+ if (!vkblend->pAttachments[i].colorWriteMask)
+ continue;
+
+ if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
+ color_bytes_per_pixel += vk_format_get_blocksize(format);
+
+ if (total_samples > 1) {
+ const unsigned fmask_array[] = {0, 1, 1, 4};
+ fmask_bytes_per_pixel += fmask_array[samples_log];
+ }
+ }
+
+ color_bytes_per_pixel *= total_samples;
+ }
+ color_bytes_per_pixel = MAX2(color_bytes_per_pixel, 1);
+
+ const unsigned color_pixel_count_log = util_logbase2(color_tag_part / color_bytes_per_pixel);
+ extent.width = 1ull << ((color_pixel_count_log + 1) / 2);
+ extent.height = 1ull << (color_pixel_count_log / 2);
+
+ if (fmask_bytes_per_pixel) {
+ const unsigned fmask_pixel_count_log = util_logbase2(fmask_tag_part / fmask_bytes_per_pixel);
+
+ const VkExtent2D fmask_extent = (VkExtent2D){
+ .width = 1ull << ((fmask_pixel_count_log + 1) / 2),
+ .height = 1ull << (fmask_pixel_count_log / 2)
+ };
+
+ if (fmask_extent.width * fmask_extent.height < extent.width * extent.height)
+ extent = fmask_extent;
+ }
+
+ if (subpass->depth_stencil_attachment) {
+ struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;
+
+ /* Coefficients taken from AMDVLK */
+ unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
+ unsigned stencil_coeff = vk_format_is_stencil(attachment->format) ? 1 : 0;
+ unsigned db_bytes_per_pixel = (depth_coeff + stencil_coeff) * total_samples;
+
+ const unsigned db_pixel_count_log = util_logbase2(db_tag_part / db_bytes_per_pixel);
+
+ const VkExtent2D db_extent = (VkExtent2D){
+ .width = 1ull << ((db_pixel_count_log + 1) / 2),
+ .height = 1ull << (db_pixel_count_log / 2)
+ };
+
+ if (db_extent.width * db_extent.height < extent.width * extent.height)
+ extent = db_extent;
}
+ extent.width = MAX2(extent.width, 128);
+ extent.height = MAX2(extent.height, 64);
+
return extent;
}
+static void
+radv_pipeline_generate_disabled_binning_state(struct radeon_cmdbuf *ctx_cs,
+ struct radv_pipeline *pipeline,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo)
+{
+ uint32_t pa_sc_binner_cntl_0 =
+ S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
+ S_028C44_DISABLE_START_OF_PRIM(1);
+ uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);
+
+ if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
+ RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
+ struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
+ const VkPipelineColorBlendStateCreateInfo *vkblend = pCreateInfo->pColorBlendState;
+ unsigned min_bytes_per_pixel = 0;
+
+ if (vkblend) {
+ for (unsigned i = 0; i < subpass->color_count; i++) {
+ if (!vkblend->pAttachments[i].colorWriteMask)
+ continue;
+
+ if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
+ unsigned bytes = vk_format_get_blocksize(format);
+ if (!min_bytes_per_pixel || bytes < min_bytes_per_pixel)
+ min_bytes_per_pixel = bytes;
+ }
+ }
+
+ pa_sc_binner_cntl_0 =
+ S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_NEW_SC) |
+ S_028C44_BIN_SIZE_X(0) |
+ S_028C44_BIN_SIZE_Y(0) |
+ S_028C44_BIN_SIZE_X_EXTEND(2) | /* 128 */
+ S_028C44_BIN_SIZE_Y_EXTEND(min_bytes_per_pixel <= 4 ? 2 : 1) | /* 128 or 64 */
+ S_028C44_DISABLE_START_OF_PRIM(1);
+ }
+
+ pipeline->graphics.binning.pa_sc_binner_cntl_0 = pa_sc_binner_cntl_0;
+ pipeline->graphics.binning.db_dfsm_control = db_dfsm_control;
+}
+
static void
radv_pipeline_generate_binning_state(struct radeon_cmdbuf *ctx_cs,
struct radv_pipeline *pipeline,
if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
return;
- uint32_t pa_sc_binner_cntl_0 =
- S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
- S_028C44_DISABLE_START_OF_PRIM(1);
- uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);
-
- VkExtent2D bin_size = radv_compute_bin_size(pipeline, pCreateInfo);
+ VkExtent2D bin_size;
+ if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
+ bin_size = radv_gfx10_compute_bin_size(pipeline, pCreateInfo);
+ } else if (pipeline->device->physical_device->rad_info.chip_class == GFX9) {
+ bin_size = radv_gfx9_compute_bin_size(pipeline, pCreateInfo);
+ } else
+ unreachable("Unhandled generation for binning bin size calculation");
if (pipeline->device->pbb_allowed && bin_size.width && bin_size.height) {
unsigned context_states_per_bin; /* allowed range: [1, 6] */
unsigned persistent_states_per_bin; /* allowed range: [1, 32] */
unsigned fpovs_per_batch; /* allowed range: [0, 255], 0 = unlimited */
- switch (pipeline->device->physical_device->rad_info.family) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
+ if (pipeline->device->physical_device->rad_info.has_dedicated_vram) {
context_states_per_bin = 1;
persistent_states_per_bin = 1;
fpovs_per_batch = 63;
- break;
- case CHIP_RAVEN:
- case CHIP_RAVEN2:
- context_states_per_bin = 6;
- persistent_states_per_bin = 32;
+ } else {
+ /* The context states are affected by the scissor bug. */
+ context_states_per_bin = pipeline->device->physical_device->rad_info.has_gfx9_scissor_bug ? 1 : 6;
+ /* 32 causes hangs for RAVEN. */
+ persistent_states_per_bin = 16;
fpovs_per_batch = 63;
- break;
- default:
- unreachable("unhandled family while determining binning state.");
}
- pa_sc_binner_cntl_0 =
+ const uint32_t pa_sc_binner_cntl_0 =
S_028C44_BINNING_MODE(V_028C44_BINNING_ALLOWED) |
S_028C44_BIN_SIZE_X(bin_size.width == 16) |
S_028C44_BIN_SIZE_Y(bin_size.height == 16) |
S_028C44_DISABLE_START_OF_PRIM(1) |
S_028C44_FPOVS_PER_BATCH(fpovs_per_batch) |
S_028C44_OPTIMAL_BIN_SELECTION(1);
- }
- radeon_set_context_reg(ctx_cs, R_028C44_PA_SC_BINNER_CNTL_0,
- pa_sc_binner_cntl_0);
+ uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);
- if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
- radeon_set_context_reg(ctx_cs, R_028038_DB_DFSM_CONTROL,
- db_dfsm_control);
- } else {
- radeon_set_context_reg(ctx_cs, R_028060_DB_DFSM_CONTROL,
- db_dfsm_control);
- }
+ pipeline->graphics.binning.pa_sc_binner_cntl_0 = pa_sc_binner_cntl_0;
+ pipeline->graphics.binning.db_dfsm_control = db_dfsm_control;
+ } else
+ radv_pipeline_generate_disabled_binning_state(ctx_cs, pipeline, pCreateInfo);
}
radeon_set_context_reg(ctx_cs, R_028808_CB_COLOR_CONTROL, blend->cb_color_control);
radeon_set_context_reg(ctx_cs, R_028B70_DB_ALPHA_TO_MASK, blend->db_alpha_to_mask);
- if (pipeline->device->physical_device->has_rbplus) {
+ if (pipeline->device->physical_device->rad_info.has_rbplus) {
radeon_set_context_reg_seq(ctx_cs, R_028760_SX_MRT0_BLEND_OPT, 8);
radeon_emit_array(ctx_cs, blend->sx_mrt_blend_opt, 8);
struct radv_pipeline *pipeline)
{
const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
-
- uint32_t vgt_primitiveid_en = false;
+ const struct radv_shader_variant *vs =
+ pipeline->shaders[MESA_SHADER_TESS_EVAL] ?
+ pipeline->shaders[MESA_SHADER_TESS_EVAL] :
+ pipeline->shaders[MESA_SHADER_VERTEX];
+ unsigned vgt_primitiveid_en = 0;
uint32_t vgt_gs_mode = 0;
+ if (radv_pipeline_has_ngg(pipeline))
+ return;
+
if (radv_pipeline_has_gs(pipeline)) {
const struct radv_shader_variant *gs =
pipeline->shaders[MESA_SHADER_GEOMETRY];
vgt_gs_mode = ac_vgt_gs_mode(gs->info.gs.vertices_out,
pipeline->device->physical_device->rad_info.chip_class);
- } else if (outinfo->export_prim_id) {
+ } else if (outinfo->export_prim_id || vs->info.info.uses_prim_id) {
vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
- vgt_primitiveid_en = true;
+ vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
}
radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
bool misc_vec_ena = outinfo->writes_pointsize ||
outinfo->writes_layer ||
outinfo->writes_viewport_index;
+ unsigned spi_vs_out_config, nparams;
- radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
- S_0286C4_VS_EXPORT_COUNT(MAX2(1, outinfo->param_exports) - 1));
+ /* VS is required to export at least one param. */
+ nparams = MAX2(outinfo->param_exports, 1);
+ spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);
+
+ if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
+ spi_vs_out_config |= S_0286C4_NO_PC_EXPORT(outinfo->param_exports == 0);
+ }
+
+ radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG, spi_vs_out_config);
radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
radeon_emit(cs, rsrc2);
}
+static void
+radv_pipeline_generate_hw_ngg(struct radeon_cmdbuf *ctx_cs,
+ struct radeon_cmdbuf *cs,
+ struct radv_pipeline *pipeline,
+ struct radv_shader_variant *shader,
+ const struct radv_ngg_state *ngg_state)
+{
+ uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
+ gl_shader_stage es_type =
+ radv_pipeline_has_tess(pipeline) ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
+ struct radv_shader_variant *es =
+ es_type == MESA_SHADER_TESS_EVAL ? pipeline->shaders[MESA_SHADER_TESS_EVAL] : pipeline->shaders[MESA_SHADER_VERTEX];
+
+ radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
+ radeon_emit(cs, va >> 8);
+ radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
+ radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
+ radeon_emit(cs, shader->config.rsrc1);
+ radeon_emit(cs, shader->config.rsrc2);
+
+ const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
+ unsigned clip_dist_mask, cull_dist_mask, total_mask;
+ clip_dist_mask = outinfo->clip_dist_mask;
+ cull_dist_mask = outinfo->cull_dist_mask;
+ total_mask = clip_dist_mask | cull_dist_mask;
+ bool misc_vec_ena = outinfo->writes_pointsize ||
+ outinfo->writes_layer ||
+ outinfo->writes_viewport_index;
+ bool es_enable_prim_id = outinfo->export_prim_id ||
+ (es && es->info.info.uses_prim_id);
+ bool break_wave_at_eoi = false;
+ unsigned ge_cntl;
+ unsigned nparams;
+
+ if (es_type == MESA_SHADER_TESS_EVAL) {
+ struct radv_shader_variant *gs =
+ pipeline->shaders[MESA_SHADER_GEOMETRY];
+
+ if (es_enable_prim_id || (gs && gs->info.info.uses_prim_id))
+ break_wave_at_eoi = true;
+ }
+
+ nparams = MAX2(outinfo->param_exports, 1);
+ radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
+ S_0286C4_VS_EXPORT_COUNT(nparams - 1) |
+ S_0286C4_NO_PC_EXPORT(outinfo->param_exports == 0));
+
+ radeon_set_context_reg(ctx_cs, R_028708_SPI_SHADER_IDX_FORMAT,
+ S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP));
+ radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
+ S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
+ S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE) |
+ S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE) |
+ S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE));
+
+ radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
+ S_028818_VTX_W0_FMT(1) |
+ S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
+ S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
+ S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
+ radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
+ S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
+ S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
+ S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
+ S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
+ S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
+ S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
+ S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
+ cull_dist_mask << 8 |
+ clip_dist_mask);
+
+ radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN,
+ S_028A84_PRIMITIVEID_EN(es_enable_prim_id) |
+ S_028A84_NGG_DISABLE_PROVOK_REUSE(es_enable_prim_id));
+
+ radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
+ ngg_state->vgt_esgs_ring_itemsize);
+
+ /* NGG specific registers. */
+ struct radv_shader_variant *gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
+ uint32_t gs_num_invocations = gs ? gs->info.gs.invocations : 1;
+
+ radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
+ S_028A44_ES_VERTS_PER_SUBGRP(ngg_state->hw_max_esverts) |
+ S_028A44_GS_PRIMS_PER_SUBGRP(ngg_state->max_gsprims) |
+ S_028A44_GS_INST_PRIMS_IN_SUBGRP(ngg_state->max_gsprims * gs_num_invocations));
+ radeon_set_context_reg(ctx_cs, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
+ S_0287FC_MAX_VERTS_PER_SUBGROUP(ngg_state->max_out_verts));
+ radeon_set_context_reg(ctx_cs, R_028B4C_GE_NGG_SUBGRP_CNTL,
+ S_028B4C_PRIM_AMP_FACTOR(ngg_state->prim_amp_factor) |
+ S_028B4C_THDS_PER_SUBGRP(0)); /* for fast launch */
+ radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
+ S_028B90_CNT(gs_num_invocations) |
+ S_028B90_ENABLE(gs_num_invocations > 1) |
+ S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(ngg_state->max_vert_out_per_gs_instance));
+
+ /* User edge flags are set by the pos exports. If user edge flags are
+ * not used, we must use hw-generated edge flags and pass them via
+ * the prim export to prevent drawing lines on internal edges of
+ * decomposed primitives (such as quads) with polygon mode = lines.
+ *
+ * TODO: We should combine hw-generated edge flags with user edge
+ * flags in the shader.
+ */
+ radeon_set_context_reg(ctx_cs, R_028838_PA_CL_NGG_CNTL,
+ S_028838_INDEX_BUF_EDGE_FLAG_ENA(!radv_pipeline_has_tess(pipeline) &&
+ !radv_pipeline_has_gs(pipeline)));
+
+ ge_cntl = S_03096C_PRIM_GRP_SIZE(ngg_state->max_gsprims) |
+ S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts) |
+ S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);
+
+ /* Bug workaround for a possible hang with non-tessellation cases.
+ * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0
+ *
+ * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
+ */
+ if ((pipeline->device->physical_device->rad_info.family == CHIP_NAVI10 ||
+ pipeline->device->physical_device->rad_info.family == CHIP_NAVI12 ||
+ pipeline->device->physical_device->rad_info.family == CHIP_NAVI14) &&
+ !radv_pipeline_has_tess(pipeline) &&
+ ngg_state->hw_max_esverts != 256) {
+ ge_cntl &= C_03096C_VERT_GRP_SIZE;
+
+ if (ngg_state->hw_max_esverts > 5) {
+ ge_cntl |= S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts - 5);
+ }
+ }
+
+ radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL, ge_cntl);
+}
+
static void
radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
struct radv_pipeline *pipeline,
radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
struct radeon_cmdbuf *cs,
struct radv_pipeline *pipeline,
- const struct radv_tessellation_state *tess)
+ const struct radv_tessellation_state *tess,
+ const struct radv_ngg_state *ngg)
{
struct radv_shader_variant *vs;
radv_pipeline_generate_hw_ls(cs, pipeline, vs, tess);
else if (vs->info.vs.as_es)
radv_pipeline_generate_hw_es(cs, pipeline, vs);
+ else if (vs->info.is_ngg)
+ radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, vs, ngg);
else
radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, vs);
}
radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *ctx_cs,
struct radeon_cmdbuf *cs,
struct radv_pipeline *pipeline,
- const struct radv_tessellation_state *tess)
+ const struct radv_tessellation_state *tess,
+ const struct radv_ngg_state *ngg)
{
if (!radv_pipeline_has_tess(pipeline))
return;
tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];
if (tes) {
- if (tes->info.tes.as_es)
+ if (tes->info.is_ngg) {
+ radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, tes, ngg);
+ } else if (tes->info.tes.as_es)
radv_pipeline_generate_hw_es(cs, pipeline, tes);
else
radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, tes);
else
radeon_set_context_reg(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
tess->ls_hs_config);
+
+ if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 &&
+ !radv_pipeline_has_gs(pipeline) && !radv_pipeline_has_ngg(pipeline)) {
+ radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
+ S_028A44_ES_VERTS_PER_SUBGRP(250) |
+ S_028A44_GS_PRIMS_PER_SUBGRP(126) |
+ S_028A44_GS_INST_PRIMS_IN_SUBGRP(126));
+ }
}
static void
-radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *ctx_cs,
- struct radeon_cmdbuf *cs,
- struct radv_pipeline *pipeline,
- const struct radv_gs_state *gs_state)
+radv_pipeline_generate_hw_gs(struct radeon_cmdbuf *ctx_cs,
+ struct radeon_cmdbuf *cs,
+ struct radv_pipeline *pipeline,
+ struct radv_shader_variant *gs,
+ const struct radv_gs_state *gs_state)
{
- struct radv_shader_variant *gs;
unsigned gs_max_out_vertices;
uint8_t *num_components;
uint8_t max_stream;
unsigned offset;
uint64_t va;
- gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
- if (!gs)
- return;
-
gs_max_out_vertices = gs->info.gs.vertices_out;
max_stream = gs->info.info.gs.max_stream;
num_components = gs->info.info.gs.num_stream_output_components;
offset += num_components[3] * gs_max_out_vertices;
radeon_set_context_reg(ctx_cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);
- radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT, gs->info.gs.vertices_out);
-
radeon_set_context_reg_seq(ctx_cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
radeon_emit(ctx_cs, num_components[0]);
radeon_emit(ctx_cs, (max_stream >= 1) ? num_components[1] : 0);
radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, pipeline->gs_copy_shader);
}
+static void
+radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *ctx_cs,
+ struct radeon_cmdbuf *cs,
+ struct radv_pipeline *pipeline,
+ const struct radv_gs_state *gs_state,
+ const struct radv_ngg_state *ngg_state)
+{
+ struct radv_shader_variant *gs;
+
+ gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
+ if (!gs)
+ return;
+
+ if (gs->info.is_ngg)
+ radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, gs, ngg_state);
+ else
+ radv_pipeline_generate_hw_gs(ctx_cs, cs, pipeline, gs, gs_state);
+
+ radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT,
+ gs->info.gs.vertices_out);
+}
+
static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade, bool float16)
{
uint32_t ps_input_cntl;
}
if (ps->info.info.ps.layer_input ||
- ps->info.info.ps.uses_input_attachments ||
ps->info.info.needs_multiview_view_index) {
unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_LAYER];
if (vs_offset != AC_EXP_PARAM_UNDEFINED)
else
z_order = V_02880C_LATE_Z;
- bool disable_rbplus = device->physical_device->has_rbplus &&
- !device->physical_device->rbplus_allowed;
+ bool disable_rbplus = device->physical_device->rad_info.has_rbplus &&
+ !device->physical_device->rad_info.rbplus_allowed;
/* It shouldn't be needed to export gl_SampleMask when MSAA is disabled
* but this appears to break Project Cars (DXVK). See
S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
S_02880C_Z_ORDER(z_order) |
S_02880C_DEPTH_BEFORE_SHADER(ps->info.fs.early_fragment_test) |
+ S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(ps->info.fs.post_depth_coverage) |
S_02880C_EXEC_ON_HIER_FAIL(ps->info.info.ps.writes_memory) |
S_02880C_EXEC_ON_NOOP(ps->info.info.ps.writes_memory) |
S_02880C_DUAL_QUAD_DISABLE(disable_rbplus);
ps->config.spi_ps_input_addr);
radeon_set_context_reg(ctx_cs, R_0286D8_SPI_PS_IN_CONTROL,
- S_0286D8_NUM_INTERP(ps->info.fs.num_interp));
+ S_0286D8_NUM_INTERP(ps->info.fs.num_interp) |
+ S_0286D8_PS_W32_EN(ps->info.info.wave_size == 32));
radeon_set_context_reg(ctx_cs, R_0286E0_SPI_BARYC_CNTL, pipeline->graphics.spi_baryc_cntl);
S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);
if (radv_pipeline_has_gs(pipeline))
- stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
- S_028B54_GS_EN(1) |
- S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
+ stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
+ S_028B54_GS_EN(1);
+ else if (radv_pipeline_has_ngg(pipeline))
+ stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
else
stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
-
- } else if (radv_pipeline_has_gs(pipeline))
+ } else if (radv_pipeline_has_gs(pipeline)) {
stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
- S_028B54_GS_EN(1) |
- S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
+ S_028B54_GS_EN(1);
+ } else if (radv_pipeline_has_ngg(pipeline)) {
+ stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
+ }
+
+ if (radv_pipeline_has_ngg(pipeline)) {
+ stages |= S_028B54_PRIMGEN_EN(1);
+ } else if (radv_pipeline_has_gs(pipeline)) {
+ stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
+ }
if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
+ if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
+ uint8_t hs_size = 64, gs_size = 64, vs_size = 64;
+
+ if (radv_pipeline_has_tess(pipeline))
+ hs_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.wave_size;
+
+ if (pipeline->shaders[MESA_SHADER_GEOMETRY]) {
+ vs_size = gs_size = pipeline->shaders[MESA_SHADER_GEOMETRY]->info.info.wave_size;
+ if (pipeline->gs_copy_shader)
+ vs_size = pipeline->gs_copy_shader->info.info.wave_size;
+ } else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
+ vs_size = pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.info.wave_size;
+ else if (pipeline->shaders[MESA_SHADER_VERTEX])
+ vs_size = pipeline->shaders[MESA_SHADER_VERTEX]->info.info.wave_size;
+
+ if (radv_pipeline_has_ngg(pipeline))
+ gs_size = vs_size;
+
+ /* legacy GS only supports Wave64 */
+ stages |= S_028B54_HS_W32_EN(hs_size == 32 ? 1 : 0) |
+ S_028B54_GS_W32_EN(gs_size == 32 ? 1 : 0) |
+ S_028B54_VS_W32_EN(vs_size == 32 ? 1 : 0);
+ }
+
return stages;
}
const struct radv_blend_state *blend,
const struct radv_tessellation_state *tess,
const struct radv_gs_state *gs,
+ const struct radv_ngg_state *ngg,
unsigned prim, unsigned gs_out)
{
struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
radv_pipeline_generate_raster_state(ctx_cs, pipeline, pCreateInfo);
radv_pipeline_generate_multisample_state(ctx_cs, pipeline);
radv_pipeline_generate_vgt_gs_mode(ctx_cs, pipeline);
- radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline, tess);
- radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline, tess);
- radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline, gs);
+ radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline, tess, ngg);
+ radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline, tess, ngg);
+ radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline, gs, ngg);
radv_pipeline_generate_fragment_shader(ctx_cs, cs, pipeline);
radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
radv_pipeline_generate_binning_state(ctx_cs, pipeline, pCreateInfo);
- if (pipeline->device->physical_device->rad_info.chip_class >= GFX10)
+ if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 && !radv_pipeline_has_ngg(pipeline))
gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline, tess, gs);
radeon_set_context_reg(ctx_cs, R_0286E8_SPI_TMPRING_SIZE,
radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));
if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
- radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
+ radeon_set_uconfig_reg_idx(pipeline->device->physical_device,
+ cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
} else {
radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
}
radv_pipeline_has_gs(pipeline))
ia_multi_vgt_param.partial_vs_wave = true;
/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
- if (device->has_distributed_tess) {
+ if (device->physical_device->rad_info.has_distributed_tess) {
if (radv_pipeline_has_gs(pipeline)) {
if (device->physical_device->rad_info.chip_class <= GFX8)
ia_multi_vgt_param.partial_es_wave = true;
if (radv_pipeline_has_gs(pipeline)) {
gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
+ } else if (radv_pipeline_has_tess(pipeline)) {
+ if (pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.point_mode)
+ gs_out = V_028A6C_OUTPRIM_TYPE_POINTLIST;
+ else
+ gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.primitive_mode);
+ pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
} else {
gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
}
prim = V_008958_DI_PT_RECTLIST;
gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
pipeline->graphics.can_use_guardband = true;
+ if (radv_pipeline_has_ngg(pipeline))
+ gs_out = V_028A6C_VGT_OUT_RECT_V0;
}
pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;
/* prim vertex count will need TESS changes */
* stalls without this setting.
*
* Don't add this to CB_SHADER_MASK.
+ *
+ * GFX10 supports pixel shaders without exports by setting both the
+ * color and Z formats to SPI_SHADER_ZERO. The hw will skip export
+ * instructions if any are present.
*/
struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
- if (!blend.spi_shader_col_format) {
+ if ((pipeline->device->physical_device->rad_info.chip_class <= GFX9 ||
+ ps->info.fs.can_discard) &&
+ !blend.spi_shader_col_format) {
if (!ps->info.info.ps.writes_z &&
!ps->info.info.ps.writes_stencil &&
!ps->info.info.ps.writes_sample_mask)
}
}
+ struct radv_ngg_state ngg = {0};
struct radv_gs_state gs = {0};
- if (radv_pipeline_has_gs(pipeline)) {
+
+ if (radv_pipeline_has_ngg(pipeline)) {
+ ngg = calculate_ngg_info(pCreateInfo, pipeline);
+ } else if (radv_pipeline_has_gs(pipeline)) {
gs = calculate_gs_info(pCreateInfo, pipeline);
calculate_gs_ring_sizes(pipeline, &gs);
}
pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);
result = radv_pipeline_scratch_init(device, pipeline);
- radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, &gs, prim, gs_out);
+ radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, &gs, &ngg, prim, gs_out);
return result;
}
{
struct radv_shader_variant *compute_shader;
struct radv_device *device = pipeline->device;
- unsigned compute_resource_limits;
+ unsigned threads_per_threadgroup;
+ unsigned threadgroups_per_cu = 1;
unsigned waves_per_threadgroup;
+ unsigned max_waves_per_sh = 0;
uint64_t va;
pipeline->cs.buf = malloc(20 * 4);
S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
/* Calculate best compute resource limits. */
- waves_per_threadgroup =
- DIV_ROUND_UP(compute_shader->info.cs.block_size[0] *
- compute_shader->info.cs.block_size[1] *
- compute_shader->info.cs.block_size[2], 64);
- compute_resource_limits =
- S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
+ threads_per_threadgroup = compute_shader->info.cs.block_size[0] *
+ compute_shader->info.cs.block_size[1] *
+ compute_shader->info.cs.block_size[2];
+ waves_per_threadgroup = DIV_ROUND_UP(threads_per_threadgroup,
+ device->physical_device->cs_wave_size);
- if (device->physical_device->rad_info.chip_class >= GFX7) {
- unsigned num_cu_per_se =
- device->physical_device->rad_info.num_good_compute_units /
- device->physical_device->rad_info.max_se;
-
- /* Force even distribution on all SIMDs in CU if the workgroup
- * size is 64. This has shown some good improvements if # of
- * CUs per SE is not a multiple of 4.
- */
- if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
- compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
- }
+ if (device->physical_device->rad_info.chip_class >= GFX10 &&
+ waves_per_threadgroup == 1)
+ threadgroups_per_cu = 2;
radeon_set_sh_reg(&pipeline->cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
- compute_resource_limits);
+ ac_get_compute_resource_limits(&device->physical_device->rad_info,
+ waves_per_threadgroup,
+ max_waves_per_sh,
+ threadgroups_per_cu));
radeon_set_sh_reg_seq(&pipeline->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
radeon_emit(&pipeline->cs,
return result;
}
+
+static uint32_t radv_get_executable_count(const struct radv_pipeline *pipeline)
+{
+ uint32_t ret = 0;
+ for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
+ if (pipeline->shaders[i])
+ ret += i == MESA_SHADER_GEOMETRY ? 2u : 1u;
+ }
+ return ret;
+}
+
+static struct radv_shader_variant *
+radv_get_shader_from_executable_index(const struct radv_pipeline *pipeline, int index, gl_shader_stage *stage)
+{
+ for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
+ if (!pipeline->shaders[i])
+ continue;
+ if (!index) {
+ *stage = i;
+ return pipeline->shaders[i];
+ }
+
+ --index;
+
+ if (i == MESA_SHADER_GEOMETRY) {
+ if (!index) {
+ *stage = i;
+ return pipeline->gs_copy_shader;
+ }
+ --index;
+ }
+ }
+
+ *stage = MESA_SHADER_NONE;
+ return NULL;
+}
+
+/* Basically strlcpy (which does not exist on Linux), specialized for
+ * descriptions. */
+static void desc_copy(char *desc, const char *src) {
+ int len = strlen(src);
+ assert(len < VK_MAX_DESCRIPTION_SIZE);
+ memcpy(desc, src, len);
+ memset(desc + len, 0, VK_MAX_DESCRIPTION_SIZE - len);
+}
+
+VkResult radv_GetPipelineExecutablePropertiesKHR(
+ VkDevice _device,
+ const VkPipelineInfoKHR* pPipelineInfo,
+ uint32_t* pExecutableCount,
+ VkPipelineExecutablePropertiesKHR* pProperties)
+{
+ RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelineInfo->pipeline);
+ const uint32_t total_count = radv_get_executable_count(pipeline);
+
+ if (!pProperties) {
+ *pExecutableCount = total_count;
+ return VK_SUCCESS;
+ }
+
+ const uint32_t count = MIN2(total_count, *pExecutableCount);
+ for (unsigned i = 0, executable_idx = 0;
+ i < MESA_SHADER_STAGES && executable_idx < count; ++i) {
+ if (!pipeline->shaders[i])
+ continue;
+ pProperties[executable_idx].stages = mesa_to_vk_shader_stage(i);
+ const char *name = NULL;
+ const char *description = NULL;
+ switch(i) {
+ case MESA_SHADER_VERTEX:
+ name = "Vertex Shader";
+ description = "Vulkan Vertex Shader";
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ if (!pipeline->shaders[MESA_SHADER_VERTEX]) {
+ pProperties[executable_idx].stages |= VK_SHADER_STAGE_VERTEX_BIT;
+ name = "Vertex + Tessellation Control Shaders";
+ description = "Combined Vulkan Vertex and Tessellation Control Shaders";
+ } else {
+ name = "Tessellation Control Shader";
+ description = "Vulkan Tessellation Control Shader";
+ }
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ name = "Tessellation Evaluation Shader";
+ description = "Vulkan Tessellation Evaluation Shader";
+ break;
+ case MESA_SHADER_GEOMETRY:
+ if (radv_pipeline_has_tess(pipeline) && !pipeline->shaders[MESA_SHADER_TESS_EVAL]) {
+ pProperties[executable_idx].stages |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
+ name = "Tessellation Evaluation + Geometry Shaders";
+ description = "Combined Vulkan Tessellation Evaluation and Geometry Shaders";
+ } else if (!radv_pipeline_has_tess(pipeline) && !pipeline->shaders[MESA_SHADER_VERTEX]) {
+ pProperties[executable_idx].stages |= VK_SHADER_STAGE_VERTEX_BIT;
+ name = "Vertex + Geometry Shader";
+ description = "Combined Vulkan Vertex and Geometry Shaders";
+ } else {
+ name = "Geometry Shader";
+ description = "Vulkan Geometry Shader";
+ }
+ break;
+ case MESA_SHADER_FRAGMENT:
+ name = "Fragment Shader";
+ description = "Vulkan Fragment Shader";
+ break;
+ case MESA_SHADER_COMPUTE:
+ name = "Compute Shader";
+ description = "Vulkan Compute Shader";
+ break;
+ }
+
+ desc_copy(pProperties[executable_idx].name, name);
+ desc_copy(pProperties[executable_idx].description, description);
+
+ ++executable_idx;
+ if (i == MESA_SHADER_GEOMETRY) {
+ assert(pipeline->gs_copy_shader);
+ if (executable_idx >= count)
+ break;
+
+ pProperties[executable_idx].stages = VK_SHADER_STAGE_GEOMETRY_BIT;
+ desc_copy(pProperties[executable_idx].name, "GS Copy Shader");
+ desc_copy(pProperties[executable_idx].description,
+ "Extra shader stage that loads the GS output ringbuffer into the rasterizer");
+
+ ++executable_idx;
+ }
+ }
+
+ for (unsigned i = 0; i < count; ++i)
+ pProperties[i].subgroupSize = 64;
+
+ VkResult result = *pExecutableCount < total_count ? VK_INCOMPLETE : VK_SUCCESS;
+ *pExecutableCount = count;
+ return result;
+}
+
+VkResult radv_GetPipelineExecutableStatisticsKHR(
+ VkDevice _device,
+ const VkPipelineExecutableInfoKHR* pExecutableInfo,
+ uint32_t* pStatisticCount,
+ VkPipelineExecutableStatisticKHR* pStatistics)
+{
+ RADV_FROM_HANDLE(radv_device, device, _device);
+ RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
+ gl_shader_stage stage;
+ struct radv_shader_variant *shader = radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);
+
+ enum chip_class chip_class = device->physical_device->rad_info.chip_class;
+ unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
+ unsigned max_waves = radv_get_max_waves(device, shader, stage);
+
+ VkPipelineExecutableStatisticKHR *s = pStatistics;
+ VkPipelineExecutableStatisticKHR *end = s + (pStatistics ? *pStatisticCount : 0);
+ VkResult result = VK_SUCCESS;
+
+ if (s < end) {
+ desc_copy(s->name, "SGPRs");
+ desc_copy(s->description, "Number of SGPR registers allocated per subgroup");
+ s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ s->value.u64 = shader->config.num_sgprs;
+ }
+ ++s;
+
+ if (s < end) {
+ desc_copy(s->name, "VGPRs");
+ desc_copy(s->description, "Number of VGPR registers allocated per subgroup");
+ s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ s->value.u64 = shader->config.num_vgprs;
+ }
+ ++s;
+
+ if (s < end) {
+ desc_copy(s->name, "Spilled SGPRs");
+ desc_copy(s->description, "Number of SGPR registers spilled per subgroup");
+ s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ s->value.u64 = shader->config.spilled_sgprs;
+ }
+ ++s;
+
+ if (s < end) {
+ desc_copy(s->name, "Spilled VGPRs");
+ desc_copy(s->description, "Number of VGPR registers spilled per subgroup");
+ s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ s->value.u64 = shader->config.spilled_vgprs;
+ }
+ ++s;
+
+ if (s < end) {
+ desc_copy(s->name, "PrivMem VGPRs");
+ desc_copy(s->description, "Number of VGPRs stored in private memory per subgroup");
+ s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ s->value.u64 = shader->info.private_mem_vgprs;
+ }
+ ++s;
+
+ if (s < end) {
+ desc_copy(s->name, "Code size");
+ desc_copy(s->description, "Code size in bytes");
+ s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ s->value.u64 = shader->code_size;
+ }
+ ++s;
+
+ if (s < end) {
+ desc_copy(s->name, "LDS size");
+ desc_copy(s->description, "LDS size in bytes per workgroup");
+ s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ s->value.u64 = shader->config.lds_size * lds_increment;
+ }
+ ++s;
+
+ if (s < end) {
+ desc_copy(s->name, "Scratch size");
+ desc_copy(s->description, "Private memory in bytes per subgroup");
+ s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ s->value.u64 = shader->config.scratch_bytes_per_wave;
+ }
+ ++s;
+
+ if (s < end) {
+ desc_copy(s->name, "Subgroups per SIMD");
+ desc_copy(s->description, "The maximum number of subgroups in flight on a SIMD unit");
+ s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ s->value.u64 = max_waves;
+ }
+ ++s;
+
+ if (!pStatistics)
+ *pStatisticCount = s - pStatistics;
+ else if (s > end) {
+ *pStatisticCount = end - pStatistics;
+ result = VK_INCOMPLETE;
+ } else {
+ *pStatisticCount = s - pStatistics;
+ }
+
+ return result;
+}
+
+static VkResult radv_copy_representation(void *data, size_t *data_size, const char *src)
+{
+ size_t total_size = strlen(src) + 1;
+
+ if (!data) {
+ *data_size = total_size;
+ return VK_SUCCESS;
+ }
+
+ size_t size = MIN2(total_size, *data_size);
+
+ memcpy(data, src, size);
+ if (size)
+ *((char*)data + size - 1) = 0;
+ return size < total_size ? VK_INCOMPLETE : VK_SUCCESS;
+}
+
+VkResult radv_GetPipelineExecutableInternalRepresentationsKHR(
+ VkDevice device,
+ const VkPipelineExecutableInfoKHR* pExecutableInfo,
+ uint32_t* pInternalRepresentationCount,
+ VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
+{
+ RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
+ gl_shader_stage stage;
+ struct radv_shader_variant *shader = radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);
+
+ VkPipelineExecutableInternalRepresentationKHR *p = pInternalRepresentations;
+ VkPipelineExecutableInternalRepresentationKHR *end = p + (pInternalRepresentations ? *pInternalRepresentationCount : 0);
+ VkResult result = VK_SUCCESS;
+ /* optimized NIR */
+ if (p < end) {
+ p->isText = true;
+ desc_copy(p->name, "NIR Shader(s)");
+ desc_copy(p->description, "The optimized NIR shader(s)");
+ if (radv_copy_representation(p->pData, &p->dataSize, shader->nir_string) != VK_SUCCESS)
+ result = VK_INCOMPLETE;
+ }
+ ++p;
+
+ /* LLVM IR */
+ if (p < end) {
+ p->isText = true;
+ desc_copy(p->name, "LLVM IR");
+ desc_copy(p->description, "The LLVM IR after some optimizations");
+ if (radv_copy_representation(p->pData, &p->dataSize, shader->llvm_ir_string) != VK_SUCCESS)
+ result = VK_INCOMPLETE;
+ }
+ ++p;
+
+ /* Disassembler */
+ if (p < end) {
+ p->isText = true;
+ desc_copy(p->name, "Assembly");
+ desc_copy(p->description, "Final Assembly");
+ if (radv_copy_representation(p->pData, &p->dataSize, shader->disasm_string) != VK_SUCCESS)
+ result = VK_INCOMPLETE;
+ }
+ ++p;
+
+ if (!pInternalRepresentations)
+ *pInternalRepresentationCount = p - pInternalRepresentations;
+ else if (p > end) {
+ result = VK_INCOMPLETE;
+ *pInternalRepresentationCount = end - pInternalRepresentations;
+ } else {
+ *pInternalRepresentationCount = p - pInternalRepresentations;
+ }
+
+ return result;
+}