#include "util/mesa-sha1.h"
#include "util/os_time.h"
#include "common/gen_l3_config.h"
+#include "common/gen_disasm.h"
#include "anv_private.h"
#include "compiler/brw_nir.h"
#include "anv_nir.h"
.module = module,
};
struct spirv_to_nir_options spirv_options = {
- .lower_workgroup_access_to_offsets = true,
+ .frag_coord_is_sysval = true,
.caps = {
.demote_to_helper_invocation = true,
.derivative_group = true,
anv_reloc_list_finish(&pipeline->batch_relocs,
pAllocator ? pAllocator : &device->alloc);
+
+ ralloc_free(pipeline->mem_ctx);
+
if (pipeline->blend_state.map)
anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
static void
populate_base_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
struct brw_base_prog_key *key)
{
- key->subgroup_size_type = BRW_SUBGROUP_SIZE_API_CONSTANT;
+ if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
+ key->subgroup_size_type = BRW_SUBGROUP_SIZE_VARYING;
+ else
+ key->subgroup_size_type = BRW_SUBGROUP_SIZE_API_CONSTANT;
populate_sampler_prog_key(devinfo, &key->tex);
}
static void
populate_vs_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
struct brw_vs_prog_key *key)
{
memset(key, 0, sizeof(*key));
- populate_base_prog_key(devinfo, &key->base);
+ populate_base_prog_key(devinfo, flags, &key->base);
/* XXX: Handle vertex input work-arounds */
static void
populate_tcs_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
unsigned input_vertices,
struct brw_tcs_prog_key *key)
{
memset(key, 0, sizeof(*key));
- populate_base_prog_key(devinfo, &key->base);
+ populate_base_prog_key(devinfo, flags, &key->base);
key->input_vertices = input_vertices;
}
static void
populate_tes_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
struct brw_tes_prog_key *key)
{
memset(key, 0, sizeof(*key));
- populate_base_prog_key(devinfo, &key->base);
+ populate_base_prog_key(devinfo, flags, &key->base);
}
static void
populate_gs_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
struct brw_gs_prog_key *key)
{
memset(key, 0, sizeof(*key));
- populate_base_prog_key(devinfo, &key->base);
+ populate_base_prog_key(devinfo, flags, &key->base);
}
static void
populate_wm_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
const struct anv_subpass *subpass,
const VkPipelineMultisampleStateCreateInfo *ms_info,
struct brw_wm_prog_key *key)
{
memset(key, 0, sizeof(*key));
- populate_base_prog_key(devinfo, &key->base);
+ populate_base_prog_key(devinfo, flags, &key->base);
/* We set this to 0 here and set it to the actual value before we call
* brw_compile_fs.
static void
populate_cs_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info,
struct brw_cs_prog_key *key)
{
memset(key, 0, sizeof(*key));
- populate_base_prog_key(devinfo, &key->base);
+ populate_base_prog_key(devinfo, flags, &key->base);
+
+ if (rss_info) {
+ assert(key->base.subgroup_size_type != BRW_SUBGROUP_SIZE_VARYING);
+
+ /* These enum values are expressly chosen to be equal to the subgroup
+ * size that they require.
+ */
+ assert(rss_info->requiredSubgroupSize == 8 ||
+ rss_info->requiredSubgroupSize == 16 ||
+ rss_info->requiredSubgroupSize == 32);
+ key->base.subgroup_size_type = rss_info->requiredSubgroupSize;
+ } else if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) {
+ /* If the client expressly requests full subgroups and they don't
+ * specify a subgroup size, we need to pick one. If they've requested
+ * varying subgroup sizes, we set it to UNIFORM and let the back-end
+ * compiler pick. Otherwise, we specify the API value of 32.
+ * Performance will likely be terrible in this case but there's nothing
+ * we can do about that. The client should have chosen a size.
+ */
+ if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
+ key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM;
+ else
+ key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_REQUIRE_32;
+ }
}
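+/* For reference, a client opts into a fixed subgroup size by chaining the
+ * EXT struct into its VkPipelineShaderStageCreateInfo. A sketch, with
+ * stage_info standing in for the client's create info:
+ *
+ *    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT rss = {
+ *       .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ *       .requiredSubgroupSize = 16,   (8, 16, or 32 on GEN)
+ *    };
+ *    stage_info.pNext = &rss;
+ */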
struct anv_pipeline_stage {
union brw_any_prog_data prog_data;
+ uint32_t num_stats;
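+ /* Fragment shaders can be compiled to as many as three binaries (SIMD8,
+ * SIMD16, and SIMD32), so keep stats and disassembly per dispatch width.
+ * Every other stage only uses element 0.
+ */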
+ struct brw_compile_stats stats[3];
+ char *disasm[3];
+
VkPipelineCreationFeedbackEXT feedback;
+
+ const unsigned *code;
};
static void
if (nir->info.stage == MESA_SHADER_FRAGMENT) {
NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
- NIR_PASS_V(nir, nir_lower_input_attachments, false);
+ NIR_PASS_V(nir, nir_lower_input_attachments, true);
}
NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
if (nir->info.stage != MESA_SHADER_COMPUTE)
NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
- if (nir->info.stage == MESA_SHADER_COMPUTE)
- prog_data->total_shared = nir->num_shared;
-
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
if (nir->num_uniforms > 0) {
brw_nir_link_shaders(compiler, vs_stage->nir, next_stage->nir);
}
-static const unsigned *
+static void
anv_pipeline_compile_vs(const struct brw_compiler *compiler,
void *mem_ctx,
struct anv_device *device,
vs_stage->nir->info.outputs_written,
vs_stage->nir->info.separate_shader);
- return brw_compile_vs(compiler, device, mem_ctx, &vs_stage->key.vs,
- &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
+ vs_stage->num_stats = 1;
+ vs_stage->code = brw_compile_vs(compiler, device, mem_ctx,
+ &vs_stage->key.vs,
+ &vs_stage->prog_data.vs,
+ vs_stage->nir, -1,
+ vs_stage->stats, NULL);
}
static void
tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
}
-static const unsigned *
+static void
anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
void *mem_ctx,
struct anv_device *device,
tcs_stage->key.tcs.patch_outputs_written =
tcs_stage->nir->info.patch_outputs_written;
- return brw_compile_tcs(compiler, device, mem_ctx, &tcs_stage->key.tcs,
- &tcs_stage->prog_data.tcs, tcs_stage->nir,
- -1, NULL);
+ tcs_stage->num_stats = 1;
+ tcs_stage->code = brw_compile_tcs(compiler, device, mem_ctx,
+ &tcs_stage->key.tcs,
+ &tcs_stage->prog_data.tcs,
+ tcs_stage->nir, -1,
+ tcs_stage->stats, NULL);
}
static void
brw_nir_link_shaders(compiler, tes_stage->nir, next_stage->nir);
}
-static const unsigned *
+static void
anv_pipeline_compile_tes(const struct brw_compiler *compiler,
void *mem_ctx,
struct anv_device *device,
tes_stage->key.tes.patch_inputs_read =
tcs_stage->nir->info.patch_outputs_written;
- return brw_compile_tes(compiler, device, mem_ctx, &tes_stage->key.tes,
- &tcs_stage->prog_data.tcs.base.vue_map,
- &tes_stage->prog_data.tes, tes_stage->nir,
- NULL, -1, NULL);
+ tes_stage->num_stats = 1;
+ tes_stage->code = brw_compile_tes(compiler, device, mem_ctx,
+ &tes_stage->key.tes,
+ &tcs_stage->prog_data.tcs.base.vue_map,
+ &tes_stage->prog_data.tes,
+ tes_stage->nir, NULL, -1,
+ tes_stage->stats, NULL);
}
static void
brw_nir_link_shaders(compiler, gs_stage->nir, next_stage->nir);
}
-static const unsigned *
+static void
anv_pipeline_compile_gs(const struct brw_compiler *compiler,
void *mem_ctx,
struct anv_device *device,
gs_stage->nir->info.outputs_written,
gs_stage->nir->info.separate_shader);
- return brw_compile_gs(compiler, device, mem_ctx, &gs_stage->key.gs,
- &gs_stage->prog_data.gs, gs_stage->nir,
- NULL, -1, NULL);
+ gs_stage->num_stats = 1;
+ gs_stage->code = brw_compile_gs(compiler, device, mem_ctx,
+ &gs_stage->key.gs,
+ &gs_stage->prog_data.gs,
+ gs_stage->nir, NULL, -1,
+ gs_stage->stats, NULL);
}
static void
stage->bind_map.surface_count += num_rts;
}
-static const unsigned *
+static void
anv_pipeline_compile_fs(const struct brw_compiler *compiler,
void *mem_ctx,
struct anv_device *device,
fs_stage->key.wm.input_slots_valid =
prev_stage->prog_data.vue.vue_map.slots_valid;
- const unsigned *code =
- brw_compile_fs(compiler, device, mem_ctx, &fs_stage->key.wm,
- &fs_stage->prog_data.wm, fs_stage->nir,
- NULL, -1, -1, -1, true, false, NULL, NULL);
+ fs_stage->code = brw_compile_fs(compiler, device, mem_ctx,
+ &fs_stage->key.wm,
+ &fs_stage->prog_data.wm,
+ fs_stage->nir, NULL, -1, -1, -1,
+ true, false, NULL,
+ fs_stage->stats, NULL);
+
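+ /* The dispatch_8/16/32 fields are single-bit booleans, so this sum is
+ * exactly the number of SIMD variants brw_compile_fs() emitted.
+ */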
+ fs_stage->num_stats = (uint32_t)fs_stage->prog_data.wm.dispatch_8 +
+ (uint32_t)fs_stage->prog_data.wm.dispatch_16 +
+ (uint32_t)fs_stage->prog_data.wm.dispatch_32;
if (fs_stage->key.wm.nr_color_regions == 0 &&
!fs_stage->prog_data.wm.has_side_effects &&
*/
memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
}
- return code;
+}
+
+static void
+anv_pipeline_add_executable(struct anv_pipeline *pipeline,
+ struct anv_pipeline_stage *stage,
+ struct brw_compile_stats *stats,
+ uint32_t code_offset)
+{
+ char *disasm = NULL;
+ if (stage->code &&
+ (pipeline->flags &
+ VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
+ char *stream_data = NULL;
+ size_t stream_size = 0;
+ FILE *stream = open_memstream(&stream_data, &stream_size);
+
+ /* Creating this is far cheaper than it looks. It's perfectly fine to
+ * do it for every binary.
+ */
+ struct gen_disasm *d = gen_disasm_create(&pipeline->device->info);
+ gen_disasm_disassemble(d, stage->code, code_offset, stream);
+ gen_disasm_destroy(d);
+
+ fclose(stream);
+
+ /* Copy it into a ralloc'd buffer so it's freed along with the pipeline */
+ disasm = ralloc_size(pipeline->mem_ctx, stream_size + 1);
+ memcpy(disasm, stream_data, stream_size);
+ disasm[stream_size] = 0;
+
+ free(stream_data);
+ }
+
+ pipeline->executables[pipeline->num_executables++] =
+ (struct anv_pipeline_executable) {
+ .stage = stage->stage,
+ .stats = *stats,
+ .disasm = disasm,
+ };
+}
+
+static void
+anv_pipeline_add_executables(struct anv_pipeline *pipeline,
+ struct anv_pipeline_stage *stage,
+ struct anv_shader_bin *bin)
+{
+ if (stage->stage == MESA_SHADER_FRAGMENT) {
+ /* We pull the prog data and stats out of the anv_shader_bin because
+ * the anv_pipeline_stage may not be fully populated if we successfully
+ * looked up the shader in a cache.
+ */
+ const struct brw_wm_prog_data *wm_prog_data =
+ (const struct brw_wm_prog_data *)bin->prog_data;
+ struct brw_compile_stats *stats = bin->stats;
+
+ if (wm_prog_data->dispatch_8) {
+ anv_pipeline_add_executable(pipeline, stage, stats++, 0);
+ }
+ if (wm_prog_data->dispatch_16) {
+ anv_pipeline_add_executable(pipeline, stage, stats++,
+ wm_prog_data->prog_offset_16);
+ }
+
+ if (wm_prog_data->dispatch_32) {
+ anv_pipeline_add_executable(pipeline, stage, stats++,
+ wm_prog_data->prog_offset_32);
+ }
+ } else {
+ anv_pipeline_add_executable(pipeline, stage, bin->stats, 0);
+ }
}
static VkResult
const struct gen_device_info *devinfo = &pipeline->device->info;
switch (stage) {
case MESA_SHADER_VERTEX:
- populate_vs_prog_key(devinfo, &stages[stage].key.vs);
+ populate_vs_prog_key(devinfo, sinfo->flags, &stages[stage].key.vs);
break;
case MESA_SHADER_TESS_CTRL:
- populate_tcs_prog_key(devinfo,
+ populate_tcs_prog_key(devinfo, sinfo->flags,
info->pTessellationState->patchControlPoints,
&stages[stage].key.tcs);
break;
case MESA_SHADER_TESS_EVAL:
- populate_tes_prog_key(devinfo, &stages[stage].key.tes);
+ populate_tes_prog_key(devinfo, sinfo->flags, &stages[stage].key.tes);
break;
case MESA_SHADER_GEOMETRY:
- populate_gs_prog_key(devinfo, &stages[stage].key.gs);
+ populate_gs_prog_key(devinfo, sinfo->flags, &stages[stage].key.gs);
break;
case MESA_SHADER_FRAGMENT:
- populate_wm_prog_key(devinfo, pipeline->subpass,
+ populate_wm_prog_key(devinfo, sinfo->flags,
+ pipeline->subpass,
info->pMultisampleState,
&stages[stage].key.wm);
break;
unsigned char sha1[20];
anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
- unsigned found = 0;
- unsigned cache_hits = 0;
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
if (!stages[s].entrypoint)
continue;
- int64_t stage_start = os_time_get_nano();
-
stages[s].cache_key.stage = s;
memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
+ }
- bool cache_hit;
- struct anv_shader_bin *bin =
- anv_device_search_for_kernel(pipeline->device, cache,
- &stages[s].cache_key,
- sizeof(stages[s].cache_key), &cache_hit);
- if (bin) {
- found++;
- pipeline->shaders[s] = bin;
- }
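+ /* If the client wants to capture the internal representations, we have
+ * to run the compiler so the disassembly exists; skip the cache lookup
+ * entirely.
+ */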
+ const bool skip_cache_lookup =
+ (pipeline->flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
- if (cache_hit) {
- cache_hits++;
- stages[s].feedback.flags |=
- VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
- }
- stages[s].feedback.duration += os_time_get_nano() - stage_start;
- }
+ if (!skip_cache_lookup) {
+ unsigned found = 0;
+ unsigned cache_hits = 0;
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (!stages[s].entrypoint)
+ continue;
+
+ int64_t stage_start = os_time_get_nano();
+
+ bool cache_hit;
+ struct anv_shader_bin *bin =
+ anv_device_search_for_kernel(pipeline->device, cache,
+ &stages[s].cache_key,
+ sizeof(stages[s].cache_key), &cache_hit);
+ if (bin) {
+ found++;
+ pipeline->shaders[s] = bin;
+ }
- if (found == __builtin_popcount(pipeline->active_stages)) {
- if (cache_hits == found) {
- pipeline_feedback.flags |=
- VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ if (cache_hit) {
+ cache_hits++;
+ stages[s].feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ }
+ stages[s].feedback.duration += os_time_get_nano() - stage_start;
}
- /* We found all our shaders in the cache. We're done. */
- goto done;
- } else if (found > 0) {
- /* We found some but not all of our shaders. This shouldn't happen
- * most of the time but it can if we have a partially populated
- * pipeline cache.
- */
- assert(found < __builtin_popcount(pipeline->active_stages));
-
- vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
- VK_DEBUG_REPORT_WARNING_BIT_EXT |
- VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
- (uint64_t)(uintptr_t)cache,
- 0, 0, "anv",
- "Found a partial pipeline in the cache. This is "
- "most likely caused by an incomplete pipeline cache "
- "import or export");
-
- /* We're going to have to recompile anyway, so just throw away our
- * references to the shaders in the cache. We'll get them out of the
- * cache again as part of the compilation process.
- */
- for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
- stages[s].feedback.flags = 0;
- if (pipeline->shaders[s]) {
- anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
- pipeline->shaders[s] = NULL;
+
+ if (found == __builtin_popcount(pipeline->active_stages)) {
+ if (cache_hits == found) {
+ pipeline_feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ }
+ /* We found all our shaders in the cache. We're done. */
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (!stages[s].entrypoint)
+ continue;
+
+ anv_pipeline_add_executables(pipeline, &stages[s],
+ pipeline->shaders[s]);
+ }
+ goto done;
+ } else if (found > 0) {
+ /* We found some but not all of our shaders. This shouldn't happen
+ * most of the time but it can if we have a partially populated
+ * pipeline cache.
+ */
+ assert(found < __builtin_popcount(pipeline->active_stages));
+
+ vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
+ VK_DEBUG_REPORT_WARNING_BIT_EXT |
+ VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
+ (uint64_t)(uintptr_t)cache,
+ 0, 0, "anv",
+ "Found a partial pipeline in the cache. This is "
+ "most likely caused by an incomplete pipeline cache "
+ "import or export");
+
+ /* We're going to have to recompile anyway, so just throw away our
+ * references to the shaders in the cache. We'll get them out of the
+ * cache again as part of the compilation process.
+ */
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ stages[s].feedback.flags = 0;
+ if (pipeline->shaders[s]) {
+ anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
+ pipeline->shaders[s] = NULL;
+ }
}
}
}
anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
- const unsigned *code;
switch (s) {
case MESA_SHADER_VERTEX:
- code = anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
- &stages[s]);
+ anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
+ &stages[s]);
break;
case MESA_SHADER_TESS_CTRL:
- code = anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
- &stages[s], prev_stage);
+ anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
break;
case MESA_SHADER_TESS_EVAL:
- code = anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
- &stages[s], prev_stage);
+ anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
break;
case MESA_SHADER_GEOMETRY:
- code = anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
- &stages[s], prev_stage);
+ anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
break;
case MESA_SHADER_FRAGMENT:
- code = anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
- &stages[s], prev_stage);
+ anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
break;
default:
unreachable("Invalid graphics shader stage");
}
- if (code == NULL) {
+ if (stages[s].code == NULL) {
ralloc_free(stage_ctx);
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail;
anv_device_upload_kernel(pipeline->device, cache,
&stages[s].cache_key,
sizeof(stages[s].cache_key),
- code, stages[s].prog_data.base.program_size,
+ stages[s].code,
+ stages[s].prog_data.base.program_size,
stages[s].nir->constant_data,
stages[s].nir->constant_data_size,
&stages[s].prog_data.base,
brw_prog_data_size(s),
+ stages[s].stats, stages[s].num_stats,
xfb_info, &stages[s].bind_map);
if (!bin) {
ralloc_free(stage_ctx);
goto fail;
}
+ anv_pipeline_add_executables(pipeline, &stages[s], bin);
+
pipeline->shaders[s] = bin;
ralloc_free(stage_ctx);
return result;
}
+static void
+shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
+{
+ assert(glsl_type_is_vector_or_scalar(type));
+
+ uint32_t comp_size = glsl_type_is_boolean(type)
+ ? 4 : glsl_get_bit_size(type) / 8;
+ unsigned length = glsl_get_vector_elements(type);
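+ /* e.g. a vec3 of 32-bit floats: comp_size = 4 and length = 3, giving
+ * size = 12 and align = 16 (vec3 gets vec4 alignment, std430-style).
+ */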
+ *size = comp_size * length;
+ *align = comp_size * (length == 3 ? 4 : length);
+}
+
VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
struct anv_pipeline_cache *cache,
struct anv_shader_bin *bin = NULL;
- populate_cs_prog_key(&pipeline->device->info, &stage.key.cs);
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info =
+ vk_find_struct_const(info->stage.pNext,
+ PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
+
+ populate_cs_prog_key(&pipeline->device->info, info->stage.flags,
+ rss_info, &stage.key.cs);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
+ const bool skip_cache_lookup =
+ (pipeline->flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
+
anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
- bool cache_hit;
- bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
- sizeof(stage.cache_key), &cache_hit);
+ bool cache_hit = false;
+ if (!skip_cache_lookup) {
+ bin = anv_device_search_for_kernel(pipeline->device, cache,
+ &stage.cache_key,
+ sizeof(stage.cache_key),
+ &cache_hit);
+ }
+
+ void *mem_ctx = ralloc_context(NULL);
if (bin == NULL) {
int64_t stage_start = os_time_get_nano();
.set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
};
- void *mem_ctx = ralloc_context(NULL);
-
stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
if (stage.nir == NULL) {
ralloc_free(mem_ctx);
NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
&stage.prog_data.cs);
- const unsigned *shader_code =
- brw_compile_cs(compiler, pipeline->device, mem_ctx, &stage.key.cs,
- &stage.prog_data.cs, stage.nir, -1, NULL);
- if (shader_code == NULL) {
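+ /* Assign explicit offsets to shared variables and lower shared-memory
+ * access to 32-bit offset arithmetic so the back-end sees plain
+ * load/store messages into the workgroup's shared block.
+ */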
+ NIR_PASS_V(stage.nir, nir_lower_vars_to_explicit_types,
+ nir_var_mem_shared, shared_type_info);
+ NIR_PASS_V(stage.nir, nir_lower_explicit_io,
+ nir_var_mem_shared, nir_address_format_32bit_offset);
+
+ stage.num_stats = 1;
+ stage.code = brw_compile_cs(compiler, pipeline->device, mem_ctx,
+ &stage.key.cs, &stage.prog_data.cs,
+ stage.nir, -1, stage.stats, NULL);
+ if (stage.code == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
const unsigned code_size = stage.prog_data.base.program_size;
bin = anv_device_upload_kernel(pipeline->device, cache,
&stage.cache_key, sizeof(stage.cache_key),
- shader_code, code_size,
+ stage.code, code_size,
stage.nir->constant_data,
stage.nir->constant_data_size,
&stage.prog_data.base,
sizeof(stage.prog_data.cs),
+ stage.stats, stage.num_stats,
NULL, &stage.bind_map);
if (!bin) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- ralloc_free(mem_ctx);
-
stage.feedback.duration = os_time_get_nano() - stage_start;
}
+ anv_pipeline_add_executables(pipeline, &stage, bin);
+
+ ralloc_free(mem_ctx);
+
if (cache_hit) {
stage.feedback.flags |=
VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
if (pCreateInfo->pDynamicState) {
/* Remove all of the states that are marked as dynamic */
uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
- for (uint32_t s = 0; s < count; s++)
- states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
+ for (uint32_t s = 0; s < count; s++) {
+ states &= ~anv_cmd_dirty_bit_for_vk_dynamic_state(
+ pCreateInfo->pDynamicState->pDynamicStates[s]);
+ }
}
struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
assert(pCreateInfo->pViewportState);
dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
- if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
typed_memcpy(dynamic->viewport.viewports,
pCreateInfo->pViewportState->pViewports,
pCreateInfo->pViewportState->viewportCount);
}
dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
- if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
typed_memcpy(dynamic->scissor.scissors,
pCreateInfo->pViewportState->pScissors,
pCreateInfo->pViewportState->scissorCount);
}
}
- if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
assert(pCreateInfo->pRasterizationState);
dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
}
- if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS) {
assert(pCreateInfo->pRasterizationState);
dynamic->depth_bias.bias =
pCreateInfo->pRasterizationState->depthBiasConstantFactor;
!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
assert(pCreateInfo->pColorBlendState);
- if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
+ if (states & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
typed_memcpy(dynamic->blend_constants,
pCreateInfo->pColorBlendState->blendConstants, 4);
}
subpass->depth_stencil_attachment) {
assert(pCreateInfo->pDepthStencilState);
- if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS) {
dynamic->depth_bounds.min =
pCreateInfo->pDepthStencilState->minDepthBounds;
dynamic->depth_bounds.max =
pCreateInfo->pDepthStencilState->maxDepthBounds;
}
- if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) {
dynamic->stencil_compare_mask.front =
pCreateInfo->pDepthStencilState->front.compareMask;
dynamic->stencil_compare_mask.back =
pCreateInfo->pDepthStencilState->back.compareMask;
}
- if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) {
dynamic->stencil_write_mask.front =
pCreateInfo->pDepthStencilState->front.writeMask;
dynamic->stencil_write_mask.back =
pCreateInfo->pDepthStencilState->back.writeMask;
}
- if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) {
dynamic->stencil_reference.front =
pCreateInfo->pDepthStencilState->front.reference;
dynamic->stencil_reference.back =
}
}
+ const VkPipelineRasterizationLineStateCreateInfoEXT *line_state =
+ vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
+ PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
+ if (line_state) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE) {
+ dynamic->line_stipple.factor = line_state->lineStippleFactor;
+ dynamic->line_stipple.pattern = line_state->lineStipplePattern;
+ }
+ }
+
pipeline->dynamic_state_mask = states;
}
pipeline->batch.relocs = &pipeline->batch_relocs;
pipeline->batch.status = VK_SUCCESS;
+ pipeline->mem_ctx = ralloc_context(NULL);
+ pipeline->flags = pCreateInfo->flags;
+
copy_non_dynamic_state(pipeline, pCreateInfo);
pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
pCreateInfo->pRasterizationState->depthClampEnable;
* of various prog_data pointers. Make them NULL by default.
*/
memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
+ pipeline->num_executables = 0;
result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
if (result != VK_SUCCESS) {
+ ralloc_free(pipeline->mem_ctx);
anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
return result;
}
return VK_SUCCESS;
}
+
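+/* snprintf() into a fixed-size struct field, zeroing the tail and
+ * asserting that the formatted string (including its NUL) fit.
+ */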
+#define WRITE_STR(field, ...) ({ \
+ memset(field, 0, sizeof(field)); \
+ UNUSED int i = snprintf(field, sizeof(field), __VA_ARGS__); \
+ assert(i > 0 && i < sizeof(field)); \
+})
+
+VkResult anv_GetPipelineExecutablePropertiesKHR(
+ VkDevice device,
+ const VkPipelineInfoKHR* pPipelineInfo,
+ uint32_t* pExecutableCount,
+ VkPipelineExecutablePropertiesKHR* pProperties)
+{
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, pPipelineInfo->pipeline);
+ VK_OUTARRAY_MAKE(out, pProperties, pExecutableCount);
+
+ for (uint32_t i = 0; i < pipeline->num_executables; i++) {
+ vk_outarray_append(&out, props) {
+ gl_shader_stage stage = pipeline->executables[i].stage;
+ props->stages = mesa_to_vk_shader_stage(stage);
+
+ unsigned simd_width = pipeline->executables[i].stats.dispatch_width;
+ if (stage == MESA_SHADER_FRAGMENT) {
+ WRITE_STR(props->name, "%s%d %s",
+ simd_width ? "SIMD" : "vec",
+ simd_width ? simd_width : 4,
+ _mesa_shader_stage_to_string(stage));
+ } else {
+ WRITE_STR(props->name, "%s", _mesa_shader_stage_to_string(stage));
+ }
+ WRITE_STR(props->description, "%s%d %s shader",
+ simd_width ? "SIMD" : "vec",
+ simd_width ? simd_width : 4,
+ _mesa_shader_stage_to_string(stage));
+
+ /* The compiler gives us a dispatch width of 0 for vec4 but Vulkan
+ * wants a subgroup size of 1.
+ */
+ props->subgroupSize = MAX2(simd_width, 1);
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+VkResult anv_GetPipelineExecutableStatisticsKHR(
+ VkDevice device,
+ const VkPipelineExecutableInfoKHR* pExecutableInfo,
+ uint32_t* pStatisticCount,
+ VkPipelineExecutableStatisticKHR* pStatistics)
+{
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
+ VK_OUTARRAY_MAKE(out, pStatistics, pStatisticCount);
+
+ assert(pExecutableInfo->executableIndex < pipeline->num_executables);
+ const struct anv_pipeline_executable *exe =
+ &pipeline->executables[pExecutableInfo->executableIndex];
+ const struct brw_stage_prog_data *prog_data =
+ pipeline->shaders[exe->stage]->prog_data;
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Instruction Count");
+ WRITE_STR(stat->description,
+ "Number of GEN instructions in the final generated "
+ "shader executable.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.instructions;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Loop Count");
+ WRITE_STR(stat->description,
+ "Number of loops (not unrolled) in the final generated "
+ "shader executable.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.loops;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Cycle Count");
+ WRITE_STR(stat->description,
+ "Estimate of the number of EU cycles required to execute "
+ "the final generated executable. This is an estimate only "
+ "and may vary greatly from actual run-time performance.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.cycles;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Spill Count");
+ WRITE_STR(stat->description,
+ "Number of scratch spill operations. This gives a rough "
+ "estimate of the cost incurred due to spilling temporary "
+ "values to memory. If this is non-zero, you may want to "
+ "adjust your shader to reduce register pressure.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.spills;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Fill Count");
+ WRITE_STR(stat->description,
+ "Number of scratch fill operations. This gives a rough "
+ "estimate of the cost incurred due to spilling temporary "
+ "values to memory. If this is non-zero, you may want to "
+ "adjust your shader to reduce register pressure.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.fills;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Scratch Memory Size");
+ WRITE_STR(stat->description,
+ "Number of bytes of scratch memory required by the "
+ "generated shader executable. If this is non-zero, you "
+ "may want to adjust your shader to reduce register "
+ "pressure.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = prog_data->total_scratch;
+ }
+
+ if (exe->stage == MESA_SHADER_COMPUTE) {
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Workgroup Memory Size");
+ WRITE_STR(stat->description,
+ "Number of bytes of workgroup shared memory used by this "
+ "compute shader including any padding.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = prog_data->total_shared;
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+static bool
+write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
+ const char *data)
+{
+ ir->isText = VK_TRUE;
+
+ size_t data_len = strlen(data) + 1;
+
+ if (ir->pData == NULL) {
+ ir->dataSize = data_len;
+ return true;
+ }
+
+ strncpy(ir->pData, data, ir->dataSize);
+ if (ir->dataSize < data_len) {
+ /* Keep truncated text NUL-terminated; the caller reports VK_INCOMPLETE. */
+ if (ir->dataSize > 0)
+ ((char *)ir->pData)[ir->dataSize - 1] = 0;
+ return false;
+ }
+
+ ir->dataSize = data_len;
+ return true;
+}
+
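+/* write_ir_text() above follows the usual Vulkan array-query contract for
+ * this entry point: a first call with pData == NULL only reports the
+ * required dataSize; a second call with a buffer copies the text, and a
+ * too-small buffer yields a truncated copy and VK_INCOMPLETE.
+ */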
+VkResult anv_GetPipelineExecutableInternalRepresentationsKHR(
+ VkDevice device,
+ const VkPipelineExecutableInfoKHR* pExecutableInfo,
+ uint32_t* pInternalRepresentationCount,
+ VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
+{
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
+ VK_OUTARRAY_MAKE(out, pInternalRepresentations,
+ pInternalRepresentationCount);
+ bool incomplete_text = false;
+
+ assert(pExecutableInfo->executableIndex < pipeline->num_executables);
+ const struct anv_pipeline_executable *exe =
+ &pipeline->executables[pExecutableInfo->executableIndex];
+
+ if (exe->disasm) {
+ vk_outarray_append(&out, ir) {
+ WRITE_STR(ir->name, "GEN Assembly");
+ WRITE_STR(ir->description,
+ "Final GEN assembly for the generated shader binary");
+
+ if (!write_ir_text(ir, exe->disasm))
+ incomplete_text = true;
+ }
+ }
+
+ return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
+}