#include "util/mesa-sha1.h"
#include "util/os_time.h"
#include "common/gen_l3_config.h"
+#include "common/gen_disasm.h"
#include "anv_private.h"
#include "compiler/brw_nir.h"
#include "anv_nir.h"
const char *message)
{
struct anv_spirv_debug_data *debug_data = private_data;
+ struct anv_instance *instance = debug_data->device->physical->instance;
+
static const VkDebugReportFlagsEXT vk_flags[] = {
[NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
[NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s", (unsigned long) spirv_offset, message);
- vk_debug_report(&debug_data->device->instance->debug_report_callbacks,
+ vk_debug_report(&instance->debug_report_callbacks,
vk_flags[level],
VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
(uint64_t) (uintptr_t) debug_data->module,
gl_shader_stage stage,
const VkSpecializationInfo *spec_info)
{
- const struct anv_physical_device *pdevice =
- &device->instance->physicalDevice;
+ const struct anv_physical_device *pdevice = device->physical;
const struct brw_compiler *compiler = pdevice->compiler;
const nir_shader_compiler_options *nir_options =
compiler->glsl_compiler_options[stage].NirOptions;
};
struct spirv_to_nir_options spirv_options = {
.frag_coord_is_sysval = true,
+ .use_scoped_memory_barrier = true,
.caps = {
.demote_to_helper_invocation = true,
.derivative_group = true,
.int16 = pdevice->info.gen >= 8,
.int64 = pdevice->info.gen >= 8,
.int64_atomics = pdevice->info.gen >= 9 && pdevice->use_softpin,
+ .integer_functions2 = pdevice->info.gen >= 8,
.min_lod = true,
.multiview = true,
.physical_storage_buffer_address = pdevice->has_a64_buffer_access,
.post_depth_coverage = pdevice->info.gen >= 9,
.runtime_descriptor_array = true,
+ .float_controls = pdevice->info.gen >= 8,
+ .shader_clock = true,
.shader_viewport_index_layer = true,
.stencil_export = pdevice->info.gen >= 9,
.storage_8bit = pdevice->info.gen >= 8,
.tessellation = true,
.transform_feedback = pdevice->info.gen >= 8,
.variable_pointers = true,
+ .vk_memory_model = true,
+ .vk_memory_model_device_scope = true,
},
.ubo_addr_format = nir_address_format_32bit_index_offset,
.ssbo_addr_format =
* inline functions. That way they get properly initialized at the top
* of the function and not at the top of its caller.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
+ NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
NIR_PASS_V(nir, nir_opt_deref);
* nir_remove_dead_variables and split_per_member_structs below see the
* corresponding stores.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
+ NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
/* Split member structs. We do this before lower_io_to_temporaries so that
* it doesn't lower system values to temporaries by accident.
anv_reloc_list_finish(&pipeline->batch_relocs,
pAllocator ? pAllocator : &device->alloc);
+
+ ralloc_free(pipeline->mem_ctx);
+
if (pipeline->blend_state.map)
anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
key->color_outputs_valid |= (1 << i);
}
- key->nr_color_regions = util_bitcount(key->color_outputs_valid);
+ key->nr_color_regions = subpass->color_count;
/* To reduce possible shader recompilations we would need to know if
* there is a SampleMask output variable to compute if we should emit
uint32_t num_stats;
struct brw_compile_stats stats[3];
+ char *disasm[3];
VkPipelineCreationFeedbackEXT feedback;
struct anv_pipeline_stage *stage)
{
const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
+ pipeline->device->physical->compiler;
const nir_shader_compiler_options *nir_options =
compiler->glsl_compiler_options[stage->stage].NirOptions;
nir_shader *nir;
struct anv_pipeline_stage *stage,
struct anv_pipeline_layout *layout)
{
- const struct anv_physical_device *pdevice =
- &pipeline->device->instance->physicalDevice;
+ const struct anv_physical_device *pdevice = pipeline->device->physical;
const struct brw_compiler *compiler = pdevice->compiler;
struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
- NIR_PASS_V(nir, anv_nir_lower_push_constants);
-
if (nir->info.stage != MESA_SHADER_COMPUTE)
NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
- if (nir->num_uniforms > 0) {
- assert(prog_data->nr_params == 0);
-
- /* If the shader uses any push constants at all, we'll just give
- * them the maximum possible number
- */
- assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
- nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
- prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
- prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);
-
- /* We now set the param values to be offsets into a
- * anv_push_constant_data structure. Since the compiler doesn't
- * actually dereference any of the gl_constant_value pointers in the
- * params array, it doesn't really matter what we put here.
- */
- struct anv_push_constants *null_data = NULL;
- /* Fill out the push constants section of the param array */
- for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
- prog_data->param[i] = ANV_PARAM_PUSH(
- (uintptr_t)&null_data->client_data[i * sizeof(float)]);
- }
- }
-
if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
pipeline->needs_data_cache = true;
nir_address_format_64bit_global);
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
- if (layout) {
- anv_nir_apply_pipeline_layout(pdevice,
- pipeline->device->robust_buffer_access,
- layout, nir, prog_data,
- &stage->bind_map);
-
- NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
- nir_address_format_32bit_index_offset);
- NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
- anv_nir_ssbo_addr_format(pdevice,
- pipeline->device->robust_buffer_access));
-
- NIR_PASS_V(nir, nir_opt_constant_folding);
-
- /* We don't support non-uniform UBOs and non-uniform SSBO access is
- * handled naturally by falling back to A64 messages.
- */
- NIR_PASS_V(nir, nir_lower_non_uniform_access,
- nir_lower_non_uniform_texture_access |
- nir_lower_non_uniform_image_access);
- }
+ anv_nir_apply_pipeline_layout(pdevice,
+ pipeline->device->robust_buffer_access,
+ layout, nir, &stage->bind_map);
- if (nir->info.stage != MESA_SHADER_COMPUTE)
- brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
+ nir_address_format_32bit_index_offset);
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
+ anv_nir_ssbo_addr_format(pdevice,
+ pipeline->device->robust_buffer_access));
+
+ NIR_PASS_V(nir, nir_opt_constant_folding);
+
+ /* We don't support non-uniform UBOs and non-uniform SSBO access is
+ * handled naturally by falling back to A64 messages.
+ */
+ NIR_PASS_V(nir, nir_lower_non_uniform_access,
+ nir_lower_non_uniform_texture_access |
+ nir_lower_non_uniform_image_access);
- assert(nir->num_uniforms == prog_data->nr_params * 4);
+ anv_nir_compute_push_layout(pdevice, nir, prog_data,
+ &stage->bind_map, mem_ctx);
stage->nir = nir;
}
&tes_stage->key.tes,
&tcs_stage->prog_data.tcs.base.vue_map,
&tes_stage->prog_data.tes,
- tes_stage->nir, NULL, -1,
+ tes_stage->nir, -1,
tes_stage->stats, NULL);
}
anv_pipeline_link_fs(const struct brw_compiler *compiler,
struct anv_pipeline_stage *stage)
{
- unsigned num_rts = 0;
- const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
- struct anv_pipeline_binding rt_bindings[max_rt];
- nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
- int rt_to_bindings[max_rt];
- memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
- bool rt_used[max_rt];
- memset(rt_used, 0, sizeof(rt_used));
-
- /* Flag used render targets */
- nir_foreach_variable_safe(var, &stage->nir->outputs) {
- if (var->data.location < FRAG_RESULT_DATA0)
- continue;
-
- const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
- /* Out-of-bounds */
- if (rt >= MAX_RTS)
- continue;
-
- const unsigned array_len =
- glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
- assert(rt + array_len <= max_rt);
-
- /* Unused */
- if (!(stage->key.wm.color_outputs_valid & BITFIELD_RANGE(rt, array_len))) {
- /* If this is the RT at location 0 and we have alpha to coverage
- * enabled we will have to create a null RT for it, so mark it as
- * used.
- */
- if (rt > 0 || !stage->key.wm.alpha_to_coverage)
- continue;
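+ /* Populate the render-target section of the binding table: one entry per
+ * color region in the key, with a null surface for any region the subpass
+ * leaves unused.
+ */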
+ unsigned num_rt_bindings;
+ struct anv_pipeline_binding rt_bindings[MAX_RTS];
+ if (stage->key.wm.nr_color_regions > 0) {
+ assert(stage->key.wm.nr_color_regions <= MAX_RTS);
+ for (unsigned rt = 0; rt < stage->key.wm.nr_color_regions; rt++) {
+ if (stage->key.wm.color_outputs_valid & BITFIELD_BIT(rt)) {
+ rt_bindings[rt] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .index = rt,
+ };
+ } else {
+ /* Setup a null render target */
+ rt_bindings[rt] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .index = UINT32_MAX,
+ };
+ }
}
-
- for (unsigned i = 0; i < array_len; i++)
- rt_used[rt + i] = true;
+ num_rt_bindings = stage->key.wm.nr_color_regions;
+ } else {
+ /* Setup a null render target */
+ rt_bindings[0] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .index = UINT32_MAX,
+ };
+ num_rt_bindings = 1;
}
- /* Set new, compacted, location */
- for (unsigned i = 0; i < max_rt; i++) {
- if (!rt_used[i])
- continue;
-
- rt_to_bindings[i] = num_rts;
-
- if (stage->key.wm.color_outputs_valid & (1 << i)) {
- rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
- .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
- .binding = 0,
- .index = i,
- };
- } else {
- /* Setup a null render target */
- rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
- .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
- .binding = 0,
- .index = UINT32_MAX,
- };
- }
-
- num_rts++;
- }
+ assert(num_rt_bindings <= MAX_RTS);
+ assert(stage->bind_map.surface_count == 0);
+ typed_memcpy(stage->bind_map.surface_to_descriptor,
+ rt_bindings, num_rt_bindings);
+ stage->bind_map.surface_count += num_rt_bindings;
+ /* Now that we've set up the color attachments, we can go through and
+ * eliminate any shader outputs that map to VK_ATTACHMENT_UNUSED in the
+ * hopes that dead code can clean them up in this and any earlier shader
+ * stages.
+ */
+ nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
bool deleted_output = false;
nir_foreach_variable_safe(var, &stage->nir->outputs) {
+ /* TODO: We don't delete depth/stencil writes. We probably could if the
+ * subpass doesn't have a depth/stencil attachment.
+ */
if (var->data.location < FRAG_RESULT_DATA0)
continue;
const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
- if (rt >= MAX_RTS || !rt_used[rt]) {
- /* Unused or out-of-bounds, throw it away, unless it is the first
- * RT and we have alpha to coverage enabled.
- */
+ /* If this is the RT at location 0 and we have alpha to coverage
+ * enabled we still need that write because it will affect the coverage
+ * mask even if it's never written to a color target.
+ */
+ if (rt == 0 && stage->key.wm.alpha_to_coverage)
+ continue;
+
+ const unsigned array_len =
+ glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
+ assert(rt + array_len <= MAX_RTS);
+
+ if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid &
+ BITFIELD_RANGE(rt, array_len))) {
deleted_output = true;
var->data.mode = nir_var_function_temp;
exec_node_remove(&var->node);
exec_list_push_tail(&impl->locals, &var->node);
- continue;
}
-
- /* Give it the new location */
- assert(rt_to_bindings[rt] != -1);
- var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
}
if (deleted_output)
nir_fixup_deref_modes(stage->nir);
- if (num_rts == 0) {
- /* If we have no render targets, we need a null render target */
- rt_bindings[0] = (struct anv_pipeline_binding) {
- .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
- .binding = 0,
- .index = UINT32_MAX,
- };
- num_rts = 1;
- }
-
- /* Now that we've determined the actual number of render targets, adjust
- * the key accordingly.
+ /* We stored the number of subpass color attachments in nr_color_regions
+ * when calculating the key for caching. Now that we've computed the bind
+ * map, we can reduce this to the actual max before we go into the back-end
+ * compiler.
*/
- stage->key.wm.nr_color_regions = num_rts;
- stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;
-
- assert(num_rts <= max_rt);
- assert(stage->bind_map.surface_count == 0);
- typed_memcpy(stage->bind_map.surface_to_descriptor,
- rt_bindings, num_rts);
- stage->bind_map.surface_count += num_rts;
+ stage->key.wm.nr_color_regions =
+ util_last_bit(stage->key.wm.color_outputs_valid);
}
static void
fs_stage->code = brw_compile_fs(compiler, device, mem_ctx,
&fs_stage->key.wm,
&fs_stage->prog_data.wm,
- fs_stage->nir, NULL, -1, -1, -1,
+ fs_stage->nir, -1, -1, -1,
true, false, NULL,
fs_stage->stats, NULL);
(uint32_t)fs_stage->prog_data.wm.dispatch_16 +
(uint32_t)fs_stage->prog_data.wm.dispatch_32;
- if (fs_stage->key.wm.nr_color_regions == 0 &&
+ if (fs_stage->key.wm.color_outputs_valid == 0 &&
!fs_stage->prog_data.wm.has_side_effects &&
+ !fs_stage->prog_data.wm.uses_omask &&
+ !fs_stage->key.wm.alpha_to_coverage &&
!fs_stage->prog_data.wm.uses_kill &&
fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
!fs_stage->prog_data.wm.computed_stencil) {
}
}
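+
+/* Record one pipeline "executable" for VK_KHR_pipeline_executable_properties.
+ * When the pipeline was created with
+ * VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR, this also
+ * captures the final NIR and the GEN disassembly into strings ralloc'd off
+ * pipeline->mem_ctx so they can be reported back to the application.
+ */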
+static void
+anv_pipeline_add_executable(struct anv_pipeline *pipeline,
+ struct anv_pipeline_stage *stage,
+ struct brw_compile_stats *stats,
+ uint32_t code_offset)
+{
+ char *nir = NULL;
+ if (stage->nir &&
+ (pipeline->flags &
+ VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
+ char *stream_data = NULL;
+ size_t stream_size = 0;
+ FILE *stream = open_memstream(&stream_data, &stream_size);
+
+ nir_print_shader(stage->nir, stream);
+
+ fclose(stream);
+
+ /* Copy it to a ralloc'd thing */
+ nir = ralloc_size(pipeline->mem_ctx, stream_size + 1);
+ memcpy(nir, stream_data, stream_size);
+ nir[stream_size] = 0;
+
+ free(stream_data);
+ }
+
+ char *disasm = NULL;
+ if (stage->code &&
+ (pipeline->flags &
+ VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
+ char *stream_data = NULL;
+ size_t stream_size = 0;
+ FILE *stream = open_memstream(&stream_data, &stream_size);
+
+ /* Creating this is far cheaper than it looks. It's perfectly fine to
+ * do it for every binary.
+ */
+ struct gen_disasm *d = gen_disasm_create(&pipeline->device->info);
+ gen_disasm_disassemble(d, stage->code, code_offset, stream);
+ gen_disasm_destroy(d);
+
+ fclose(stream);
+
+ /* Copy it to a ralloc'd thing */
+ disasm = ralloc_size(pipeline->mem_ctx, stream_size + 1);
+ memcpy(disasm, stream_data, stream_size);
+ disasm[stream_size] = 0;
+
+ free(stream_data);
+ }
+
+ pipeline->executables[pipeline->num_executables++] =
+ (struct anv_pipeline_executable) {
+ .stage = stage->stage,
+ .stats = *stats,
+ .nir = nir,
+ .disasm = disasm,
+ };
+}
+
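+/* A fragment shader may have been compiled for up to three dispatch widths
+ * (SIMD8, SIMD16 and SIMD32); expose each one that is present as its own
+ * executable with its own stats and code offset. Other stages only ever
+ * have a single binary.
+ */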
+static void
+anv_pipeline_add_executables(struct anv_pipeline *pipeline,
+ struct anv_pipeline_stage *stage,
+ struct anv_shader_bin *bin)
+{
+ if (stage->stage == MESA_SHADER_FRAGMENT) {
+ /* We pull the prog data and stats out of the anv_shader_bin because
+ * the anv_pipeline_stage may not be fully populated if we successfully
+ * looked up the shader in a cache.
+ */
+ const struct brw_wm_prog_data *wm_prog_data =
+ (const struct brw_wm_prog_data *)bin->prog_data;
+ struct brw_compile_stats *stats = bin->stats;
+
+ if (wm_prog_data->dispatch_8) {
+ anv_pipeline_add_executable(pipeline, stage, stats++, 0);
+ }
+
+ if (wm_prog_data->dispatch_16) {
+ anv_pipeline_add_executable(pipeline, stage, stats++,
+ wm_prog_data->prog_offset_16);
+ }
+
+ if (wm_prog_data->dispatch_32) {
+ anv_pipeline_add_executable(pipeline, stage, stats++,
+ wm_prog_data->prog_offset_32);
+ }
+ } else {
+ anv_pipeline_add_executable(pipeline, stage, bin->stats, 0);
+ }
+}
+
static VkResult
anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
struct anv_pipeline_cache *cache,
};
int64_t pipeline_start = os_time_get_nano();
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
+ const struct brw_compiler *compiler = pipeline->device->physical->compiler;
struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
pipeline->active_stages = 0;
case MESA_SHADER_GEOMETRY:
populate_gs_prog_key(devinfo, sinfo->flags, &stages[stage].key.gs);
break;
- case MESA_SHADER_FRAGMENT:
+ case MESA_SHADER_FRAGMENT: {
+ const bool raster_enabled =
+ !info->pRasterizationState->rasterizerDiscardEnable;
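+ /* The Vulkan spec allows pMultisampleState to be ignored when rasterization
+ * is disabled, so don't let it influence the key in that case.
+ */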
populate_wm_prog_key(devinfo, sinfo->flags,
pipeline->subpass,
- info->pMultisampleState,
+ raster_enabled ? info->pMultisampleState : NULL,
&stages[stage].key.wm);
break;
+ }
default:
unreachable("Invalid graphics shader stage");
}
unsigned char sha1[20];
anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
- unsigned found = 0;
- unsigned cache_hits = 0;
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
if (!stages[s].entrypoint)
continue;
- int64_t stage_start = os_time_get_nano();
-
stages[s].cache_key.stage = s;
memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
+ }
- bool cache_hit;
- struct anv_shader_bin *bin =
- anv_device_search_for_kernel(pipeline->device, cache,
- &stages[s].cache_key,
- sizeof(stages[s].cache_key), &cache_hit);
- if (bin) {
- found++;
- pipeline->shaders[s] = bin;
- }
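+ /* If the client asked to capture the pipeline's internal representations,
+ * skip the shader cache entirely: a cache hit would leave us without the
+ * NIR we need to report, so always compile from source in that case.
+ */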
+ const bool skip_cache_lookup =
+ (pipeline->flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
- if (cache_hit) {
- cache_hits++;
- stages[s].feedback.flags |=
- VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
- }
- stages[s].feedback.duration += os_time_get_nano() - stage_start;
- }
+ if (!skip_cache_lookup) {
+ unsigned found = 0;
+ unsigned cache_hits = 0;
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (!stages[s].entrypoint)
+ continue;
+
+ int64_t stage_start = os_time_get_nano();
- if (found == __builtin_popcount(pipeline->active_stages)) {
- if (cache_hits == found) {
- pipeline_feedback.flags |=
- VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ bool cache_hit;
+ struct anv_shader_bin *bin =
+ anv_device_search_for_kernel(pipeline->device, cache,
+ &stages[s].cache_key,
+ sizeof(stages[s].cache_key), &cache_hit);
+ if (bin) {
+ found++;
+ pipeline->shaders[s] = bin;
+ }
+
+ if (cache_hit) {
+ cache_hits++;
+ stages[s].feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ }
+ stages[s].feedback.duration += os_time_get_nano() - stage_start;
}
- /* We found all our shaders in the cache. We're done. */
- goto done;
- } else if (found > 0) {
- /* We found some but not all of our shaders. This shouldn't happen
- * most of the time but it can if we have a partially populated
- * pipeline cache.
- */
- assert(found < __builtin_popcount(pipeline->active_stages));
-
- vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
- VK_DEBUG_REPORT_WARNING_BIT_EXT |
- VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
- (uint64_t)(uintptr_t)cache,
- 0, 0, "anv",
- "Found a partial pipeline in the cache. This is "
- "most likely caused by an incomplete pipeline cache "
- "import or export");
-
- /* We're going to have to recompile anyway, so just throw away our
- * references to the shaders in the cache. We'll get them out of the
- * cache again as part of the compilation process.
- */
- for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
- stages[s].feedback.flags = 0;
- if (pipeline->shaders[s]) {
- anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
- pipeline->shaders[s] = NULL;
+
+ if (found == __builtin_popcount(pipeline->active_stages)) {
+ if (cache_hits == found) {
+ pipeline_feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ }
+ /* We found all our shaders in the cache. We're done. */
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (!stages[s].entrypoint)
+ continue;
+
+ anv_pipeline_add_executables(pipeline, &stages[s],
+ pipeline->shaders[s]);
+ }
+ goto done;
+ } else if (found > 0) {
+ /* We found some but not all of our shaders. This shouldn't happen
+ * most of the time but it can if we have a partially populated
+ * pipeline cache.
+ */
+ assert(found < __builtin_popcount(pipeline->active_stages));
+
+ vk_debug_report(&pipeline->device->physical->instance->debug_report_callbacks,
+ VK_DEBUG_REPORT_WARNING_BIT_EXT |
+ VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
+ (uint64_t)(uintptr_t)cache,
+ 0, 0, "anv",
+ "Found a partial pipeline in the cache. This is "
+ "most likely caused by an incomplete pipeline cache "
+ "import or export");
+
+ /* We're going to have to recompile anyway, so just throw away our
+ * references to the shaders in the cache. We'll get them out of the
+ * cache again as part of the compilation process.
+ */
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ stages[s].feedback.flags = 0;
+ if (pipeline->shaders[s]) {
+ anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
+ pipeline->shaders[s] = NULL;
+ }
}
}
}
goto fail;
}
+ anv_nir_validate_push_layout(&stages[s].prog_data.base,
+ &stages[s].bind_map);
+
struct anv_shader_bin *bin =
anv_device_upload_kernel(pipeline->device, cache,
&stages[s].cache_key,
goto fail;
}
+ anv_pipeline_add_executables(pipeline, &stages[s], bin);
+
pipeline->shaders[s] = bin;
ralloc_free(stage_ctx);
};
int64_t pipeline_start = os_time_get_nano();
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
+ const struct brw_compiler *compiler = pipeline->device->physical->compiler;
struct anv_pipeline_stage stage = {
.stage = MESA_SHADER_COMPUTE,
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
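+ /* As in the graphics path, capturing internal representations forces a
+ * full compile instead of a cache lookup.
+ */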
+ const bool skip_cache_lookup =
+ (pipeline->flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
+
anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
- bool cache_hit;
- bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
- sizeof(stage.cache_key), &cache_hit);
+ bool cache_hit = false;
+ if (!skip_cache_lookup) {
+ bin = anv_device_search_for_kernel(pipeline->device, cache,
+ &stage.cache_key,
+ sizeof(stage.cache_key),
+ &cache_hit);
+ }
+
+ void *mem_ctx = ralloc_context(NULL);
if (bin == NULL) {
int64_t stage_start = os_time_get_nano();
.set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
};
- void *mem_ctx = ralloc_context(NULL);
-
stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
if (stage.nir == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
+ NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id);
- NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
- &stage.prog_data.cs);
+ anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
NIR_PASS_V(stage.nir, nir_lower_vars_to_explicit_types,
nir_var_mem_shared, shared_type_info);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
+ anv_nir_validate_push_layout(&stage.prog_data.base, &stage.bind_map);
+
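+ /* The num_work_groups buffer always occupies the first binding table
+ * entry (set up above as ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS). If the
+ * compiled shader ended up not using it, replace it with a null surface.
+ */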
+ if (!stage.prog_data.cs.uses_num_work_groups) {
+ assert(stage.bind_map.surface_to_descriptor[0].set ==
+ ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS);
+ stage.bind_map.surface_to_descriptor[0].set = ANV_DESCRIPTOR_SET_NULL;
+ }
+
const unsigned code_size = stage.prog_data.base.program_size;
bin = anv_device_upload_kernel(pipeline->device, cache,
&stage.cache_key, sizeof(stage.cache_key),
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- ralloc_free(mem_ctx);
-
stage.feedback.duration = os_time_get_nano() - stage_start;
}
+ anv_pipeline_add_executables(pipeline, &stage, bin);
+
+ ralloc_free(mem_ctx);
+
if (cache_hit) {
stage.feedback.flags |=
VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
pipeline->batch.relocs = &pipeline->batch_relocs;
pipeline->batch.status = VK_SUCCESS;
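+ /* mem_ctx holds allocations that live as long as the pipeline itself,
+ * such as the captured NIR and disassembly strings for the executables.
+ */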
+ pipeline->mem_ctx = ralloc_context(NULL);
+ pipeline->flags = pCreateInfo->flags;
+
+ assert(pCreateInfo->pRasterizationState);
+
copy_non_dynamic_state(pipeline, pCreateInfo);
- pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
- pCreateInfo->pRasterizationState->depthClampEnable;
+ pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState->depthClampEnable;
/* Previously we enabled depth clipping when !depthClampEnable.
* DepthClipStateCreateInfo now makes depth clipping explicit so if the
PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
- pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
- pCreateInfo->pMultisampleState->sampleShadingEnable;
+ pipeline->sample_shading_enable =
+ !pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
+ pCreateInfo->pMultisampleState &&
+ pCreateInfo->pMultisampleState->sampleShadingEnable;
pipeline->needs_data_cache = false;
* of various prog_data pointers. Make them NULL by default.
*/
memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
+ pipeline->num_executables = 0;
result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
if (result != VK_SUCCESS) {
+ ralloc_free(pipeline->mem_ctx);
anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
return result;
}
return VK_SUCCESS;
}
+
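+/* snprintf into a fixed-size Vulkan string field, asserting that the result
+ * was neither empty nor truncated.
+ */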
+#define WRITE_STR(field, ...) ({ \
+ memset(field, 0, sizeof(field)); \
+ UNUSED int i = snprintf(field, sizeof(field), __VA_ARGS__); \
+ assert(i > 0 && i < sizeof(field)); \
+})
+
+VkResult anv_GetPipelineExecutablePropertiesKHR(
+ VkDevice device,
+ const VkPipelineInfoKHR* pPipelineInfo,
+ uint32_t* pExecutableCount,
+ VkPipelineExecutablePropertiesKHR* pProperties)
+{
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, pPipelineInfo->pipeline);
+ VK_OUTARRAY_MAKE(out, pProperties, pExecutableCount);
+
+ for (uint32_t i = 0; i < pipeline->num_executables; i++) {
+ vk_outarray_append(&out, props) {
+ gl_shader_stage stage = pipeline->executables[i].stage;
+ props->stages = mesa_to_vk_shader_stage(stage);
+
+ unsigned simd_width = pipeline->executables[i].stats.dispatch_width;
+ if (stage == MESA_SHADER_FRAGMENT) {
+ WRITE_STR(props->name, "%s%d %s",
+ simd_width ? "SIMD" : "vec",
+ simd_width ? simd_width : 4,
+ _mesa_shader_stage_to_string(stage));
+ } else {
+ WRITE_STR(props->name, "%s", _mesa_shader_stage_to_string(stage));
+ }
+ WRITE_STR(props->description, "%s%d %s shader",
+ simd_width ? "SIMD" : "vec",
+ simd_width ? simd_width : 4,
+ _mesa_shader_stage_to_string(stage));
+
+ /* The compiler gives us a dispatch width of 0 for vec4 but Vulkan
+ * wants a subgroup size of 1.
+ */
+ props->subgroupSize = MAX2(simd_width, 1);
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+VkResult anv_GetPipelineExecutableStatisticsKHR(
+ VkDevice device,
+ const VkPipelineExecutableInfoKHR* pExecutableInfo,
+ uint32_t* pStatisticCount,
+ VkPipelineExecutableStatisticKHR* pStatistics)
+{
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
+ VK_OUTARRAY_MAKE(out, pStatistics, pStatisticCount);
+
+ assert(pExecutableInfo->executableIndex < pipeline->num_executables);
+ const struct anv_pipeline_executable *exe =
+ &pipeline->executables[pExecutableInfo->executableIndex];
+ const struct brw_stage_prog_data *prog_data =
+ pipeline->shaders[exe->stage]->prog_data;
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Instruction Count");
+ WRITE_STR(stat->description,
+ "Number of GEN instructions in the final generated "
+ "shader executable.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.instructions;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Loop Count");
+ WRITE_STR(stat->description,
+ "Number of loops (not unrolled) in the final generated "
+ "shader executable.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.loops;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Cycle Count");
+ WRITE_STR(stat->description,
+ "Estimate of the number of EU cycles required to execute "
+ "the final generated executable. This is an estimate only "
+ "and may vary greatly from actual run-time performance.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.cycles;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Spill Count");
+ WRITE_STR(stat->description,
+ "Number of scratch spill operations. This gives a rough "
+ "estimate of the cost incurred due to spilling temporary "
+ "values to memory. If this is non-zero, you may want to "
+ "adjust your shader to reduce register pressure.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.spills;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Fill Count");
+ WRITE_STR(stat->description,
+ "Number of scratch fill operations. This gives a rough "
+ "estimate of the cost incurred due to spilling temporary "
+ "values to memory. If this is non-zero, you may want to "
+ "adjust your shader to reduce register pressure.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.fills;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Scratch Memory Size");
+ WRITE_STR(stat->description,
+ "Number of bytes of scratch memory required by the "
+ "generated shader executable. If this is non-zero, you "
+ "may want to adjust your shader to reduce register "
+ "pressure.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = prog_data->total_scratch;
+ }
+
+ if (exe->stage == MESA_SHADER_COMPUTE) {
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Workgroup Memory Size");
+ WRITE_STR(stat->description,
+ "Number of bytes of workgroup shared memory used by this "
+ "compute shader including any padding.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = prog_data->total_shared;
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
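+/* Copy an internal representation string following the usual Vulkan
+ * two-call idiom: with pData == NULL only the required size is written;
+ * otherwise the text is copied and false is returned if it did not fit,
+ * which the caller turns into VK_INCOMPLETE.
+ */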
+static bool
+write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
+ const char *data)
+{
+ ir->isText = VK_TRUE;
+
+ size_t data_len = strlen(data) + 1;
+
+ if (ir->pData == NULL) {
+ ir->dataSize = data_len;
+ return true;
+ }
+
+ strncpy(ir->pData, data, ir->dataSize);
+ if (ir->dataSize < data_len)
+ return false;
+
+ ir->dataSize = data_len;
+ return true;
+}
+
+VkResult anv_GetPipelineExecutableInternalRepresentationsKHR(
+ VkDevice device,
+ const VkPipelineExecutableInfoKHR* pExecutableInfo,
+ uint32_t* pInternalRepresentationCount,
+ VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
+{
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
+ VK_OUTARRAY_MAKE(out, pInternalRepresentations,
+ pInternalRepresentationCount);
+ bool incomplete_text = false;
+
+ assert(pExecutableInfo->executableIndex < pipeline->num_executables);
+ const struct anv_pipeline_executable *exe =
+ &pipeline->executables[pExecutableInfo->executableIndex];
+
+ if (exe->nir) {
+ vk_outarray_append(&out, ir) {
+ WRITE_STR(ir->name, "Final NIR");
+ WRITE_STR(ir->description,
+ "Final NIR before going into the back-end compiler");
+
+ if (!write_ir_text(ir, exe->nir))
+ incomplete_text = true;
+ }
+ }
+
+ if (exe->disasm) {
+ vk_outarray_append(&out, ir) {
+ WRITE_STR(ir->name, "GEN Assembly");
+ WRITE_STR(ir->description,
+ "Final GEN assembly for the generated shader binary");
+
+ if (!write_ir_text(ir, exe->disasm))
+ incomplete_text = true;
+ }
+ }
+
+ return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
+}