#include "util/mesa-sha1.h"
#include "util/os_time.h"
#include "common/gen_l3_config.h"
+#include "common/gen_disasm.h"
#include "anv_private.h"
#include "compiler/brw_nir.h"
#include "anv_nir.h"
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
- module = vk_alloc2(&device->alloc, pAllocator,
+ module = vk_alloc2(&device->vk.alloc, pAllocator,
sizeof(*module) + pCreateInfo->codeSize, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (module == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &module->base,
+ VK_OBJECT_TYPE_SHADER_MODULE);
module->size = pCreateInfo->codeSize;
memcpy(module->data, pCreateInfo->pCode, module->size);
if (!module)
return;
- vk_free2(&device->alloc, pAllocator, module);
+ vk_object_base_finish(&module->base);
+ vk_free2(&device->vk.alloc, pAllocator, module);
}
#define SPIR_V_MAGIC_NUMBER 0x07230203
-static const uint64_t stage_to_debug[] = {
- [MESA_SHADER_VERTEX] = DEBUG_VS,
- [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
- [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
- [MESA_SHADER_GEOMETRY] = DEBUG_GS,
- [MESA_SHADER_FRAGMENT] = DEBUG_WM,
- [MESA_SHADER_COMPUTE] = DEBUG_CS,
-};
-
struct anv_spirv_debug_data {
struct anv_device *device;
const struct anv_shader_module *module;
const char *message)
{
struct anv_spirv_debug_data *debug_data = private_data;
+ struct anv_instance *instance = debug_data->device->physical->instance;
+
static const VkDebugReportFlagsEXT vk_flags[] = {
[NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
[NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s", (unsigned long) spirv_offset, message);
- vk_debug_report(&debug_data->device->instance->debug_report_callbacks,
+ vk_debug_report(&instance->debug_report_callbacks,
vk_flags[level],
VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
(uint64_t) (uintptr_t) debug_data->module,
gl_shader_stage stage,
const VkSpecializationInfo *spec_info)
{
- const struct anv_physical_device *pdevice =
- &device->instance->physicalDevice;
+ const struct anv_physical_device *pdevice = device->physical;
const struct brw_compiler *compiler = pdevice->compiler;
const nir_shader_compiler_options *nir_options =
compiler->glsl_compiler_options[stage].NirOptions;
struct nir_spirv_specialization *spec_entries = NULL;
if (spec_info && spec_info->mapEntryCount > 0) {
num_spec_entries = spec_info->mapEntryCount;
- spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
+ spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
for (uint32_t i = 0; i < num_spec_entries; i++) {
VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
const void *data = spec_info->pData + entry.offset;
assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
spec_entries[i].id = spec_info->pMapEntries[i].constantID;
- if (spec_info->dataSize == 8)
- spec_entries[i].data64 = *(const uint64_t *)data;
- else
- spec_entries[i].data32 = *(const uint32_t *)data;
+ switch (entry.size) {
+ case 8:
+ spec_entries[i].value.u64 = *(const uint64_t *)data;
+ break;
+ case 4:
+ spec_entries[i].value.u32 = *(const uint32_t *)data;
+ break;
+ case 2:
+ spec_entries[i].value.u16 = *(const uint16_t *)data;
+ break;
+ case 1:
+ spec_entries[i].value.u8 = *(const uint8_t *)data;
+ break;
+ default:
+ assert(!"Invalid spec constant size");
+ break;
+ }
}
}
.module = module,
};
struct spirv_to_nir_options spirv_options = {
- .lower_workgroup_access_to_offsets = true,
.frag_coord_is_sysval = true,
.caps = {
.demote_to_helper_invocation = true,
.int16 = pdevice->info.gen >= 8,
.int64 = pdevice->info.gen >= 8,
.int64_atomics = pdevice->info.gen >= 9 && pdevice->use_softpin,
+ .integer_functions2 = pdevice->info.gen >= 8,
.min_lod = true,
.multiview = true,
.physical_storage_buffer_address = pdevice->has_a64_buffer_access,
.post_depth_coverage = pdevice->info.gen >= 9,
.runtime_descriptor_array = true,
+ .float_controls = pdevice->info.gen >= 8,
+ .shader_clock = true,
.shader_viewport_index_layer = true,
.stencil_export = pdevice->info.gen >= 9,
.storage_8bit = pdevice->info.gen >= 8,
.tessellation = true,
.transform_feedback = pdevice->info.gen >= 8,
.variable_pointers = true,
+ .vk_memory_model = true,
+ .vk_memory_model_device_scope = true,
},
.ubo_addr_format = nir_address_format_32bit_index_offset,
.ssbo_addr_format =
free(spec_entries);
- if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
+ if (unlikely(INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage))) {
fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
gl_shader_stage_name(stage));
nir_print_shader(nir, stderr);
* inline functions. That way they get properly initialized at the top
* of the function and not at the top of its caller.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
+ NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
+ NIR_PASS_V(nir, nir_copy_prop);
NIR_PASS_V(nir, nir_opt_deref);
/* Pick off the single entrypoint that we want */
* nir_remove_dead_variables and split_per_member_structs below see the
* corresponding stores.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
+ NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
/* Split member structs. We do this before lower_io_to_temporaries so that
* it doesn't lower system values to temporaries by accident.
NIR_PASS_V(nir, nir_split_per_member_structs);
NIR_PASS_V(nir, nir_remove_dead_variables,
- nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
+ nir_var_shader_in | nir_var_shader_out | nir_var_system_value,
+ NULL);
NIR_PASS_V(nir, nir_propagate_invariant);
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
return nir;
}
+VkResult
+anv_pipeline_init(struct anv_pipeline *pipeline,
+ struct anv_device *device,
+ enum anv_pipeline_type type,
+ VkPipelineCreateFlags flags,
+ const VkAllocationCallbacks *pAllocator)
+{
+ VkResult result;
+
+ memset(pipeline, 0, sizeof(*pipeline));
+
+ vk_object_base_init(&device->vk, &pipeline->base,
+ VK_OBJECT_TYPE_PIPELINE);
+ pipeline->device = device;
+
+ /* It's the job of the child class to provide actual backing storage for
+ * the batch by setting batch.start, batch.next, and batch.end.
+ */
+ pipeline->batch.alloc = pAllocator ? pAllocator : &device->vk.alloc;
+ pipeline->batch.relocs = &pipeline->batch_relocs;
+ pipeline->batch.status = VK_SUCCESS;
+
+ result = anv_reloc_list_init(&pipeline->batch_relocs,
+ pipeline->batch.alloc);
+ if (result != VK_SUCCESS)
+ return result;
+
+ pipeline->mem_ctx = ralloc_context(NULL);
+
+ pipeline->type = type;
+ pipeline->flags = flags;
+
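+   /* The executables array, and the NIR/disassembly strings later attached
+    * to its entries, are allocated out of mem_ctx so they are all released
+    * together by anv_pipeline_finish().
+    */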
+ util_dynarray_init(&pipeline->executables, pipeline->mem_ctx);
+
+ return VK_SUCCESS;
+}
+
+void
+anv_pipeline_finish(struct anv_pipeline *pipeline,
+ struct anv_device *device,
+ const VkAllocationCallbacks *pAllocator)
+{
+ anv_reloc_list_finish(&pipeline->batch_relocs,
+ pAllocator ? pAllocator : &device->vk.alloc);
+ ralloc_free(pipeline->mem_ctx);
+ vk_object_base_finish(&pipeline->base);
+}
+
void anv_DestroyPipeline(
VkDevice _device,
VkPipeline _pipeline,
if (!pipeline)
return;
- anv_reloc_list_finish(&pipeline->batch_relocs,
- pAllocator ? pAllocator : &device->alloc);
- if (pipeline->blend_state.map)
- anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
+ switch (pipeline->type) {
+ case ANV_PIPELINE_GRAPHICS: {
+ struct anv_graphics_pipeline *gfx_pipeline =
+ anv_pipeline_to_graphics(pipeline);
- for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
- if (pipeline->shaders[s])
- anv_shader_bin_unref(device, pipeline->shaders[s]);
+ if (gfx_pipeline->blend_state.map)
+ anv_state_pool_free(&device->dynamic_state_pool, gfx_pipeline->blend_state);
+
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (gfx_pipeline->shaders[s])
+ anv_shader_bin_unref(device, gfx_pipeline->shaders[s]);
+ }
+ break;
+ }
+
+ case ANV_PIPELINE_COMPUTE: {
+ struct anv_compute_pipeline *compute_pipeline =
+ anv_pipeline_to_compute(pipeline);
+
+ if (compute_pipeline->cs)
+ anv_shader_bin_unref(device, compute_pipeline->cs);
+
+ break;
}
- vk_free2(&device->alloc, pAllocator, pipeline);
+ default:
+ unreachable("invalid pipeline type");
+ }
+
+ anv_pipeline_finish(pipeline, device, pAllocator);
+ vk_free2(&device->vk.alloc, pAllocator, pipeline);
}
static const uint32_t vk_to_gen_primitive_type[] = {
/* XXX Vulkan doesn't appear to specify */
key->clamp_fragment_color = false;
+ key->ignore_sample_mask_out = false;
+
assert(subpass->color_count <= MAX_RTS);
for (uint32_t i = 0; i < subpass->color_count; i++) {
if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
key->color_outputs_valid |= (1 << i);
}
- key->nr_color_regions = util_bitcount(key->color_outputs_valid);
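+   /* The key is computed before the render target bind map exists, so use
+    * the full subpass color count here; anv_pipeline_link_fs() trims
+    * nr_color_regions down to the last valid output before compilation.
+    */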
+ key->nr_color_regions = subpass->color_count;
/* To reduce possible shader recompilations we would need to know if
* there is a SampleMask output variable to compute if we should emit
union brw_any_prog_data prog_data;
+ uint32_t num_stats;
+ struct brw_compile_stats stats[3];
+ char *disasm[3];
+
VkPipelineCreationFeedbackEXT feedback;
+
+ const unsigned *code;
};
static void
}
static void
-anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
+anv_pipeline_hash_graphics(struct anv_graphics_pipeline *pipeline,
struct anv_pipeline_layout *layout,
struct anv_pipeline_stage *stages,
unsigned char *sha1_out)
if (layout)
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
- const bool rba = pipeline->device->robust_buffer_access;
+ const bool rba = pipeline->base.device->robust_buffer_access;
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
}
static void
-anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
+anv_pipeline_hash_compute(struct anv_compute_pipeline *pipeline,
struct anv_pipeline_layout *layout,
struct anv_pipeline_stage *stage,
unsigned char *sha1_out)
if (layout)
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
- const bool rba = pipeline->device->robust_buffer_access;
+ const bool rba = pipeline->base.device->robust_buffer_access;
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
_mesa_sha1_update(&ctx, stage->shader_sha1,
struct anv_pipeline_stage *stage)
{
const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
+ pipeline->device->physical->compiler;
const nir_shader_compiler_options *nir_options =
compiler->glsl_compiler_options[stage->stage].NirOptions;
nir_shader *nir;
struct anv_pipeline_stage *stage,
struct anv_pipeline_layout *layout)
{
- const struct anv_physical_device *pdevice =
- &pipeline->device->instance->physicalDevice;
+ const struct anv_physical_device *pdevice = pipeline->device->physical;
const struct brw_compiler *compiler = pdevice->compiler;
struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
nir_shader *nir = stage->nir;
if (nir->info.stage == MESA_SHADER_FRAGMENT) {
- NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
- NIR_PASS_V(nir, nir_lower_input_attachments, true);
+ NIR_PASS_V(nir, nir_lower_wpos_center,
+ anv_pipeline_to_graphics(pipeline)->sample_shading_enable);
+ NIR_PASS_V(nir, nir_lower_input_attachments,
+ &(nir_input_attachment_options) {
+ .use_fragcoord_sysval = true,
+ .use_layer_id_sysval = true,
+ });
}
NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
- NIR_PASS_V(nir, anv_nir_lower_push_constants);
-
- if (nir->info.stage != MESA_SHADER_COMPUTE)
- NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
-
- if (nir->info.stage == MESA_SHADER_COMPUTE)
- prog_data->total_shared = nir->num_shared;
-
- nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
-
- if (nir->num_uniforms > 0) {
- assert(prog_data->nr_params == 0);
-
- /* If the shader uses any push constants at all, we'll just give
- * them the maximum possible number
- */
- assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
- nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
- prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
- prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);
-
- /* We now set the param values to be offsets into a
- * anv_push_constant_data structure. Since the compiler doesn't
- * actually dereference any of the gl_constant_value pointers in the
- * params array, it doesn't really matter what we put here.
- */
- struct anv_push_constants *null_data = NULL;
- /* Fill out the push constants section of the param array */
- for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
- prog_data->param[i] = ANV_PARAM_PUSH(
- (uintptr_t)&null_data->client_data[i * sizeof(float)]);
- }
+ if (pipeline->type == ANV_PIPELINE_GRAPHICS) {
+ NIR_PASS_V(nir, anv_nir_lower_multiview,
+ anv_pipeline_to_graphics(pipeline));
}
- if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
- pipeline->needs_data_cache = true;
+ nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
- NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);
+ NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo, NULL);
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
nir_address_format_64bit_global);
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
- if (layout) {
- anv_nir_apply_pipeline_layout(pdevice,
- pipeline->device->robust_buffer_access,
- layout, nir, prog_data,
- &stage->bind_map);
-
- NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
- nir_address_format_32bit_index_offset);
- NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
- anv_nir_ssbo_addr_format(pdevice,
- pipeline->device->robust_buffer_access));
-
- NIR_PASS_V(nir, nir_opt_constant_folding);
-
- /* We don't support non-uniform UBOs and non-uniform SSBO access is
- * handled naturally by falling back to A64 messages.
- */
- NIR_PASS_V(nir, nir_lower_non_uniform_access,
- nir_lower_non_uniform_texture_access |
- nir_lower_non_uniform_image_access);
- }
+ anv_nir_apply_pipeline_layout(pdevice,
+ pipeline->device->robust_buffer_access,
+ layout, nir, &stage->bind_map);
+
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
+ nir_address_format_32bit_index_offset);
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
+ anv_nir_ssbo_addr_format(pdevice,
+ pipeline->device->robust_buffer_access));
- if (nir->info.stage != MESA_SHADER_COMPUTE)
- brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
+ NIR_PASS_V(nir, nir_opt_constant_folding);
- assert(nir->num_uniforms == prog_data->nr_params * 4);
+ /* We don't support non-uniform UBOs and non-uniform SSBO access is
+ * handled naturally by falling back to A64 messages.
+ */
+ NIR_PASS_V(nir, nir_lower_non_uniform_access,
+ nir_lower_non_uniform_texture_access |
+ nir_lower_non_uniform_image_access);
+
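+   /* Derive the push constant layout from what the shader actually uses and
+    * record the resulting ranges in the bind map.
+    */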
+ anv_nir_compute_push_layout(pdevice, pipeline->device->robust_buffer_access,
+ nir, prog_data, &stage->bind_map, mem_ctx);
stage->nir = nir;
}
brw_nir_link_shaders(compiler, vs_stage->nir, next_stage->nir);
}
-static const unsigned *
+static void
anv_pipeline_compile_vs(const struct brw_compiler *compiler,
void *mem_ctx,
- struct anv_device *device,
+ struct anv_graphics_pipeline *pipeline,
struct anv_pipeline_stage *vs_stage)
{
+ /* When using Primitive Replication for multiview, each view gets its own
+ * position slot.
+ */
+ uint32_t pos_slots = pipeline->use_primitive_replication ?
+ anv_subpass_view_count(pipeline->subpass) : 1;
+
brw_compute_vue_map(compiler->devinfo,
&vs_stage->prog_data.vs.base.vue_map,
vs_stage->nir->info.outputs_written,
- vs_stage->nir->info.separate_shader);
-
- return brw_compile_vs(compiler, device, mem_ctx, &vs_stage->key.vs,
- &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
+ vs_stage->nir->info.separate_shader,
+ pos_slots);
+
+ vs_stage->num_stats = 1;
+ vs_stage->code = brw_compile_vs(compiler, pipeline->base.device, mem_ctx,
+ &vs_stage->key.vs,
+ &vs_stage->prog_data.vs,
+ vs_stage->nir, -1,
+ vs_stage->stats, NULL);
}
static void
tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
}
-static const unsigned *
+static void
anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
void *mem_ctx,
struct anv_device *device,
tcs_stage->key.tcs.patch_outputs_written =
tcs_stage->nir->info.patch_outputs_written;
- return brw_compile_tcs(compiler, device, mem_ctx, &tcs_stage->key.tcs,
- &tcs_stage->prog_data.tcs, tcs_stage->nir,
- -1, NULL);
+ tcs_stage->num_stats = 1;
+ tcs_stage->code = brw_compile_tcs(compiler, device, mem_ctx,
+ &tcs_stage->key.tcs,
+ &tcs_stage->prog_data.tcs,
+ tcs_stage->nir, -1,
+ tcs_stage->stats, NULL);
}
static void
brw_nir_link_shaders(compiler, tes_stage->nir, next_stage->nir);
}
-static const unsigned *
+static void
anv_pipeline_compile_tes(const struct brw_compiler *compiler,
void *mem_ctx,
struct anv_device *device,
tes_stage->key.tes.patch_inputs_read =
tcs_stage->nir->info.patch_outputs_written;
- return brw_compile_tes(compiler, device, mem_ctx, &tes_stage->key.tes,
- &tcs_stage->prog_data.tcs.base.vue_map,
- &tes_stage->prog_data.tes, tes_stage->nir,
- NULL, -1, NULL);
+ tes_stage->num_stats = 1;
+ tes_stage->code = brw_compile_tes(compiler, device, mem_ctx,
+ &tes_stage->key.tes,
+ &tcs_stage->prog_data.tcs.base.vue_map,
+ &tes_stage->prog_data.tes,
+ tes_stage->nir, -1,
+ tes_stage->stats, NULL);
}
static void
brw_nir_link_shaders(compiler, gs_stage->nir, next_stage->nir);
}
-static const unsigned *
+static void
anv_pipeline_compile_gs(const struct brw_compiler *compiler,
void *mem_ctx,
struct anv_device *device,
brw_compute_vue_map(compiler->devinfo,
&gs_stage->prog_data.gs.base.vue_map,
gs_stage->nir->info.outputs_written,
- gs_stage->nir->info.separate_shader);
-
- return brw_compile_gs(compiler, device, mem_ctx, &gs_stage->key.gs,
- &gs_stage->prog_data.gs, gs_stage->nir,
- NULL, -1, NULL);
+ gs_stage->nir->info.separate_shader, 1);
+
+ gs_stage->num_stats = 1;
+ gs_stage->code = brw_compile_gs(compiler, device, mem_ctx,
+ &gs_stage->key.gs,
+ &gs_stage->prog_data.gs,
+ gs_stage->nir, NULL, -1,
+ gs_stage->stats, NULL);
}
static void
anv_pipeline_link_fs(const struct brw_compiler *compiler,
struct anv_pipeline_stage *stage)
{
- unsigned num_rts = 0;
- const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
- struct anv_pipeline_binding rt_bindings[max_rt];
- nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
- int rt_to_bindings[max_rt];
- memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
- bool rt_used[max_rt];
- memset(rt_used, 0, sizeof(rt_used));
-
- /* Flag used render targets */
- nir_foreach_variable_safe(var, &stage->nir->outputs) {
- if (var->data.location < FRAG_RESULT_DATA0)
- continue;
-
- const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
- /* Out-of-bounds */
- if (rt >= MAX_RTS)
- continue;
-
- const unsigned array_len =
- glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
- assert(rt + array_len <= max_rt);
-
- /* Unused */
- if (!(stage->key.wm.color_outputs_valid & BITFIELD_RANGE(rt, array_len))) {
- /* If this is the RT at location 0 and we have alpha to coverage
- * enabled we will have to create a null RT for it, so mark it as
- * used.
- */
- if (rt > 0 || !stage->key.wm.alpha_to_coverage)
- continue;
+ unsigned num_rt_bindings;
+ struct anv_pipeline_binding rt_bindings[MAX_RTS];
+ if (stage->key.wm.nr_color_regions > 0) {
+ assert(stage->key.wm.nr_color_regions <= MAX_RTS);
+ for (unsigned rt = 0; rt < stage->key.wm.nr_color_regions; rt++) {
+ if (stage->key.wm.color_outputs_valid & BITFIELD_BIT(rt)) {
+ rt_bindings[rt] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .index = rt,
+ };
+ } else {
+ /* Setup a null render target */
+ rt_bindings[rt] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .index = UINT32_MAX,
+ };
+ }
}
-
- for (unsigned i = 0; i < array_len; i++)
- rt_used[rt + i] = true;
+ num_rt_bindings = stage->key.wm.nr_color_regions;
+ } else {
+ /* Setup a null render target */
+ rt_bindings[0] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .index = UINT32_MAX,
+ };
+ num_rt_bindings = 1;
}
- /* Set new, compacted, location */
- for (unsigned i = 0; i < max_rt; i++) {
- if (!rt_used[i])
- continue;
-
- rt_to_bindings[i] = num_rts;
-
- if (stage->key.wm.color_outputs_valid & (1 << i)) {
- rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
- .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
- .binding = 0,
- .index = i,
- };
- } else {
- /* Setup a null render target */
- rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
- .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
- .binding = 0,
- .index = UINT32_MAX,
- };
- }
-
- num_rts++;
- }
+ assert(num_rt_bindings <= MAX_RTS);
+ assert(stage->bind_map.surface_count == 0);
+ typed_memcpy(stage->bind_map.surface_to_descriptor,
+ rt_bindings, num_rt_bindings);
+ stage->bind_map.surface_count += num_rt_bindings;
+ /* Now that we've set up the color attachments, we can go through and
+ * eliminate any shader outputs that map to VK_ATTACHMENT_UNUSED in the
+    * hopes that dead-code elimination can clean them up in this and any
+    * earlier shader stages.
+ */
+ nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
bool deleted_output = false;
- nir_foreach_variable_safe(var, &stage->nir->outputs) {
+ nir_foreach_shader_out_variable_safe(var, stage->nir) {
+ /* TODO: We don't delete depth/stencil writes. We probably could if the
+ * subpass doesn't have a depth/stencil attachment.
+ */
if (var->data.location < FRAG_RESULT_DATA0)
continue;
const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
- if (rt >= MAX_RTS || !rt_used[rt]) {
- /* Unused or out-of-bounds, throw it away, unless it is the first
- * RT and we have alpha to coverage enabled.
- */
+ /* If this is the RT at location 0 and we have alpha to coverage
+ * enabled we still need that write because it will affect the coverage
+ * mask even if it's never written to a color target.
+ */
+ if (rt == 0 && stage->key.wm.alpha_to_coverage)
+ continue;
+
+ const unsigned array_len =
+ glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
+ assert(rt + array_len <= MAX_RTS);
+
+ if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid &
+ BITFIELD_RANGE(rt, array_len))) {
deleted_output = true;
var->data.mode = nir_var_function_temp;
exec_node_remove(&var->node);
exec_list_push_tail(&impl->locals, &var->node);
- continue;
}
-
- /* Give it the new location */
- assert(rt_to_bindings[rt] != -1);
- var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
}
if (deleted_output)
nir_fixup_deref_modes(stage->nir);
- if (num_rts == 0) {
- /* If we have no render targets, we need a null render target */
- rt_bindings[0] = (struct anv_pipeline_binding) {
- .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
- .binding = 0,
- .index = UINT32_MAX,
- };
- num_rts = 1;
- }
-
- /* Now that we've determined the actual number of render targets, adjust
- * the key accordingly.
+ /* We stored the number of subpass color attachments in nr_color_regions
+ * when calculating the key for caching. Now that we've computed the bind
+ * map, we can reduce this to the actual max before we go into the back-end
+ * compiler.
*/
- stage->key.wm.nr_color_regions = num_rts;
- stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;
-
- assert(num_rts <= max_rt);
- assert(stage->bind_map.surface_count == 0);
- typed_memcpy(stage->bind_map.surface_to_descriptor,
- rt_bindings, num_rts);
- stage->bind_map.surface_count += num_rts;
+ stage->key.wm.nr_color_regions =
+ util_last_bit(stage->key.wm.color_outputs_valid);
}
-static const unsigned *
+static void
anv_pipeline_compile_fs(const struct brw_compiler *compiler,
void *mem_ctx,
struct anv_device *device,
fs_stage->key.wm.input_slots_valid =
prev_stage->prog_data.vue.vue_map.slots_valid;
- const unsigned *code =
- brw_compile_fs(compiler, device, mem_ctx, &fs_stage->key.wm,
- &fs_stage->prog_data.wm, fs_stage->nir,
- NULL, -1, -1, -1, true, false, NULL, NULL);
+ fs_stage->code = brw_compile_fs(compiler, device, mem_ctx,
+ &fs_stage->key.wm,
+ &fs_stage->prog_data.wm,
+ fs_stage->nir, -1, -1, -1,
+ true, false, NULL,
+ fs_stage->stats, NULL);
- if (fs_stage->key.wm.nr_color_regions == 0 &&
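+   /* brw_compile_fs() can produce up to three binaries (SIMD8, SIMD16 and
+    * SIMD32); one brw_compile_stats entry is filled for each enabled
+    * dispatch width.
+    */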
+ fs_stage->num_stats = (uint32_t)fs_stage->prog_data.wm.dispatch_8 +
+ (uint32_t)fs_stage->prog_data.wm.dispatch_16 +
+ (uint32_t)fs_stage->prog_data.wm.dispatch_32;
+
+ if (fs_stage->key.wm.color_outputs_valid == 0 &&
!fs_stage->prog_data.wm.has_side_effects &&
+ !fs_stage->prog_data.wm.uses_omask &&
+ !fs_stage->key.wm.alpha_to_coverage &&
!fs_stage->prog_data.wm.uses_kill &&
fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
!fs_stage->prog_data.wm.computed_stencil) {
*/
memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
}
+}
+
+static void
+anv_pipeline_add_executable(struct anv_pipeline *pipeline,
+ struct anv_pipeline_stage *stage,
+ struct brw_compile_stats *stats,
+ uint32_t code_offset)
+{
+ char *nir = NULL;
+ if (stage->nir &&
+ (pipeline->flags &
+ VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
+ char *stream_data = NULL;
+ size_t stream_size = 0;
+ FILE *stream = open_memstream(&stream_data, &stream_size);
+
+ nir_print_shader(stage->nir, stream);
+
+ fclose(stream);
+
+ /* Copy it to a ralloc'd thing */
+ nir = ralloc_size(pipeline->mem_ctx, stream_size + 1);
+ memcpy(nir, stream_data, stream_size);
+ nir[stream_size] = 0;
+
+ free(stream_data);
+ }
+
+ char *disasm = NULL;
+ if (stage->code &&
+ (pipeline->flags &
+ VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
+ char *stream_data = NULL;
+ size_t stream_size = 0;
+ FILE *stream = open_memstream(&stream_data, &stream_size);
+
+ uint32_t push_size = 0;
+ for (unsigned i = 0; i < 4; i++)
+ push_size += stage->bind_map.push_ranges[i].length;
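+      /* push_ranges[] start/length are expressed in 32-byte units, hence
+       * the "* 32" when printing byte sizes below.
+       */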
+ if (push_size > 0) {
+ fprintf(stream, "Push constant ranges:\n");
+ for (unsigned i = 0; i < 4; i++) {
+ if (stage->bind_map.push_ranges[i].length == 0)
+ continue;
+
+ fprintf(stream, " RANGE%d (%dB): ", i,
+ stage->bind_map.push_ranges[i].length * 32);
+
+ switch (stage->bind_map.push_ranges[i].set) {
+ case ANV_DESCRIPTOR_SET_NULL:
+ fprintf(stream, "NULL");
+ break;
+
+ case ANV_DESCRIPTOR_SET_PUSH_CONSTANTS:
+ fprintf(stream, "Vulkan push constants and API params");
+ break;
+
+ case ANV_DESCRIPTOR_SET_DESCRIPTORS:
+ fprintf(stream, "Descriptor buffer for set %d (start=%dB)",
+ stage->bind_map.push_ranges[i].index,
+ stage->bind_map.push_ranges[i].start * 32);
+ break;
+
+ case ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS:
+ unreachable("gl_NumWorkgroups is never pushed");
+
+ case ANV_DESCRIPTOR_SET_SHADER_CONSTANTS:
+ fprintf(stream, "Inline shader constant data (start=%dB)",
+ stage->bind_map.push_ranges[i].start * 32);
+ break;
+
+ case ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS:
+ unreachable("Color attachments can't be pushed");
+
+ default:
+ fprintf(stream, "UBO (set=%d binding=%d start=%dB)",
+ stage->bind_map.push_ranges[i].set,
+ stage->bind_map.push_ranges[i].index,
+ stage->bind_map.push_ranges[i].start * 32);
+ break;
+ }
+ fprintf(stream, "\n");
+ }
+ fprintf(stream, "\n");
+ }
+
+ /* Creating this is far cheaper than it looks. It's perfectly fine to
+ * do it for every binary.
+ */
+ struct gen_disasm *d = gen_disasm_create(&pipeline->device->info);
+ gen_disasm_disassemble(d, stage->code, code_offset, stream);
+ gen_disasm_destroy(d);
+
+ fclose(stream);
- return code;
+ /* Copy it to a ralloc'd thing */
+ disasm = ralloc_size(pipeline->mem_ctx, stream_size + 1);
+ memcpy(disasm, stream_data, stream_size);
+ disasm[stream_size] = 0;
+
+ free(stream_data);
+ }
+
+ const struct anv_pipeline_executable exe = {
+ .stage = stage->stage,
+ .stats = *stats,
+ .nir = nir,
+ .disasm = disasm,
+ };
+ util_dynarray_append(&pipeline->executables,
+ struct anv_pipeline_executable, exe);
+}
+
+static void
+anv_pipeline_add_executables(struct anv_pipeline *pipeline,
+ struct anv_pipeline_stage *stage,
+ struct anv_shader_bin *bin)
+{
+ if (stage->stage == MESA_SHADER_FRAGMENT) {
+ /* We pull the prog data and stats out of the anv_shader_bin because
+ * the anv_pipeline_stage may not be fully populated if we successfully
+ * looked up the shader in a cache.
+ */
+ const struct brw_wm_prog_data *wm_prog_data =
+ (const struct brw_wm_prog_data *)bin->prog_data;
+ struct brw_compile_stats *stats = bin->stats;
+
+ if (wm_prog_data->dispatch_8) {
+ anv_pipeline_add_executable(pipeline, stage, stats++, 0);
+ }
+
+ if (wm_prog_data->dispatch_16) {
+ anv_pipeline_add_executable(pipeline, stage, stats++,
+ wm_prog_data->prog_offset_16);
+ }
+
+ if (wm_prog_data->dispatch_32) {
+ anv_pipeline_add_executable(pipeline, stage, stats++,
+ wm_prog_data->prog_offset_32);
+ }
+ } else {
+ anv_pipeline_add_executable(pipeline, stage, bin->stats, 0);
+ }
+}
+
+static void
+anv_pipeline_init_from_cached_graphics(struct anv_graphics_pipeline *pipeline)
+{
+ /* TODO: Cache this pipeline-wide information. */
+
+ /* Primitive replication depends on information from all the shaders.
+    * Recover this bit from the fact that the vertex shader ends up with
+    * more than one position slot when primitive replication is in use.
+ */
+ assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
+ int pos_slots = 0;
+ const struct brw_vue_prog_data *vue_prog_data =
+ (const void *) pipeline->shaders[MESA_SHADER_VERTEX]->prog_data;
+ const struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
+ for (int i = 0; i < vue_map->num_slots; i++) {
+ if (vue_map->slot_to_varying[i] == VARYING_SLOT_POS)
+ pos_slots++;
+ }
+ pipeline->use_primitive_replication = pos_slots > 1;
}
static VkResult
-anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
+anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
struct anv_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *info)
{
};
int64_t pipeline_start = os_time_get_nano();
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
+ const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;
struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
pipeline->active_stages = 0;
stages[stage].spec_info,
stages[stage].shader_sha1);
- const struct gen_device_info *devinfo = &pipeline->device->info;
+ const struct gen_device_info *devinfo = &pipeline->base.device->info;
switch (stage) {
case MESA_SHADER_VERTEX:
populate_vs_prog_key(devinfo, sinfo->flags, &stages[stage].key.vs);
case MESA_SHADER_GEOMETRY:
populate_gs_prog_key(devinfo, sinfo->flags, &stages[stage].key.gs);
break;
- case MESA_SHADER_FRAGMENT:
+ case MESA_SHADER_FRAGMENT: {
+ const bool raster_enabled =
+ !info->pRasterizationState->rasterizerDiscardEnable;
populate_wm_prog_key(devinfo, sinfo->flags,
pipeline->subpass,
- info->pMultisampleState,
+ raster_enabled ? info->pMultisampleState : NULL,
&stages[stage].key.wm);
break;
+ }
default:
unreachable("Invalid graphics shader stage");
}
unsigned char sha1[20];
anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
- unsigned found = 0;
- unsigned cache_hits = 0;
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
if (!stages[s].entrypoint)
continue;
- int64_t stage_start = os_time_get_nano();
-
stages[s].cache_key.stage = s;
memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
+ }
- bool cache_hit;
- struct anv_shader_bin *bin =
- anv_device_search_for_kernel(pipeline->device, cache,
- &stages[s].cache_key,
- sizeof(stages[s].cache_key), &cache_hit);
- if (bin) {
- found++;
- pipeline->shaders[s] = bin;
- }
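+   /* Capturing internal representations requires the freshly generated NIR
+    * and assembly, so bypass the shader cache lookup and recompile in that
+    * case.
+    */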
+ const bool skip_cache_lookup =
+ (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
- if (cache_hit) {
- cache_hits++;
- stages[s].feedback.flags |=
- VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
- }
- stages[s].feedback.duration += os_time_get_nano() - stage_start;
- }
+ if (!skip_cache_lookup) {
+ unsigned found = 0;
+ unsigned cache_hits = 0;
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (!stages[s].entrypoint)
+ continue;
+
+ int64_t stage_start = os_time_get_nano();
- if (found == __builtin_popcount(pipeline->active_stages)) {
- if (cache_hits == found) {
- pipeline_feedback.flags |=
- VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ bool cache_hit;
+ struct anv_shader_bin *bin =
+ anv_device_search_for_kernel(pipeline->base.device, cache,
+ &stages[s].cache_key,
+ sizeof(stages[s].cache_key), &cache_hit);
+ if (bin) {
+ found++;
+ pipeline->shaders[s] = bin;
+ }
+
+ if (cache_hit) {
+ cache_hits++;
+ stages[s].feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ }
+ stages[s].feedback.duration += os_time_get_nano() - stage_start;
}
- /* We found all our shaders in the cache. We're done. */
- goto done;
- } else if (found > 0) {
- /* We found some but not all of our shaders. This shouldn't happen
- * most of the time but it can if we have a partially populated
- * pipeline cache.
- */
- assert(found < __builtin_popcount(pipeline->active_stages));
-
- vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
- VK_DEBUG_REPORT_WARNING_BIT_EXT |
- VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
- (uint64_t)(uintptr_t)cache,
- 0, 0, "anv",
- "Found a partial pipeline in the cache. This is "
- "most likely caused by an incomplete pipeline cache "
- "import or export");
-
- /* We're going to have to recompile anyway, so just throw away our
- * references to the shaders in the cache. We'll get them out of the
- * cache again as part of the compilation process.
- */
- for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
- stages[s].feedback.flags = 0;
- if (pipeline->shaders[s]) {
- anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
- pipeline->shaders[s] = NULL;
+
+ if (found == __builtin_popcount(pipeline->active_stages)) {
+ if (cache_hits == found) {
+ pipeline_feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ }
+ /* We found all our shaders in the cache. We're done. */
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (!stages[s].entrypoint)
+ continue;
+
+ anv_pipeline_add_executables(&pipeline->base, &stages[s],
+ pipeline->shaders[s]);
+ }
+ anv_pipeline_init_from_cached_graphics(pipeline);
+ goto done;
+ } else if (found > 0) {
+ /* We found some but not all of our shaders. This shouldn't happen
+ * most of the time but it can if we have a partially populated
+ * pipeline cache.
+ */
+ assert(found < __builtin_popcount(pipeline->active_stages));
+
+ vk_debug_report(&pipeline->base.device->physical->instance->debug_report_callbacks,
+ VK_DEBUG_REPORT_WARNING_BIT_EXT |
+ VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
+ (uint64_t)(uintptr_t)cache,
+ 0, 0, "anv",
+ "Found a partial pipeline in the cache. This is "
+ "most likely caused by an incomplete pipeline cache "
+ "import or export");
+
+ /* We're going to have to recompile anyway, so just throw away our
+ * references to the shaders in the cache. We'll get them out of the
+ * cache again as part of the compilation process.
+ */
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ stages[s].feedback.flags = 0;
+ if (pipeline->shaders[s]) {
+ anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
+ pipeline->shaders[s] = NULL;
+ }
}
}
}
+ if (info->flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT)
+ return VK_PIPELINE_COMPILE_REQUIRED_EXT;
+
void *pipeline_ctx = ralloc_context(NULL);
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
.sampler_to_descriptor = stages[s].sampler_to_descriptor
};
- stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
+ stages[s].nir = anv_pipeline_stage_get_nir(&pipeline->base, cache,
pipeline_ctx,
&stages[s]);
if (stages[s].nir == NULL) {
next_stage = &stages[s];
}
+ if (pipeline->base.device->info.gen >= 12 &&
+ pipeline->subpass->view_mask != 0) {
+ /* For some pipelines HW Primitive Replication can be used instead of
+    * instancing to implement Multiview.  This depends on how viewIndex is
+    * used in all the active shaders, so this check can't be done per
+    * individual shader.
+ */
+ nir_shader *shaders[MESA_SHADER_STAGES] = {};
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++)
+ shaders[s] = stages[s].nir;
+
+ pipeline->use_primitive_replication =
+ anv_check_for_primitive_replication(shaders, pipeline);
+ } else {
+ pipeline->use_primitive_replication = false;
+ }
+
struct anv_pipeline_stage *prev_stage = NULL;
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
if (!stages[s].entrypoint)
void *stage_ctx = ralloc_context(NULL);
+ anv_pipeline_lower_nir(&pipeline->base, stage_ctx, &stages[s], layout);
+
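+      /* When the backend requires unified interfaces between stages, mirror
+       * the written/read bits (and patch I/O) between this stage and the
+       * previous one, leaving the tessellation levels untouched.
+       */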
+ if (prev_stage && compiler->glsl_compiler_options[s].NirOptions->unify_interfaces) {
+ prev_stage->nir->info.outputs_written |= stages[s].nir->info.inputs_read &
+ ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
+ stages[s].nir->info.inputs_read |= prev_stage->nir->info.outputs_written &
+ ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
+ prev_stage->nir->info.patch_outputs_written |= stages[s].nir->info.patch_inputs_read;
+ stages[s].nir->info.patch_inputs_read |= prev_stage->nir->info.patch_outputs_written;
+ }
+
+ ralloc_free(stage_ctx);
+
+ stages[s].feedback.duration += os_time_get_nano() - stage_start;
+
+ prev_stage = &stages[s];
+ }
+
+ prev_stage = NULL;
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (!stages[s].entrypoint)
+ continue;
+
+ int64_t stage_start = os_time_get_nano();
+
+ void *stage_ctx = ralloc_context(NULL);
+
nir_xfb_info *xfb_info = NULL;
if (s == MESA_SHADER_VERTEX ||
s == MESA_SHADER_TESS_EVAL ||
s == MESA_SHADER_GEOMETRY)
xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
- anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
-
- const unsigned *code;
switch (s) {
case MESA_SHADER_VERTEX:
- code = anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
- &stages[s]);
+ anv_pipeline_compile_vs(compiler, stage_ctx, pipeline,
+ &stages[s]);
break;
case MESA_SHADER_TESS_CTRL:
- code = anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
- &stages[s], prev_stage);
+ anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->base.device,
+ &stages[s], prev_stage);
break;
case MESA_SHADER_TESS_EVAL:
- code = anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
- &stages[s], prev_stage);
+ anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->base.device,
+ &stages[s], prev_stage);
break;
case MESA_SHADER_GEOMETRY:
- code = anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
- &stages[s], prev_stage);
+ anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->base.device,
+ &stages[s], prev_stage);
break;
case MESA_SHADER_FRAGMENT:
- code = anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
- &stages[s], prev_stage);
+ anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->base.device,
+ &stages[s], prev_stage);
break;
default:
unreachable("Invalid graphics shader stage");
}
- if (code == NULL) {
+ if (stages[s].code == NULL) {
ralloc_free(stage_ctx);
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail;
}
+ anv_nir_validate_push_layout(&stages[s].prog_data.base,
+ &stages[s].bind_map);
+
struct anv_shader_bin *bin =
- anv_device_upload_kernel(pipeline->device, cache,
+ anv_device_upload_kernel(pipeline->base.device, cache, s,
&stages[s].cache_key,
sizeof(stages[s].cache_key),
- code, stages[s].prog_data.base.program_size,
+ stages[s].code,
+ stages[s].prog_data.base.program_size,
stages[s].nir->constant_data,
stages[s].nir->constant_data_size,
&stages[s].prog_data.base,
brw_prog_data_size(s),
+ stages[s].stats, stages[s].num_stats,
xfb_info, &stages[s].bind_map);
if (!bin) {
ralloc_free(stage_ctx);
goto fail;
}
+ anv_pipeline_add_executables(&pipeline->base, &stages[s], bin);
+
pipeline->shaders[s] = bin;
ralloc_free(stage_ctx);
/* This can happen if we decided to implicitly disable the fragment
* shader. See anv_pipeline_compile_fs().
*/
- anv_shader_bin_unref(pipeline->device,
+ anv_shader_bin_unref(pipeline->base.device,
pipeline->shaders[MESA_SHADER_FRAGMENT]);
pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
if (pipeline->shaders[s])
- anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
+ anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
}
return result;
}
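+/* glsl_type size/align callback for nir_lower_vars_to_explicit_types:
+ * shared (SLM) variables get natural component alignment, with vec3
+ * promoted to vec4 alignment.
+ */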
+static void
+shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
+{
+ assert(glsl_type_is_vector_or_scalar(type));
+
+ uint32_t comp_size = glsl_type_is_boolean(type)
+ ? 4 : glsl_get_bit_size(type) / 8;
+ unsigned length = glsl_get_vector_elements(type);
+   *size = comp_size * length;
+ *align = comp_size * (length == 3 ? 4 : length);
+}
+
VkResult
-anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
+anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
struct anv_pipeline_cache *cache,
const VkComputePipelineCreateInfo *info,
const struct anv_shader_module *module,
};
int64_t pipeline_start = os_time_get_nano();
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
+ const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;
struct anv_pipeline_stage stage = {
.stage = MESA_SHADER_COMPUTE,
vk_find_struct_const(info->stage.pNext,
PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
- populate_cs_prog_key(&pipeline->device->info, info->stage.flags,
+ populate_cs_prog_key(&pipeline->base.device->info, info->stage.flags,
rss_info, &stage.key.cs);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
+ const bool skip_cache_lookup =
+ (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
+
anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
- bool cache_hit;
- bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
- sizeof(stage.cache_key), &cache_hit);
+ bool cache_hit = false;
+ if (!skip_cache_lookup) {
+ bin = anv_device_search_for_kernel(pipeline->base.device, cache,
+ &stage.cache_key,
+ sizeof(stage.cache_key),
+ &cache_hit);
+ }
+
+ if (bin == NULL &&
+ (info->flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT))
+ return VK_PIPELINE_COMPILE_REQUIRED_EXT;
+
+ void *mem_ctx = ralloc_context(NULL);
if (bin == NULL) {
int64_t stage_start = os_time_get_nano();
.set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
};
- void *mem_ctx = ralloc_context(NULL);
-
- stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
+ stage.nir = anv_pipeline_stage_get_nir(&pipeline->base, cache, mem_ctx, &stage);
if (stage.nir == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
+ NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id);
+
+ anv_pipeline_lower_nir(&pipeline->base, mem_ctx, &stage, layout);
- NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
- &stage.prog_data.cs);
+ NIR_PASS_V(stage.nir, nir_lower_vars_to_explicit_types,
+ nir_var_mem_shared, shared_type_info);
+ NIR_PASS_V(stage.nir, nir_lower_explicit_io,
+ nir_var_mem_shared, nir_address_format_32bit_offset);
+ NIR_PASS_V(stage.nir, brw_nir_lower_cs_intrinsics);
- const unsigned *shader_code =
- brw_compile_cs(compiler, pipeline->device, mem_ctx, &stage.key.cs,
- &stage.prog_data.cs, stage.nir, -1, NULL);
- if (shader_code == NULL) {
+ stage.num_stats = 1;
+ stage.code = brw_compile_cs(compiler, pipeline->base.device, mem_ctx,
+ &stage.key.cs, &stage.prog_data.cs,
+ stage.nir, -1, stage.stats, NULL);
+ if (stage.code == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
+ anv_nir_validate_push_layout(&stage.prog_data.base, &stage.bind_map);
+
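+   /* If the shader never reads gl_NumWorkGroups, point the binding table
+    * slot that was reserved for it at the null descriptor set instead.
+    */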
+ if (!stage.prog_data.cs.uses_num_work_groups) {
+ assert(stage.bind_map.surface_to_descriptor[0].set ==
+ ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS);
+ stage.bind_map.surface_to_descriptor[0].set = ANV_DESCRIPTOR_SET_NULL;
+ }
+
const unsigned code_size = stage.prog_data.base.program_size;
- bin = anv_device_upload_kernel(pipeline->device, cache,
+ bin = anv_device_upload_kernel(pipeline->base.device, cache,
+ MESA_SHADER_COMPUTE,
&stage.cache_key, sizeof(stage.cache_key),
- shader_code, code_size,
+ stage.code, code_size,
stage.nir->constant_data,
stage.nir->constant_data_size,
&stage.prog_data.base,
sizeof(stage.prog_data.cs),
+ stage.stats, stage.num_stats,
NULL, &stage.bind_map);
if (!bin) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- ralloc_free(mem_ctx);
-
stage.feedback.duration = os_time_get_nano() - stage_start;
}
+ anv_pipeline_add_executables(&pipeline->base, &stage, bin);
+
+ ralloc_free(mem_ctx);
+
if (cache_hit) {
stage.feedback.flags |=
VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
}
- pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
- pipeline->shaders[MESA_SHADER_COMPUTE] = bin;
+ pipeline->cs = bin;
return VK_SUCCESS;
}
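+/* Derive the compute dispatch parameters from the compiled shader: total
+ * invocations per workgroup, the SIMD width chosen for that group size, and
+ * the number of hardware threads needed per workgroup.
+ */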
+struct anv_cs_parameters
+anv_cs_parameters(const struct anv_compute_pipeline *pipeline)
+{
+ const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
+
+ struct anv_cs_parameters cs_params = {};
+
+ cs_params.group_size = cs_prog_data->local_size[0] *
+ cs_prog_data->local_size[1] *
+ cs_prog_data->local_size[2];
+ cs_params.simd_size =
+ brw_cs_simd_size_for_group_size(&pipeline->base.device->info,
+ cs_prog_data, cs_params.group_size);
+ cs_params.threads = DIV_ROUND_UP(cs_params.group_size, cs_params.simd_size);
+
+ return cs_params;
+}
+
/**
* Copy pipeline state not marked as dynamic.
* Dynamic state is pipeline state which hasn't been provided at pipeline
* @param[in] pCreateInfo Source of non_dynamic state to be copied.
*/
static void
-copy_non_dynamic_state(struct anv_pipeline *pipeline,
+copy_non_dynamic_state(struct anv_graphics_pipeline *pipeline,
const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
if (pCreateInfo->pDynamicState) {
/* Remove all of the states that are marked as dynamic */
uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
- for (uint32_t s = 0; s < count; s++)
- states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
+ for (uint32_t s = 0; s < count; s++) {
+ states &= ~anv_cmd_dirty_bit_for_vk_dynamic_state(
+ pCreateInfo->pDynamicState->pDynamicStates[s]);
+ }
}
struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
assert(pCreateInfo->pViewportState);
dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
- if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
typed_memcpy(dynamic->viewport.viewports,
pCreateInfo->pViewportState->pViewports,
pCreateInfo->pViewportState->viewportCount);
}
dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
- if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
typed_memcpy(dynamic->scissor.scissors,
pCreateInfo->pViewportState->pScissors,
pCreateInfo->pViewportState->scissorCount);
}
}
- if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
assert(pCreateInfo->pRasterizationState);
dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
}
- if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS) {
assert(pCreateInfo->pRasterizationState);
dynamic->depth_bias.bias =
pCreateInfo->pRasterizationState->depthBiasConstantFactor;
pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
}
+ if (states & ANV_CMD_DIRTY_DYNAMIC_CULL_MODE) {
+ assert(pCreateInfo->pRasterizationState);
+ dynamic->cull_mode =
+ pCreateInfo->pRasterizationState->cullMode;
+ }
+
+ if (states & ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE) {
+ assert(pCreateInfo->pRasterizationState);
+ dynamic->front_face =
+ pCreateInfo->pRasterizationState->frontFace;
+ }
+
+ if (states & ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY) {
+ assert(pCreateInfo->pInputAssemblyState);
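+      /* With tessellation enabled the effective topology is a patch list
+       * sized by patchControlPoints rather than the input assembly topology.
+       */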
+ bool has_tess = false;
+ for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
+ const VkPipelineShaderStageCreateInfo *sinfo = &pCreateInfo->pStages[i];
+ gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
+ if (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL)
+ has_tess = true;
+ }
+ if (has_tess) {
+ const VkPipelineTessellationStateCreateInfo *tess_info =
+ pCreateInfo->pTessellationState;
+ dynamic->primitive_topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
+ } else {
+ dynamic->primitive_topology = pCreateInfo->pInputAssemblyState->topology;
+ }
+ }
+
/* Section 9.2 of the Vulkan 1.0.15 spec says:
*
* pColorBlendState is [...] NULL if the pipeline has rasterization
!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
assert(pCreateInfo->pColorBlendState);
- if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
+ if (states & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
typed_memcpy(dynamic->blend_constants,
pCreateInfo->pColorBlendState->blendConstants, 4);
}
subpass->depth_stencil_attachment) {
assert(pCreateInfo->pDepthStencilState);
- if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS) {
dynamic->depth_bounds.min =
pCreateInfo->pDepthStencilState->minDepthBounds;
dynamic->depth_bounds.max =
pCreateInfo->pDepthStencilState->maxDepthBounds;
}
- if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) {
dynamic->stencil_compare_mask.front =
pCreateInfo->pDepthStencilState->front.compareMask;
dynamic->stencil_compare_mask.back =
pCreateInfo->pDepthStencilState->back.compareMask;
}
- if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) {
dynamic->stencil_write_mask.front =
pCreateInfo->pDepthStencilState->front.writeMask;
dynamic->stencil_write_mask.back =
pCreateInfo->pDepthStencilState->back.writeMask;
}
- if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) {
dynamic->stencil_reference.front =
pCreateInfo->pDepthStencilState->front.reference;
dynamic->stencil_reference.back =
pCreateInfo->pDepthStencilState->back.reference;
}
+
+ if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE) {
+ dynamic->depth_test_enable =
+ pCreateInfo->pDepthStencilState->depthTestEnable;
+ }
+
+ if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE) {
+ dynamic->depth_write_enable =
+ pCreateInfo->pDepthStencilState->depthWriteEnable;
+ }
+
+ if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP) {
+ dynamic->depth_compare_op =
+ pCreateInfo->pDepthStencilState->depthCompareOp;
+ }
+
+ if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE) {
+ dynamic->depth_bounds_test_enable =
+ pCreateInfo->pDepthStencilState->depthBoundsTestEnable;
+ }
+
+ if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE) {
+ dynamic->stencil_test_enable =
+ pCreateInfo->pDepthStencilState->stencilTestEnable;
+ }
+
+ if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP) {
+ const VkPipelineDepthStencilStateCreateInfo *info =
+ pCreateInfo->pDepthStencilState;
+ memcpy(&dynamic->stencil_op.front, &info->front,
+ sizeof(dynamic->stencil_op.front));
+ memcpy(&dynamic->stencil_op.back, &info->back,
+ sizeof(dynamic->stencil_op.back));
+ }
+ }
+
+ const VkPipelineRasterizationLineStateCreateInfoEXT *line_state =
+ vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
+ PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
+ if (line_state) {
+ if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE) {
+ dynamic->line_stipple.factor = line_state->lineStippleFactor;
+ dynamic->line_stipple.pattern = line_state->lineStipplePattern;
+ }
}
pipeline->dynamic_state_mask = states;
const struct gen_device_info *devinfo = &pipeline->device->info;
const struct gen_l3_weights w =
- gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
+ gen_get_default_l3_weights(devinfo, true, needs_slm);
- pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
- pipeline->urb.total_size =
- gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
+ pipeline->l3_config = gen_get_l3_config(devinfo, w);
}
VkResult
-anv_pipeline_init(struct anv_pipeline *pipeline,
- struct anv_device *device,
- struct anv_pipeline_cache *cache,
- const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc)
+anv_graphics_pipeline_init(struct anv_graphics_pipeline *pipeline,
+ struct anv_device *device,
+ struct anv_pipeline_cache *cache,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc)
{
VkResult result;
anv_pipeline_validate_create_info(pCreateInfo);
- if (alloc == NULL)
- alloc = &device->alloc;
+ result = anv_pipeline_init(&pipeline->base, device,
+ ANV_PIPELINE_GRAPHICS, pCreateInfo->flags,
+ alloc);
+ if (result != VK_SUCCESS)
+ return result;
- pipeline->device = device;
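+   /* Graphics pipeline state is recorded into the pipeline's own fixed-size
+    * batch_data buffer; this provides the backing storage that
+    * anv_pipeline_init() expects the "child class" to supply.
+    */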
+ anv_batch_set_storage(&pipeline->base.batch, ANV_NULL_ADDRESS,
+ pipeline->batch_data, sizeof(pipeline->batch_data));
ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
assert(pCreateInfo->subpass < render_pass->subpass_count);
pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
- result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
- if (result != VK_SUCCESS)
- return result;
-
- pipeline->batch.alloc = alloc;
- pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
- pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
- pipeline->batch.relocs = &pipeline->batch_relocs;
- pipeline->batch.status = VK_SUCCESS;
+ assert(pCreateInfo->pRasterizationState);
copy_non_dynamic_state(pipeline, pCreateInfo);
- pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
- pCreateInfo->pRasterizationState->depthClampEnable;
+ pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState->depthClampEnable;
/* Previously we enabled depth clipping when !depthClampEnable.
* DepthClipStateCreateInfo now makes depth clipping explicit so if the
PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
- pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
- pCreateInfo->pMultisampleState->sampleShadingEnable;
-
- pipeline->needs_data_cache = false;
+ pipeline->sample_shading_enable =
+ !pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
+ pCreateInfo->pMultisampleState &&
+ pCreateInfo->pMultisampleState->sampleShadingEnable;
/* When we free the pipeline, we detect stages based on the NULL status
* of various prog_data pointers. Make them NULL by default.
result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
if (result != VK_SUCCESS) {
- anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
+ anv_pipeline_finish(&pipeline->base, device, alloc);
return result;
}
assert(pipeline->shaders[MESA_SHADER_VERTEX]);
- anv_pipeline_setup_l3_config(pipeline, false);
+ anv_pipeline_setup_l3_config(&pipeline->base, false);
const VkPipelineVertexInputStateCreateInfo *vi_info =
pCreateInfo->pVertexInputState;
 * the instance divisor by the number of views to ensure that we repeat the
* client's per-instance data once for each view.
*/
- if (pipeline->subpass->view_mask) {
+ if (pipeline->subpass->view_mask && !pipeline->use_primitive_replication) {
const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
if (pipeline->vb[vb].instanced)
return VK_SUCCESS;
}
+
+#define WRITE_STR(field, ...) ({ \
+ memset(field, 0, sizeof(field)); \
+ UNUSED int i = snprintf(field, sizeof(field), __VA_ARGS__); \
+ assert(i > 0 && i < sizeof(field)); \
+})
+
+VkResult anv_GetPipelineExecutablePropertiesKHR(
+ VkDevice device,
+ const VkPipelineInfoKHR* pPipelineInfo,
+ uint32_t* pExecutableCount,
+ VkPipelineExecutablePropertiesKHR* pProperties)
+{
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, pPipelineInfo->pipeline);
+ VK_OUTARRAY_MAKE(out, pProperties, pExecutableCount);
+
+ util_dynarray_foreach (&pipeline->executables, struct anv_pipeline_executable, exe) {
+ vk_outarray_append(&out, props) {
+ gl_shader_stage stage = exe->stage;
+ props->stages = mesa_to_vk_shader_stage(stage);
+
+ unsigned simd_width = exe->stats.dispatch_width;
+ if (stage == MESA_SHADER_FRAGMENT) {
+ WRITE_STR(props->name, "%s%d %s",
+ simd_width ? "SIMD" : "vec",
+ simd_width ? simd_width : 4,
+ _mesa_shader_stage_to_string(stage));
+ } else {
+ WRITE_STR(props->name, "%s", _mesa_shader_stage_to_string(stage));
+ }
+ WRITE_STR(props->description, "%s%d %s shader",
+ simd_width ? "SIMD" : "vec",
+ simd_width ? simd_width : 4,
+ _mesa_shader_stage_to_string(stage));
+
+ /* The compiler gives us a dispatch width of 0 for vec4 but Vulkan
+ * wants a subgroup size of 1.
+ */
+ props->subgroupSize = MAX2(simd_width, 1);
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+static const struct anv_pipeline_executable *
+anv_pipeline_get_executable(struct anv_pipeline *pipeline, uint32_t index)
+{
+ assert(index < util_dynarray_num_elements(&pipeline->executables,
+ struct anv_pipeline_executable));
+ return util_dynarray_element(
+ &pipeline->executables, struct anv_pipeline_executable, index);
+}
+
+VkResult anv_GetPipelineExecutableStatisticsKHR(
+ VkDevice device,
+ const VkPipelineExecutableInfoKHR* pExecutableInfo,
+ uint32_t* pStatisticCount,
+ VkPipelineExecutableStatisticKHR* pStatistics)
+{
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
+ VK_OUTARRAY_MAKE(out, pStatistics, pStatisticCount);
+
+ const struct anv_pipeline_executable *exe =
+ anv_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);
+
+ const struct brw_stage_prog_data *prog_data;
+ switch (pipeline->type) {
+ case ANV_PIPELINE_GRAPHICS: {
+ prog_data = anv_pipeline_to_graphics(pipeline)->shaders[exe->stage]->prog_data;
+ break;
+ }
+ case ANV_PIPELINE_COMPUTE: {
+ prog_data = anv_pipeline_to_compute(pipeline)->cs->prog_data;
+ break;
+ }
+ default:
+ unreachable("invalid pipeline type");
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Instruction Count");
+ WRITE_STR(stat->description,
+ "Number of GEN instructions in the final generated "
+ "shader executable.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.instructions;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "SEND Count");
+ WRITE_STR(stat->description,
+ "Number of instructions in the final generated shader "
+ "executable which access external units such as the "
+ "constant cache or the sampler.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.sends;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Loop Count");
+ WRITE_STR(stat->description,
+ "Number of loops (not unrolled) in the final generated "
+ "shader executable.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.loops;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Cycle Count");
+ WRITE_STR(stat->description,
+ "Estimate of the number of EU cycles required to execute "
+ "the final generated executable. This is an estimate only "
+ "and may vary greatly from actual run-time performance.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.cycles;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Spill Count");
+ WRITE_STR(stat->description,
+ "Number of scratch spill operations. This gives a rough "
+ "estimate of the cost incurred due to spilling temporary "
+ "values to memory. If this is non-zero, you may want to "
+ "adjust your shader to reduce register pressure.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.spills;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Fill Count");
+ WRITE_STR(stat->description,
+ "Number of scratch fill operations. This gives a rough "
+ "estimate of the cost incurred due to spilling temporary "
+ "values to memory. If this is non-zero, you may want to "
+ "adjust your shader to reduce register pressure.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.fills;
+ }
+
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Scratch Memory Size");
+ WRITE_STR(stat->description,
+ "Number of bytes of scratch memory required by the "
+ "generated shader executable. If this is non-zero, you "
+ "may want to adjust your shader to reduce register "
+ "pressure.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = prog_data->total_scratch;
+ }
+
+ if (exe->stage == MESA_SHADER_COMPUTE) {
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "Workgroup Memory Size");
+ WRITE_STR(stat->description,
+ "Number of bytes of workgroup shared memory used by this "
+ "compute shader including any padding.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = brw_cs_prog_data_const(prog_data)->slm_size;
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
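+/* Follows the usual Vulkan two-call idiom: when pData is NULL only the
+ * required size is reported; otherwise the text is copied and false is
+ * returned if it did not fit.
+ */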
+static bool
+write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
+ const char *data)
+{
+ ir->isText = VK_TRUE;
+
+ size_t data_len = strlen(data) + 1;
+
+ if (ir->pData == NULL) {
+ ir->dataSize = data_len;
+ return true;
+ }
+
+ strncpy(ir->pData, data, ir->dataSize);
+ if (ir->dataSize < data_len)
+ return false;
+
+ ir->dataSize = data_len;
+ return true;
+}
+
+VkResult anv_GetPipelineExecutableInternalRepresentationsKHR(
+ VkDevice device,
+ const VkPipelineExecutableInfoKHR* pExecutableInfo,
+ uint32_t* pInternalRepresentationCount,
+ VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
+{
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
+ VK_OUTARRAY_MAKE(out, pInternalRepresentations,
+ pInternalRepresentationCount);
+ bool incomplete_text = false;
+
+ const struct anv_pipeline_executable *exe =
+ anv_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);
+
+ if (exe->nir) {
+ vk_outarray_append(&out, ir) {
+ WRITE_STR(ir->name, "Final NIR");
+ WRITE_STR(ir->description,
+ "Final NIR before going into the back-end compiler");
+
+ if (!write_ir_text(ir, exe->nir))
+ incomplete_text = true;
+ }
+ }
+
+ if (exe->disasm) {
+ vk_outarray_append(&out, ir) {
+ WRITE_STR(ir->name, "GEN Assembly");
+ WRITE_STR(ir->description,
+ "Final GEN assembly for the generated shader binary");
+
+ if (!write_ir_text(ir, exe->disasm))
+ incomplete_text = true;
+ }
+ }
+
+ return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
+}