#include <fcntl.h>
#include "util/mesa-sha1.h"
+#include "util/os_time.h"
#include "common/gen_l3_config.h"
#include "anv_private.h"
#include "compiler/brw_nir.h"
#include "anv_nir.h"
+#include "nir/nir_xfb_info.h"
#include "spirv/nir_spirv.h"
+#include "vk_util.h"
/* Needed for SWIZZLE macros */
#include "program/prog_instruction.h"
[MESA_SHADER_COMPUTE] = DEBUG_CS,
};
+struct anv_spirv_debug_data {
+ struct anv_device *device;
+ const struct anv_shader_module *module;
+};
+
+static void
+anv_spirv_nir_debug(void *private_data,
+ enum nir_spirv_debug_level level,
+ size_t spirv_offset,
+ const char *message)
+{
+ struct anv_spirv_debug_data *debug_data = private_data;
+ static const VkDebugReportFlagsEXT vk_flags[] = {
+ [NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
+ [NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
+ [NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ };
+ char buffer[256];
+
+ snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s",
+          (unsigned long) spirv_offset, message);
+
+ vk_debug_report(&debug_data->device->instance->debug_report_callbacks,
+ vk_flags[level],
+ VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
+ (uint64_t) (uintptr_t) debug_data->module,
+ 0, 0, "anv", buffer);
+}
+
/* Eventually, this will become part of anv_CreateShader. Unfortunately,
* we can't do that yet because we don't have the ability to copy nir.
*/
static nir_shader *
-anv_shader_compile_to_nir(struct anv_pipeline *pipeline,
+anv_shader_compile_to_nir(struct anv_device *device,
void *mem_ctx,
- struct anv_shader_module *module,
+ const struct anv_shader_module *module,
const char *entrypoint_name,
gl_shader_stage stage,
const VkSpecializationInfo *spec_info)
{
- const struct anv_device *device = pipeline->device;
-
- const struct brw_compiler *compiler =
- device->instance->physicalDevice.compiler;
+ const struct anv_physical_device *pdevice =
+ &device->instance->physicalDevice;
+ const struct brw_compiler *compiler = pdevice->compiler;
const nir_shader_compiler_options *nir_options =
compiler->glsl_compiler_options[stage].NirOptions;
}
}
+ struct anv_spirv_debug_data spirv_debug_data = {
+ .device = device,
+ .module = module,
+ };
struct spirv_to_nir_options spirv_options = {
.lower_workgroup_access_to_offsets = true,
.caps = {
- .float64 = device->instance->physicalDevice.info.gen >= 8,
- .int64 = device->instance->physicalDevice.info.gen >= 8,
- .tessellation = true,
+ .demote_to_helper_invocation = true,
+ .derivative_group = true,
+ .descriptor_array_dynamic_indexing = true,
+ .descriptor_array_non_uniform_indexing = true,
+ .descriptor_indexing = true,
.device_group = true,
.draw_parameters = true,
+ .float16 = pdevice->info.gen >= 8,
+ .float64 = pdevice->info.gen >= 8,
+ .fragment_shader_sample_interlock = pdevice->info.gen >= 9,
+ .fragment_shader_pixel_interlock = pdevice->info.gen >= 9,
+ .geometry_streams = true,
.image_write_without_format = true,
+ .int8 = pdevice->info.gen >= 8,
+ .int16 = pdevice->info.gen >= 8,
+ .int64 = pdevice->info.gen >= 8,
+ .int64_atomics = pdevice->info.gen >= 9 && pdevice->use_softpin,
+ .min_lod = true,
.multiview = true,
- .variable_pointers = true,
- .storage_16bit = device->instance->physicalDevice.info.gen >= 8,
- .int16 = device->instance->physicalDevice.info.gen >= 8,
+ .physical_storage_buffer_address = pdevice->has_a64_buffer_access,
+ .post_depth_coverage = pdevice->info.gen >= 9,
+ .runtime_descriptor_array = true,
.shader_viewport_index_layer = true,
+ .stencil_export = pdevice->info.gen >= 9,
+ .storage_8bit = pdevice->info.gen >= 8,
+ .storage_16bit = pdevice->info.gen >= 8,
.subgroup_arithmetic = true,
.subgroup_basic = true,
.subgroup_ballot = true,
.subgroup_quad = true,
.subgroup_shuffle = true,
.subgroup_vote = true,
- .stencil_export = device->instance->physicalDevice.info.gen >= 9,
- .storage_8bit = device->instance->physicalDevice.info.gen >= 8,
+ .tessellation = true,
+ .transform_feedback = pdevice->info.gen >= 8,
+ .variable_pointers = true,
+ },
+ .ubo_addr_format = nir_address_format_32bit_index_offset,
+ .ssbo_addr_format =
+ anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access),
+ .phys_ssbo_addr_format = nir_address_format_64bit_global,
+ .push_const_addr_format = nir_address_format_logical,
+
+ /* TODO: Consider changing this to an address format in which the NULL
+ * pointer equals 0. That might be a better format to play nicely
+ * with certain code / code generators.
+ */
+ .shared_addr_format = nir_address_format_32bit_offset,
+ .debug = {
+ .func = anv_spirv_nir_debug,
+ .private_data = &spirv_debug_data,
},
};
- nir_function *entry_point =
+
+ nir_shader *nir =
spirv_to_nir(spirv, module->size / 4,
spec_entries, num_spec_entries,
stage, entrypoint_name, &spirv_options, nir_options);
- nir_shader *nir = entry_point->shader;
assert(nir->info.stage == stage);
- nir_validate_shader(nir);
+ nir_validate_shader(nir, "after spirv_to_nir");
ralloc_steal(mem_ctx, nir);
free(spec_entries);
* inline functions. That way they get properly initialized at the top
* of the function and not at the top of its caller.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
+ NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
- NIR_PASS_V(nir, nir_copy_prop);
+ NIR_PASS_V(nir, nir_opt_deref);
/* Pick off the single entrypoint that we want */
foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
- if (func != entry_point)
+ if (!func->is_entrypoint)
exec_node_remove(&func->node);
}
assert(exec_list_length(&nir->functions) == 1);
- entry_point->name = ralloc_strdup(entry_point, "main");
/* Now that we've deleted all but the main function, we can go ahead and
* lower the rest of the constant initializers. We do this here so that
NIR_PASS_V(nir, nir_remove_dead_variables,
nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
- if (stage == MESA_SHADER_FRAGMENT)
- NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
-
NIR_PASS_V(nir, nir_propagate_invariant);
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
- entry_point->impl, true, false);
+ nir_shader_get_entrypoint(nir), true, false);
+
+ NIR_PASS_V(nir, nir_lower_frexp);
/* Vulkan uses the separate-shader linking model */
nir->info.separate_shader = true;
- nir = brw_preprocess_nir(compiler, nir);
-
- if (stage == MESA_SHADER_FRAGMENT)
- NIR_PASS_V(nir, anv_nir_lower_input_attachments);
+ brw_preprocess_nir(compiler, nir, NULL);
return nir;
}
}
}
+static void
+populate_base_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
+ struct brw_base_prog_key *key)
+{
+ if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
+ key->subgroup_size_type = BRW_SUBGROUP_SIZE_VARYING;
+ else
+ key->subgroup_size_type = BRW_SUBGROUP_SIZE_API_CONSTANT;
+
+ populate_sampler_prog_key(devinfo, &key->tex);
+}
+
static void
populate_vs_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
struct brw_vs_prog_key *key)
{
memset(key, 0, sizeof(*key));
- populate_sampler_prog_key(devinfo, &key->tex);
+ populate_base_prog_key(devinfo, flags, &key->base);
/* XXX: Handle vertex input work-arounds */
/* XXX: Handle sampler_prog_key */
}
+static void
+populate_tcs_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
+ unsigned input_vertices,
+ struct brw_tcs_prog_key *key)
+{
+ memset(key, 0, sizeof(*key));
+
+ populate_base_prog_key(devinfo, flags, &key->base);
+
+ key->input_vertices = input_vertices;
+}
+
+static void
+populate_tes_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
+ struct brw_tes_prog_key *key)
+{
+ memset(key, 0, sizeof(*key));
+
+ populate_base_prog_key(devinfo, flags, &key->base);
+}
+
static void
populate_gs_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
struct brw_gs_prog_key *key)
{
memset(key, 0, sizeof(*key));
- populate_sampler_prog_key(devinfo, &key->tex);
+ populate_base_prog_key(devinfo, flags, &key->base);
}
static void
-populate_wm_prog_key(const struct anv_pipeline *pipeline,
- const VkGraphicsPipelineCreateInfo *info,
+populate_wm_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
+ const struct anv_subpass *subpass,
+ const VkPipelineMultisampleStateCreateInfo *ms_info,
struct brw_wm_prog_key *key)
{
- const struct gen_device_info *devinfo = &pipeline->device->info;
-
memset(key, 0, sizeof(*key));
- populate_sampler_prog_key(devinfo, &key->tex);
+ populate_base_prog_key(devinfo, flags, &key->base);
- /* TODO: we could set this to 0 based on the information in nir_shader, but
- * this function is called before spirv_to_nir. */
- const struct brw_vue_map *vue_map =
- &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;
- key->input_slots_valid = vue_map->slots_valid;
+ /* We set this to 0 here and set it to the actual value before we call
+ * brw_compile_fs.
+ */
+ key->input_slots_valid = 0;
/* Vulkan doesn't specify a default */
key->high_quality_derivatives = false;
/* XXX Vulkan doesn't appear to specify */
key->clamp_fragment_color = false;
- key->nr_color_regions = pipeline->subpass->color_count;
+ assert(subpass->color_count <= MAX_RTS);
+ for (uint32_t i = 0; i < subpass->color_count; i++) {
+ if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
+ key->color_outputs_valid |= (1 << i);
+ }
+
+ key->nr_color_regions = util_bitcount(key->color_outputs_valid);
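+ /* For example, a subpass whose color attachments are { A, VK_ATTACHMENT_UNUSED, B }
+ * yields color_outputs_valid = 0b101 and nr_color_regions = 2.
+ */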
+
+ /* To reduce possible shader recompilations we would need to know if
+ * there is a SampleMask output variable in order to decide whether to
+ * emit code that works around the issue that hardware disables alpha to
+ * coverage when there is a SampleMask output.
+ */
+ key->alpha_to_coverage = ms_info && ms_info->alphaToCoverageEnable;
- key->replicate_alpha = key->nr_color_regions > 1 &&
- info->pMultisampleState &&
- info->pMultisampleState->alphaToCoverageEnable;
+ /* Vulkan doesn't support fixed-function alpha test */
+ key->alpha_test_replicate_alpha = false;
- if (info->pMultisampleState) {
+ if (ms_info) {
/* We should probably pull this out of the shader, but it's fairly
* harmless to compute it and then let dead-code take care of it.
*/
- if (info->pMultisampleState->rasterizationSamples > 1) {
- key->persample_interp =
- (info->pMultisampleState->minSampleShading *
- info->pMultisampleState->rasterizationSamples) > 1;
+ if (ms_info->rasterizationSamples > 1) {
+ key->persample_interp = ms_info->sampleShadingEnable &&
+ (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
key->multisample_fbo = true;
}
- key->frag_coord_adds_sample_pos =
- info->pMultisampleState->sampleShadingEnable;
+ key->frag_coord_adds_sample_pos = key->persample_interp;
}
}
static void
populate_cs_prog_key(const struct gen_device_info *devinfo,
+ VkPipelineShaderStageCreateFlags flags,
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info,
struct brw_cs_prog_key *key)
{
memset(key, 0, sizeof(*key));
- populate_sampler_prog_key(devinfo, &key->tex);
+ populate_base_prog_key(devinfo, flags, &key->base);
+
+ if (rss_info) {
+ assert(key->base.subgroup_size_type != BRW_SUBGROUP_SIZE_VARYING);
+
+ /* These enum values are expressly chosen to be equal to the subgroup
+ * size that they require.
+ */
+ assert(rss_info->requiredSubgroupSize == 8 ||
+ rss_info->requiredSubgroupSize == 16 ||
+ rss_info->requiredSubgroupSize == 32);
+ key->base.subgroup_size_type = rss_info->requiredSubgroupSize;
+ } else if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) {
+ /* If the client expressly requests full subgroups and they don't
+ * specify a subgroup size, we need to pick one. If they've requested
+ * varying subgroup sizes, we set it to UNIFORM and let the back-end
+ * compiler pick. Otherwise, we specify the API value of 32.
+ * Performance will likely be terrible in this case but there's nothing
+ * we can do about that. The client should have chosen a size.
+ */
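+ /* For example, REQUIRE_FULL_SUBGROUPS alone results in the API value
+ * of 32 (BRW_SUBGROUP_SIZE_REQUIRE_32), while combining it with
+ * ALLOW_VARYING_SUBGROUP_SIZE defers the choice to the back-end
+ * compiler (BRW_SUBGROUP_SIZE_UNIFORM).
+ */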
+ if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
+ key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM;
+ else
+ key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_REQUIRE_32;
+ }
}
+struct anv_pipeline_stage {
+ gl_shader_stage stage;
+
+ const struct anv_shader_module *module;
+ const char *entrypoint;
+ const VkSpecializationInfo *spec_info;
+
+ unsigned char shader_sha1[20];
+
+ union brw_any_prog_key key;
+
+ struct {
+ gl_shader_stage stage;
+ unsigned char sha1[20];
+ } cache_key;
+
+ nir_shader *nir;
+
+ struct anv_pipeline_binding surface_to_descriptor[256];
+ struct anv_pipeline_binding sampler_to_descriptor[256];
+ struct anv_pipeline_bind_map bind_map;
+
+ union brw_any_prog_data prog_data;
+
+ VkPipelineCreationFeedbackEXT feedback;
+};
+
static void
-anv_pipeline_hash_shader(struct anv_pipeline *pipeline,
- struct anv_pipeline_layout *layout,
- struct anv_shader_module *module,
+anv_pipeline_hash_shader(const struct anv_shader_module *module,
const char *entrypoint,
gl_shader_stage stage,
const VkSpecializationInfo *spec_info,
- const void *key, size_t key_size,
unsigned char *sha1_out)
{
struct mesa_sha1 ctx;
-
_mesa_sha1_init(&ctx);
- if (stage != MESA_SHADER_COMPUTE) {
- _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
- sizeof(pipeline->subpass->view_mask));
- }
- if (layout)
- _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
+
_mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
_mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
_mesa_sha1_update(&ctx, &stage, sizeof(stage));
if (spec_info) {
_mesa_sha1_update(&ctx, spec_info->pMapEntries,
- spec_info->mapEntryCount * sizeof(*spec_info->pMapEntries));
- _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
+ spec_info->mapEntryCount *
+ sizeof(*spec_info->pMapEntries));
+ _mesa_sha1_update(&ctx, spec_info->pData,
+ spec_info->dataSize);
+ }
+
+ _mesa_sha1_final(&ctx, sha1_out);
+}
+
+static void
+anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
+ struct anv_pipeline_layout *layout,
+ struct anv_pipeline_stage *stages,
+ unsigned char *sha1_out)
+{
+ struct mesa_sha1 ctx;
+ _mesa_sha1_init(&ctx);
+
+ _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
+ sizeof(pipeline->subpass->view_mask));
+
+ if (layout)
+ _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
+
+ const bool rba = pipeline->device->robust_buffer_access;
+ _mesa_sha1_update(&ctx, &rba, sizeof(rba));
+
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (stages[s].entrypoint) {
+ _mesa_sha1_update(&ctx, stages[s].shader_sha1,
+ sizeof(stages[s].shader_sha1));
+ _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
+ }
}
- _mesa_sha1_update(&ctx, key, key_size);
+
+ _mesa_sha1_final(&ctx, sha1_out);
+}
+
+static void
+anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
+ struct anv_pipeline_layout *layout,
+ struct anv_pipeline_stage *stage,
+ unsigned char *sha1_out)
+{
+ struct mesa_sha1 ctx;
+ _mesa_sha1_init(&ctx);
+
+ if (layout)
+ _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
+
+ const bool rba = pipeline->device->robust_buffer_access;
+ _mesa_sha1_update(&ctx, &rba, sizeof(rba));
+
+ _mesa_sha1_update(&ctx, stage->shader_sha1,
+ sizeof(stage->shader_sha1));
+ _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
+
_mesa_sha1_final(&ctx, sha1_out);
}
static nir_shader *
-anv_pipeline_compile(struct anv_pipeline *pipeline,
- void *mem_ctx,
- struct anv_pipeline_layout *layout,
- struct anv_shader_module *module,
- const char *entrypoint,
- gl_shader_stage stage,
- const VkSpecializationInfo *spec_info,
- struct brw_stage_prog_data *prog_data,
- struct anv_pipeline_bind_map *map)
+anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
+ struct anv_pipeline_cache *cache,
+ void *mem_ctx,
+ struct anv_pipeline_stage *stage)
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
+ const nir_shader_compiler_options *nir_options =
+ compiler->glsl_compiler_options[stage->stage].NirOptions;
+ nir_shader *nir;
+
+ nir = anv_device_search_for_nir(pipeline->device, cache,
+ nir_options,
+ stage->shader_sha1,
+ mem_ctx);
+ if (nir) {
+ assert(nir->info.stage == stage->stage);
+ return nir;
+ }
- nir_shader *nir = anv_shader_compile_to_nir(pipeline, mem_ctx,
- module, entrypoint, stage,
- spec_info);
- if (nir == NULL)
- return NULL;
+ nir = anv_shader_compile_to_nir(pipeline->device,
+ mem_ctx,
+ stage->module,
+ stage->entrypoint,
+ stage->stage,
+ stage->spec_info);
+ if (nir) {
+ anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
+ return nir;
+ }
+
+ return NULL;
+}
+
+static void
+anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
+ void *mem_ctx,
+ struct anv_pipeline_stage *stage,
+ struct anv_pipeline_layout *layout)
+{
+ const struct anv_physical_device *pdevice =
+ &pipeline->device->instance->physicalDevice;
+ const struct brw_compiler *compiler = pdevice->compiler;
+
+ struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
+ nir_shader *nir = stage->nir;
+
+ if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+ NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
+ NIR_PASS_V(nir, nir_lower_input_attachments, false);
+ }
NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
NIR_PASS_V(nir, anv_nir_lower_push_constants);
- if (stage != MESA_SHADER_COMPUTE)
+ if (nir->info.stage != MESA_SHADER_COMPUTE)
NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
- if (stage == MESA_SHADER_COMPUTE)
+ if (nir->info.stage == MESA_SHADER_COMPUTE)
prog_data->total_shared = nir->num_shared;
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
pipeline->needs_data_cache = true;
+ NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);
+
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
+ nir_address_format_64bit_global);
+
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
- if (layout)
- anv_nir_apply_pipeline_layout(pipeline, layout, nir, prog_data, map);
+ if (layout) {
+ anv_nir_apply_pipeline_layout(pdevice,
+ pipeline->device->robust_buffer_access,
+ layout, nir, prog_data,
+ &stage->bind_map);
+
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
+ nir_address_format_32bit_index_offset);
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
+ anv_nir_ssbo_addr_format(pdevice,
+ pipeline->device->robust_buffer_access));
+
+ NIR_PASS_V(nir, nir_opt_constant_folding);
+
+ /* We don't support non-uniform UBOs, and non-uniform SSBO access is
+ * handled naturally by falling back to A64 messages.
+ */
+ NIR_PASS_V(nir, nir_lower_non_uniform_access,
+ nir_lower_non_uniform_texture_access |
+ nir_lower_non_uniform_image_access);
+ }
- if (stage != MESA_SHADER_COMPUTE)
- brw_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
+ if (nir->info.stage != MESA_SHADER_COMPUTE)
+ brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
assert(nir->num_uniforms == prog_data->nr_params * 4);
- return nir;
+ stage->nir = nir;
}
static void
-anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
+anv_pipeline_link_vs(const struct brw_compiler *compiler,
+ struct anv_pipeline_stage *vs_stage,
+ struct anv_pipeline_stage *next_stage)
{
- prog_data->binding_table.size_bytes = 0;
- prog_data->binding_table.texture_start = bias;
- prog_data->binding_table.gather_texture_start = bias;
- prog_data->binding_table.ubo_start = bias;
- prog_data->binding_table.ssbo_start = bias;
- prog_data->binding_table.image_start = bias;
+ if (next_stage)
+ brw_nir_link_shaders(compiler, vs_stage->nir, next_stage->nir);
}
-static void
-anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
- gl_shader_stage stage,
- struct anv_shader_bin *shader)
-{
- pipeline->shaders[stage] = shader;
-}
-
-static VkResult
-anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
- struct anv_pipeline_cache *cache,
- const VkGraphicsPipelineCreateInfo *info,
- struct anv_shader_module *module,
- const char *entrypoint,
- const VkSpecializationInfo *spec_info)
+static const unsigned *
+anv_pipeline_compile_vs(const struct brw_compiler *compiler,
+ void *mem_ctx,
+ struct anv_device *device,
+ struct anv_pipeline_stage *vs_stage)
{
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
- struct brw_vs_prog_key key;
- struct anv_shader_bin *bin = NULL;
-
- populate_vs_prog_key(&pipeline->device->info, &key);
-
- ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
-
- unsigned char sha1[20];
- anv_pipeline_hash_shader(pipeline, layout, module, entrypoint,
- MESA_SHADER_VERTEX, spec_info,
- &key, sizeof(key), sha1);
- bin = anv_device_search_for_kernel(pipeline->device, cache, sha1, 20);
-
- if (bin == NULL) {
- struct brw_vs_prog_data prog_data = {};
- struct anv_pipeline_binding surface_to_descriptor[256];
- struct anv_pipeline_binding sampler_to_descriptor[256];
-
- struct anv_pipeline_bind_map map = {
- .surface_to_descriptor = surface_to_descriptor,
- .sampler_to_descriptor = sampler_to_descriptor
- };
-
- void *mem_ctx = ralloc_context(NULL);
-
- nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx, layout,
- module, entrypoint,
- MESA_SHADER_VERTEX, spec_info,
- &prog_data.base.base, &map);
- if (nir == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- anv_fill_binding_table(&prog_data.base.base, 0);
-
- brw_compute_vue_map(&pipeline->device->info,
- &prog_data.base.vue_map,
- nir->info.outputs_written,
- nir->info.separate_shader);
-
- const unsigned *shader_code =
- brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
- -1, NULL);
- if (shader_code == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
+ brw_compute_vue_map(compiler->devinfo,
+ &vs_stage->prog_data.vs.base.vue_map,
+ vs_stage->nir->info.outputs_written,
+ vs_stage->nir->info.separate_shader);
- unsigned code_size = prog_data.base.base.program_size;
- bin = anv_device_upload_kernel(pipeline->device, cache, sha1, 20,
- shader_code, code_size,
- nir->constant_data,
- nir->constant_data_size,
- &prog_data.base.base, sizeof(prog_data),
- &map);
- if (!bin) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- ralloc_free(mem_ctx);
- }
-
- anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin);
-
- return VK_SUCCESS;
+ return brw_compile_vs(compiler, device, mem_ctx, &vs_stage->key.vs,
+ &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
}
static void
tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}
-static VkResult
-anv_pipeline_compile_tcs_tes(struct anv_pipeline *pipeline,
- struct anv_pipeline_cache *cache,
- const VkGraphicsPipelineCreateInfo *info,
- struct anv_shader_module *tcs_module,
- const char *tcs_entrypoint,
- const VkSpecializationInfo *tcs_spec_info,
- struct anv_shader_module *tes_module,
- const char *tes_entrypoint,
- const VkSpecializationInfo *tes_spec_info)
+static void
+anv_pipeline_link_tcs(const struct brw_compiler *compiler,
+ struct anv_pipeline_stage *tcs_stage,
+ struct anv_pipeline_stage *tes_stage)
{
- const struct gen_device_info *devinfo = &pipeline->device->info;
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
- struct brw_tcs_prog_key tcs_key = {};
- struct brw_tes_prog_key tes_key = {};
- struct anv_shader_bin *tcs_bin = NULL;
- struct anv_shader_bin *tes_bin = NULL;
+ assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
- populate_sampler_prog_key(&pipeline->device->info, &tcs_key.tex);
- populate_sampler_prog_key(&pipeline->device->info, &tes_key.tex);
- tcs_key.input_vertices = info->pTessellationState->patchControlPoints;
+ brw_nir_link_shaders(compiler, tcs_stage->nir, tes_stage->nir);
- ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
+ nir_lower_patch_vertices(tes_stage->nir,
+ tcs_stage->nir->info.tess.tcs_vertices_out,
+ NULL);
- unsigned char tcs_sha1[40];
- unsigned char tes_sha1[40];
- anv_pipeline_hash_shader(pipeline, layout, tcs_module, tcs_entrypoint,
- MESA_SHADER_TESS_CTRL, tcs_spec_info,
- &tcs_key, sizeof(tcs_key), tcs_sha1);
- anv_pipeline_hash_shader(pipeline, layout, tes_module, tes_entrypoint,
- MESA_SHADER_TESS_EVAL, tes_spec_info,
- &tes_key, sizeof(tes_key), tes_sha1);
- memcpy(&tcs_sha1[20], tes_sha1, 20);
- memcpy(&tes_sha1[20], tcs_sha1, 20);
-
- tcs_bin = anv_device_search_for_kernel(pipeline->device, cache,
- tcs_sha1, sizeof(tcs_sha1));
- tes_bin = anv_device_search_for_kernel(pipeline->device, cache,
- tes_sha1, sizeof(tes_sha1));
-
- if (tcs_bin == NULL || tes_bin == NULL) {
- struct brw_tcs_prog_data tcs_prog_data = {};
- struct brw_tes_prog_data tes_prog_data = {};
- struct anv_pipeline_binding tcs_surface_to_descriptor[256];
- struct anv_pipeline_binding tcs_sampler_to_descriptor[256];
- struct anv_pipeline_binding tes_surface_to_descriptor[256];
- struct anv_pipeline_binding tes_sampler_to_descriptor[256];
-
- struct anv_pipeline_bind_map tcs_map = {
- .surface_to_descriptor = tcs_surface_to_descriptor,
- .sampler_to_descriptor = tcs_sampler_to_descriptor
- };
- struct anv_pipeline_bind_map tes_map = {
- .surface_to_descriptor = tes_surface_to_descriptor,
- .sampler_to_descriptor = tes_sampler_to_descriptor
- };
+ /* Copy TCS info into the TES info */
+ merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
- void *mem_ctx = ralloc_context(NULL);
-
- nir_shader *tcs_nir =
- anv_pipeline_compile(pipeline, mem_ctx, layout,
- tcs_module, tcs_entrypoint,
- MESA_SHADER_TESS_CTRL, tcs_spec_info,
- &tcs_prog_data.base.base, &tcs_map);
- nir_shader *tes_nir =
- anv_pipeline_compile(pipeline, mem_ctx, layout,
- tes_module, tes_entrypoint,
- MESA_SHADER_TESS_EVAL, tes_spec_info,
- &tes_prog_data.base.base, &tes_map);
- if (tcs_nir == NULL || tes_nir == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- nir_lower_tes_patch_vertices(tes_nir,
- tcs_nir->info.tess.tcs_vertices_out);
+ /* Whacking the key after cache lookup is a bit sketchy, but all of
+ * this comes from the SPIR-V, which is part of the hash used for the
+ * pipeline cache. So it should be safe.
+ */
+ tcs_stage->key.tcs.tes_primitive_mode =
+ tes_stage->nir->info.tess.primitive_mode;
+ tcs_stage->key.tcs.quads_workaround =
+ compiler->devinfo->gen < 9 &&
+ tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
+ tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
+}
- /* Copy TCS info into the TES info */
- merge_tess_info(&tes_nir->info, &tcs_nir->info);
+static const unsigned *
+anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
+ void *mem_ctx,
+ struct anv_device *device,
+ struct anv_pipeline_stage *tcs_stage,
+ struct anv_pipeline_stage *prev_stage)
+{
+ tcs_stage->key.tcs.outputs_written =
+ tcs_stage->nir->info.outputs_written;
+ tcs_stage->key.tcs.patch_outputs_written =
+ tcs_stage->nir->info.patch_outputs_written;
+
+ return brw_compile_tcs(compiler, device, mem_ctx, &tcs_stage->key.tcs,
+ &tcs_stage->prog_data.tcs, tcs_stage->nir,
+ -1, NULL);
+}
- anv_fill_binding_table(&tcs_prog_data.base.base, 0);
- anv_fill_binding_table(&tes_prog_data.base.base, 0);
+static void
+anv_pipeline_link_tes(const struct brw_compiler *compiler,
+ struct anv_pipeline_stage *tes_stage,
+ struct anv_pipeline_stage *next_stage)
+{
+ if (next_stage)
+ brw_nir_link_shaders(compiler, tes_stage->nir, next_stage->nir);
+}
- /* Whacking the key after cache lookup is a bit sketchy, but all of
- * this comes from the SPIR-V, which is part of the hash used for the
- * pipeline cache. So it should be safe.
- */
- tcs_key.tes_primitive_mode = tes_nir->info.tess.primitive_mode;
- tcs_key.outputs_written = tcs_nir->info.outputs_written;
- tcs_key.patch_outputs_written = tcs_nir->info.patch_outputs_written;
- tcs_key.quads_workaround =
- devinfo->gen < 9 &&
- tes_nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
- tes_nir->info.tess.spacing == TESS_SPACING_EQUAL;
-
- tes_key.inputs_read = tcs_key.outputs_written;
- tes_key.patch_inputs_read = tcs_key.patch_outputs_written;
-
- const int shader_time_index = -1;
- const unsigned *shader_code;
-
- shader_code =
- brw_compile_tcs(compiler, NULL, mem_ctx, &tcs_key, &tcs_prog_data,
- tcs_nir, shader_time_index, NULL);
- if (shader_code == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
+static const unsigned *
+anv_pipeline_compile_tes(const struct brw_compiler *compiler,
+ void *mem_ctx,
+ struct anv_device *device,
+ struct anv_pipeline_stage *tes_stage,
+ struct anv_pipeline_stage *tcs_stage)
+{
+ tes_stage->key.tes.inputs_read =
+ tcs_stage->nir->info.outputs_written;
+ tes_stage->key.tes.patch_inputs_read =
+ tcs_stage->nir->info.patch_outputs_written;
+
+ return brw_compile_tes(compiler, device, mem_ctx, &tes_stage->key.tes,
+ &tcs_stage->prog_data.tcs.base.vue_map,
+ &tes_stage->prog_data.tes, tes_stage->nir,
+ NULL, -1, NULL);
+}
- unsigned code_size = tcs_prog_data.base.base.program_size;
- tcs_bin = anv_device_upload_kernel(pipeline->device, cache,
- tcs_sha1, sizeof(tcs_sha1),
- shader_code, code_size,
- tcs_nir->constant_data,
- tcs_nir->constant_data_size,
- &tcs_prog_data.base.base,
- sizeof(tcs_prog_data),
- &tcs_map);
- if (!tcs_bin) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
+static void
+anv_pipeline_link_gs(const struct brw_compiler *compiler,
+ struct anv_pipeline_stage *gs_stage,
+ struct anv_pipeline_stage *next_stage)
+{
+ if (next_stage)
+ brw_nir_link_shaders(compiler, gs_stage->nir, next_stage->nir);
+}
- shader_code =
- brw_compile_tes(compiler, NULL, mem_ctx, &tes_key,
- &tcs_prog_data.base.vue_map, &tes_prog_data, tes_nir,
- NULL, shader_time_index, NULL);
- if (shader_code == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
+static const unsigned *
+anv_pipeline_compile_gs(const struct brw_compiler *compiler,
+ void *mem_ctx,
+ struct anv_device *device,
+ struct anv_pipeline_stage *gs_stage,
+ struct anv_pipeline_stage *prev_stage)
+{
+ brw_compute_vue_map(compiler->devinfo,
+ &gs_stage->prog_data.gs.base.vue_map,
+ gs_stage->nir->info.outputs_written,
+ gs_stage->nir->info.separate_shader);
+
+ return brw_compile_gs(compiler, device, mem_ctx, &gs_stage->key.gs,
+ &gs_stage->prog_data.gs, gs_stage->nir,
+ NULL, -1, NULL);
+}
- code_size = tes_prog_data.base.base.program_size;
- tes_bin = anv_device_upload_kernel(pipeline->device, cache,
- tes_sha1, sizeof(tes_sha1),
- shader_code, code_size,
- tes_nir->constant_data,
- tes_nir->constant_data_size,
- &tes_prog_data.base.base,
- sizeof(tes_prog_data),
- &tes_map);
- if (!tes_bin) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+static void
+anv_pipeline_link_fs(const struct brw_compiler *compiler,
+ struct anv_pipeline_stage *stage)
+{
+ unsigned num_rts = 0;
+ const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
+ struct anv_pipeline_binding rt_bindings[max_rt];
+ nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
+ int rt_to_bindings[max_rt];
+ memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
+ bool rt_used[max_rt];
+ memset(rt_used, 0, sizeof(rt_used));
+
+ /* Flag used render targets */
+ nir_foreach_variable_safe(var, &stage->nir->outputs) {
+ if (var->data.location < FRAG_RESULT_DATA0)
+ continue;
+
+ const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
+ /* Out-of-bounds */
+ if (rt >= MAX_RTS)
+ continue;
+
+ const unsigned array_len =
+ glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
+ assert(rt + array_len <= max_rt);
+
+ /* Unused */
+ if (!(stage->key.wm.color_outputs_valid & BITFIELD_RANGE(rt, array_len))) {
+ /* If this is the RT at location 0 and we have alpha to coverage
+ * enabled we will have to create a null RT for it, so mark it as
+ * used.
+ */
+ if (rt > 0 || !stage->key.wm.alpha_to_coverage)
+ continue;
}
- ralloc_free(mem_ctx);
+ for (unsigned i = 0; i < array_len; i++)
+ rt_used[rt + i] = true;
}
- anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_TESS_CTRL, tcs_bin);
- anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_TESS_EVAL, tes_bin);
+ /* Set new, compacted, location */
+ for (unsigned i = 0; i < max_rt; i++) {
+ if (!rt_used[i])
+ continue;
- return VK_SUCCESS;
-}
+ rt_to_bindings[i] = num_rts;
-static VkResult
-anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
- struct anv_pipeline_cache *cache,
- const VkGraphicsPipelineCreateInfo *info,
- struct anv_shader_module *module,
- const char *entrypoint,
- const VkSpecializationInfo *spec_info)
-{
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
- struct brw_gs_prog_key key;
- struct anv_shader_bin *bin = NULL;
-
- populate_gs_prog_key(&pipeline->device->info, &key);
-
- ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
-
- unsigned char sha1[20];
- anv_pipeline_hash_shader(pipeline, layout, module, entrypoint,
- MESA_SHADER_GEOMETRY, spec_info,
- &key, sizeof(key), sha1);
- bin = anv_device_search_for_kernel(pipeline->device, cache, sha1, 20);
+ if (stage->key.wm.color_outputs_valid & (1 << i)) {
+ rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .binding = 0,
+ .index = i,
+ };
+ } else {
+ /* Setup a null render target */
+ rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .binding = 0,
+ .index = UINT32_MAX,
+ };
+ }
- if (bin == NULL) {
- struct brw_gs_prog_data prog_data = {};
- struct anv_pipeline_binding surface_to_descriptor[256];
- struct anv_pipeline_binding sampler_to_descriptor[256];
+ num_rts++;
+ }
- struct anv_pipeline_bind_map map = {
- .surface_to_descriptor = surface_to_descriptor,
- .sampler_to_descriptor = sampler_to_descriptor
- };
+ bool deleted_output = false;
+ nir_foreach_variable_safe(var, &stage->nir->outputs) {
+ if (var->data.location < FRAG_RESULT_DATA0)
+ continue;
- void *mem_ctx = ralloc_context(NULL);
+ const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
- nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx, layout,
- module, entrypoint,
- MESA_SHADER_GEOMETRY, spec_info,
- &prog_data.base.base, &map);
- if (nir == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ if (rt >= MAX_RTS || !rt_used[rt]) {
+ /* Unused or out-of-bounds, throw it away, unless it is the first
+ * RT and we have alpha to coverage enabled.
+ */
+ deleted_output = true;
+ var->data.mode = nir_var_function_temp;
+ exec_node_remove(&var->node);
+ exec_list_push_tail(&impl->locals, &var->node);
+ continue;
}
- anv_fill_binding_table(&prog_data.base.base, 0);
+ /* Give it the new location */
+ assert(rt_to_bindings[rt] != -1);
+ var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
+ }
- brw_compute_vue_map(&pipeline->device->info,
- &prog_data.base.vue_map,
- nir->info.outputs_written,
- nir->info.separate_shader);
+ if (deleted_output)
+ nir_fixup_deref_modes(stage->nir);
- const unsigned *shader_code =
- brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
- NULL, -1, NULL);
- if (shader_code == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
+ if (num_rts == 0) {
+ /* If we have no render targets, we need a null render target */
+ rt_bindings[0] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .binding = 0,
+ .index = UINT32_MAX,
+ };
+ num_rts = 1;
+ }
- /* TODO: SIMD8 GS */
- const unsigned code_size = prog_data.base.base.program_size;
- bin = anv_device_upload_kernel(pipeline->device, cache, sha1, 20,
- shader_code, code_size,
- nir->constant_data,
- nir->constant_data_size,
- &prog_data.base.base, sizeof(prog_data),
- &map);
- if (!bin) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
+ /* Now that we've determined the actual number of render targets, adjust
+ * the key accordingly.
+ */
+ stage->key.wm.nr_color_regions = num_rts;
+ stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;
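+ /* The compaction above packed the live render targets into locations
+ * 0..num_rts-1, so the low num_rts bits cover them all; e.g. num_rts == 3
+ * yields color_outputs_valid == 0b111.
+ */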
+
+ assert(num_rts <= max_rt);
+ assert(stage->bind_map.surface_count == 0);
+ typed_memcpy(stage->bind_map.surface_to_descriptor,
+ rt_bindings, num_rts);
+ stage->bind_map.surface_count += num_rts;
+}
- ralloc_free(mem_ctx);
+static const unsigned *
+anv_pipeline_compile_fs(const struct brw_compiler *compiler,
+ void *mem_ctx,
+ struct anv_device *device,
+ struct anv_pipeline_stage *fs_stage,
+ struct anv_pipeline_stage *prev_stage)
+{
+ /* TODO: we could set this to 0 based on the information in nir_shader, but
+ * we need this before we call spirv_to_nir.
+ */
+ assert(prev_stage);
+ fs_stage->key.wm.input_slots_valid =
+ prev_stage->prog_data.vue.vue_map.slots_valid;
+
+ const unsigned *code =
+ brw_compile_fs(compiler, device, mem_ctx, &fs_stage->key.wm,
+ &fs_stage->prog_data.wm, fs_stage->nir,
+ NULL, -1, -1, -1, true, false, NULL, NULL);
+
+ if (fs_stage->key.wm.nr_color_regions == 0 &&
+ !fs_stage->prog_data.wm.has_side_effects &&
+ !fs_stage->prog_data.wm.uses_kill &&
+ fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
+ !fs_stage->prog_data.wm.computed_stencil) {
+ /* This fragment shader has no outputs and no side effects. Go ahead
+ * and return the code pointer so we don't accidentally think the
+ * compile failed, but zero out prog_data, which will set program_size
+ * to zero and disable the stage.
+ */
+ memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
}
- anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin);
-
- return VK_SUCCESS;
+ return code;
}
static VkResult
-anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
- struct anv_pipeline_cache *cache,
- const VkGraphicsPipelineCreateInfo *info,
- struct anv_shader_module *module,
- const char *entrypoint,
- const VkSpecializationInfo *spec_info)
+anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
+ struct anv_pipeline_cache *cache,
+ const VkGraphicsPipelineCreateInfo *info)
{
+ VkPipelineCreationFeedbackEXT pipeline_feedback = {
+ .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
+ };
+ int64_t pipeline_start = os_time_get_nano();
+
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
- struct brw_wm_prog_key key;
- struct anv_shader_bin *bin = NULL;
+ struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
+
+ pipeline->active_stages = 0;
+
+ VkResult result;
+ for (uint32_t i = 0; i < info->stageCount; i++) {
+ const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
+ gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
+
+ pipeline->active_stages |= sinfo->stage;
+
+ int64_t stage_start = os_time_get_nano();
+
+ stages[stage].stage = stage;
+ stages[stage].module = anv_shader_module_from_handle(sinfo->module);
+ stages[stage].entrypoint = sinfo->pName;
+ stages[stage].spec_info = sinfo->pSpecializationInfo;
+ anv_pipeline_hash_shader(stages[stage].module,
+ stages[stage].entrypoint,
+ stage,
+ stages[stage].spec_info,
+ stages[stage].shader_sha1);
+
+ const struct gen_device_info *devinfo = &pipeline->device->info;
+ switch (stage) {
+ case MESA_SHADER_VERTEX:
+ populate_vs_prog_key(devinfo, sinfo->flags, &stages[stage].key.vs);
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ populate_tcs_prog_key(devinfo, sinfo->flags,
+ info->pTessellationState->patchControlPoints,
+ &stages[stage].key.tcs);
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ populate_tes_prog_key(devinfo, sinfo->flags, &stages[stage].key.tes);
+ break;
+ case MESA_SHADER_GEOMETRY:
+ populate_gs_prog_key(devinfo, sinfo->flags, &stages[stage].key.gs);
+ break;
+ case MESA_SHADER_FRAGMENT:
+ populate_wm_prog_key(devinfo, sinfo->flags,
+ pipeline->subpass,
+ info->pMultisampleState,
+ &stages[stage].key.wm);
+ break;
+ default:
+ unreachable("Invalid graphics shader stage");
+ }
+
+ stages[stage].feedback.duration += os_time_get_nano() - stage_start;
+ stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
+ }
- populate_wm_prog_key(pipeline, info, &key);
+ if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
+ pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
+
+ assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
unsigned char sha1[20];
- anv_pipeline_hash_shader(pipeline, layout, module, entrypoint,
- MESA_SHADER_FRAGMENT, spec_info,
- &key, sizeof(key), sha1);
- bin = anv_device_search_for_kernel(pipeline->device, cache, sha1, 20);
-
- if (bin == NULL) {
- struct brw_wm_prog_data prog_data = {};
- struct anv_pipeline_binding surface_to_descriptor[256];
- struct anv_pipeline_binding sampler_to_descriptor[256];
+ anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
- struct anv_pipeline_bind_map map = {
- .surface_to_descriptor = surface_to_descriptor + 8,
- .sampler_to_descriptor = sampler_to_descriptor
- };
+ unsigned found = 0;
+ unsigned cache_hits = 0;
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (!stages[s].entrypoint)
+ continue;
+
+ int64_t stage_start = os_time_get_nano();
+
+ stages[s].cache_key.stage = s;
+ memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
+
+ bool cache_hit;
+ struct anv_shader_bin *bin =
+ anv_device_search_for_kernel(pipeline->device, cache,
+ &stages[s].cache_key,
+ sizeof(stages[s].cache_key), &cache_hit);
+ if (bin) {
+ found++;
+ pipeline->shaders[s] = bin;
+ }
- void *mem_ctx = ralloc_context(NULL);
+ if (cache_hit) {
+ cache_hits++;
+ stages[s].feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ }
+ stages[s].feedback.duration += os_time_get_nano() - stage_start;
+ }
- nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx, layout,
- module, entrypoint,
- MESA_SHADER_FRAGMENT, spec_info,
- &prog_data.base, &map);
- if (nir == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ if (found == __builtin_popcount(pipeline->active_stages)) {
+ if (cache_hits == found) {
+ pipeline_feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
}
+ /* We found all our shaders in the cache. We're done. */
+ goto done;
+ } else if (found > 0) {
+ /* We found some but not all of our shaders. This shouldn't happen
+ * most of the time but it can if we have a partially populated
+ * pipeline cache.
+ */
+ assert(found < __builtin_popcount(pipeline->active_stages));
+
+ vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
+ VK_DEBUG_REPORT_WARNING_BIT_EXT |
+ VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
+ (uint64_t)(uintptr_t)cache,
+ 0, 0, "anv",
+ "Found a partial pipeline in the cache. This is "
+ "most likely caused by an incomplete pipeline cache "
+ "import or export");
+
+ /* We're going to have to recompile anyway, so just throw away our
+ * references to the shaders in the cache. We'll get them out of the
+ * cache again as part of the compilation process.
+ */
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ stages[s].feedback.flags = 0;
+ if (pipeline->shaders[s]) {
+ anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
+ pipeline->shaders[s] = NULL;
+ }
+ }
+ }
- unsigned num_rts = 0;
- const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
- struct anv_pipeline_binding rt_bindings[max_rt];
- nir_function_impl *impl = nir_shader_get_entrypoint(nir);
- int rt_to_bindings[max_rt];
- memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
- bool rt_used[max_rt];
- memset(rt_used, 0, sizeof(rt_used));
-
- /* Flag used render targets */
- nir_foreach_variable_safe(var, &nir->outputs) {
- if (var->data.location < FRAG_RESULT_DATA0)
- continue;
+ void *pipeline_ctx = ralloc_context(NULL);
- const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
- /* Out-of-bounds */
- if (rt >= key.nr_color_regions)
- continue;
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (!stages[s].entrypoint)
+ continue;
- const unsigned array_len =
- glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
- assert(rt + array_len <= max_rt);
+ int64_t stage_start = os_time_get_nano();
- for (unsigned i = 0; i < array_len; i++)
- rt_used[rt + i] = true;
- }
+ assert(stages[s].stage == s);
+ assert(pipeline->shaders[s] == NULL);
- /* Set new, compacted, location */
- for (unsigned i = 0; i < max_rt; i++) {
- if (!rt_used[i])
- continue;
+ stages[s].bind_map = (struct anv_pipeline_bind_map) {
+ .surface_to_descriptor = stages[s].surface_to_descriptor,
+ .sampler_to_descriptor = stages[s].sampler_to_descriptor
+ };
- rt_to_bindings[i] = num_rts;
- rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
- .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
- .binding = 0,
- .index = i,
- };
- num_rts++;
+ stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
+ pipeline_ctx,
+ &stages[s]);
+ if (stages[s].nir == NULL) {
+ result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ goto fail;
}
- nir_foreach_variable_safe(var, &nir->outputs) {
- if (var->data.location < FRAG_RESULT_DATA0)
- continue;
+ stages[s].feedback.duration += os_time_get_nano() - stage_start;
+ }
- const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
- if (rt >= key.nr_color_regions) {
- /* Out-of-bounds, throw it away */
- var->data.mode = nir_var_local;
- exec_node_remove(&var->node);
- exec_list_push_tail(&impl->locals, &var->node);
- continue;
- }
+ /* Walk backwards to link */
+ struct anv_pipeline_stage *next_stage = NULL;
+ for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
+ if (!stages[s].entrypoint)
+ continue;
- /* Give it the new location */
- assert(rt_to_bindings[rt] != -1);
- var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
+ switch (s) {
+ case MESA_SHADER_VERTEX:
+ anv_pipeline_link_vs(compiler, &stages[s], next_stage);
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ anv_pipeline_link_tes(compiler, &stages[s], next_stage);
+ break;
+ case MESA_SHADER_GEOMETRY:
+ anv_pipeline_link_gs(compiler, &stages[s], next_stage);
+ break;
+ case MESA_SHADER_FRAGMENT:
+ anv_pipeline_link_fs(compiler, &stages[s]);
+ break;
+ default:
+ unreachable("Invalid graphics shader stage");
}
- if (num_rts == 0) {
- /* If we have no render targets, we need a null render target */
- rt_bindings[0] = (struct anv_pipeline_binding) {
- .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
- .binding = 0,
- .index = UINT32_MAX,
- };
- num_rts = 1;
- }
+ next_stage = &stages[s];
+ }
- assert(num_rts <= max_rt);
- map.surface_to_descriptor -= num_rts;
- map.surface_count += num_rts;
- assert(map.surface_count <= 256);
- memcpy(map.surface_to_descriptor, rt_bindings,
- num_rts * sizeof(*rt_bindings));
+ struct anv_pipeline_stage *prev_stage = NULL;
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (!stages[s].entrypoint)
+ continue;
- anv_fill_binding_table(&prog_data.base, num_rts);
+ int64_t stage_start = os_time_get_nano();
- const unsigned *shader_code =
- brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
- NULL, -1, -1, -1, true, false, NULL, NULL);
- if (shader_code == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ void *stage_ctx = ralloc_context(NULL);
+
+ nir_xfb_info *xfb_info = NULL;
+ if (s == MESA_SHADER_VERTEX ||
+ s == MESA_SHADER_TESS_EVAL ||
+ s == MESA_SHADER_GEOMETRY)
+ xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
+
+ anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
+
+ const unsigned *code;
+ switch (s) {
+ case MESA_SHADER_VERTEX:
+ code = anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
+ &stages[s]);
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ code = anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ code = anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
+ break;
+ case MESA_SHADER_GEOMETRY:
+ code = anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
+ break;
+ case MESA_SHADER_FRAGMENT:
+ code = anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
+ break;
+ default:
+ unreachable("Invalid graphics shader stage");
+ }
+ if (code == NULL) {
+ ralloc_free(stage_ctx);
+ result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ goto fail;
}
- unsigned code_size = prog_data.base.program_size;
- bin = anv_device_upload_kernel(pipeline->device, cache, sha1, 20,
- shader_code, code_size,
- nir->constant_data,
- nir->constant_data_size,
- &prog_data.base, sizeof(prog_data),
- &map);
+ struct anv_shader_bin *bin =
+ anv_device_upload_kernel(pipeline->device, cache,
+ &stages[s].cache_key,
+ sizeof(stages[s].cache_key),
+ code, stages[s].prog_data.base.program_size,
+ stages[s].nir->constant_data,
+ stages[s].nir->constant_data_size,
+ &stages[s].prog_data.base,
+ brw_prog_data_size(s),
+ xfb_info, &stages[s].bind_map);
if (!bin) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ ralloc_free(stage_ctx);
+ result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ goto fail;
}
- ralloc_free(mem_ctx);
+ pipeline->shaders[s] = bin;
+ ralloc_free(stage_ctx);
+
+ stages[s].feedback.duration += os_time_get_nano() - stage_start;
+
+ prev_stage = &stages[s];
}
- anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin);
+ ralloc_free(pipeline_ctx);
+
+done:
+
+ if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
+ pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
+ /* This can happen if we decided to implicitly disable the fragment
+ * shader. See anv_pipeline_compile_fs().
+ */
+ anv_shader_bin_unref(pipeline->device,
+ pipeline->shaders[MESA_SHADER_FRAGMENT]);
+ pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
+ pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+
+ pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
+
+ const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
+ vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
+ if (create_feedback) {
+ *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
+
+ assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
+ for (uint32_t i = 0; i < info->stageCount; i++) {
+ gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
+ create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
+ }
+ }
return VK_SUCCESS;
+
+fail:
+ ralloc_free(pipeline_ctx);
+
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ if (pipeline->shaders[s])
+ anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
+ }
+
+ return result;
}
VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
struct anv_pipeline_cache *cache,
const VkComputePipelineCreateInfo *info,
- struct anv_shader_module *module,
+ const struct anv_shader_module *module,
const char *entrypoint,
const VkSpecializationInfo *spec_info)
{
+ VkPipelineCreationFeedbackEXT pipeline_feedback = {
+ .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
+ };
+ int64_t pipeline_start = os_time_get_nano();
+
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
- struct brw_cs_prog_key key;
+
+ struct anv_pipeline_stage stage = {
+ .stage = MESA_SHADER_COMPUTE,
+ .module = module,
+ .entrypoint = entrypoint,
+ .spec_info = spec_info,
+ .cache_key = {
+ .stage = MESA_SHADER_COMPUTE,
+ },
+ .feedback = {
+ .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
+ },
+ };
+ anv_pipeline_hash_shader(stage.module,
+ stage.entrypoint,
+ MESA_SHADER_COMPUTE,
+ stage.spec_info,
+ stage.shader_sha1);
+
struct anv_shader_bin *bin = NULL;
- populate_cs_prog_key(&pipeline->device->info, &key);
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info =
+ vk_find_struct_const(info->stage.pNext,
+ PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
+
+ populate_cs_prog_key(&pipeline->device->info, info->stage.flags,
+ rss_info, &stage.key.cs);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
- unsigned char sha1[20];
- anv_pipeline_hash_shader(pipeline, layout, module, entrypoint,
- MESA_SHADER_COMPUTE, spec_info,
- &key, sizeof(key), sha1);
- bin = anv_device_search_for_kernel(pipeline->device, cache, sha1, 20);
+ anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
+ bool cache_hit;
+ bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
+ sizeof(stage.cache_key), &cache_hit);
if (bin == NULL) {
- struct brw_cs_prog_data prog_data = {};
- struct anv_pipeline_binding surface_to_descriptor[256];
- struct anv_pipeline_binding sampler_to_descriptor[256];
+ int64_t stage_start = os_time_get_nano();
+
+ stage.bind_map = (struct anv_pipeline_bind_map) {
+ .surface_to_descriptor = stage.surface_to_descriptor,
+ .sampler_to_descriptor = stage.sampler_to_descriptor
+ };
- struct anv_pipeline_bind_map map = {
- .surface_to_descriptor = surface_to_descriptor,
- .sampler_to_descriptor = sampler_to_descriptor
+ /* Set up a binding for the gl_NumWorkGroups */
+ stage.bind_map.surface_count = 1;
+ stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
};
void *mem_ctx = ralloc_context(NULL);
- nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx, layout,
- module, entrypoint,
- MESA_SHADER_COMPUTE, spec_info,
- &prog_data.base, &map);
- if (nir == NULL) {
+ stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
+ if (stage.nir == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- NIR_PASS_V(nir, anv_nir_add_base_work_group_id, &prog_data);
+ anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
- anv_fill_binding_table(&prog_data.base, 1);
+ NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
+ &stage.prog_data.cs);
const unsigned *shader_code =
- brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
- -1, NULL);
+ brw_compile_cs(compiler, pipeline->device, mem_ctx, &stage.key.cs,
+ &stage.prog_data.cs, stage.nir, -1, NULL);
if (shader_code == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- const unsigned code_size = prog_data.base.program_size;
- bin = anv_device_upload_kernel(pipeline->device, cache, sha1, 20,
+ const unsigned code_size = stage.prog_data.base.program_size;
+ bin = anv_device_upload_kernel(pipeline->device, cache,
+ &stage.cache_key, sizeof(stage.cache_key),
shader_code, code_size,
- nir->constant_data,
- nir->constant_data_size,
- &prog_data.base, sizeof(prog_data),
- &map);
+ stage.nir->constant_data,
+ stage.nir->constant_data_size,
+ &stage.prog_data.base,
+ sizeof(stage.prog_data.cs),
+ NULL, &stage.bind_map);
if (!bin) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
ralloc_free(mem_ctx);
+
+ stage.feedback.duration = os_time_get_nano() - stage_start;
+ }
+
+ if (cache_hit) {
+ stage.feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ pipeline_feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
}
+ pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
- anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin);
+ const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
+ vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
+ if (create_feedback) {
+ *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
+
+ assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
+ create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
+ }
+
+ pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
+ pipeline->shaders[MESA_SHADER_COMPUTE] = bin;
return VK_SUCCESS;
}
pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
pCreateInfo->pRasterizationState->depthClampEnable;
+ /* Previously we enabled depth clipping when !depthClampEnable.
+ * DepthClipStateCreateInfo now makes depth clipping explicit, so if the
+ * clipping info is available, use its enable value to determine clipping;
+ * otherwise fall back to the previous !depthClampEnable logic.
+ */
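+ /* For example, with no VkPipelineRasterizationDepthClipStateCreateInfoEXT
+ * in the pNext chain and depthClampEnable == VK_TRUE, depth_clip_enable
+ * ends up false (the old behavior); when the struct is present, its
+ * depthClipEnable value is used regardless of depth clamp.
+ */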
+ const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
+ vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
+ PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
+ pipeline->depth_clip_enable = clip_info ?
+    clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
+
pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
pCreateInfo->pMultisampleState->sampleShadingEnable;
*/
memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
- pipeline->active_stages = 0;
-
- const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = {};
- struct anv_shader_module *modules[MESA_SHADER_STAGES] = {};
- for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
- VkShaderStageFlagBits vk_stage = pCreateInfo->pStages[i].stage;
- gl_shader_stage stage = vk_to_mesa_shader_stage(vk_stage);
- pStages[stage] = &pCreateInfo->pStages[i];
- modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
- pipeline->active_stages |= vk_stage;
- }
-
- if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
- pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
-
- assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
-
- if (modules[MESA_SHADER_VERTEX]) {
- result = anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
- modules[MESA_SHADER_VERTEX],
- pStages[MESA_SHADER_VERTEX]->pName,
- pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
- if (result != VK_SUCCESS)
- goto compile_fail;
- }
-
- if (modules[MESA_SHADER_TESS_EVAL]) {
- result = anv_pipeline_compile_tcs_tes(pipeline, cache, pCreateInfo,
- modules[MESA_SHADER_TESS_CTRL],
- pStages[MESA_SHADER_TESS_CTRL]->pName,
- pStages[MESA_SHADER_TESS_CTRL]->pSpecializationInfo,
- modules[MESA_SHADER_TESS_EVAL],
- pStages[MESA_SHADER_TESS_EVAL]->pName,
- pStages[MESA_SHADER_TESS_EVAL]->pSpecializationInfo);
- if (result != VK_SUCCESS)
- goto compile_fail;
- }
-
- if (modules[MESA_SHADER_GEOMETRY]) {
- result = anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
- modules[MESA_SHADER_GEOMETRY],
- pStages[MESA_SHADER_GEOMETRY]->pName,
- pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
- if (result != VK_SUCCESS)
- goto compile_fail;
- }
-
- if (modules[MESA_SHADER_FRAGMENT]) {
- result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo,
- modules[MESA_SHADER_FRAGMENT],
- pStages[MESA_SHADER_FRAGMENT]->pName,
- pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
- if (result != VK_SUCCESS)
- goto compile_fail;
+ result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
+ if (result != VK_SUCCESS) {
+ anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
+ return result;
}
assert(pipeline->shaders[MESA_SHADER_VERTEX]);
pipeline->vb[desc->binding].instance_divisor = 1;
}
+ const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
+ vk_find_struct_const(vi_info->pNext,
+ PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
+ if (vi_div_state) {
+ for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
+ const VkVertexInputBindingDivisorDescriptionEXT *desc =
+ &vi_div_state->pVertexBindingDivisors[i];
+
+ pipeline->vb[desc->binding].instance_divisor = desc->divisor;
+ }
+ }
/* Our implementation of VK_KHR_multiview uses instancing to draw the
* different views. If the client asks for instancing, we need to multiply
pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
return VK_SUCCESS;
-
-compile_fail:
- for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
- if (pipeline->shaders[s])
- anv_shader_bin_unref(device, pipeline->shaders[s]);
- }
-
- anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
-
- return result;
}