#include <fcntl.h>
#include "util/mesa-sha1.h"
+#include "util/os_time.h"
#include "common/gen_l3_config.h"
#include "anv_private.h"
#include "compiler/brw_nir.h"
#include "anv_nir.h"
+#include "nir/nir_xfb_info.h"
#include "spirv/nir_spirv.h"
#include "vk_util.h"
/* Eventually, this will become part of anv_CreateShader. Unfortunately,
 * we can't do that yet because we don't have the ability to copy nir.
*/
static nir_shader *
-anv_shader_compile_to_nir(struct anv_pipeline *pipeline,
+anv_shader_compile_to_nir(struct anv_device *device,
void *mem_ctx,
const struct anv_shader_module *module,
const char *entrypoint_name,
gl_shader_stage stage,
const VkSpecializationInfo *spec_info)
{
- const struct anv_device *device = pipeline->device;
-
- const struct brw_compiler *compiler =
- device->instance->physicalDevice.compiler;
+ const struct anv_physical_device *pdevice =
+ &device->instance->physicalDevice;
+ const struct brw_compiler *compiler = pdevice->compiler;
const nir_shader_compiler_options *nir_options =
compiler->glsl_compiler_options[stage].NirOptions;
struct spirv_to_nir_options spirv_options = {
.lower_workgroup_access_to_offsets = true,
.caps = {
- .float64 = device->instance->physicalDevice.info.gen >= 8,
- .int64 = device->instance->physicalDevice.info.gen >= 8,
- .tessellation = true,
+ .derivative_group = true,
.device_group = true,
.draw_parameters = true,
+ .float16 = pdevice->info.gen >= 8,
+ .float64 = pdevice->info.gen >= 8,
+ .geometry_streams = true,
.image_write_without_format = true,
+ .int8 = pdevice->info.gen >= 8,
+ .int16 = pdevice->info.gen >= 8,
+ .int64 = pdevice->info.gen >= 8,
+ .min_lod = true,
.multiview = true,
- .variable_pointers = true,
- .storage_16bit = device->instance->physicalDevice.info.gen >= 8,
- .int16 = device->instance->physicalDevice.info.gen >= 8,
+ .physical_storage_buffer_address = pdevice->has_a64_buffer_access,
+ .post_depth_coverage = pdevice->info.gen >= 9,
.shader_viewport_index_layer = true,
+ .stencil_export = pdevice->info.gen >= 9,
+ .storage_8bit = pdevice->info.gen >= 8,
+ .storage_16bit = pdevice->info.gen >= 8,
.subgroup_arithmetic = true,
.subgroup_basic = true,
.subgroup_ballot = true,
.subgroup_quad = true,
.subgroup_shuffle = true,
.subgroup_vote = true,
- .stencil_export = device->instance->physicalDevice.info.gen >= 9,
- .storage_8bit = device->instance->physicalDevice.info.gen >= 8,
- .post_depth_coverage = device->instance->physicalDevice.info.gen >= 9,
+ .tessellation = true,
+ .transform_feedback = pdevice->info.gen >= 8,
+ .variable_pointers = true,
},
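+ /* Pointer representations handed to spirv_to_nir: UBO pointers are a
+ * (block index, byte offset) uvec2, physical SSBO pointers are a raw
+ * 64-bit address, and push-constant and shared-memory pointers are
+ * plain 32-bit byte offsets.
+ */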
+ .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
+ .phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
+ .push_const_ptr_type = glsl_uint_type(),
+ .shared_ptr_type = glsl_uint_type(),
};
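+ /* The SSBO pointer format depends on robustness: with A64 buffer
+ * access, robustBufferAccess needs a uvec4 (64-bit address in .xy,
+ * size in .z, offset in .w) so accesses can be bounds-checked, while
+ * the non-robust path gets by with a bare 64-bit address. Without A64
+ * we fall back to binding-table (index, offset) addressing.
+ */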
+ if (pdevice->has_a64_buffer_access) {
+ if (device->robust_buffer_access)
+ spirv_options.ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 4);
+ else
+ spirv_options.ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1);
+ } else {
+ spirv_options.ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2);
+ }
+
nir_function *entry_point =
spirv_to_nir(spirv, module->size / 4,
spec_entries, num_spec_entries,
stage, entrypoint_name, &spirv_options, nir_options);
nir_shader *nir = entry_point->shader;
assert(nir->info.stage == stage);
- nir_validate_shader(nir);
+ nir_validate_shader(nir, "after spirv_to_nir");
ralloc_steal(mem_ctx, nir);
free(spec_entries);
/* We have to lower away local constant initializers right before we
 * inline functions. That way they get properly initialized at the top
* of the function and not at the top of its caller.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
+ NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
- NIR_PASS_V(nir, nir_copy_prop);
+ NIR_PASS_V(nir, nir_opt_deref);
/* Pick off the single entrypoint that we want */
foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
if (func != entry_point)
   exec_node_remove(&func->node);
}
assert(exec_list_length(&nir->functions) == 1);
- entry_point->name = ralloc_strdup(entry_point, "main");
/* Now that we've deleted all but the main function, we can go ahead and
* lower the rest of the constant initializers. We do this here so that
NIR_PASS_V(nir, nir_remove_dead_variables,
nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
- if (stage == MESA_SHADER_FRAGMENT)
- NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
+ nir_address_format_64bit_global);
NIR_PASS_V(nir, nir_propagate_invariant);
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
entry_point->impl, true, false);
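+ /* frexp has no hardware equivalent; lower it to bit operations on the
+ * IEEE representation before optimization.
+ */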
+ NIR_PASS_V(nir, nir_lower_frexp);
+
/* Vulkan uses the separate-shader linking model */
nir->info.separate_shader = true;
- nir = brw_preprocess_nir(compiler, nir);
-
- if (stage == MESA_SHADER_FRAGMENT)
- NIR_PASS_V(nir, anv_nir_lower_input_attachments);
+ nir = brw_preprocess_nir(compiler, nir, NULL);
return nir;
}
key->color_outputs_valid |= (1 << i);
}
- key->nr_color_regions = _mesa_bitcount(key->color_outputs_valid);
+ key->nr_color_regions = util_bitcount(key->color_outputs_valid);
- key->replicate_alpha = key->nr_color_regions > 1 &&
- ms_info && ms_info->alphaToCoverageEnable;
+ /* The hardware disables alpha-to-coverage whenever the fragment shader
+ * writes a SampleMask output. To avoid needless shader recompiles we
+ * would need to know up front whether the shader has such an output, so
+ * we could decide here whether to emit the workaround code.
+ */
+ key->alpha_to_coverage = ms_info && ms_info->alphaToCoverageEnable;
+
+ /* Vulkan doesn't support fixed-function alpha test */
+ key->alpha_test_replicate_alpha = false;
if (ms_info) {
/* We should probably pull this out of the shader, but it's fairly
const char *entrypoint;
const VkSpecializationInfo *spec_info;
+ unsigned char shader_sha1[20];
+
union brw_any_prog_key key;
struct {
struct anv_pipeline_bind_map bind_map;
union brw_any_prog_data prog_data;
+
+ VkPipelineCreationFeedbackEXT feedback;
};
static void
-anv_pipeline_hash_shader(struct mesa_sha1 *ctx,
- struct anv_pipeline_stage *stage)
+anv_pipeline_hash_shader(const struct anv_shader_module *module,
+ const char *entrypoint,
+ gl_shader_stage stage,
+ const VkSpecializationInfo *spec_info,
+ unsigned char *sha1_out)
{
- _mesa_sha1_update(ctx, stage->module->sha1, sizeof(stage->module->sha1));
- _mesa_sha1_update(ctx, stage->entrypoint, strlen(stage->entrypoint));
- _mesa_sha1_update(ctx, &stage->stage, sizeof(stage->stage));
- if (stage->spec_info) {
- _mesa_sha1_update(ctx, stage->spec_info->pMapEntries,
- stage->spec_info->mapEntryCount *
- sizeof(*stage->spec_info->pMapEntries));
- _mesa_sha1_update(ctx, stage->spec_info->pData,
- stage->spec_info->dataSize);
+ struct mesa_sha1 ctx;
+ _mesa_sha1_init(&ctx);
+
+ _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
+ _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
+ _mesa_sha1_update(&ctx, &stage, sizeof(stage));
+ if (spec_info) {
+ _mesa_sha1_update(&ctx, spec_info->pMapEntries,
+ spec_info->mapEntryCount *
+ sizeof(*spec_info->pMapEntries));
+ _mesa_sha1_update(&ctx, spec_info->pData,
+ spec_info->dataSize);
}
- _mesa_sha1_update(ctx, &stage->key, brw_prog_key_size(stage->stage));
+
+ _mesa_sha1_final(&ctx, sha1_out);
}
static void
if (layout)
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
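+ /* robustBufferAccess changes how buffer accesses get lowered (bounded
+ * vs. raw A64 addressing), so it has to participate in the hash.
+ */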
+ const bool rba = pipeline->device->robust_buffer_access;
+ _mesa_sha1_update(&ctx, &rba, sizeof(rba));
+
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
- if (stages[s].entrypoint)
- anv_pipeline_hash_shader(&ctx, &stages[s]);
+ if (stages[s].entrypoint) {
+ _mesa_sha1_update(&ctx, stages[s].shader_sha1,
+ sizeof(stages[s].shader_sha1));
+ _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
+ }
}
_mesa_sha1_final(&ctx, sha1_out);
if (layout)
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
- anv_pipeline_hash_shader(&ctx, stage);
+ const bool rba = pipeline->device->robust_buffer_access;
+ _mesa_sha1_update(&ctx, &rba, sizeof(rba));
+
+ _mesa_sha1_update(&ctx, stage->shader_sha1,
+ sizeof(stage->shader_sha1));
+ _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
_mesa_sha1_final(&ctx, sha1_out);
}
static nir_shader *
-anv_pipeline_compile(struct anv_pipeline *pipeline,
- void *mem_ctx,
- struct anv_pipeline_layout *layout,
- struct anv_pipeline_stage *stage,
- struct brw_stage_prog_data *prog_data,
- struct anv_pipeline_bind_map *map)
+anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
+ struct anv_pipeline_cache *cache,
+ void *mem_ctx,
+ struct anv_pipeline_stage *stage)
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
+ const nir_shader_compiler_options *nir_options =
+ compiler->glsl_compiler_options[stage->stage].NirOptions;
+ nir_shader *nir;
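+ /* SPIR-V -> NIR translation is expensive, and the result depends only
+ * on the module, entrypoint, and specialization constants, all of which
+ * are captured in shader_sha1, so try the cache first.
+ */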
- nir_shader *nir = anv_shader_compile_to_nir(pipeline, mem_ctx,
- stage->module,
- stage->entrypoint,
- stage->stage,
- stage->spec_info);
- if (nir == NULL)
- return NULL;
+ nir = anv_device_search_for_nir(pipeline->device, cache,
+ nir_options,
+ stage->shader_sha1,
+ mem_ctx);
+ if (nir) {
+ assert(nir->info.stage == stage->stage);
+ return nir;
+ }
+
+ nir = anv_shader_compile_to_nir(pipeline->device,
+ mem_ctx,
+ stage->module,
+ stage->entrypoint,
+ stage->stage,
+ stage->spec_info);
+ if (nir) {
+ anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
+ return nir;
+ }
+
+ return NULL;
+}
+
+static void
+anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
+ void *mem_ctx,
+ struct anv_pipeline_stage *stage,
+ struct anv_pipeline_layout *layout)
+{
+ const struct anv_physical_device *pdevice =
+ &pipeline->device->instance->physicalDevice;
+ const struct brw_compiler *compiler = pdevice->compiler;
+
+ struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
+ nir_shader *nir = stage->nir;
+
+ if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+ NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
+ NIR_PASS_V(nir, anv_nir_lower_input_attachments);
+ }
NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
pipeline->needs_data_cache = true;
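+ /* Lower image load/store to the surface messages the hardware actually
+ * supports, including conversion for formats without native typed
+ * surface support.
+ */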
+ NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);
+
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
- if (layout)
- anv_nir_apply_pipeline_layout(pipeline, layout, nir, prog_data, map);
+ if (layout) {
+ anv_nir_apply_pipeline_layout(pdevice,
+ pipeline->device->robust_buffer_access,
+ layout, nir, prog_data,
+ &stage->bind_map);
+
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
+ nir_address_format_32bit_index_offset);
+
+ nir_address_format ssbo_address_format;
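+ /* Mirror the ssbo_ptr_type choice made at SPIR-V translation time:
+ * bounded A64 addressing when robustBufferAccess is enabled, raw
+ * 64-bit global addressing otherwise, and binding-table index+offset
+ * on hardware without A64 buffer access.
+ */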
+ if (pdevice->has_a64_buffer_access) {
+ if (pipeline->device->robust_buffer_access)
+ ssbo_address_format = nir_address_format_64bit_bounded_global;
+ else
+ ssbo_address_format = nir_address_format_64bit_global;
+ } else {
+ ssbo_address_format = nir_address_format_32bit_index_offset;
+ }
+ NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
+ ssbo_address_format);
+
+ NIR_PASS_V(nir, nir_opt_constant_folding);
+ }
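+ /* Figure out which UBO ranges are accessed often enough to be worth
+ * pushing as push constants; the compute path handles push constants
+ * separately.
+ */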
if (nir->info.stage != MESA_SHADER_COMPUTE)
brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
assert(nir->num_uniforms == prog_data->nr_params * 4);
- return nir;
-}
-
-static void
-anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
-{
- prog_data->binding_table.size_bytes = 0;
- prog_data->binding_table.texture_start = bias;
- prog_data->binding_table.gather_texture_start = bias;
- prog_data->binding_table.ubo_start = bias;
- prog_data->binding_table.ssbo_start = bias;
- prog_data->binding_table.image_start = bias;
+ stage->nir = nir;
}
static void
struct anv_pipeline_stage *vs_stage,
struct anv_pipeline_stage *next_stage)
{
- anv_fill_binding_table(&vs_stage->prog_data.vs.base.base, 0);
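+ /* Link against the next stage so cross-stage optimizations (dead
+ * output elimination, varying compaction) can run on the pair.
+ */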
+ if (next_stage)
+ brw_nir_link_shaders(compiler, &vs_stage->nir, &next_stage->nir);
}
-static VkResult
-anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
- struct anv_pipeline_cache *cache,
- struct anv_pipeline_stage *stage)
+static const unsigned *
+anv_pipeline_compile_vs(const struct brw_compiler *compiler,
+ void *mem_ctx,
+ struct anv_device *device,
+ struct anv_pipeline_stage *vs_stage)
{
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
- struct anv_shader_bin *bin = NULL;
-
- if (bin == NULL) {
- void *mem_ctx = ralloc_context(NULL);
-
- brw_compute_vue_map(&pipeline->device->info,
- &stage->prog_data.vs.base.vue_map,
- stage->nir->info.outputs_written,
- stage->nir->info.separate_shader);
-
- const unsigned *shader_code =
- brw_compile_vs(compiler, NULL, mem_ctx, &stage->key.vs,
- &stage->prog_data.vs, stage->nir, -1, NULL);
- if (shader_code == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- unsigned code_size = stage->prog_data.vs.base.base.program_size;
- bin = anv_device_upload_kernel(pipeline->device, cache,
- &stage->cache_key,
- sizeof(stage->cache_key),
- shader_code, code_size,
- stage->nir->constant_data,
- stage->nir->constant_data_size,
- &stage->prog_data.base,
- sizeof(stage->prog_data.vs),
- &stage->bind_map);
- if (!bin) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
+ brw_compute_vue_map(compiler->devinfo,
+ &vs_stage->prog_data.vs.base.vue_map,
+ vs_stage->nir->info.outputs_written,
+ vs_stage->nir->info.separate_shader);
- ralloc_free(mem_ctx);
- }
-
- pipeline->shaders[MESA_SHADER_VERTEX] = bin;
-
- return VK_SUCCESS;
+ return brw_compile_vs(compiler, device, mem_ctx, &vs_stage->key.vs,
+ &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
}
static void
{
assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
- anv_fill_binding_table(&tcs_stage->prog_data.tcs.base.base, 0);
+ brw_nir_link_shaders(compiler, &tcs_stage->nir, &tes_stage->nir);
nir_lower_patch_vertices(tes_stage->nir,
tcs_stage->nir->info.tess.tcs_vertices_out,
/* Copy TCS info into the TES info */
merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
- anv_fill_binding_table(&tcs_stage->prog_data.tcs.base.base, 0);
- anv_fill_binding_table(&tes_stage->prog_data.tes.base.base, 0);
-
/* Whacking the key after cache lookup is a bit sketchy, but all of
* this comes from the SPIR-V, which is part of the hash used for the
* pipeline cache. So it should be safe.
*/
tcs_stage->key.tcs.tes_primitive_mode =
tes_stage->nir->info.tess.primitive_mode;
- tcs_stage->key.tcs.outputs_written =
- tcs_stage->nir->info.outputs_written;
- tcs_stage->key.tcs.patch_outputs_written =
- tcs_stage->nir->info.patch_outputs_written;
tcs_stage->key.tcs.quads_workaround =
compiler->devinfo->gen < 9 &&
tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
+}
- tes_stage->key.tes.inputs_read =
+static const unsigned *
+anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
+ void *mem_ctx,
+ struct anv_device *device,
+ struct anv_pipeline_stage *tcs_stage,
+ struct anv_pipeline_stage *prev_stage)
+{
+ tcs_stage->key.tcs.outputs_written =
tcs_stage->nir->info.outputs_written;
- tes_stage->key.tes.patch_inputs_read =
+ tcs_stage->key.tcs.patch_outputs_written =
tcs_stage->nir->info.patch_outputs_written;
+
+ return brw_compile_tcs(compiler, device, mem_ctx, &tcs_stage->key.tcs,
+ &tcs_stage->prog_data.tcs, tcs_stage->nir,
+ -1, NULL);
}
static void
struct anv_pipeline_stage *tes_stage,
struct anv_pipeline_stage *next_stage)
{
- anv_fill_binding_table(&tes_stage->prog_data.tes.base.base, 0);
+ if (next_stage)
+ brw_nir_link_shaders(compiler, &tes_stage->nir, &next_stage->nir);
}
-static VkResult
-anv_pipeline_compile_tcs_tes(struct anv_pipeline *pipeline,
- struct anv_pipeline_cache *cache,
- struct anv_pipeline_stage *tcs_stage,
- struct anv_pipeline_stage *tes_stage)
+static const unsigned *
+anv_pipeline_compile_tes(const struct brw_compiler *compiler,
+ void *mem_ctx,
+ struct anv_device *device,
+ struct anv_pipeline_stage *tes_stage,
+ struct anv_pipeline_stage *tcs_stage)
{
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
- struct anv_shader_bin *tcs_bin = NULL;
- struct anv_shader_bin *tes_bin = NULL;
-
- if (tcs_bin == NULL || tes_bin == NULL) {
- void *mem_ctx = ralloc_context(NULL);
-
- const int shader_time_index = -1;
- const unsigned *shader_code;
-
- shader_code =
- brw_compile_tcs(compiler, NULL, mem_ctx, &tcs_stage->key.tcs,
- &tcs_stage->prog_data.tcs, tcs_stage->nir,
- shader_time_index, NULL);
- if (shader_code == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- unsigned code_size = tcs_stage->prog_data.base.program_size;
- tcs_bin = anv_device_upload_kernel(pipeline->device, cache,
- &tcs_stage->cache_key,
- sizeof(tcs_stage->cache_key),
- shader_code, code_size,
- tcs_stage->nir->constant_data,
- tcs_stage->nir->constant_data_size,
- &tcs_stage->prog_data.base,
- sizeof(tcs_stage->prog_data.tcs),
- &tcs_stage->bind_map);
- if (!tcs_bin) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- shader_code =
- brw_compile_tes(compiler, NULL, mem_ctx, &tes_stage->key.tes,
- &tcs_stage->prog_data.tcs.base.vue_map,
- &tes_stage->prog_data.tes, tes_stage->nir,
- NULL, shader_time_index, NULL);
- if (shader_code == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- code_size = tes_stage->prog_data.base.program_size;
- tes_bin = anv_device_upload_kernel(pipeline->device, cache,
- &tes_stage->cache_key,
- sizeof(tes_stage->cache_key),
- shader_code, code_size,
- tes_stage->nir->constant_data,
- tes_stage->nir->constant_data_size,
- &tes_stage->prog_data.base,
- sizeof(tes_stage->prog_data.tes),
- &tes_stage->bind_map);
- if (!tes_bin) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- ralloc_free(mem_ctx);
- }
-
- pipeline->shaders[MESA_SHADER_TESS_CTRL] = tcs_bin;
- pipeline->shaders[MESA_SHADER_TESS_EVAL] = tes_bin;
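+ /* The TES can only read what the TCS wrote; record that in the key so
+ * the compiler lays out TES inputs to match the TCS outputs.
+ */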
+ tes_stage->key.tes.inputs_read =
+ tcs_stage->nir->info.outputs_written;
+ tes_stage->key.tes.patch_inputs_read =
+ tcs_stage->nir->info.patch_outputs_written;
- return VK_SUCCESS;
+ return brw_compile_tes(compiler, device, mem_ctx, &tes_stage->key.tes,
+ &tcs_stage->prog_data.tcs.base.vue_map,
+ &tes_stage->prog_data.tes, tes_stage->nir,
+ NULL, -1, NULL);
}
static void
struct anv_pipeline_stage *gs_stage,
struct anv_pipeline_stage *next_stage)
{
- anv_fill_binding_table(&gs_stage->prog_data.gs.base.base, 0);
+ if (next_stage)
+ brw_nir_link_shaders(compiler, &gs_stage->nir, &next_stage->nir);
}
-static VkResult
-anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
- struct anv_pipeline_cache *cache,
- struct anv_pipeline_stage *stage)
+static const unsigned *
+anv_pipeline_compile_gs(const struct brw_compiler *compiler,
+ void *mem_ctx,
+ struct anv_device *device,
+ struct anv_pipeline_stage *gs_stage,
+ struct anv_pipeline_stage *prev_stage)
{
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
- struct anv_shader_bin *bin = NULL;
-
- if (bin == NULL) {
- void *mem_ctx = ralloc_context(NULL);
-
- brw_compute_vue_map(&pipeline->device->info,
- &stage->prog_data.gs.base.vue_map,
- stage->nir->info.outputs_written,
- stage->nir->info.separate_shader);
-
- const unsigned *shader_code =
- brw_compile_gs(compiler, NULL, mem_ctx, &stage->key.gs,
- &stage->prog_data.gs, stage->nir,
- NULL, -1, NULL);
- if (shader_code == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- /* TODO: SIMD8 GS */
- const unsigned code_size = stage->prog_data.base.program_size;
- bin = anv_device_upload_kernel(pipeline->device, cache,
- &stage->cache_key,
- sizeof(stage->cache_key),
- shader_code, code_size,
- stage->nir->constant_data,
- stage->nir->constant_data_size,
- &stage->prog_data.base,
- sizeof(stage->prog_data.gs),
- &stage->bind_map);
- if (!bin) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- ralloc_free(mem_ctx);
- }
-
- pipeline->shaders[MESA_SHADER_GEOMETRY] = bin;
-
- return VK_SUCCESS;
+ brw_compute_vue_map(compiler->devinfo,
+ &gs_stage->prog_data.gs.base.vue_map,
+ gs_stage->nir->info.outputs_written,
+ gs_stage->nir->info.separate_shader);
+
+ return brw_compile_gs(compiler, device, mem_ctx, &gs_stage->key.gs,
+ &gs_stage->prog_data.gs, gs_stage->nir,
+ NULL, -1, NULL);
}
static void
!(stage->key.wm.color_outputs_valid & (1 << rt))) {
/* Unused or out-of-bounds, throw it away */
deleted_output = true;
- var->data.mode = nir_var_local;
+ var->data.mode = nir_var_function_temp;
exec_node_remove(&var->node);
exec_list_push_tail(&impl->locals, &var->node);
continue;
stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;
assert(num_rts <= max_rt);
- assert(stage->bind_map.surface_count + num_rts <= 256);
- memmove(stage->bind_map.surface_to_descriptor + num_rts,
- stage->bind_map.surface_to_descriptor,
- stage->bind_map.surface_count *
- sizeof(*stage->bind_map.surface_to_descriptor));
+ assert(stage->bind_map.surface_count == 0);
typed_memcpy(stage->bind_map.surface_to_descriptor,
rt_bindings, num_rts);
stage->bind_map.surface_count += num_rts;
-
- anv_fill_binding_table(&stage->prog_data.wm.base, num_rts);
}
-static VkResult
-anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
- struct anv_pipeline_cache *cache,
- struct anv_pipeline_stage *stage)
+static const unsigned *
+anv_pipeline_compile_fs(const struct brw_compiler *compiler,
+ void *mem_ctx,
+ struct anv_device *device,
+ struct anv_pipeline_stage *fs_stage,
+ struct anv_pipeline_stage *prev_stage)
{
- const struct brw_compiler *compiler =
- pipeline->device->instance->physicalDevice.compiler;
- struct anv_shader_bin *bin = NULL;
-
/* TODO: we could set this to 0 based on the information in nir_shader, but
* we need this before we call spirv_to_nir.
*/
- const struct brw_vue_map *vue_map =
- &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;
- stage->key.wm.input_slots_valid = vue_map->slots_valid;
-
- if (bin == NULL) {
- void *mem_ctx = ralloc_context(NULL);
-
- const unsigned *shader_code =
- brw_compile_fs(compiler, NULL, mem_ctx, &stage->key.wm,
- &stage->prog_data.wm, stage->nir,
- NULL, -1, -1, -1, true, false, NULL, NULL);
- if (shader_code == NULL) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- unsigned code_size = stage->prog_data.base.program_size;
- bin = anv_device_upload_kernel(pipeline->device, cache,
- &stage->cache_key,
- sizeof(stage->cache_key),
- shader_code, code_size,
- stage->nir->constant_data,
- stage->nir->constant_data_size,
- &stage->prog_data.base,
- sizeof(stage->prog_data.wm),
- &stage->bind_map);
- if (!bin) {
- ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- ralloc_free(mem_ctx);
+ assert(prev_stage);
+ fs_stage->key.wm.input_slots_valid =
+ prev_stage->prog_data.vue.vue_map.slots_valid;
+
+ const unsigned *code =
+ brw_compile_fs(compiler, device, mem_ctx, &fs_stage->key.wm,
+ &fs_stage->prog_data.wm, fs_stage->nir,
+ NULL, -1, -1, -1, true, false, NULL, NULL);
+
+ if (fs_stage->key.wm.nr_color_regions == 0 &&
+ !fs_stage->prog_data.wm.has_side_effects &&
+ !fs_stage->prog_data.wm.uses_kill &&
+ fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
+ !fs_stage->prog_data.wm.computed_stencil) {
+ /* This fragment shader has no outputs and no side effects. Go ahead
+ * and return the code pointer so we don't accidentally think the
+ * compile failed, but zero out prog_data, which sets program_size to
+ * zero and disables the stage.
+ */
+ memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
}
- pipeline->shaders[MESA_SHADER_FRAGMENT] = bin;
-
- return VK_SUCCESS;
+ return code;
}
static VkResult
struct anv_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *info)
{
+ VkPipelineCreationFeedbackEXT pipeline_feedback = {
+ .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
+ };
+ int64_t pipeline_start = os_time_get_nano();
+
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
pipeline->active_stages |= sinfo->stage;
+ int64_t stage_start = os_time_get_nano();
+
stages[stage].stage = stage;
stages[stage].module = anv_shader_module_from_handle(sinfo->module);
stages[stage].entrypoint = sinfo->pName;
stages[stage].spec_info = sinfo->pSpecializationInfo;
+ anv_pipeline_hash_shader(stages[stage].module,
+ stages[stage].entrypoint,
+ stage,
+ stages[stage].spec_info,
+ stages[stage].shader_sha1);
const struct gen_device_info *devinfo = &pipeline->device->info;
switch (stage) {
default:
unreachable("Invalid graphics shader stage");
}
+
+ stages[stage].feedback.duration += os_time_get_nano() - stage_start;
+ stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
}
if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
unsigned found = 0;
+ unsigned cache_hits = 0;
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
if (!stages[s].entrypoint)
continue;
+ int64_t stage_start = os_time_get_nano();
+
stages[s].cache_key.stage = s;
memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
+ bool cache_hit;
struct anv_shader_bin *bin =
anv_device_search_for_kernel(pipeline->device, cache,
&stages[s].cache_key,
- sizeof(stages[s].cache_key));
+ sizeof(stages[s].cache_key), &cache_hit);
if (bin) {
found++;
pipeline->shaders[s] = bin;
}
+
+ if (cache_hit) {
+ cache_hits++;
+ stages[s].feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ }
+ stages[s].feedback.duration += os_time_get_nano() - stage_start;
}
if (found == __builtin_popcount(pipeline->active_stages)) {
+ if (cache_hits == found) {
+ pipeline_feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ }
/* We found all our shaders in the cache. We're done. */
- return VK_SUCCESS;
+ goto done;
} else if (found > 0) {
/* We found some but not all of our shaders. This shouldn't happen
* most of the time but it can if we have a partially populated
* pipeline cache. We throw away what we found and get it out of the
* cache again as part of the compilation process.
*/
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ stages[s].feedback.flags = 0;
if (pipeline->shaders[s]) {
anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
pipeline->shaders[s] = NULL;
if (!stages[s].entrypoint)
continue;
+ int64_t stage_start = os_time_get_nano();
+
assert(stages[s].stage == s);
assert(pipeline->shaders[s] == NULL);
.sampler_to_descriptor = stages[s].sampler_to_descriptor
};
- stages[s].nir = anv_pipeline_compile(pipeline, pipeline_ctx, layout,
- &stages[s],
- &stages[s].prog_data.base,
- &stages[s].bind_map);
- if (stages[s].nir == NULL)
+ stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
+ pipeline_ctx,
+ &stages[s]);
+ if (stages[s].nir == NULL) {
+ result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail;
+ }
+
+ stages[s].feedback.duration += os_time_get_nano() - stage_start;
}
/* Walk backwards to link */
next_stage = &stages[s];
}
+ struct anv_pipeline_stage *prev_stage = NULL;
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
if (!stages[s].entrypoint)
continue;
+ int64_t stage_start = os_time_get_nano();
+
+ void *stage_ctx = ralloc_context(NULL);
+
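+ /* Gather transform feedback info while the output variables are still
+ * intact; only the pre-rasterization VUE stages (VS, TES, GS) can
+ * write XFB.
+ */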
+ nir_xfb_info *xfb_info = NULL;
+ if (s == MESA_SHADER_VERTEX ||
+ s == MESA_SHADER_TESS_EVAL ||
+ s == MESA_SHADER_GEOMETRY)
+ xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
+
+ anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
+
+ const unsigned *code;
switch (s) {
case MESA_SHADER_VERTEX:
- result = anv_pipeline_compile_vs(pipeline, cache, &stages[s]);
+ code = anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
+ &stages[s]);
break;
case MESA_SHADER_TESS_CTRL:
- /* Handled with TESS_EVAL */
+ code = anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
break;
case MESA_SHADER_TESS_EVAL:
- result = anv_pipeline_compile_tcs_tes(pipeline, cache,
- &stages[MESA_SHADER_TESS_CTRL],
- &stages[MESA_SHADER_TESS_EVAL]);
+ code = anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
break;
case MESA_SHADER_GEOMETRY:
- result = anv_pipeline_compile_gs(pipeline, cache, &stages[s]);
+ code = anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
break;
case MESA_SHADER_FRAGMENT:
- result = anv_pipeline_compile_fs(pipeline, cache, &stages[s]);
+ code = anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
+ &stages[s], prev_stage);
break;
default:
unreachable("Invalid graphics shader stage");
}
- if (result != VK_SUCCESS)
+ if (code == NULL) {
+ ralloc_free(stage_ctx);
+ result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail;
+ }
+
+ struct anv_shader_bin *bin =
+ anv_device_upload_kernel(pipeline->device, cache,
+ &stages[s].cache_key,
+ sizeof(stages[s].cache_key),
+ code, stages[s].prog_data.base.program_size,
+ stages[s].nir->constant_data,
+ stages[s].nir->constant_data_size,
+ &stages[s].prog_data.base,
+ brw_prog_data_size(s),
+ xfb_info, &stages[s].bind_map);
+ if (!bin) {
+ ralloc_free(stage_ctx);
+ result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ goto fail;
+ }
+
+ pipeline->shaders[s] = bin;
+ ralloc_free(stage_ctx);
+
+ stages[s].feedback.duration += os_time_get_nano() - stage_start;
+
+ prev_stage = &stages[s];
}
ralloc_free(pipeline_ctx);
+done:
+
+ if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
+ pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
+ /* This can happen if we decided to implicitly disable the fragment
+ * shader. See anv_pipeline_compile_fs().
+ */
+ anv_shader_bin_unref(pipeline->device,
+ pipeline->shaders[MESA_SHADER_FRAGMENT]);
+ pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
+ pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+
+ pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
+
+ const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
+ vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
+ if (create_feedback) {
+ *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
+
+ assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
+ for (uint32_t i = 0; i < info->stageCount; i++) {
+ gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
+ create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
+ }
+ }
+
return VK_SUCCESS;
fail:
const char *entrypoint,
const VkSpecializationInfo *spec_info)
{
+ VkPipelineCreationFeedbackEXT pipeline_feedback = {
+ .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
+ };
+ int64_t pipeline_start = os_time_get_nano();
+
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
.module = module,
.entrypoint = entrypoint,
.spec_info = spec_info,
+ .cache_key = {
+ .stage = MESA_SHADER_COMPUTE,
+ },
+ .feedback = {
+ .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
+ },
};
+ anv_pipeline_hash_shader(stage.module,
+ stage.entrypoint,
+ MESA_SHADER_COMPUTE,
+ stage.spec_info,
+ stage.shader_sha1);
struct anv_shader_bin *bin = NULL;
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
- unsigned char sha1[20];
- anv_pipeline_hash_compute(pipeline, layout, &stage, sha1);
- bin = anv_device_search_for_kernel(pipeline->device, cache, sha1, 20);
+ anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
+ bool cache_hit;
+ bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
+ sizeof(stage.cache_key), &cache_hit);
if (bin == NULL) {
- struct brw_cs_prog_data prog_data = {};
+ int64_t stage_start = os_time_get_nano();
stage.bind_map = (struct anv_pipeline_bind_map) {
.surface_to_descriptor = stage.surface_to_descriptor,
.sampler_to_descriptor = stage.sampler_to_descriptor
};
+ /* Set up a binding for the gl_NumWorkGroups */
+ stage.bind_map.surface_count = 1;
+ stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
+ };
+
void *mem_ctx = ralloc_context(NULL);
- nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx, layout, &stage,
- &prog_data.base, &stage.bind_map);
- if (nir == NULL) {
+ stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
+ if (stage.nir == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- NIR_PASS_V(nir, anv_nir_add_base_work_group_id, &prog_data);
+ anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
- anv_fill_binding_table(&prog_data.base, 1);
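+ /* vkCmdDispatchBase can specify a non-zero base workgroup; add it to
+ * gl_WorkGroupID from push constant data.
+ */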
+ NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
+ &stage.prog_data.cs);
const unsigned *shader_code =
- brw_compile_cs(compiler, NULL, mem_ctx, &stage.key.cs,
- &prog_data, nir, -1, NULL);
+ brw_compile_cs(compiler, pipeline->device, mem_ctx, &stage.key.cs,
+ &stage.prog_data.cs, stage.nir, -1, NULL);
if (shader_code == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- const unsigned code_size = prog_data.base.program_size;
- bin = anv_device_upload_kernel(pipeline->device, cache, sha1, 20,
+ const unsigned code_size = stage.prog_data.base.program_size;
+ bin = anv_device_upload_kernel(pipeline->device, cache,
+ &stage.cache_key, sizeof(stage.cache_key),
shader_code, code_size,
- nir->constant_data,
- nir->constant_data_size,
- &prog_data.base, sizeof(prog_data),
- &stage.bind_map);
+ stage.nir->constant_data,
+ stage.nir->constant_data_size,
+ &stage.prog_data.base,
+ sizeof(stage.prog_data.cs),
+ NULL, &stage.bind_map);
if (!bin) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
ralloc_free(mem_ctx);
+
+ stage.feedback.duration = os_time_get_nano() - stage_start;
+ }
+
+ if (cache_hit) {
+ stage.feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ pipeline_feedback.flags |=
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+ }
+ pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
+
+ const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
+ vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
+ if (create_feedback) {
+ *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
+
+ assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
+ create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
}
pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
pCreateInfo->pRasterizationState->depthClampEnable;
+ /* Previously we enabled depth clipping when !depthClampEnable.
+ * DepthClipStateCreateInfo now makes depth clipping explicit, so if the
+ * clip state is provided, use its enable value to determine clipping;
+ * otherwise fall back to the previous !depthClampEnable logic.
+ */
+ const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
+ vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
+ PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
+ pipeline->depth_clip_enable = clip_info ?
+ clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
+
pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
pCreateInfo->pMultisampleState->sampleShadingEnable;