X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;ds=sidebyside;f=src%2Fintel%2Fvulkan%2Fanv_pipeline.c;h=9b65e353a903676312ebcd5530c32291a13f12e4;hb=b178652b41410483dcd82aba495eab6bc892ab15;hp=22af44d6020107af05a9dd2c91238187b4987e50;hpb=75af420cb1145f5fc34af6728047a2404b5f1add;p=mesa.git diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c index 22af44d6020..9b65e353a90 100644 --- a/src/intel/vulkan/anv_pipeline.c +++ b/src/intel/vulkan/anv_pipeline.c @@ -28,10 +28,11 @@ #include #include "util/mesa-sha1.h" +#include "common/gen_l3_config.h" #include "anv_private.h" #include "brw_nir.h" #include "anv_nir.h" -#include "nir/spirv/nir_spirv.h" +#include "spirv/nir_spirv.h" /* Needed for SWIZZLE macros */ #include "program/prog_instruction.h" @@ -50,13 +51,12 @@ VkResult anv_CreateShaderModule( assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO); assert(pCreateInfo->flags == 0); - module = anv_alloc2(&device->alloc, pAllocator, + module = vk_alloc2(&device->alloc, pAllocator, sizeof(*module) + pCreateInfo->codeSize, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (module == NULL) return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); - module->nir = NULL; module->size = pCreateInfo->codeSize; memcpy(module->data, pCreateInfo->pCode, module->size); @@ -75,7 +75,10 @@ void anv_DestroyShaderModule( ANV_FROM_HANDLE(anv_device, device, _device); ANV_FROM_HANDLE(anv_shader_module, module, _module); - anv_free2(&device->alloc, pAllocator, module); + if (!module) + return; + + vk_free2(&device->alloc, pAllocator, module); } #define SPIR_V_MAGIC_NUMBER 0x07230203 @@ -99,86 +102,88 @@ anv_shader_compile_to_nir(struct anv_device *device, const nir_shader_compiler_options *nir_options = compiler->glsl_compiler_options[stage].NirOptions; - nir_shader *nir; - nir_function *entry_point; - if (module->nir) { - /* Some things such as our meta clear/blit code will give us a NIR - * shader directly. 
In that case, we just ignore the SPIR-V entirely - * and just use the NIR shader */ - nir = module->nir; - nir->options = nir_options; - nir_validate_shader(nir); - - assert(exec_list_length(&nir->functions) == 1); - struct exec_node *node = exec_list_get_head(&nir->functions); - entry_point = exec_node_data(nir_function, node, node); - } else { - uint32_t *spirv = (uint32_t *) module->data; - assert(spirv[0] == SPIR_V_MAGIC_NUMBER); - assert(module->size % 4 == 0); - - uint32_t num_spec_entries = 0; - struct nir_spirv_specialization *spec_entries = NULL; - if (spec_info && spec_info->mapEntryCount > 0) { - num_spec_entries = spec_info->mapEntryCount; - spec_entries = malloc(num_spec_entries * sizeof(*spec_entries)); - for (uint32_t i = 0; i < num_spec_entries; i++) { - const uint32_t *data = - spec_info->pData + spec_info->pMapEntries[i].offset; - assert((const void *)(data + 1) <= - spec_info->pData + spec_info->dataSize); - - spec_entries[i].id = spec_info->pMapEntries[i].constantID; - spec_entries[i].data = *data; - } + uint32_t *spirv = (uint32_t *) module->data; + assert(spirv[0] == SPIR_V_MAGIC_NUMBER); + assert(module->size % 4 == 0); + + uint32_t num_spec_entries = 0; + struct nir_spirv_specialization *spec_entries = NULL; + if (spec_info && spec_info->mapEntryCount > 0) { + num_spec_entries = spec_info->mapEntryCount; + spec_entries = malloc(num_spec_entries * sizeof(*spec_entries)); + for (uint32_t i = 0; i < num_spec_entries; i++) { + VkSpecializationMapEntry entry = spec_info->pMapEntries[i]; + const void *data = spec_info->pData + entry.offset; + assert(data + entry.size <= spec_info->pData + spec_info->dataSize); + + spec_entries[i].id = spec_info->pMapEntries[i].constantID; + spec_entries[i].data = *(const uint32_t *)data; } + } - entry_point = spirv_to_nir(spirv, module->size / 4, - spec_entries, num_spec_entries, - stage, entrypoint_name, nir_options); - nir = entry_point->shader; - assert(nir->stage == stage); - nir_validate_shader(nir); + nir_function *entry_point = + spirv_to_nir(spirv, module->size / 4, + spec_entries, num_spec_entries, + stage, entrypoint_name, nir_options); + nir_shader *nir = entry_point->shader; + assert(nir->stage == stage); + nir_validate_shader(nir); - free(spec_entries); + free(spec_entries); - nir_lower_returns(nir); + if (stage == MESA_SHADER_FRAGMENT) { + nir_lower_wpos_center(nir); nir_validate_shader(nir); + } - nir_inline_functions(nir); - nir_validate_shader(nir); + nir_lower_returns(nir); + nir_validate_shader(nir); - /* Pick off the single entrypoint that we want */ - foreach_list_typed_safe(nir_function, func, node, &nir->functions) { - if (func != entry_point) - exec_node_remove(&func->node); - } - assert(exec_list_length(&nir->functions) == 1); - entry_point->name = ralloc_strdup(entry_point, "main"); + nir_inline_functions(nir); + nir_validate_shader(nir); - nir_remove_dead_variables(nir, nir_var_shader_in); - nir_remove_dead_variables(nir, nir_var_shader_out); - nir_remove_dead_variables(nir, nir_var_system_value); - nir_validate_shader(nir); + /* Pick off the single entrypoint that we want */ + foreach_list_typed_safe(nir_function, func, node, &nir->functions) { + if (func != entry_point) + exec_node_remove(&func->node); + } + assert(exec_list_length(&nir->functions) == 1); + entry_point->name = ralloc_strdup(entry_point, "main"); - nir_lower_outputs_to_temporaries(entry_point->shader, entry_point); + nir_remove_dead_variables(nir, nir_var_shader_in); + nir_remove_dead_variables(nir, nir_var_shader_out); + 
nir_remove_dead_variables(nir, nir_var_system_value); + nir_validate_shader(nir); - nir_lower_system_values(nir); - nir_validate_shader(nir); - } + nir_propagate_invariant(nir); + nir_validate_shader(nir); + + nir_lower_io_to_temporaries(entry_point->shader, entry_point->impl, + true, false); + + nir_lower_system_values(nir); + nir_validate_shader(nir); /* Vulkan uses the separate-shader linking model */ - nir->info.separate_shader = true; + nir->info->separate_shader = true; - nir = brw_preprocess_nir(nir, compiler->scalar_stage[stage]); + nir = brw_preprocess_nir(compiler, nir); + + nir_lower_clip_cull_distance_arrays(nir); + nir_validate_shader(nir); + + if (stage == MESA_SHADER_FRAGMENT) + anv_nir_lower_input_attachments(nir); nir_shader_gather_info(nir, entry_point->impl); - uint32_t indirect_mask = 0; + nir_variable_mode indirect_mask = 0; if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput) - indirect_mask |= (1 << nir_var_shader_in); + indirect_mask |= nir_var_shader_in; + if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput) + indirect_mask |= nir_var_shader_out; if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp) - indirect_mask |= 1 << nir_var_local; + indirect_mask |= nir_var_local; nir_lower_indirect_derefs(nir, indirect_mask); @@ -193,11 +198,20 @@ void anv_DestroyPipeline( ANV_FROM_HANDLE(anv_device, device, _device); ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline); + if (!pipeline) + return; + anv_reloc_list_finish(&pipeline->batch_relocs, pAllocator ? pAllocator : &device->alloc); if (pipeline->blend_state.map) anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state); - anv_free2(&device->alloc, pAllocator, pipeline); + + for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) { + if (pipeline->shaders[s]) + anv_shader_bin_unref(device, pipeline->shaders[s]); + } + + vk_free2(&device->alloc, pAllocator, pipeline); } static const uint32_t vk_to_gen_primitive_type[] = { @@ -215,7 +229,7 @@ static const uint32_t vk_to_gen_primitive_type[] = { }; static void -populate_sampler_prog_key(const struct brw_device_info *devinfo, +populate_sampler_prog_key(const struct gen_device_info *devinfo, struct brw_sampler_prog_key_data *key) { /* XXX: Handle texture swizzle on HSW- */ @@ -226,7 +240,7 @@ populate_sampler_prog_key(const struct brw_device_info *devinfo, } static void -populate_vs_prog_key(const struct brw_device_info *devinfo, +populate_vs_prog_key(const struct gen_device_info *devinfo, struct brw_vs_prog_key *key) { memset(key, 0, sizeof(*key)); @@ -239,7 +253,7 @@ populate_vs_prog_key(const struct brw_device_info *devinfo, } static void -populate_gs_prog_key(const struct brw_device_info *devinfo, +populate_gs_prog_key(const struct gen_device_info *devinfo, struct brw_gs_prog_key *key) { memset(key, 0, sizeof(*key)); @@ -248,9 +262,8 @@ populate_gs_prog_key(const struct brw_device_info *devinfo, } static void -populate_wm_prog_key(const struct brw_device_info *devinfo, +populate_wm_prog_key(const struct gen_device_info *devinfo, const VkGraphicsPipelineCreateInfo *info, - const struct anv_graphics_pipeline_create_info *extra, struct brw_wm_prog_key *key) { ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass); @@ -267,16 +280,8 @@ populate_wm_prog_key(const struct brw_device_info *devinfo, /* XXX Vulkan doesn't appear to specify */ key->clamp_fragment_color = false; - /* Vulkan always specifies upper-left coordinates */ - key->drawable_height = 0; - key->render_to_fbo = false; - - if (extra && extra->color_attachment_count 
>= 0) { - key->nr_color_regions = extra->color_attachment_count; - } else { - key->nr_color_regions = - render_pass->subpasses[info->subpass].color_count; - } + key->nr_color_regions = + render_pass->subpasses[info->subpass].color_count; key->replicate_alpha = key->nr_color_regions > 1 && info->pMultisampleState && @@ -286,17 +291,15 @@ populate_wm_prog_key(const struct brw_device_info *devinfo, /* We should probably pull this out of the shader, but it's fairly * harmless to compute it and then let dead-code take care of it. */ - key->persample_shading = info->pMultisampleState->sampleShadingEnable; - if (key->persample_shading) - key->persample_2x = info->pMultisampleState->rasterizationSamples == 2; - - key->compute_pos_offset = info->pMultisampleState->sampleShadingEnable; - key->compute_sample_id = info->pMultisampleState->sampleShadingEnable; + key->persample_interp = + (info->pMultisampleState->minSampleShading * + info->pMultisampleState->rasterizationSamples) > 1; + key->multisample_fbo = true; } } static void -populate_cs_prog_key(const struct brw_device_info *devinfo, +populate_cs_prog_key(const struct gen_device_info *devinfo, struct brw_cs_prog_key *key) { memset(key, 0, sizeof(*key)); @@ -313,16 +316,13 @@ anv_pipeline_compile(struct anv_pipeline *pipeline, struct brw_stage_prog_data *prog_data, struct anv_pipeline_bind_map *map) { - const struct brw_compiler *compiler = - pipeline->device->instance->physicalDevice.compiler; - nir_shader *nir = anv_shader_compile_to_nir(pipeline->device, module, entrypoint, stage, spec_info); if (nir == NULL) return NULL; - anv_nir_lower_push_constants(nir, compiler->scalar_stage[stage]); + anv_nir_lower_push_constants(nir); /* Figure out the number of parameters */ prog_data->nr_params = 0; @@ -331,14 +331,24 @@ anv_pipeline_compile(struct anv_pipeline *pipeline, /* If the shader uses any push constants at all, we'll just give * them the maximum possible number */ + assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE); prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float); } if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets) prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2; - if (nir->info.num_images > 0) - prog_data->nr_params += nir->info.num_images * BRW_IMAGE_PARAM_SIZE; + if (nir->info->num_images > 0) { + prog_data->nr_params += nir->info->num_images * BRW_IMAGE_PARAM_SIZE; + pipeline->needs_data_cache = true; + } + + if (stage == MESA_SHADER_COMPUTE) + ((struct brw_cs_prog_data *)prog_data)->thread_local_id_index = + prog_data->nr_params++; /* The CS Thread ID uniform */ + + if (nir->info->num_ssbos > 0) + pipeline->needs_data_cache = true; if (prog_data->nr_params > 0) { /* XXX: I think we're leaking this */ @@ -366,10 +376,6 @@ anv_pipeline_compile(struct anv_pipeline *pipeline, if (pipeline->layout) anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map); - /* Finish the optimization and compilation process */ - if (nir->stage == MESA_SHADER_COMPUTE) - brw_nir_lower_shared(nir); - /* nir_lower_io will only handle the push constants; we need to set this * to the full number of possible uniforms. 
*/ @@ -383,34 +389,42 @@ anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias) { prog_data->binding_table.size_bytes = 0; prog_data->binding_table.texture_start = bias; + prog_data->binding_table.gather_texture_start = bias; prog_data->binding_table.ubo_start = bias; prog_data->binding_table.ssbo_start = bias; prog_data->binding_table.image_start = bias; } +static struct anv_shader_bin * +anv_pipeline_upload_kernel(struct anv_pipeline *pipeline, + struct anv_pipeline_cache *cache, + const void *key_data, uint32_t key_size, + const void *kernel_data, uint32_t kernel_size, + const struct brw_stage_prog_data *prog_data, + uint32_t prog_data_size, + const struct anv_pipeline_bind_map *bind_map) +{ + if (cache) { + return anv_pipeline_cache_upload_kernel(cache, key_data, key_size, + kernel_data, kernel_size, + prog_data, prog_data_size, + bind_map); + } else { + return anv_shader_bin_create(pipeline->device, key_data, key_size, + kernel_data, kernel_size, + prog_data, prog_data_size, + prog_data->param, bind_map); + } +} + + static void anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline, gl_shader_stage stage, - const struct brw_stage_prog_data *prog_data, - struct anv_pipeline_bind_map *map) + struct anv_shader_bin *shader) { - struct brw_device_info *devinfo = &pipeline->device->info; - uint32_t max_threads[] = { - [MESA_SHADER_VERTEX] = devinfo->max_vs_threads, - [MESA_SHADER_TESS_CTRL] = devinfo->max_hs_threads, - [MESA_SHADER_TESS_EVAL] = devinfo->max_ds_threads, - [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads, - [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads, - [MESA_SHADER_COMPUTE] = devinfo->max_cs_threads, - }; - - pipeline->prog_data[stage] = prog_data; + pipeline->shaders[stage] = shader; pipeline->active_stages |= mesa_to_vk_shader_stage(stage); - pipeline->scratch_start[stage] = pipeline->total_scratch; - pipeline->total_scratch = - align_u32(pipeline->total_scratch, 1024) + - prog_data->total_scratch * max_threads[stage]; - pipeline->bindings[stage] = *map; } static VkResult @@ -423,20 +437,20 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline, { const struct brw_compiler *compiler = pipeline->device->instance->physicalDevice.compiler; - const struct brw_stage_prog_data *stage_prog_data; struct anv_pipeline_bind_map map; struct brw_vs_prog_key key; - uint32_t kernel = NO_KERNEL; + struct anv_shader_bin *bin = NULL; unsigned char sha1[20]; populate_vs_prog_key(&pipeline->device->info, &key); - if (module->size > 0) { - anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info); - kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map); + if (cache) { + anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, + pipeline->layout, spec_info); + bin = anv_pipeline_cache_search(cache, sha1, 20); } - if (kernel == NO_KERNEL) { + if (bin == NULL) { struct brw_vs_prog_data prog_data = { 0, }; struct anv_pipeline_binding surface_to_descriptor[256]; struct anv_pipeline_binding sampler_to_descriptor[256]; @@ -456,15 +470,14 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline, void *mem_ctx = ralloc_context(NULL); - if (module->nir == NULL) - ralloc_steal(mem_ctx, nir); + ralloc_steal(mem_ctx, nir); - prog_data.inputs_read = nir->info.inputs_read; + prog_data.inputs_read = nir->info->inputs_read; brw_compute_vue_map(&pipeline->device->info, &prog_data.base.vue_map, - nir->info.outputs_written, - nir->info.separate_shader); + nir->info->outputs_written, + nir->info->separate_shader); unsigned code_size; const 
unsigned *shader_code = @@ -475,28 +488,19 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline, return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); } - stage_prog_data = &prog_data.base.base; - kernel = anv_pipeline_cache_upload_kernel(cache, - module->size > 0 ? sha1 : NULL, - shader_code, code_size, - &stage_prog_data, sizeof(prog_data), - &map); - ralloc_free(mem_ctx); - } - - const struct brw_vs_prog_data *vs_prog_data = - (const struct brw_vs_prog_data *) stage_prog_data; + bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20, + shader_code, code_size, + &prog_data.base.base, sizeof(prog_data), + &map); + if (!bin) { + ralloc_free(mem_ctx); + return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); + } - if (vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) { - pipeline->vs_simd8 = kernel; - pipeline->vs_vec4 = NO_KERNEL; - } else { - pipeline->vs_simd8 = NO_KERNEL; - pipeline->vs_vec4 = kernel; + ralloc_free(mem_ctx); } - anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, - stage_prog_data, &map); + anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin); return VK_SUCCESS; } @@ -511,20 +515,20 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline, { const struct brw_compiler *compiler = pipeline->device->instance->physicalDevice.compiler; - const struct brw_stage_prog_data *stage_prog_data; struct anv_pipeline_bind_map map; struct brw_gs_prog_key key; - uint32_t kernel = NO_KERNEL; + struct anv_shader_bin *bin = NULL; unsigned char sha1[20]; populate_gs_prog_key(&pipeline->device->info, &key); - if (module->size > 0) { - anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info); - kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map); + if (cache) { + anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, + pipeline->layout, spec_info); + bin = anv_pipeline_cache_search(cache, sha1, 20); } - if (kernel == NO_KERNEL) { + if (bin == NULL) { struct brw_gs_prog_data prog_data = { 0, }; struct anv_pipeline_binding surface_to_descriptor[256]; struct anv_pipeline_binding sampler_to_descriptor[256]; @@ -544,13 +548,12 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline, void *mem_ctx = ralloc_context(NULL); - if (module->nir == NULL) - ralloc_steal(mem_ctx, nir); + ralloc_steal(mem_ctx, nir); brw_compute_vue_map(&pipeline->device->info, &prog_data.base.vue_map, - nir->info.outputs_written, - nir->info.separate_shader); + nir->info->outputs_written, + nir->info->separate_shader); unsigned code_size; const unsigned *shader_code = @@ -562,20 +565,19 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline, } /* TODO: SIMD8 GS */ - stage_prog_data = &prog_data.base.base; - kernel = anv_pipeline_cache_upload_kernel(cache, - module->size > 0 ? 
sha1 : NULL, - shader_code, code_size, - &stage_prog_data, sizeof(prog_data), - &map); + bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20, + shader_code, code_size, + &prog_data.base.base, sizeof(prog_data), + &map); + if (!bin) { + ralloc_free(mem_ctx); + return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); + } ralloc_free(mem_ctx); } - pipeline->gs_kernel = kernel; - - anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, - stage_prog_data, &map); + anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin); return VK_SUCCESS; } @@ -584,36 +586,32 @@ static VkResult anv_pipeline_compile_fs(struct anv_pipeline *pipeline, struct anv_pipeline_cache *cache, const VkGraphicsPipelineCreateInfo *info, - const struct anv_graphics_pipeline_create_info *extra, struct anv_shader_module *module, const char *entrypoint, const VkSpecializationInfo *spec_info) { const struct brw_compiler *compiler = pipeline->device->instance->physicalDevice.compiler; - const struct brw_stage_prog_data *stage_prog_data; struct anv_pipeline_bind_map map; struct brw_wm_prog_key key; - uint32_t kernel = NO_KERNEL; + struct anv_shader_bin *bin = NULL; unsigned char sha1[20]; - populate_wm_prog_key(&pipeline->device->info, info, extra, &key); + populate_wm_prog_key(&pipeline->device->info, info, &key); - if (pipeline->use_repclear) - key.nr_color_regions = 1; - - if (module->size > 0) { - anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info); - kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map); + if (cache) { + anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, + pipeline->layout, spec_info); + bin = anv_pipeline_cache_search(cache, sha1, 20); } - if (kernel == NO_KERNEL) { + if (bin == NULL) { struct brw_wm_prog_data prog_data = { 0, }; struct anv_pipeline_binding surface_to_descriptor[256]; struct anv_pipeline_binding sampler_to_descriptor[256]; map = (struct anv_pipeline_bind_map) { - .surface_to_descriptor = surface_to_descriptor, + .surface_to_descriptor = surface_to_descriptor + 8, .sampler_to_descriptor = sampler_to_descriptor }; @@ -623,75 +621,85 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline, if (nir == NULL) return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); - nir_function_impl *impl = nir_shader_get_entrypoint(nir)->impl; + unsigned num_rts = 0; + struct anv_pipeline_binding rt_bindings[8]; + nir_function_impl *impl = nir_shader_get_entrypoint(nir); nir_foreach_variable_safe(var, &nir->outputs) { if (var->data.location < FRAG_RESULT_DATA0) continue; unsigned rt = var->data.location - FRAG_RESULT_DATA0; if (rt >= key.nr_color_regions) { + /* Out-of-bounds, throw it away */ var->data.mode = nir_var_local; exec_node_remove(&var->node); exec_list_push_tail(&impl->locals, &var->node); + continue; + } + + /* Give it a new, compacted, location */ + var->data.location = FRAG_RESULT_DATA0 + num_rts; + + unsigned array_len = + glsl_type_is_array(var->type) ? 
glsl_get_length(var->type) : 1; + assert(num_rts + array_len <= 8); + + for (unsigned i = 0; i < array_len; i++) { + rt_bindings[num_rts + i] = (struct anv_pipeline_binding) { + .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS, + .binding = 0, + .index = rt + i, + }; } + + num_rts += array_len; + } + + if (num_rts == 0) { + /* If we have no render targets, we need a null render target */ + rt_bindings[0] = (struct anv_pipeline_binding) { + .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS, + .binding = 0, + .index = UINT8_MAX, + }; + num_rts = 1; } - anv_fill_binding_table(&prog_data.base, MAX_RTS); + assert(num_rts <= 8); + map.surface_to_descriptor -= num_rts; + map.surface_count += num_rts; + assert(map.surface_count <= 256); + memcpy(map.surface_to_descriptor, rt_bindings, + num_rts * sizeof(*rt_bindings)); + + anv_fill_binding_table(&prog_data.base, num_rts); void *mem_ctx = ralloc_context(NULL); - if (module->nir == NULL) - ralloc_steal(mem_ctx, nir); + ralloc_steal(mem_ctx, nir); unsigned code_size; const unsigned *shader_code = brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir, - NULL, -1, -1, pipeline->use_repclear, &code_size, NULL); + NULL, -1, -1, true, false, NULL, &code_size, NULL); if (shader_code == NULL) { ralloc_free(mem_ctx); return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); } - stage_prog_data = &prog_data.base; - kernel = anv_pipeline_cache_upload_kernel(cache, - module->size > 0 ? sha1 : NULL, - shader_code, code_size, - &stage_prog_data, sizeof(prog_data), - &map); + bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20, + shader_code, code_size, + &prog_data.base, sizeof(prog_data), + &map); + if (!bin) { + ralloc_free(mem_ctx); + return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); + } ralloc_free(mem_ctx); } - const struct brw_wm_prog_data *wm_prog_data = - (const struct brw_wm_prog_data *) stage_prog_data; - - if (wm_prog_data->no_8) - pipeline->ps_simd8 = NO_KERNEL; - else - pipeline->ps_simd8 = kernel; - - if (wm_prog_data->no_8 || wm_prog_data->prog_offset_16) { - pipeline->ps_simd16 = kernel + wm_prog_data->prog_offset_16; - } else { - pipeline->ps_simd16 = NO_KERNEL; - } - - pipeline->ps_ksp2 = 0; - pipeline->ps_grf_start2 = 0; - if (pipeline->ps_simd8 != NO_KERNEL) { - pipeline->ps_ksp0 = pipeline->ps_simd8; - pipeline->ps_grf_start0 = wm_prog_data->base.dispatch_grf_start_reg; - if (pipeline->ps_simd16 != NO_KERNEL) { - pipeline->ps_ksp2 = pipeline->ps_simd16; - pipeline->ps_grf_start2 = wm_prog_data->dispatch_grf_start_reg_16; - } - } else if (pipeline->ps_simd16 != NO_KERNEL) { - pipeline->ps_ksp0 = pipeline->ps_simd16; - pipeline->ps_grf_start0 = wm_prog_data->dispatch_grf_start_reg_16; - } - - anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, - stage_prog_data, &map); + anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin); return VK_SUCCESS; } @@ -706,20 +714,20 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline, { const struct brw_compiler *compiler = pipeline->device->instance->physicalDevice.compiler; - const struct brw_stage_prog_data *stage_prog_data; struct anv_pipeline_bind_map map; struct brw_cs_prog_key key; - uint32_t kernel = NO_KERNEL; + struct anv_shader_bin *bin = NULL; unsigned char sha1[20]; populate_cs_prog_key(&pipeline->device->info, &key); - if (module->size > 0) { - anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info); - kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map); + if (cache) { + anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, + 
pipeline->layout, spec_info); + bin = anv_pipeline_cache_search(cache, sha1, 20); } - if (module->size == 0 || kernel == NO_KERNEL) { + if (bin == NULL) { struct brw_cs_prog_data prog_data = { 0, }; struct anv_pipeline_binding surface_to_descriptor[256]; struct anv_pipeline_binding sampler_to_descriptor[256]; @@ -737,12 +745,9 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline, anv_fill_binding_table(&prog_data.base, 1); - prog_data.base.total_shared = nir->num_shared; - void *mem_ctx = ralloc_context(NULL); - if (module->nir == NULL) - ralloc_steal(mem_ctx, nir); + ralloc_steal(mem_ctx, nir); unsigned code_size; const unsigned *shader_code = @@ -753,189 +758,39 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline, return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); } - stage_prog_data = &prog_data.base; - kernel = anv_pipeline_cache_upload_kernel(cache, - module->size > 0 ? sha1 : NULL, - shader_code, code_size, - &stage_prog_data, sizeof(prog_data), - &map); + bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20, + shader_code, code_size, + &prog_data.base, sizeof(prog_data), + &map); + if (!bin) { + ralloc_free(mem_ctx); + return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); + } ralloc_free(mem_ctx); } - pipeline->cs_simd = kernel; - - anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, - stage_prog_data, &map); + anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin); return VK_SUCCESS; } +/** + * Copy pipeline state not marked as dynamic. + * Dynamic state is pipeline state which hasn't been provided at pipeline + * creation time, but is dynamically provided afterwards using various + * vkCmdSet* functions. + * + * The set of state considered "non_dynamic" is determined by the pieces of + * state that have their corresponding VkDynamicState enums omitted from + * VkPipelineDynamicStateCreateInfo::pDynamicStates. + * + * @param[out] pipeline Destination non_dynamic state. + * @param[in] pCreateInfo Source of non_dynamic state to be copied. + */ static void -gen7_compute_urb_partition(struct anv_pipeline *pipeline) -{ - const struct brw_device_info *devinfo = &pipeline->device->info; - bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT; - unsigned vs_size = vs_present ? - get_vs_prog_data(pipeline)->base.urb_entry_size : 1; - unsigned vs_entry_size_bytes = vs_size * 64; - bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT; - unsigned gs_size = gs_present ? - get_gs_prog_data(pipeline)->base.urb_entry_size : 1; - unsigned gs_entry_size_bytes = gs_size * 64; - - /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS): - * - * VS Number of URB Entries must be divisible by 8 if the VS URB Entry - * Allocation Size is less than 9 512-bit URB entries. - * - * Similar text exists for GS. - */ - unsigned vs_granularity = (vs_size < 9) ? 8 : 1; - unsigned gs_granularity = (gs_size < 9) ? 8 : 1; - - /* URB allocations must be done in 8k chunks. */ - unsigned chunk_size_bytes = 8192; - - /* Determine the size of the URB in chunks. */ - unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes; - - /* Reserve space for push constants */ - unsigned push_constant_kb; - if (pipeline->device->info.gen >= 8) - push_constant_kb = 32; - else if (pipeline->device->info.is_haswell) - push_constant_kb = pipeline->device->info.gt == 3 ? 
32 : 16; - else - push_constant_kb = 16; - - unsigned push_constant_bytes = push_constant_kb * 1024; - unsigned push_constant_chunks = - push_constant_bytes / chunk_size_bytes; - - /* Initially, assign each stage the minimum amount of URB space it needs, - * and make a note of how much additional space it "wants" (the amount of - * additional space it could actually make use of). - */ - - /* VS has a lower limit on the number of URB entries */ - unsigned vs_chunks = - ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes, - chunk_size_bytes) / chunk_size_bytes; - unsigned vs_wants = - ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes, - chunk_size_bytes) / chunk_size_bytes - vs_chunks; - - unsigned gs_chunks = 0; - unsigned gs_wants = 0; - if (gs_present) { - /* There are two constraints on the minimum amount of URB space we can - * allocate: - * - * (1) We need room for at least 2 URB entries, since we always operate - * the GS in DUAL_OBJECT mode. - * - * (2) We can't allocate less than nr_gs_entries_granularity. - */ - gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes, - chunk_size_bytes) / chunk_size_bytes; - gs_wants = - ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes, - chunk_size_bytes) / chunk_size_bytes - gs_chunks; - } - - /* There should always be enough URB space to satisfy the minimum - * requirements of each stage. - */ - unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks; - assert(total_needs <= urb_chunks); - - /* Mete out remaining space (if any) in proportion to "wants". */ - unsigned total_wants = vs_wants + gs_wants; - unsigned remaining_space = urb_chunks - total_needs; - if (remaining_space > total_wants) - remaining_space = total_wants; - if (remaining_space > 0) { - unsigned vs_additional = (unsigned) - round(vs_wants * (((double) remaining_space) / total_wants)); - vs_chunks += vs_additional; - remaining_space -= vs_additional; - gs_chunks += remaining_space; - } - - /* Sanity check that we haven't over-allocated. */ - assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks); - - /* Finally, compute the number of entries that can fit in the space - * allocated to each stage. - */ - unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes; - unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes; - - /* Since we rounded up when computing *_wants, this may be slightly more - * than the maximum allowed amount, so correct for that. - */ - nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries); - nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries); - - /* Ensure that we program a multiple of the granularity. */ - nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity); - nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity); - - /* Finally, sanity check to make sure we have at least the minimum number - * of entries needed for each stage. 
- */ - assert(nr_vs_entries >= devinfo->urb.min_vs_entries); - if (gs_present) - assert(nr_gs_entries >= 2); - - /* Lay out the URB in the following order: - * - push constants - * - VS - * - GS - */ - pipeline->urb.start[MESA_SHADER_VERTEX] = push_constant_chunks; - pipeline->urb.size[MESA_SHADER_VERTEX] = vs_size; - pipeline->urb.entries[MESA_SHADER_VERTEX] = nr_vs_entries; - - pipeline->urb.start[MESA_SHADER_GEOMETRY] = push_constant_chunks + vs_chunks; - pipeline->urb.size[MESA_SHADER_GEOMETRY] = gs_size; - pipeline->urb.entries[MESA_SHADER_GEOMETRY] = nr_gs_entries; - - pipeline->urb.start[MESA_SHADER_TESS_CTRL] = push_constant_chunks; - pipeline->urb.size[MESA_SHADER_TESS_CTRL] = 1; - pipeline->urb.entries[MESA_SHADER_TESS_CTRL] = 0; - - pipeline->urb.start[MESA_SHADER_TESS_EVAL] = push_constant_chunks; - pipeline->urb.size[MESA_SHADER_TESS_EVAL] = 1; - pipeline->urb.entries[MESA_SHADER_TESS_EVAL] = 0; - - const unsigned stages = - _mesa_bitcount(pipeline->active_stages & VK_SHADER_STAGE_ALL_GRAPHICS); - unsigned size_per_stage = stages ? (push_constant_kb / stages) : 0; - unsigned used_kb = 0; - - /* Broadwell+ and Haswell gt3 require that the push constant sizes be in - * units of 2KB. Incidentally, these are the same platforms that have - * 32KB worth of push constant space. - */ - if (push_constant_kb == 32) - size_per_stage &= ~1u; - - for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) { - pipeline->urb.push_size[i] = - (pipeline->active_stages & (1 << i)) ? size_per_stage : 0; - used_kb += pipeline->urb.push_size[i]; - assert(used_kb <= push_constant_kb); - } - - pipeline->urb.push_size[MESA_SHADER_FRAGMENT] = - push_constant_kb - used_kb; -} - -static void -anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline, - const VkGraphicsPipelineCreateInfo *pCreateInfo) +copy_non_dynamic_state(struct anv_pipeline *pipeline, + const VkGraphicsPipelineCreateInfo *pCreateInfo) { anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL; ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass); @@ -952,18 +807,27 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline, struct anv_dynamic_state *dynamic = &pipeline->dynamic_state; - dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount; - if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) { - typed_memcpy(dynamic->viewport.viewports, - pCreateInfo->pViewportState->pViewports, - pCreateInfo->pViewportState->viewportCount); - } + /* Section 9.2 of the Vulkan 1.0.15 spec says: + * + * pViewportState is [...] NULL if the pipeline + * has rasterization disabled. 
+ */ + if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) { + assert(pCreateInfo->pViewportState); + + dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount; + if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) { + typed_memcpy(dynamic->viewport.viewports, + pCreateInfo->pViewportState->pViewports, + pCreateInfo->pViewportState->viewportCount); + } - dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount; - if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) { - typed_memcpy(dynamic->scissor.scissors, - pCreateInfo->pViewportState->pScissors, - pCreateInfo->pViewportState->scissorCount); + dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount; + if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) { + typed_memcpy(dynamic->scissor.scissors, + pCreateInfo->pViewportState->pScissors, + pCreateInfo->pViewportState->scissorCount); + } } if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) { @@ -981,10 +845,27 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline, pCreateInfo->pRasterizationState->depthBiasSlopeFactor; } - if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) { + /* Section 9.2 of the Vulkan 1.0.15 spec says: + * + * pColorBlendState is [...] NULL if the pipeline has rasterization + * disabled or if the subpass of the render pass the pipeline is + * created against does not use any color attachments. + */ + bool uses_color_att = false; + for (unsigned i = 0; i < subpass->color_count; ++i) { + if (subpass->color_attachments[i] != VK_ATTACHMENT_UNUSED) { + uses_color_att = true; + break; + } + } + + if (uses_color_att && + !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) { assert(pCreateInfo->pColorBlendState); - typed_memcpy(dynamic->blend_constants, - pCreateInfo->pColorBlendState->blendConstants, 4); + + if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) + typed_memcpy(dynamic->blend_constants, + pCreateInfo->pColorBlendState->blendConstants, 4); } /* If there is no depthstencil attachment, then don't read @@ -993,14 +874,17 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline, * no need to override the depthstencil defaults in * anv_pipeline::dynamic_state when there is no depthstencil attachment. * - * From the Vulkan spec (20 Oct 2015, git-aa308cb): + * Section 9.2 of the Vulkan 1.0.15 spec says: * - * pDepthStencilState [...] may only be NULL if renderPass and subpass - * specify a subpass that has no depth/stencil attachment. + * pDepthStencilState is [...] NULL if the pipeline has rasterization + * disabled or if the subpass of the render pass the pipeline is created + * against does not use a depth/stencil attachment. 
*/ - if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) { + if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable && + subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) { + assert(pCreateInfo->pDepthStencilState); + if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) { - assert(pCreateInfo->pDepthStencilState); dynamic->depth_bounds.min = pCreateInfo->pDepthStencilState->minDepthBounds; dynamic->depth_bounds.max = @@ -1008,7 +892,6 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline, } if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) { - assert(pCreateInfo->pDepthStencilState); dynamic->stencil_compare_mask.front = pCreateInfo->pDepthStencilState->front.compareMask; dynamic->stencil_compare_mask.back = @@ -1016,7 +899,6 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline, } if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) { - assert(pCreateInfo->pDepthStencilState); dynamic->stencil_write_mask.front = pCreateInfo->pDepthStencilState->front.writeMask; dynamic->stencil_write_mask.back = @@ -1024,7 +906,6 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline, } if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) { - assert(pCreateInfo->pDepthStencilState); dynamic->stencil_reference.front = pCreateInfo->pDepthStencilState->front.reference; dynamic->stencil_reference.back = @@ -1042,30 +923,30 @@ anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info) struct anv_subpass *subpass = NULL; /* Assert that all required members of VkGraphicsPipelineCreateInfo are - * present, as explained by the Vulkan (20 Oct 2015, git-aa308cb), Section - * 4.2 Graphics Pipeline. + * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines. */ assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO); renderpass = anv_render_pass_from_handle(info->renderPass); assert(renderpass); - if (renderpass != &anv_meta_dummy_renderpass) { - assert(info->subpass < renderpass->subpass_count); - subpass = &renderpass->subpasses[info->subpass]; - } + assert(info->subpass < renderpass->subpass_count); + subpass = &renderpass->subpasses[info->subpass]; assert(info->stageCount >= 1); assert(info->pVertexInputState); assert(info->pInputAssemblyState); - assert(info->pViewportState); assert(info->pRasterizationState); + if (!info->pRasterizationState->rasterizerDiscardEnable) { + assert(info->pViewportState); + assert(info->pMultisampleState); - if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) - assert(info->pDepthStencilState); + if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) + assert(info->pDepthStencilState); - if (subpass && subpass->color_count > 0) - assert(info->pColorBlendState); + if (subpass && subpass->color_count > 0) + assert(info->pColorBlendState); + } for (uint32_t i = 0; i < info->stageCount; ++i) { switch (info->pStages[i].stage) { @@ -1079,12 +960,31 @@ anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info) } } +/** + * Calculate the desired L3 partitioning based on the current state of the + * pipeline. For now this simply returns the conservative defaults calculated + * by get_default_l3_weights(), but we could probably do better by gathering + * more statistics from the pipeline state (e.g. guess of expected URB usage + * and bound surfaces), or by using feed-back from performance counters. 
+ */ +void +anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm) +{ + const struct gen_device_info *devinfo = &pipeline->device->info; + + const struct gen_l3_weights w = + gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm); + + pipeline->urb.l3_config = gen_get_l3_config(devinfo, w); + pipeline->urb.total_size = + gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config); +} + VkResult anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device, struct anv_pipeline_cache *cache, const VkGraphicsPipelineCreateInfo *pCreateInfo, - const struct anv_graphics_pipeline_create_info *extra, const VkAllocationCallbacks *alloc) { VkResult result; @@ -1108,24 +1008,18 @@ anv_pipeline_init(struct anv_pipeline *pipeline, pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data); pipeline->batch.relocs = &pipeline->batch_relocs; - anv_pipeline_init_dynamic_state(pipeline, pCreateInfo); + copy_non_dynamic_state(pipeline, pCreateInfo); + pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState && + pCreateInfo->pRasterizationState->depthClampEnable; - pipeline->use_repclear = extra && extra->use_repclear; + pipeline->needs_data_cache = false; /* When we free the pipeline, we detect stages based on the NULL status * of various prog_data pointers. Make them NULL by default. */ - memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data)); - memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start)); - memset(pipeline->bindings, 0, sizeof(pipeline->bindings)); - - pipeline->vs_simd8 = NO_KERNEL; - pipeline->vs_vec4 = NO_KERNEL; - pipeline->gs_kernel = NO_KERNEL; - pipeline->ps_ksp0 = NO_KERNEL; + memset(pipeline->shaders, 0, sizeof(pipeline->shaders)); pipeline->active_stages = 0; - pipeline->total_scratch = 0; const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, }; struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, }; @@ -1136,49 +1030,43 @@ anv_pipeline_init(struct anv_pipeline *pipeline, } if (modules[MESA_SHADER_VERTEX]) { - anv_pipeline_compile_vs(pipeline, cache, pCreateInfo, - modules[MESA_SHADER_VERTEX], - pStages[MESA_SHADER_VERTEX]->pName, - pStages[MESA_SHADER_VERTEX]->pSpecializationInfo); + result = anv_pipeline_compile_vs(pipeline, cache, pCreateInfo, + modules[MESA_SHADER_VERTEX], + pStages[MESA_SHADER_VERTEX]->pName, + pStages[MESA_SHADER_VERTEX]->pSpecializationInfo); + if (result != VK_SUCCESS) + goto compile_fail; } if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL]) anv_finishme("no tessellation support"); if (modules[MESA_SHADER_GEOMETRY]) { - anv_pipeline_compile_gs(pipeline, cache, pCreateInfo, - modules[MESA_SHADER_GEOMETRY], - pStages[MESA_SHADER_GEOMETRY]->pName, - pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo); + result = anv_pipeline_compile_gs(pipeline, cache, pCreateInfo, + modules[MESA_SHADER_GEOMETRY], + pStages[MESA_SHADER_GEOMETRY]->pName, + pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo); + if (result != VK_SUCCESS) + goto compile_fail; } if (modules[MESA_SHADER_FRAGMENT]) { - anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra, - modules[MESA_SHADER_FRAGMENT], - pStages[MESA_SHADER_FRAGMENT]->pName, - pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo); + result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, + modules[MESA_SHADER_FRAGMENT], + pStages[MESA_SHADER_FRAGMENT]->pName, + pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo); + if (result != VK_SUCCESS) + goto compile_fail; } - if 
(!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) { - /* Vertex is only optional if disable_vs is set */ - assert(extra->disable_vs); - } + assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT); - gen7_compute_urb_partition(pipeline); + anv_pipeline_setup_l3_config(pipeline, false); const VkPipelineVertexInputStateCreateInfo *vi_info = pCreateInfo->pVertexInputState; - uint64_t inputs_read; - if (extra && extra->disable_vs) { - /* If the VS is disabled, just assume the user knows what they're - * doing and apply the layout blindly. This can only come from - * meta, so this *should* be safe. - */ - inputs_read = ~0ull; - } else { - inputs_read = get_vs_prog_data(pipeline)->inputs_read; - } + const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read; pipeline->vb_used = 0; for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) { @@ -1214,125 +1102,15 @@ anv_pipeline_init(struct anv_pipeline *pipeline, pipeline->primitive_restart = ia_info->primitiveRestartEnable; pipeline->topology = vk_to_gen_primitive_type[ia_info->topology]; - if (extra && extra->use_rectlist) - pipeline->topology = _3DPRIM_RECTLIST; - - while (anv_block_pool_size(&device->scratch_block_pool) < - pipeline->total_scratch) - anv_block_pool_alloc(&device->scratch_block_pool); - return VK_SUCCESS; -} -VkResult -anv_graphics_pipeline_create( - VkDevice _device, - VkPipelineCache _cache, - const VkGraphicsPipelineCreateInfo *pCreateInfo, - const struct anv_graphics_pipeline_create_info *extra, - const VkAllocationCallbacks *pAllocator, - VkPipeline *pPipeline) -{ - ANV_FROM_HANDLE(anv_device, device, _device); - ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache); - - if (cache == NULL) - cache = &device->default_pipeline_cache; - - switch (device->info.gen) { - case 7: - if (device->info.is_haswell) - return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline); - else - return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline); - case 8: - return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline); - case 9: - return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline); - default: - unreachable("unsupported gen\n"); +compile_fail: + for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) { + if (pipeline->shaders[s]) + anv_shader_bin_unref(device, pipeline->shaders[s]); } -} -VkResult anv_CreateGraphicsPipelines( - VkDevice _device, - VkPipelineCache pipelineCache, - uint32_t count, - const VkGraphicsPipelineCreateInfo* pCreateInfos, - const VkAllocationCallbacks* pAllocator, - VkPipeline* pPipelines) -{ - VkResult result = VK_SUCCESS; - - unsigned i = 0; - for (; i < count; i++) { - result = anv_graphics_pipeline_create(_device, - pipelineCache, - &pCreateInfos[i], - NULL, pAllocator, &pPipelines[i]); - if (result != VK_SUCCESS) { - for (unsigned j = 0; j < i; j++) { - anv_DestroyPipeline(_device, pPipelines[j], pAllocator); - } + anv_reloc_list_finish(&pipeline->batch_relocs, alloc); - return result; - } - } - - return VK_SUCCESS; -} - -static VkResult anv_compute_pipeline_create( - VkDevice _device, - VkPipelineCache _cache, - const VkComputePipelineCreateInfo* pCreateInfo, - const VkAllocationCallbacks* pAllocator, - VkPipeline* pPipeline) -{ - ANV_FROM_HANDLE(anv_device, device, _device); - ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache); - - if (cache == NULL) - cache = &device->default_pipeline_cache; - - switch (device->info.gen) { - 
case 7: - if (device->info.is_haswell) - return gen75_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline); - else - return gen7_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline); - case 8: - return gen8_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline); - case 9: - return gen9_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline); - default: - unreachable("unsupported gen\n"); - } -} - -VkResult anv_CreateComputePipelines( - VkDevice _device, - VkPipelineCache pipelineCache, - uint32_t count, - const VkComputePipelineCreateInfo* pCreateInfos, - const VkAllocationCallbacks* pAllocator, - VkPipeline* pPipelines) -{ - VkResult result = VK_SUCCESS; - - unsigned i = 0; - for (; i < count; i++) { - result = anv_compute_pipeline_create(_device, pipelineCache, - &pCreateInfos[i], - pAllocator, &pPipelines[i]); - if (result != VK_SUCCESS) { - for (unsigned j = 0; j < i; j++) { - anv_DestroyPipeline(_device, pPipelines[j], pAllocator); - } - - return result; - } - } - - return VK_SUCCESS; + return result; }
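
The hunk in anv_shader_compile_to_nir() above rewrites the specialization-constant loop to walk a typed VkSpecializationMapEntry, bounds-check each entry against dataSize, and copy the 32-bit value into spec_entries[] for spirv_to_nir(). As a rough illustration of the application-side data that loop consumes, here is a minimal sketch using only core Vulkan 1.0 types; the blob layout, its field names, and the make_spec_info() helper are made up for the example and are not part of the driver.

   /* Illustrative only: the application packs scalar values into one blob and
    * describes each with a VkSpecializationMapEntry; the driver-side loop in
    * the diff checks entry.offset/entry.size against dataSize and reads each
    * value through a const uint32_t pointer. */
   #include <stddef.h>
   #include <stdint.h>
   #include <vulkan/vulkan.h>

   struct blob {
      uint32_t use_fast_path;   /* specialization constant 0 */
      float gamma;              /* specialization constant 1 */
   };

   static VkSpecializationInfo
   make_spec_info(const struct blob *data)
   {
      static const VkSpecializationMapEntry entries[] = {
         { .constantID = 0, .offset = offsetof(struct blob, use_fast_path),
           .size = sizeof(uint32_t) },
         { .constantID = 1, .offset = offsetof(struct blob, gamma),
           .size = sizeof(float) },
      };

      /* Passed as VkPipelineShaderStageCreateInfo::pSpecializationInfo. */
      return (VkSpecializationInfo) {
         .mapEntryCount = 2,
         .pMapEntries = entries,
         .dataSize = sizeof(*data),
         .pData = data,
      };
   }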
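
The removed gen7_compute_urb_partition() hand-allocated the URB in 8 KB chunks and shared any leftover chunks between VS and GS in proportion to their "wants"; the replacement defers that policy to gen_get_l3_config()/gen_get_l3_config_urb_size() via the new anv_pipeline_setup_l3_config(). The standalone toy program below reproduces just that proportional-distribution step from the deleted code, with made-up numbers (nothing here comes from devinfo->urb), purely to show what the arithmetic did.

   /* Toy reproduction of the leftover-chunk distribution from the removed
    * gen7_compute_urb_partition().  Example numbers only. */
   #include <assert.h>
   #include <math.h>
   #include <stdio.h>

   int
   main(void)
   {
      unsigned urb_chunks = 32;             /* e.g. 256 KB URB / 8 KB chunks */
      unsigned push_constant_chunks = 2;
      unsigned vs_chunks = 4, vs_wants = 20;
      unsigned gs_chunks = 2, gs_wants = 6;

      /* Minimums (plus push constants) must always fit. */
      unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
      assert(total_needs <= urb_chunks);

      /* Mete out remaining space (if any) in proportion to "wants". */
      unsigned total_wants = vs_wants + gs_wants;
      unsigned remaining_space = urb_chunks - total_needs;
      if (remaining_space > total_wants)
         remaining_space = total_wants;

      if (remaining_space > 0) {
         unsigned vs_additional = (unsigned)
            round(vs_wants * ((double)remaining_space / total_wants));
         vs_chunks += vs_additional;
         remaining_space -= vs_additional;
         gs_chunks += remaining_space;
      }

      /* 24 leftover chunks split in proportion to wants (20:6):
       * VS gets +18, GS gets the remaining +6 -> vs_chunks=22, gs_chunks=8. */
      printf("vs_chunks=%u gs_chunks=%u\n", vs_chunks, gs_chunks);
      assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
      return 0;
   }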
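
The new documentation comment on copy_non_dynamic_state() says that only state whose VkDynamicState enum is absent from VkPipelineDynamicStateCreateInfo::pDynamicStates is baked into the pipeline at creation time. For context, here is a minimal application-side sketch (core Vulkan 1.0 only; the set_dynamic_state() helper is hypothetical): viewport and scissor are declared dynamic at pipeline creation and therefore supplied with vkCmdSetViewport()/vkCmdSetScissor() while recording, which is exactly the state copy_non_dynamic_state() skips copying.

   #include <vulkan/vulkan.h>

   /* Chained into VkGraphicsPipelineCreateInfo::pDynamicState; viewport and
    * scissor values passed at pipeline creation are then ignored. */
   static const VkDynamicState dyn_states[] = {
      VK_DYNAMIC_STATE_VIEWPORT,
      VK_DYNAMIC_STATE_SCISSOR,
   };

   static const VkPipelineDynamicStateCreateInfo dyn_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
      .dynamicStateCount = 2,
      .pDynamicStates = dyn_states,
   };

   /* Later, while recording the command buffer: */
   static void
   set_dynamic_state(VkCommandBuffer cmd, VkExtent2D extent)
   {
      const VkViewport vp = {
         .x = 0.0f, .y = 0.0f,
         .width = (float)extent.width, .height = (float)extent.height,
         .minDepth = 0.0f, .maxDepth = 1.0f,
      };
      const VkRect2D scissor = { .offset = { 0, 0 }, .extent = extent };

      vkCmdSetViewport(cmd, 0, 1, &vp);
      vkCmdSetScissor(cmd, 0, 1, &scissor);
   }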