i965: Move the back-end compiler to src/intel/compiler
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index 56d79a02d79c56c88652e12a9e02a47a61402758..6362c2db6b15ddb2ab8ca7f88c0982c9e18d2dda 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
 #include <fcntl.h>
 
 #include "util/mesa-sha1.h"
+#include "common/gen_l3_config.h"
 #include "anv_private.h"
-#include "brw_nir.h"
+#include "compiler/brw_nir.h"
 #include "anv_nir.h"
-#include "nir/spirv/nir_spirv.h"
+#include "spirv/nir_spirv.h"
 
 /* Needed for SWIZZLE macros */
 #include "program/prog_instruction.h"
@@ -50,13 +51,12 @@ VkResult anv_CreateShaderModule(
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
    assert(pCreateInfo->flags == 0);
 
-   module = anv_alloc2(&device->alloc, pAllocator,
+   module = vk_alloc2(&device->alloc, pAllocator,
                        sizeof(*module) + pCreateInfo->codeSize, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (module == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   module->nir = NULL;
    module->size = pCreateInfo->codeSize;
    memcpy(module->data, pCreateInfo->pCode, module->size);
 
@@ -75,7 +75,10 @@ void anv_DestroyShaderModule(
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_shader_module, module, _module);
 
-   anv_free2(&device->alloc, pAllocator, module);
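+   /* vkDestroyShaderModule must accept module == VK_NULL_HANDLE as a no-op. */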
+   if (!module)
+      return;
+
+   vk_free2(&device->alloc, pAllocator, module);
 }
 
 #define SPIR_V_MAGIC_NUMBER 0x07230203
@@ -90,97 +93,93 @@ anv_shader_compile_to_nir(struct anv_device *device,
                           gl_shader_stage stage,
                           const VkSpecializationInfo *spec_info)
 {
-   if (strcmp(entrypoint_name, "main") != 0) {
-      anv_finishme("Multiple shaders per module not really supported");
-   }
-
    const struct brw_compiler *compiler =
       device->instance->physicalDevice.compiler;
    const nir_shader_compiler_options *nir_options =
       compiler->glsl_compiler_options[stage].NirOptions;
 
-   nir_shader *nir;
-   nir_function *entry_point;
-   if (module->nir) {
-      /* Some things such as our meta clear/blit code will give us a NIR
-       * shader directly.  In that case, we just ignore the SPIR-V entirely
-       * and just use the NIR shader */
-      nir = module->nir;
-      nir->options = nir_options;
-      nir_validate_shader(nir);
-
-      assert(exec_list_length(&nir->functions) == 1);
-      struct exec_node *node = exec_list_get_head(&nir->functions);
-      entry_point = exec_node_data(nir_function, node, node);
-   } else {
-      uint32_t *spirv = (uint32_t *) module->data;
-      assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
-      assert(module->size % 4 == 0);
-
-      uint32_t num_spec_entries = 0;
-      struct nir_spirv_specialization *spec_entries = NULL;
-      if (spec_info && spec_info->mapEntryCount > 0) {
-         num_spec_entries = spec_info->mapEntryCount;
-         spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
-         for (uint32_t i = 0; i < num_spec_entries; i++) {
-            const uint32_t *data =
-               spec_info->pData + spec_info->pMapEntries[i].offset;
-            assert((const void *)(data + 1) <=
-                   spec_info->pData + spec_info->dataSize);
-
-            spec_entries[i].id = spec_info->pMapEntries[i].constantID;
-            spec_entries[i].data = *data;
-         }
+   uint32_t *spirv = (uint32_t *) module->data;
+   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
+   assert(module->size % 4 == 0);
+
+   uint32_t num_spec_entries = 0;
+   struct nir_spirv_specialization *spec_entries = NULL;
+   if (spec_info && spec_info->mapEntryCount > 0) {
+      num_spec_entries = spec_info->mapEntryCount;
+      spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
+      for (uint32_t i = 0; i < num_spec_entries; i++) {
+         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
+         const void *data = spec_info->pData + entry.offset;
+         assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
+
+         spec_entries[i].id = spec_info->pMapEntries[i].constantID;
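+         /* Select the 64-bit path from the entry's own size; keying off the
+          * total dataSize would misread packed 32-bit constants.
+          */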
+         if (entry.size == 8)
+            spec_entries[i].data64 = *(const uint64_t *)data;
+         else
+            spec_entries[i].data32 = *(const uint32_t *)data;
       }
+   }
 
-      entry_point = spirv_to_nir(spirv, module->size / 4,
-                                 spec_entries, num_spec_entries,
-                                 stage, entrypoint_name, nir_options);
-      nir = entry_point->shader;
-      assert(nir->stage == stage);
-      nir_validate_shader(nir);
-
-      free(spec_entries);
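+   /* Only advertise the SPIR-V capabilities this hardware can support;
+    * 64-bit floats and integers need gen8+.
+    */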
+   const struct nir_spirv_supported_extensions supported_ext = {
+      .float64 = device->instance->physicalDevice.info.gen >= 8,
+      .int64 = device->instance->physicalDevice.info.gen >= 8,
+      .tessellation = true,
+      .draw_parameters = true,
+      .image_write_without_format = true,
+   };
 
-      nir_lower_returns(nir);
-      nir_validate_shader(nir);
+   nir_function *entry_point =
+      spirv_to_nir(spirv, module->size / 4,
+                   spec_entries, num_spec_entries,
+                   stage, entrypoint_name, &supported_ext, nir_options);
+   nir_shader *nir = entry_point->shader;
+   assert(nir->stage == stage);
+   nir_validate_shader(nir);
 
-      nir_inline_functions(nir);
-      nir_validate_shader(nir);
+   free(spec_entries);
 
-      /* Pick off the single entrypoint that we want */
-      foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
-         if (func != entry_point)
-            exec_node_remove(&func->node);
-      }
-      assert(exec_list_length(&nir->functions) == 1);
-      entry_point->name = ralloc_strdup(entry_point, "main");
+   /* We have to lower away local constant initializers right before we
+    * inline functions.  That way they get properly initialized at the top
+    * of the function and not at the top of its caller.
+    */
+   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
+   NIR_PASS_V(nir, nir_lower_returns);
+   NIR_PASS_V(nir, nir_inline_functions);
+
+   /* Pick off the single entrypoint that we want */
+   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
+      if (func != entry_point)
+         exec_node_remove(&func->node);
+   }
+   assert(exec_list_length(&nir->functions) == 1);
+   entry_point->name = ralloc_strdup(entry_point, "main");
 
-      nir_remove_dead_variables(nir, nir_var_shader_in);
-      nir_remove_dead_variables(nir, nir_var_shader_out);
-      nir_remove_dead_variables(nir, nir_var_system_value);
-      nir_validate_shader(nir);
+   NIR_PASS_V(nir, nir_remove_dead_variables,
+              nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
 
-      nir_lower_outputs_to_temporaries(entry_point->shader, entry_point);
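+   /* The hardware delivers FragCoord at integer pixel positions, but Vulkan
+    * defines it at pixel centers, so apply the half-pixel offset.
+    */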
+   if (stage == MESA_SHADER_FRAGMENT)
+      NIR_PASS_V(nir, nir_lower_wpos_center);
 
-      nir_lower_system_values(nir);
-      nir_validate_shader(nir);
-   }
+   /* Now that we've deleted all but the main function, we can go ahead and
+    * lower the rest of the constant initializers.
+    */
+   NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
+   NIR_PASS_V(nir, nir_propagate_invariant);
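+   /* Lower outputs (third argument) to temporaries but leave inputs
+    * (fourth argument) in place.
+    */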
+   NIR_PASS_V(nir, nir_lower_io_to_temporaries,
+              entry_point->impl, true, false);
+   NIR_PASS_V(nir, nir_lower_system_values);
 
    /* Vulkan uses the separate-shader linking model */
-   nir->info.separate_shader = true;
+   nir->info->separate_shader = true;
 
-   nir = brw_preprocess_nir(nir, compiler->scalar_stage[stage]);
+   nir = brw_preprocess_nir(compiler, nir);
 
-   nir_shader_gather_info(nir, entry_point->impl);
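+   /* Merge the separate clip and cull distance variables into the single
+    * compact array the backend expects.
+    */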
+   NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
 
-   uint32_t indirect_mask = 0;
-   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
-      indirect_mask |= (1 << nir_var_shader_in);
-   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
-      indirect_mask |= 1 << nir_var_local;
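+   /* Turn subpass input attachment loads into ordinary texture fetches. */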
+   if (stage == MESA_SHADER_FRAGMENT)
+      NIR_PASS_V(nir, anv_nir_lower_input_attachments);
 
-   nir_lower_indirect_derefs(nir, indirect_mask);
+   nir_shader_gather_info(nir, entry_point->impl);
 
    return nir;
 }
@@ -193,11 +192,20 @@ void anv_DestroyPipeline(
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
 
+   if (!pipeline)
+      return;
+
    anv_reloc_list_finish(&pipeline->batch_relocs,
                          pAllocator ? pAllocator : &device->alloc);
    if (pipeline->blend_state.map)
       anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
-   anv_free2(&device->alloc, pAllocator, pipeline);
+
+   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+      if (pipeline->shaders[s])
+         anv_shader_bin_unref(device, pipeline->shaders[s]);
+   }
+
+   vk_free2(&device->alloc, pAllocator, pipeline);
 }
 
 static const uint32_t vk_to_gen_primitive_type[] = {
@@ -211,13 +219,31 @@ static const uint32_t vk_to_gen_primitive_type[] = {
    [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
    [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
    [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
-/*   [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST]                = _3DPRIM_PATCHLIST_1 */
 };
 
 static void
-populate_sampler_prog_key(const struct brw_device_info *devinfo,
+populate_sampler_prog_key(const struct gen_device_info *devinfo,
                           struct brw_sampler_prog_key_data *key)
 {
+   /* Almost all multisampled textures are compressed.  The only time when we
+    * don't compress a multisampled texture is for 16x MSAA with a surface
+    * width greater than 8k which is a bit of an edge case.  Since the sampler
+    * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
+    * to tell the compiler to always assume compression.
+    */
+   key->compressed_multisample_layout_mask = ~0;
+
+   /* Skylake added support for 16x MSAA.  With this came a new message for
+    * reading from a 16x MSAA surface with compression.  The new message was
+    * needed because now the MCS data is 64 bits instead of 32 or lower as is
+    * the case for 8x, 4x, and 2x.  The key->msaa_16 bit-field controls which
+    * message we use.  Fortunately, the 16x message works for 8x, 4x, and 2x
+    * so we can just use it unconditionally.  This may not be quite as
+    * efficient but it saves us from recompiling.
+    */
+   if (devinfo->gen >= 9)
+      key->msaa_16 = ~0;
+
    /* XXX: Handle texture swizzle on HSW- */
    for (int i = 0; i < MAX_SAMPLERS; i++) {
       /* Assume color sampler, no swizzling. (Works for BDW+) */
@@ -226,7 +252,7 @@ populate_sampler_prog_key(const struct brw_device_info *devinfo,
 }
 
 static void
-populate_vs_prog_key(const struct brw_device_info *devinfo,
+populate_vs_prog_key(const struct gen_device_info *devinfo,
                      struct brw_vs_prog_key *key)
 {
    memset(key, 0, sizeof(*key));
@@ -239,7 +265,7 @@ populate_vs_prog_key(const struct brw_device_info *devinfo,
 }
 
 static void
-populate_gs_prog_key(const struct brw_device_info *devinfo,
+populate_gs_prog_key(const struct gen_device_info *devinfo,
                      struct brw_gs_prog_key *key)
 {
    memset(key, 0, sizeof(*key));
@@ -248,18 +274,22 @@ populate_gs_prog_key(const struct brw_device_info *devinfo,
 }
 
 static void
-populate_wm_prog_key(const struct brw_device_info *devinfo,
+populate_wm_prog_key(const struct anv_pipeline *pipeline,
                      const VkGraphicsPipelineCreateInfo *info,
-                     const struct anv_graphics_pipeline_create_info *extra,
                      struct brw_wm_prog_key *key)
 {
+   const struct gen_device_info *devinfo = &pipeline->device->info;
    ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);
 
    memset(key, 0, sizeof(*key));
 
    populate_sampler_prog_key(devinfo, &key->tex);
 
-   /* TODO: Fill out key->input_slots_valid */
+   /* TODO: we could set this to 0 based on the information in nir_shader, but
+    * this function is called before spirv_to_nir.
+    */
+   const struct brw_vue_map *vue_map =
+      &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;
+   key->input_slots_valid = vue_map->slots_valid;
 
    /* Vulkan doesn't specify a default */
    key->high_quality_derivatives = false;
@@ -267,16 +297,8 @@ populate_wm_prog_key(const struct brw_device_info *devinfo,
    /* XXX Vulkan doesn't appear to specify */
    key->clamp_fragment_color = false;
 
-   /* Vulkan always specifies upper-left coordinates */
-   key->drawable_height = 0;
-   key->render_to_fbo = false;
-
-   if (extra && extra->color_attachment_count >= 0) {
-      key->nr_color_regions = extra->color_attachment_count;
-   } else {
-      key->nr_color_regions =
-         render_pass->subpasses[info->subpass].color_count;
-   }
+   key->nr_color_regions =
+      render_pass->subpasses[info->subpass].color_count;
 
    key->replicate_alpha = key->nr_color_regions > 1 &&
                           info->pMultisampleState &&
@@ -286,17 +308,15 @@ populate_wm_prog_key(const struct brw_device_info *devinfo,
       /* We should probably pull this out of the shader, but it's fairly
        * harmless to compute it and then let dead-code take care of it.
        */
-      key->persample_shading = info->pMultisampleState->sampleShadingEnable;
-      if (key->persample_shading)
-         key->persample_2x = info->pMultisampleState->rasterizationSamples == 2;
-
-      key->compute_pos_offset = info->pMultisampleState->sampleShadingEnable;
-      key->compute_sample_id = info->pMultisampleState->sampleShadingEnable;
+      key->persample_interp =
+         (info->pMultisampleState->minSampleShading *
+          info->pMultisampleState->rasterizationSamples) > 1;
+      key->multisample_fbo = true;
    }
 }
 
 static void
-populate_cs_prog_key(const struct brw_device_info *devinfo,
+populate_cs_prog_key(const struct gen_device_info *devinfo,
                      struct brw_cs_prog_key *key)
 {
    memset(key, 0, sizeof(*key));
@@ -313,16 +333,13 @@ anv_pipeline_compile(struct anv_pipeline *pipeline,
                      struct brw_stage_prog_data *prog_data,
                      struct anv_pipeline_bind_map *map)
 {
-   const struct brw_compiler *compiler =
-      pipeline->device->instance->physicalDevice.compiler;
-
    nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
                                                module, entrypoint, stage,
                                                spec_info);
    if (nir == NULL)
       return NULL;
 
-   anv_nir_lower_push_constants(nir, compiler->scalar_stage[stage]);
+   NIR_PASS_V(nir, anv_nir_lower_push_constants);
 
    /* Figure out the number of parameters */
    prog_data->nr_params = 0;
@@ -331,14 +348,24 @@ anv_pipeline_compile(struct anv_pipeline *pipeline,
       /* If the shader uses any push constants at all, we'll just give
        * them the maximum possible number
        */
+      assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
       prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
    }
 
    if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
       prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;
 
-   if (nir->info.num_images > 0)
-      prog_data->nr_params += nir->info.num_images * BRW_IMAGE_PARAM_SIZE;
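+   /* Image and (below) SSBO access go through the data cache, which
+    * anv_pipeline_setup_l3_config needs to account for.
+    */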
+   if (nir->info->num_images > 0) {
+      prog_data->nr_params += nir->info->num_images * BRW_IMAGE_PARAM_SIZE;
+      pipeline->needs_data_cache = true;
+   }
+
+   if (stage == MESA_SHADER_COMPUTE)
+      ((struct brw_cs_prog_data *)prog_data)->thread_local_id_index =
+         prog_data->nr_params++; /* The CS Thread ID uniform */
+
+   if (nir->info->num_ssbos > 0)
+      pipeline->needs_data_cache = true;
 
    if (prog_data->nr_params > 0) {
       /* XXX: I think we're leaking this */
@@ -379,34 +406,42 @@ anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
 {
    prog_data->binding_table.size_bytes = 0;
    prog_data->binding_table.texture_start = bias;
+   prog_data->binding_table.gather_texture_start = bias;
    prog_data->binding_table.ubo_start = bias;
    prog_data->binding_table.ssbo_start = bias;
    prog_data->binding_table.image_start = bias;
 }
 
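+/* Upload a compiled kernel, going through the pipeline cache when one is
+ * available so identical shaders can be shared between pipelines.
+ */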
+static struct anv_shader_bin *
+anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
+                           struct anv_pipeline_cache *cache,
+                           const void *key_data, uint32_t key_size,
+                           const void *kernel_data, uint32_t kernel_size,
+                           const struct brw_stage_prog_data *prog_data,
+                           uint32_t prog_data_size,
+                           const struct anv_pipeline_bind_map *bind_map)
+{
+   if (cache) {
+      return anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
+                                              kernel_data, kernel_size,
+                                              prog_data, prog_data_size,
+                                              bind_map);
+   } else {
+      return anv_shader_bin_create(pipeline->device, key_data, key_size,
+                                   kernel_data, kernel_size,
+                                   prog_data, prog_data_size,
+                                   prog_data->param, bind_map);
+   }
+}
+
 static void
 anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
                                 gl_shader_stage stage,
-                                const struct brw_stage_prog_data *prog_data,
-                                struct anv_pipeline_bind_map *map)
+                                struct anv_shader_bin *shader)
 {
-   struct brw_device_info *devinfo = &pipeline->device->info;
-   uint32_t max_threads[] = {
-      [MESA_SHADER_VERTEX]                  = devinfo->max_vs_threads,
-      [MESA_SHADER_TESS_CTRL]               = devinfo->max_hs_threads,
-      [MESA_SHADER_TESS_EVAL]               = devinfo->max_ds_threads,
-      [MESA_SHADER_GEOMETRY]                = devinfo->max_gs_threads,
-      [MESA_SHADER_FRAGMENT]                = devinfo->max_wm_threads,
-      [MESA_SHADER_COMPUTE]                 = devinfo->max_cs_threads,
-   };
-
-   pipeline->prog_data[stage] = prog_data;
+   pipeline->shaders[stage] = shader;
    pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
-   pipeline->scratch_start[stage] = pipeline->total_scratch;
-   pipeline->total_scratch =
-      align_u32(pipeline->total_scratch, 1024) +
-      prog_data->total_scratch * max_threads[stage];
-   pipeline->bindings[stage] = *map;
 }
 
 static VkResult
@@ -419,20 +454,20 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
-   const struct brw_stage_prog_data *stage_prog_data;
    struct anv_pipeline_bind_map map;
    struct brw_vs_prog_key key;
-   uint32_t kernel = NO_KERNEL;
+   struct anv_shader_bin *bin = NULL;
    unsigned char sha1[20];
 
    populate_vs_prog_key(&pipeline->device->info, &key);
 
-   if (module->size > 0) {
-      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
-      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
+   if (cache) {
+      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
+                      pipeline->layout, spec_info);
+      bin = anv_pipeline_cache_search(cache, sha1, 20);
    }
 
-   if (kernel == NO_KERNEL) {
+   if (bin == NULL) {
       struct brw_vs_prog_data prog_data = { 0, };
       struct anv_pipeline_binding surface_to_descriptor[256];
       struct anv_pipeline_binding sampler_to_descriptor[256];
@@ -452,15 +487,15 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
 
       void *mem_ctx = ralloc_context(NULL);
 
-      if (module->nir == NULL)
-         ralloc_steal(mem_ctx, nir);
+      ralloc_steal(mem_ctx, nir);
 
-      prog_data.inputs_read = nir->info.inputs_read;
+      prog_data.inputs_read = nir->info->inputs_read;
+      prog_data.double_inputs_read = nir->info->double_inputs_read;
 
       brw_compute_vue_map(&pipeline->device->info,
                           &prog_data.base.vue_map,
-                          nir->info.outputs_written,
-                          nir->info.separate_shader);
+                          nir->info->outputs_written,
+                          nir->info->separate_shader);
 
       unsigned code_size;
       const unsigned *shader_code =
@@ -471,28 +506,201 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
       }
 
-      stage_prog_data = &prog_data.base.base;
-      kernel = anv_pipeline_cache_upload_kernel(cache,
-                                                module->size > 0 ? sha1 : NULL,
-                                                shader_code, code_size,
-                                                &stage_prog_data, sizeof(prog_data),
-                                                &map);
+      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
+                                       shader_code, code_size,
+                                       &prog_data.base.base, sizeof(prog_data),
+                                       &map);
+      if (!bin) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
       ralloc_free(mem_ctx);
    }
 
-   const struct brw_vs_prog_data *vs_prog_data =
-      (const struct brw_vs_prog_data *) stage_prog_data;
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin);
 
-   if (vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
-      pipeline->vs_simd8 = kernel;
-      pipeline->vs_vec4 = NO_KERNEL;
-   } else {
-      pipeline->vs_simd8 = NO_KERNEL;
-      pipeline->vs_vec4 = kernel;
+   return VK_SUCCESS;
+}
+
+static void
+merge_tess_info(struct shader_info *tes_info,
+                const struct shader_info *tcs_info)
+{
+   /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
+    *
+    *    "PointMode. Controls generation of points rather than triangles
+    *     or lines. This functionality defaults to disabled, and is
+    *     enabled if either shader stage includes the execution mode.
+    *
+    * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
+    * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
+    * and OutputVertices, it says:
+    *
+    *    "One mode must be set in at least one of the tessellation
+    *     shader stages."
+    *
+    * So, the fields can be set in either the TCS or TES, but they must
+    * agree if set in both.  Our backend looks at TES, so bitwise-or in
+    * the values from the TCS.
+    */
+   assert(tcs_info->tess.tcs_vertices_out == 0 ||
+          tes_info->tess.tcs_vertices_out == 0 ||
+          tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
+   tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
+
+   assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
+          tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
+          tcs_info->tess.spacing == tes_info->tess.spacing);
+   tes_info->tess.spacing |= tcs_info->tess.spacing;
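+   /* The bitwise-or above relies on TESS_SPACING_UNSPECIFIED being zero. */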
+
+   tes_info->tess.ccw |= tcs_info->tess.ccw;
+   tes_info->tess.point_mode |= tcs_info->tess.point_mode;
+}
+
+static VkResult
+anv_pipeline_compile_tcs_tes(struct anv_pipeline *pipeline,
+                             struct anv_pipeline_cache *cache,
+                             const VkGraphicsPipelineCreateInfo *info,
+                             struct anv_shader_module *tcs_module,
+                             const char *tcs_entrypoint,
+                             const VkSpecializationInfo *tcs_spec_info,
+                             struct anv_shader_module *tes_module,
+                             const char *tes_entrypoint,
+                             const VkSpecializationInfo *tes_spec_info)
+{
+   const struct gen_device_info *devinfo = &pipeline->device->info;
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   struct anv_pipeline_bind_map tcs_map;
+   struct anv_pipeline_bind_map tes_map;
+   struct brw_tcs_prog_key tcs_key = { 0, };
+   struct brw_tes_prog_key tes_key = { 0, };
+   struct anv_shader_bin *tcs_bin = NULL;
+   struct anv_shader_bin *tes_bin = NULL;
+   unsigned char tcs_sha1[40];
+   unsigned char tes_sha1[40];
+
+   populate_sampler_prog_key(&pipeline->device->info, &tcs_key.tex);
+   populate_sampler_prog_key(&pipeline->device->info, &tes_key.tex);
+   tcs_key.input_vertices = info->pTessellationState->patchControlPoints;
+
+   if (cache) {
+      anv_hash_shader(tcs_sha1, &tcs_key, sizeof(tcs_key), tcs_module,
+                      tcs_entrypoint, pipeline->layout, tcs_spec_info);
+      anv_hash_shader(tes_sha1, &tes_key, sizeof(tes_key), tes_module,
+                      tes_entrypoint, pipeline->layout, tes_spec_info);
+      memcpy(&tcs_sha1[20], tes_sha1, 20);
+      memcpy(&tes_sha1[20], tcs_sha1, 20);
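+      /* Each 40-byte key is the stage's own SHA1 followed by its partner's,
+       * so a cached TCS or TES can only ever match its original pairing.
+       */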
+      tcs_bin = anv_pipeline_cache_search(cache, tcs_sha1, sizeof(tcs_sha1));
+      tes_bin = anv_pipeline_cache_search(cache, tes_sha1, sizeof(tes_sha1));
+   }
+
+   if (tcs_bin == NULL || tes_bin == NULL) {
+      struct brw_tcs_prog_data tcs_prog_data = { 0, };
+      struct brw_tes_prog_data tes_prog_data = { 0, };
+      struct anv_pipeline_binding tcs_surface_to_descriptor[256];
+      struct anv_pipeline_binding tcs_sampler_to_descriptor[256];
+      struct anv_pipeline_binding tes_surface_to_descriptor[256];
+      struct anv_pipeline_binding tes_sampler_to_descriptor[256];
+
+      tcs_map = (struct anv_pipeline_bind_map) {
+         .surface_to_descriptor = tcs_surface_to_descriptor,
+         .sampler_to_descriptor = tcs_sampler_to_descriptor
+      };
+      tes_map = (struct anv_pipeline_bind_map) {
+         .surface_to_descriptor = tes_surface_to_descriptor,
+         .sampler_to_descriptor = tes_sampler_to_descriptor
+      };
+
+      nir_shader *tcs_nir =
+         anv_pipeline_compile(pipeline, tcs_module, tcs_entrypoint,
+                              MESA_SHADER_TESS_CTRL, tcs_spec_info,
+                              &tcs_prog_data.base.base, &tcs_map);
+      nir_shader *tes_nir =
+         anv_pipeline_compile(pipeline, tes_module, tes_entrypoint,
+                              MESA_SHADER_TESS_EVAL, tes_spec_info,
+                              &tes_prog_data.base.base, &tes_map);
+      if (tcs_nir == NULL || tes_nir == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+      nir_lower_tes_patch_vertices(tes_nir,
+                                   tcs_nir->info->tess.tcs_vertices_out);
+
+      /* Copy TCS info into the TES info */
+      merge_tess_info(tes_nir->info, tcs_nir->info);
+
+      anv_fill_binding_table(&tcs_prog_data.base.base, 0);
+      anv_fill_binding_table(&tes_prog_data.base.base, 0);
+
+      void *mem_ctx = ralloc_context(NULL);
+
+      ralloc_steal(mem_ctx, tcs_nir);
+      ralloc_steal(mem_ctx, tes_nir);
+
+      /* Whacking the key after cache lookup is a bit sketchy, but all of
+       * this comes from the SPIR-V, which is part of the hash used for the
+       * pipeline cache.  So it should be safe.
+       */
+      tcs_key.tes_primitive_mode = tes_nir->info->tess.primitive_mode;
+      tcs_key.outputs_written = tcs_nir->info->outputs_written;
+      tcs_key.patch_outputs_written = tcs_nir->info->patch_outputs_written;
+      tcs_key.quads_workaround =
+         devinfo->gen < 9 &&
+         tes_nir->info->tess.primitive_mode == 7 /* GL_QUADS */ &&
+         tes_nir->info->tess.spacing == TESS_SPACING_EQUAL;
+
+      tes_key.inputs_read = tcs_key.outputs_written;
+      tes_key.patch_inputs_read = tcs_key.patch_outputs_written;
+
+      unsigned code_size;
+      const int shader_time_index = -1;
+      const unsigned *shader_code;
+
+      shader_code =
+         brw_compile_tcs(compiler, NULL, mem_ctx, &tcs_key, &tcs_prog_data,
+                         tcs_nir, shader_time_index, &code_size, NULL);
+      if (shader_code == NULL) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      tcs_bin = anv_pipeline_upload_kernel(pipeline, cache,
+                                           tcs_sha1, sizeof(tcs_sha1),
+                                           shader_code, code_size,
+                                           &tcs_prog_data.base.base,
+                                           sizeof(tcs_prog_data),
+                                           &tcs_map);
+      if (!tcs_bin) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      shader_code =
+         brw_compile_tes(compiler, NULL, mem_ctx, &tes_key,
+                         &tcs_prog_data.base.vue_map, &tes_prog_data, tes_nir,
+                         NULL, shader_time_index, &code_size, NULL);
+      if (shader_code == NULL) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      tes_bin = anv_pipeline_upload_kernel(pipeline, cache,
+                                           tes_sha1, sizeof(tes_sha1),
+                                           shader_code, code_size,
+                                           &tes_prog_data.base.base,
+                                           sizeof(tes_prog_data),
+                                           &tes_map);
+      if (!tes_bin) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      ralloc_free(mem_ctx);
    }
 
-   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX,
-                                   stage_prog_data, &map);
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_TESS_CTRL, tcs_bin);
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_TESS_EVAL, tes_bin);
 
    return VK_SUCCESS;
 }
@@ -507,20 +715,20 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
-   const struct brw_stage_prog_data *stage_prog_data;
    struct anv_pipeline_bind_map map;
    struct brw_gs_prog_key key;
-   uint32_t kernel = NO_KERNEL;
+   struct anv_shader_bin *bin = NULL;
    unsigned char sha1[20];
 
    populate_gs_prog_key(&pipeline->device->info, &key);
 
-   if (module->size > 0) {
-      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
-      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
+   if (cache) {
+      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
+                      pipeline->layout, spec_info);
+      bin = anv_pipeline_cache_search(cache, sha1, 20);
    }
 
-   if (kernel == NO_KERNEL) {
+   if (bin == NULL) {
       struct brw_gs_prog_data prog_data = { 0, };
       struct anv_pipeline_binding surface_to_descriptor[256];
       struct anv_pipeline_binding sampler_to_descriptor[256];
@@ -540,13 +748,12 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
 
       void *mem_ctx = ralloc_context(NULL);
 
-      if (module->nir == NULL)
-         ralloc_steal(mem_ctx, nir);
+      ralloc_steal(mem_ctx, nir);
 
       brw_compute_vue_map(&pipeline->device->info,
                           &prog_data.base.vue_map,
-                          nir->info.outputs_written,
-                          nir->info.separate_shader);
+                          nir->info->outputs_written,
+                          nir->info->separate_shader);
 
       unsigned code_size;
       const unsigned *shader_code =
@@ -558,20 +765,19 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
       }
 
       /* TODO: SIMD8 GS */
-      stage_prog_data = &prog_data.base.base;
-      kernel = anv_pipeline_cache_upload_kernel(cache,
-                                                module->size > 0 ? sha1 : NULL,
-                                                shader_code, code_size,
-                                                &stage_prog_data, sizeof(prog_data),
-                                                &map);
+      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
+                                       shader_code, code_size,
+                                       &prog_data.base.base, sizeof(prog_data),
+                                       &map);
+      if (!bin) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
 
       ralloc_free(mem_ctx);
    }
 
-   pipeline->gs_kernel = kernel;
-
-   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY,
-                                   stage_prog_data, &map);
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin);
 
    return VK_SUCCESS;
 }
@@ -580,27 +786,26 @@ static VkResult
 anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                         struct anv_pipeline_cache *cache,
                         const VkGraphicsPipelineCreateInfo *info,
-                        const struct anv_graphics_pipeline_create_info *extra,
                         struct anv_shader_module *module,
                         const char *entrypoint,
                         const VkSpecializationInfo *spec_info)
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
-   const struct brw_stage_prog_data *stage_prog_data;
    struct anv_pipeline_bind_map map;
    struct brw_wm_prog_key key;
-   uint32_t kernel = NO_KERNEL;
+   struct anv_shader_bin *bin = NULL;
    unsigned char sha1[20];
 
-   populate_wm_prog_key(&pipeline->device->info, info, extra, &key);
+   populate_wm_prog_key(pipeline, info, &key);
 
-   if (module->size > 0) {
-      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
-      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
+   if (cache) {
+      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
+                      pipeline->layout, spec_info);
+      bin = anv_pipeline_cache_search(cache, sha1, 20);
    }
 
-   if (kernel == NO_KERNEL) {
+   if (bin == NULL) {
       struct brw_wm_prog_data prog_data = { 0, };
       struct anv_pipeline_binding surface_to_descriptor[256];
       struct anv_pipeline_binding sampler_to_descriptor[256];
@@ -618,7 +823,7 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
 
       unsigned num_rts = 0;
       struct anv_pipeline_binding rt_bindings[8];
-      nir_function_impl *impl = nir_shader_get_entrypoint(nir)->impl;
+      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
       nir_foreach_variable_safe(var, &nir->outputs) {
          if (var->data.location < FRAG_RESULT_DATA0)
             continue;
@@ -640,25 +845,22 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
          assert(num_rts + array_len <= 8);
 
          for (unsigned i = 0; i < array_len; i++) {
-            rt_bindings[num_rts] = (struct anv_pipeline_binding) {
+            rt_bindings[num_rts + i] = (struct anv_pipeline_binding) {
                .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
-               .offset = rt + i,
+               .binding = 0,
+               .index = rt + i,
             };
          }
 
          num_rts += array_len;
       }
 
-      if (pipeline->use_repclear) {
-         assert(num_rts == 1);
-         key.nr_color_regions = 1;
-      }
-
       if (num_rts == 0) {
          /* If we have no render targets, we need a null render target */
          rt_bindings[0] = (struct anv_pipeline_binding) {
             .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
-            .offset = UINT16_MAX,
+            .binding = 0,
+            .index = UINT8_MAX,
          };
          num_rts = 1;
       }
@@ -674,58 +876,30 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
 
       void *mem_ctx = ralloc_context(NULL);
 
-      if (module->nir == NULL)
-         ralloc_steal(mem_ctx, nir);
+      ralloc_steal(mem_ctx, nir);
 
       unsigned code_size;
       const unsigned *shader_code =
          brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
-                        NULL, -1, -1, pipeline->use_repclear, &code_size, NULL);
+                        NULL, -1, -1, true, false, NULL, &code_size, NULL);
       if (shader_code == NULL) {
          ralloc_free(mem_ctx);
          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
       }
 
-      stage_prog_data = &prog_data.base;
-      kernel = anv_pipeline_cache_upload_kernel(cache,
-                                                module->size > 0 ? sha1 : NULL,
-                                                shader_code, code_size,
-                                                &stage_prog_data, sizeof(prog_data),
-                                                &map);
+      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
+                                       shader_code, code_size,
+                                       &prog_data.base, sizeof(prog_data),
+                                       &map);
+      if (!bin) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
 
       ralloc_free(mem_ctx);
    }
 
-   const struct brw_wm_prog_data *wm_prog_data =
-      (const struct brw_wm_prog_data *) stage_prog_data;
-
-   if (wm_prog_data->no_8)
-      pipeline->ps_simd8 = NO_KERNEL;
-   else
-      pipeline->ps_simd8 = kernel;
-
-   if (wm_prog_data->no_8 || wm_prog_data->prog_offset_16) {
-      pipeline->ps_simd16 = kernel + wm_prog_data->prog_offset_16;
-   } else {
-      pipeline->ps_simd16 = NO_KERNEL;
-   }
-
-   pipeline->ps_ksp2 = 0;
-   pipeline->ps_grf_start2 = 0;
-   if (pipeline->ps_simd8 != NO_KERNEL) {
-      pipeline->ps_ksp0 = pipeline->ps_simd8;
-      pipeline->ps_grf_start0 = wm_prog_data->base.dispatch_grf_start_reg;
-      if (pipeline->ps_simd16 != NO_KERNEL) {
-         pipeline->ps_ksp2 = pipeline->ps_simd16;
-         pipeline->ps_grf_start2 = wm_prog_data->dispatch_grf_start_reg_16;
-      }
-   } else if (pipeline->ps_simd16 != NO_KERNEL) {
-      pipeline->ps_ksp0 = pipeline->ps_simd16;
-      pipeline->ps_grf_start0 = wm_prog_data->dispatch_grf_start_reg_16;
-   }
-
-   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT,
-                                   stage_prog_data, &map);
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin);
 
    return VK_SUCCESS;
 }
@@ -740,20 +914,20 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
-   const struct brw_stage_prog_data *stage_prog_data;
    struct anv_pipeline_bind_map map;
    struct brw_cs_prog_key key;
-   uint32_t kernel = NO_KERNEL;
+   struct anv_shader_bin *bin = NULL;
    unsigned char sha1[20];
 
    populate_cs_prog_key(&pipeline->device->info, &key);
 
-   if (module->size > 0) {
-      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
-      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
+   if (cache) {
+      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
+                      pipeline->layout, spec_info);
+      bin = anv_pipeline_cache_search(cache, sha1, 20);
    }
 
-   if (module->size == 0 || kernel == NO_KERNEL) {
+   if (bin == NULL) {
       struct brw_cs_prog_data prog_data = { 0, };
       struct anv_pipeline_binding surface_to_descriptor[256];
       struct anv_pipeline_binding sampler_to_descriptor[256];
@@ -773,8 +947,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
 
       void *mem_ctx = ralloc_context(NULL);
 
-      if (module->nir == NULL)
-         ralloc_steal(mem_ctx, nir);
+      ralloc_steal(mem_ctx, nir);
 
       unsigned code_size;
       const unsigned *shader_code =
@@ -785,189 +958,39 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
       }
 
-      stage_prog_data = &prog_data.base;
-      kernel = anv_pipeline_cache_upload_kernel(cache,
-                                                module->size > 0 ? sha1 : NULL,
-                                                shader_code, code_size,
-                                                &stage_prog_data, sizeof(prog_data),
-                                                &map);
+      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
+                                       shader_code, code_size,
+                                       &prog_data.base, sizeof(prog_data),
+                                       &map);
+      if (!bin) {
+         ralloc_free(mem_ctx);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
 
       ralloc_free(mem_ctx);
    }
 
-   pipeline->cs_simd = kernel;
-
-   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE,
-                                   stage_prog_data, &map);
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin);
 
    return VK_SUCCESS;
 }
 
+/**
+ * Copy pipeline state not marked as dynamic.
+ * Dynamic state is pipeline state which hasn't been provided at pipeline
+ * creation time, but is dynamically provided afterwards using various
+ * vkCmdSet* functions.
+ *
+ * The set of state considered "non_dynamic" is determined by the pieces of
+ * state that have their corresponding VkDynamicState enums omitted from
+ * VkPipelineDynamicStateCreateInfo::pDynamicStates.
+ *
+ * @param[out] pipeline    Destination non_dynamic state.
+ * @param[in]  pCreateInfo Source of non_dynamic state to be copied.
+ */
 static void
-gen7_compute_urb_partition(struct anv_pipeline *pipeline)
-{
-   const struct brw_device_info *devinfo = &pipeline->device->info;
-   bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
-   unsigned vs_size = vs_present ?
-      get_vs_prog_data(pipeline)->base.urb_entry_size : 1;
-   unsigned vs_entry_size_bytes = vs_size * 64;
-   bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
-   unsigned gs_size = gs_present ?
-      get_gs_prog_data(pipeline)->base.urb_entry_size : 1;
-   unsigned gs_entry_size_bytes = gs_size * 64;
-
-   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
-    *
-    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
-    *     Allocation Size is less than 9 512-bit URB entries.
-    *
-    * Similar text exists for GS.
-    */
-   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
-   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
-
-   /* URB allocations must be done in 8k chunks. */
-   unsigned chunk_size_bytes = 8192;
-
-   /* Determine the size of the URB in chunks. */
-   unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes;
-
-   /* Reserve space for push constants */
-   unsigned push_constant_kb;
-   if (pipeline->device->info.gen >= 8)
-      push_constant_kb = 32;
-   else if (pipeline->device->info.is_haswell)
-      push_constant_kb = pipeline->device->info.gt == 3 ? 32 : 16;
-   else
-      push_constant_kb = 16;
-
-   unsigned push_constant_bytes = push_constant_kb * 1024;
-   unsigned push_constant_chunks =
-      push_constant_bytes / chunk_size_bytes;
-
-   /* Initially, assign each stage the minimum amount of URB space it needs,
-    * and make a note of how much additional space it "wants" (the amount of
-    * additional space it could actually make use of).
-    */
-
-   /* VS has a lower limit on the number of URB entries */
-   unsigned vs_chunks =
-      ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
-            chunk_size_bytes) / chunk_size_bytes;
-   unsigned vs_wants =
-      ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
-            chunk_size_bytes) / chunk_size_bytes - vs_chunks;
-
-   unsigned gs_chunks = 0;
-   unsigned gs_wants = 0;
-   if (gs_present) {
-      /* There are two constraints on the minimum amount of URB space we can
-       * allocate:
-       *
-       * (1) We need room for at least 2 URB entries, since we always operate
-       * the GS in DUAL_OBJECT mode.
-       *
-       * (2) We can't allocate less than nr_gs_entries_granularity.
-       */
-      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
-                        chunk_size_bytes) / chunk_size_bytes;
-      gs_wants =
-         ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
-               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
-   }
-
-   /* There should always be enough URB space to satisfy the minimum
-    * requirements of each stage.
-    */
-   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
-   assert(total_needs <= urb_chunks);
-
-   /* Mete out remaining space (if any) in proportion to "wants". */
-   unsigned total_wants = vs_wants + gs_wants;
-   unsigned remaining_space = urb_chunks - total_needs;
-   if (remaining_space > total_wants)
-      remaining_space = total_wants;
-   if (remaining_space > 0) {
-      unsigned vs_additional = (unsigned)
-         round(vs_wants * (((double) remaining_space) / total_wants));
-      vs_chunks += vs_additional;
-      remaining_space -= vs_additional;
-      gs_chunks += remaining_space;
-   }
-
-   /* Sanity check that we haven't over-allocated. */
-   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
-
-   /* Finally, compute the number of entries that can fit in the space
-    * allocated to each stage.
-    */
-   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
-   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
-
-   /* Since we rounded up when computing *_wants, this may be slightly more
-    * than the maximum allowed amount, so correct for that.
-    */
-   nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
-   nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);
-
-   /* Ensure that we program a multiple of the granularity. */
-   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
-   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
-
-   /* Finally, sanity check to make sure we have at least the minimum number
-    * of entries needed for each stage.
-    */
-   assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
-   if (gs_present)
-      assert(nr_gs_entries >= 2);
-
-   /* Lay out the URB in the following order:
-    * - push constants
-    * - VS
-    * - GS
-    */
-   pipeline->urb.start[MESA_SHADER_VERTEX] = push_constant_chunks;
-   pipeline->urb.size[MESA_SHADER_VERTEX] = vs_size;
-   pipeline->urb.entries[MESA_SHADER_VERTEX] = nr_vs_entries;
-
-   pipeline->urb.start[MESA_SHADER_GEOMETRY] = push_constant_chunks + vs_chunks;
-   pipeline->urb.size[MESA_SHADER_GEOMETRY] = gs_size;
-   pipeline->urb.entries[MESA_SHADER_GEOMETRY] = nr_gs_entries;
-
-   pipeline->urb.start[MESA_SHADER_TESS_CTRL] = push_constant_chunks;
-   pipeline->urb.size[MESA_SHADER_TESS_CTRL] = 1;
-   pipeline->urb.entries[MESA_SHADER_TESS_CTRL] = 0;
-
-   pipeline->urb.start[MESA_SHADER_TESS_EVAL] = push_constant_chunks;
-   pipeline->urb.size[MESA_SHADER_TESS_EVAL] = 1;
-   pipeline->urb.entries[MESA_SHADER_TESS_EVAL] = 0;
-
-   const unsigned stages =
-      _mesa_bitcount(pipeline->active_stages & VK_SHADER_STAGE_ALL_GRAPHICS);
-   unsigned size_per_stage = stages ? (push_constant_kb / stages) : 0;
-   unsigned used_kb = 0;
-
-   /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
-    * units of 2KB.  Incidentally, these are the same platforms that have
-    * 32KB worth of push constant space.
-    */
-   if (push_constant_kb == 32)
-      size_per_stage &= ~1u;
-
-   for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
-      pipeline->urb.push_size[i] =
-         (pipeline->active_stages & (1 << i)) ? size_per_stage : 0;
-      used_kb += pipeline->urb.push_size[i];
-      assert(used_kb <= push_constant_kb);
-   }
-
-   pipeline->urb.push_size[MESA_SHADER_FRAGMENT] =
-      push_constant_kb - used_kb;
-}
-
-static void
-anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
-                                const VkGraphicsPipelineCreateInfo *pCreateInfo)
+copy_non_dynamic_state(struct anv_pipeline *pipeline,
+                       const VkGraphicsPipelineCreateInfo *pCreateInfo)
 {
    anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
    ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
@@ -984,18 +1007,27 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
 
    struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
 
-   dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
-   if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
-      typed_memcpy(dynamic->viewport.viewports,
-                   pCreateInfo->pViewportState->pViewports,
-                   pCreateInfo->pViewportState->viewportCount);
-   }
+   /* Section 9.2 of the Vulkan 1.0.15 spec says:
+    *
+    *    pViewportState is [...] NULL if the pipeline
+    *    has rasterization disabled.
+    */
+   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
+      assert(pCreateInfo->pViewportState);
+
+      dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
+      if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+         typed_memcpy(dynamic->viewport.viewports,
+                     pCreateInfo->pViewportState->pViewports,
+                     pCreateInfo->pViewportState->viewportCount);
+      }
 
-   dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
-   if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
-      typed_memcpy(dynamic->scissor.scissors,
-                   pCreateInfo->pViewportState->pScissors,
-                   pCreateInfo->pViewportState->scissorCount);
+      dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
+      if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+         typed_memcpy(dynamic->scissor.scissors,
+                     pCreateInfo->pViewportState->pScissors,
+                     pCreateInfo->pViewportState->scissorCount);
+      }
    }
 
    if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
@@ -1013,10 +1045,27 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
          pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
    }
 
-   if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
+   /* Section 9.2 of the Vulkan 1.0.15 spec says:
+    *
+    *    pColorBlendState is [...] NULL if the pipeline has rasterization
+    *    disabled or if the subpass of the render pass the pipeline is
+    *    created against does not use any color attachments.
+    */
+   bool uses_color_att = false;
+   for (unsigned i = 0; i < subpass->color_count; ++i) {
+      if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
+         uses_color_att = true;
+         break;
+      }
+   }
+
+   if (uses_color_att &&
+       !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
       assert(pCreateInfo->pColorBlendState);
-      typed_memcpy(dynamic->blend_constants,
-                   pCreateInfo->pColorBlendState->blendConstants, 4);
+
+      if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
+         typed_memcpy(dynamic->blend_constants,
+                     pCreateInfo->pColorBlendState->blendConstants, 4);
    }
 
    /* If there is no depthstencil attachment, then don't read
@@ -1025,14 +1074,17 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
     * no need to override the depthstencil defaults in
     * anv_pipeline::dynamic_state when there is no depthstencil attachment.
     *
-    * From the Vulkan spec (20 Oct 2015, git-aa308cb):
+    * Section 9.2 of the Vulkan 1.0.15 spec says:
     *
-    *    pDepthStencilState [...] may only be NULL if renderPass and subpass
-    *    specify a subpass that has no depth/stencil attachment.
+    *    pDepthStencilState is [...] NULL if the pipeline has rasterization
+    *    disabled or if the subpass of the render pass the pipeline is created
+    *    against does not use a depth/stencil attachment.
     */
-   if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
+   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
+       subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
+      assert(pCreateInfo->pDepthStencilState);
+
       if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
-         assert(pCreateInfo->pDepthStencilState);
          dynamic->depth_bounds.min =
             pCreateInfo->pDepthStencilState->minDepthBounds;
          dynamic->depth_bounds.max =
@@ -1040,7 +1092,6 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
       }
 
       if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
-         assert(pCreateInfo->pDepthStencilState);
          dynamic->stencil_compare_mask.front =
             pCreateInfo->pDepthStencilState->front.compareMask;
          dynamic->stencil_compare_mask.back =
@@ -1048,7 +1099,6 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
       }
 
       if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
-         assert(pCreateInfo->pDepthStencilState);
          dynamic->stencil_write_mask.front =
             pCreateInfo->pDepthStencilState->front.writeMask;
          dynamic->stencil_write_mask.back =
@@ -1056,7 +1106,6 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
       }
 
       if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
-         assert(pCreateInfo->pDepthStencilState);
          dynamic->stencil_reference.front =
             pCreateInfo->pDepthStencilState->front.reference;
          dynamic->stencil_reference.back =
@@ -1070,34 +1119,35 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
 static void
 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
 {
+#ifdef DEBUG
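+   /* Everything below is asserts, so wrap it all in DEBUG to avoid
+    * unused-variable warnings in release builds.
+    */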
    struct anv_render_pass *renderpass = NULL;
    struct anv_subpass *subpass = NULL;
 
    /* Assert that all required members of VkGraphicsPipelineCreateInfo are
-    * present, as explained by the Vulkan (20 Oct 2015, git-aa308cb), Section
-    * 4.2 Graphics Pipeline.
+    * present.  See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
     */
    assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
 
    renderpass = anv_render_pass_from_handle(info->renderPass);
    assert(renderpass);
 
-   if (renderpass != &anv_meta_dummy_renderpass) {
-      assert(info->subpass < renderpass->subpass_count);
-      subpass = &renderpass->subpasses[info->subpass];
-   }
+   assert(info->subpass < renderpass->subpass_count);
+   subpass = &renderpass->subpasses[info->subpass];
 
    assert(info->stageCount >= 1);
    assert(info->pVertexInputState);
    assert(info->pInputAssemblyState);
-   assert(info->pViewportState);
    assert(info->pRasterizationState);
+   if (!info->pRasterizationState->rasterizerDiscardEnable) {
+      assert(info->pViewportState);
+      assert(info->pMultisampleState);
 
-   if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
-      assert(info->pDepthStencilState);
+      if (subpass && subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED)
+         assert(info->pDepthStencilState);
 
-   if (subpass && subpass->color_count > 0)
-      assert(info->pColorBlendState);
+      if (subpass && subpass->color_count > 0)
+         assert(info->pColorBlendState);
+   }
 
    for (uint32_t i = 0; i < info->stageCount; ++i) {
       switch (info->pStages[i].stage) {
@@ -1109,6 +1159,27 @@ anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
          break;
       }
    }
+#endif
+}
+
+/**
+ * Calculate the desired L3 partitioning based on the current state of the
+ * pipeline.  For now this simply returns the conservative defaults calculated
+ * by gen_get_default_l3_weights(), but we could probably do better by gathering
+ * more statistics from the pipeline state (e.g. a guess of expected URB usage
+ * and bound surfaces), or by using feedback from performance counters.
+ */
+void
+anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
+{
+   const struct gen_device_info *devinfo = &pipeline->device->info;
+
+   const struct gen_l3_weights w =
+      gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
+
+   pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
+   pipeline->urb.total_size =
+      gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
 }
 
 VkResult
@@ -1116,14 +1187,11 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
                   struct anv_device *device,
                   struct anv_pipeline_cache *cache,
                   const VkGraphicsPipelineCreateInfo *pCreateInfo,
-                  const struct anv_graphics_pipeline_create_info *extra,
                   const VkAllocationCallbacks *alloc)
 {
    VkResult result;
 
-   anv_validate {
-      anv_pipeline_validate_create_info(pCreateInfo);
-   }
+   anv_pipeline_validate_create_info(pCreateInfo);
 
    if (alloc == NULL)
       alloc = &device->alloc;
@@ -1140,24 +1208,18 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
    pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
    pipeline->batch.relocs = &pipeline->batch_relocs;
 
-   anv_pipeline_init_dynamic_state(pipeline, pCreateInfo);
+   copy_non_dynamic_state(pipeline, pCreateInfo);
+   pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
+                                  pCreateInfo->pRasterizationState->depthClampEnable;
 
-   pipeline->use_repclear = extra && extra->use_repclear;
+   pipeline->needs_data_cache = false;
 
-   /* When we free the pipeline, we detect stages based on the NULL status
-    * of various prog_data pointers.  Make them NULL by default.
+   /* When we free the pipeline, we detect stages based on the NULL status
+    * of the entries in the shaders array.  Make them NULL by default.
     */
-   memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
-   memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));
-   memset(pipeline->bindings, 0, sizeof(pipeline->bindings));
-
-   pipeline->vs_simd8 = NO_KERNEL;
-   pipeline->vs_vec4 = NO_KERNEL;
-   pipeline->gs_kernel = NO_KERNEL;
-   pipeline->ps_ksp0 = NO_KERNEL;
+   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
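+
+   /* The error paths (e.g. the compile_fail label below) rely on this: they
+    * walk the array and unref only the non-NULL entries, roughly:
+    *
+    *    for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+    *       if (pipeline->shaders[s])
+    *          anv_shader_bin_unref(device, pipeline->shaders[s]);
+    *    }
+    */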
 
    pipeline->active_stages = 0;
-   pipeline->total_scratch = 0;
 
    const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
    struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
@@ -1168,49 +1230,50 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
    }
 
    if (modules[MESA_SHADER_VERTEX]) {
-      anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
-                              modules[MESA_SHADER_VERTEX],
-                              pStages[MESA_SHADER_VERTEX]->pName,
-                              pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
+      result = anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
+                                       modules[MESA_SHADER_VERTEX],
+                                       pStages[MESA_SHADER_VERTEX]->pName,
+                                       pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
+      if (result != VK_SUCCESS)
+         goto compile_fail;
    }
 
-   if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL])
-      anv_finishme("no tessellation support");
+   if (modules[MESA_SHADER_TESS_EVAL]) {
+      result = anv_pipeline_compile_tcs_tes(pipeline, cache, pCreateInfo,
+                                            modules[MESA_SHADER_TESS_CTRL],
+                                            pStages[MESA_SHADER_TESS_CTRL]->pName,
+                                            pStages[MESA_SHADER_TESS_CTRL]->pSpecializationInfo,
+                                            modules[MESA_SHADER_TESS_EVAL],
+                                            pStages[MESA_SHADER_TESS_EVAL]->pName,
+                                            pStages[MESA_SHADER_TESS_EVAL]->pSpecializationInfo);
+      if (result != VK_SUCCESS)
+         goto compile_fail;
+   }
 
    if (modules[MESA_SHADER_GEOMETRY]) {
-      anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
-                              modules[MESA_SHADER_GEOMETRY],
-                              pStages[MESA_SHADER_GEOMETRY]->pName,
-                              pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
+      result = anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
+                                       modules[MESA_SHADER_GEOMETRY],
+                                       pStages[MESA_SHADER_GEOMETRY]->pName,
+                                       pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
+      if (result != VK_SUCCESS)
+         goto compile_fail;
    }
 
    if (modules[MESA_SHADER_FRAGMENT]) {
-      anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra,
-                              modules[MESA_SHADER_FRAGMENT],
-                              pStages[MESA_SHADER_FRAGMENT]->pName,
-                              pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
+      result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo,
+                                       modules[MESA_SHADER_FRAGMENT],
+                                       pStages[MESA_SHADER_FRAGMENT]->pName,
+                                       pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
+      if (result != VK_SUCCESS)
+         goto compile_fail;
    }
 
-   if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
-      /* Vertex is only optional if disable_vs is set */
-      assert(extra->disable_vs);
-   }
+   assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
 
-   gen7_compute_urb_partition(pipeline);
+   anv_pipeline_setup_l3_config(pipeline, false);
 
    const VkPipelineVertexInputStateCreateInfo *vi_info =
       pCreateInfo->pVertexInputState;
 
-   uint64_t inputs_read;
-   if (extra && extra->disable_vs) {
-      /* If the VS is disabled, just assume the user knows what they're
-       * doing and apply the layout blindly.  This can only come from
-       * meta, so this *should* be safe.
-       */
-      inputs_read = ~0ull;
-   } else {
-      inputs_read = get_vs_prog_data(pipeline)->inputs_read;
-   }
+   const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
 
    pipeline->vb_used = 0;
    for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
@@ -1243,128 +1306,24 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
 
    const VkPipelineInputAssemblyStateCreateInfo *ia_info =
       pCreateInfo->pInputAssemblyState;
+   const VkPipelineTessellationStateCreateInfo *tess_info =
+      pCreateInfo->pTessellationState;
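+   /* pTessellationState may be NULL when the pipeline has no tessellation
+    * stages; it is only dereferenced below when a tess eval shader is bound.
+    */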
    pipeline->primitive_restart = ia_info->primitiveRestartEnable;
-   pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
-
-   if (extra && extra->use_rectlist)
-      pipeline->topology = _3DPRIM_RECTLIST;
-
-   while (anv_block_pool_size(&device->scratch_block_pool) <
-          pipeline->total_scratch)
-      anv_block_pool_alloc(&device->scratch_block_pool);
-
-   return VK_SUCCESS;
-}
-
-VkResult
-anv_graphics_pipeline_create(
-   VkDevice _device,
-   VkPipelineCache _cache,
-   const VkGraphicsPipelineCreateInfo *pCreateInfo,
-   const struct anv_graphics_pipeline_create_info *extra,
-   const VkAllocationCallbacks *pAllocator,
-   VkPipeline *pPipeline)
-{
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
-
-   if (cache == NULL)
-      cache = &device->default_pipeline_cache;
-
-   switch (device->info.gen) {
-   case 7:
-      if (device->info.is_haswell)
-         return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
-      else
-         return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
-   case 8:
-      return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
-   case 9:
-      return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
-   default:
-      unreachable("unsupported gen\n");
-   }
-}
-
-VkResult anv_CreateGraphicsPipelines(
-    VkDevice                                    _device,
-    VkPipelineCache                             pipelineCache,
-    uint32_t                                    count,
-    const VkGraphicsPipelineCreateInfo*         pCreateInfos,
-    const VkAllocationCallbacks*                pAllocator,
-    VkPipeline*                                 pPipelines)
-{
-   VkResult result = VK_SUCCESS;
-
-   unsigned i = 0;
-   for (; i < count; i++) {
-      result = anv_graphics_pipeline_create(_device,
-                                            pipelineCache,
-                                            &pCreateInfos[i],
-                                            NULL, pAllocator, &pPipelines[i]);
-      if (result != VK_SUCCESS) {
-         for (unsigned j = 0; j < i; j++) {
-            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
-         }
 
-         return result;
-      }
-   }
+   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
+      pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
+   else
+      pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
 
    return VK_SUCCESS;
-}
 
-static VkResult anv_compute_pipeline_create(
-    VkDevice                                    _device,
-    VkPipelineCache                             _cache,
-    const VkComputePipelineCreateInfo*          pCreateInfo,
-    const VkAllocationCallbacks*                pAllocator,
-    VkPipeline*                                 pPipeline)
-{
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
-
-   if (cache == NULL)
-      cache = &device->default_pipeline_cache;
-
-   switch (device->info.gen) {
-   case 7:
-      if (device->info.is_haswell)
-         return gen75_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
-      else
-         return gen7_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
-   case 8:
-      return gen8_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
-   case 9:
-      return gen9_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
-   default:
-      unreachable("unsupported gen\n");
+compile_fail:
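+   /* Shader bins are reference-counted (the pipeline cache may hold its own
+    * reference), so drop only the references this pipeline took.
+    */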
+   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+      if (pipeline->shaders[s])
+         anv_shader_bin_unref(device, pipeline->shaders[s]);
    }
-}
 
-VkResult anv_CreateComputePipelines(
-    VkDevice                                    _device,
-    VkPipelineCache                             pipelineCache,
-    uint32_t                                    count,
-    const VkComputePipelineCreateInfo*          pCreateInfos,
-    const VkAllocationCallbacks*                pAllocator,
-    VkPipeline*                                 pPipelines)
-{
-   VkResult result = VK_SUCCESS;
-
-   unsigned i = 0;
-   for (; i < count; i++) {
-      result = anv_compute_pipeline_create(_device, pipelineCache,
-                                           &pCreateInfos[i],
-                                           pAllocator, &pPipelines[i]);
-      if (result != VK_SUCCESS) {
-         for (unsigned j = 0; j < i; j++) {
-            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
-         }
-
-         return result;
-      }
-   }
+   anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
 
-   return VK_SUCCESS;
+   return result;
 }