anv/pipeline: Allow the user to pass a null MultisampleCreateInfo
[mesa.git] / src / vulkan / anv_pipeline.c
index 817b644eefb8858ae06f420914945794c2184123..bf983ed8f2ab7107b4cca04b241880f42572ac4a 100644
 #include <fcntl.h>
 
 #include "anv_private.h"
+#include "brw_nir.h"
+#include "anv_nir.h"
+#include "glsl/nir/nir_spirv.h"
+
+/* Needed for SWIZZLE macros */
+#include "program/prog_instruction.h"
 
 // Shader functions
 
 VkResult anv_CreateShaderModule(
     VkDevice                                    _device,
     const VkShaderModuleCreateInfo*             pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkShaderModule*                             pShaderModule)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
@@ -42,11 +49,13 @@ VkResult anv_CreateShaderModule(
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
    assert(pCreateInfo->flags == 0);
 
-   module = anv_device_alloc(device, sizeof(*module) + pCreateInfo->codeSize, 8,
-                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
+   module = anv_alloc2(&device->alloc, pAllocator,
+                       sizeof(*module) + pCreateInfo->codeSize, 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (module == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
+   module->nir = NULL;
    module->size = pCreateInfo->codeSize;
    memcpy(module->data, pCreateInfo->pCode, module->size);
 
@@ -55,93 +64,102 @@ VkResult anv_CreateShaderModule(
    return VK_SUCCESS;
 }
 
-VkResult anv_DestroyShaderModule(
+void anv_DestroyShaderModule(
     VkDevice                                    _device,
-    VkShaderModule                              _module)
+    VkShaderModule                              _module,
+    const VkAllocationCallbacks*                pAllocator)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_shader_module, module, _module);
 
-   anv_device_free(device, module);
-
-   return VK_SUCCESS;
+   anv_free2(&device->alloc, pAllocator, module);
 }
 
-VkResult anv_CreateShader(
-    VkDevice                                    _device,
-    const VkShaderCreateInfo*                   pCreateInfo,
-    VkShader*                                   pShader)
-{
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->module);
-   struct anv_shader *shader;
+#define SPIR_V_MAGIC_NUMBER 0x07230203
 
-   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);
-   assert(pCreateInfo->flags == 0);
-
-   size_t name_len = strlen(pCreateInfo->pName);
-
-   if (strcmp(pCreateInfo->pName, "main") != 0) {
+/* Eventually, this will become part of anv_CreateShaderModule.
+ * Unfortunately, we can't do that yet because we don't have the ability
+ * to copy nir.
+ */
+static nir_shader *
+anv_shader_compile_to_nir(struct anv_device *device,
+                          struct anv_shader_module *module,
+                          const char *entrypoint_name,
+                          gl_shader_stage stage)
+{
+   if (strcmp(entrypoint_name, "main") != 0) {
       anv_finishme("Multiple shaders per module not really supported");
    }
 
-   shader = anv_device_alloc(device, sizeof(*shader) + name_len + 1, 8,
-                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
-   if (shader == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   const struct brw_compiler *compiler =
+      device->instance->physicalDevice.compiler;
+   const nir_shader_compiler_options *nir_options =
+      compiler->glsl_compiler_options[stage].NirOptions;
+
+   nir_shader *nir;
+   if (module->nir) {
+      /* Some things such as our meta clear/blit code will give us a NIR
+       * shader directly.  In that case, we ignore the SPIR-V entirely and
+       * just use the NIR shader.
+       */
+      nir = module->nir;
+      nir->options = nir_options;
+   } else {
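+      /* SPIR-V binaries are a stream of 32-bit words whose first word is
+       * always the magic number, so these asserts catch truncated or
+       * byte-swapped modules before they reach spirv_to_nir().
+       */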
+      uint32_t *spirv = (uint32_t *) module->data;
+      assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
+      assert(module->size % 4 == 0);
 
-   shader->module = module;
-   memcpy(shader->entrypoint, pCreateInfo->pName, name_len + 1);
+      nir = spirv_to_nir(spirv, module->size / 4, stage, nir_options);
+   }
+   nir_validate_shader(nir);
+
+   /* Vulkan uses the separate-shader linking model */
+   nir->info.separate_shader = true;
+
+   /* Make sure the provided shader has exactly one entrypoint and that its
+    * name matches the entrypoint name we were given.
+    */
+   nir_function_impl *entrypoint = NULL;
+   nir_foreach_overload(nir, overload) {
+      if (strcmp(entrypoint_name, overload->function->name) == 0 &&
+          overload->impl) {
+         assert(entrypoint == NULL);
+         entrypoint = overload->impl;
+      }
+   }
+   assert(entrypoint != NULL);
 
-   *pShader = anv_shader_to_handle(shader);
+   nir = brw_preprocess_nir(nir, compiler->scalar_stage[stage]);
 
-   return VK_SUCCESS;
-}
+   nir_shader_gather_info(nir, entrypoint);
 
-VkResult anv_DestroyShader(
-    VkDevice                                    _device,
-    VkShader                                    _shader)
-{
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   ANV_FROM_HANDLE(anv_shader, shader, _shader);
-
-   anv_device_free(device, shader);
-
-   return VK_SUCCESS;
+   return nir;
 }
 
-
 VkResult anv_CreatePipelineCache(
     VkDevice                                    device,
     const VkPipelineCacheCreateInfo*            pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkPipelineCache*                            pPipelineCache)
 {
-   pPipelineCache->handle = 1;
+   *pPipelineCache = (VkPipelineCache)1;
 
    stub_return(VK_SUCCESS);
 }
 
-VkResult anv_DestroyPipelineCache(
+void anv_DestroyPipelineCache(
     VkDevice                                    _device,
-    VkPipelineCache                             _cache)
-{
-   /* VkPipelineCache is a dummy object. */
-   return VK_SUCCESS;
-}
-
-size_t anv_GetPipelineCacheSize(
-    VkDevice                                    device,
-    VkPipelineCache                             pipelineCache)
+    VkPipelineCache                             _cache,
+    const VkAllocationCallbacks*                pAllocator)
 {
-   stub_return(0);
 }
 
 VkResult anv_GetPipelineCacheData(
     VkDevice                                    device,
     VkPipelineCache                             pipelineCache,
+    size_t*                                     pDataSize,
     void*                                       pData)
 {
-   stub_return(VK_UNSUPPORTED);
+   *pDataSize = 0;
+   stub_return(VK_SUCCESS);
 }
 
 VkResult anv_MergePipelineCaches(
@@ -150,736 +168,913 @@ VkResult anv_MergePipelineCaches(
     uint32_t                                    srcCacheCount,
     const VkPipelineCache*                      pSrcCaches)
 {
-   stub_return(VK_UNSUPPORTED);
+   stub_return(VK_SUCCESS);
 }
 
-// Pipeline functions
-
-static void
-emit_vertex_input(struct anv_pipeline *pipeline,
-                  const VkPipelineVertexInputStateCreateInfo *info)
+void anv_DestroyPipeline(
+    VkDevice                                    _device,
+    VkPipeline                                  _pipeline,
+    const VkAllocationCallbacks*                pAllocator)
 {
-   const uint32_t num_dwords = 1 + info->attributeCount * 2;
-   uint32_t *p;
-   bool instancing_enable[32];
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
 
-   pipeline->vb_used = 0;
-   for (uint32_t i = 0; i < info->bindingCount; i++) {
-      const VkVertexInputBindingDescription *desc =
-         &info->pVertexBindingDescriptions[i];
+   anv_reloc_list_finish(&pipeline->batch_relocs,
+                         pAllocator ? pAllocator : &device->alloc);
+   anv_state_stream_finish(&pipeline->program_stream);
+   if (pipeline->blend_state.map)
+      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
+   anv_free2(&device->alloc, pAllocator, pipeline);
+}
 
-      pipeline->vb_used |= 1 << desc->binding;
-      pipeline->binding_stride[desc->binding] = desc->strideInBytes;
+static const uint32_t vk_to_gen_primitive_type[] = {
+   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
+/*   [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST]                = _3DPRIM_PATCHLIST_1 */
+};
 
-      /* Step rate is programmed per vertex element (attribute), not
-       * binding. Set up a map of which bindings step per instance, for
-       * reference by vertex element setup. */
-      switch (desc->stepRate) {
-      default:
-      case VK_VERTEX_INPUT_STEP_RATE_VERTEX:
-         instancing_enable[desc->binding] = false;
-         break;
-      case VK_VERTEX_INPUT_STEP_RATE_INSTANCE:
-         instancing_enable[desc->binding] = true;
-         break;
-      }
+static void
+populate_sampler_prog_key(const struct brw_device_info *devinfo,
+                          struct brw_sampler_prog_key_data *key)
+{
+   /* XXX: Handle texture swizzle on HSW- */
+   for (int i = 0; i < MAX_SAMPLERS; i++) {
+      /* Assume a color sampler with no swizzling (works for BDW+). */
+      key->swizzles[i] = SWIZZLE_XYZW;
    }
-
-   p = anv_batch_emitn(&pipeline->batch, num_dwords,
-                       GEN8_3DSTATE_VERTEX_ELEMENTS);
-
-   for (uint32_t i = 0; i < info->attributeCount; i++) {
-      const VkVertexInputAttributeDescription *desc =
-         &info->pVertexAttributeDescriptions[i];
-      const struct anv_format *format = anv_format_for_vk_format(desc->format);
-
-      struct GEN8_VERTEX_ELEMENT_STATE element = {
-         .VertexBufferIndex = desc->binding,
-         .Valid = true,
-         .SourceElementFormat = format->surface_format,
-         .EdgeFlagEnable = false,
-         .SourceElementOffset = desc->offsetInBytes,
-         .Component0Control = VFCOMP_STORE_SRC,
-         .Component1Control = format->num_channels >= 2 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
-         .Component2Control = format->num_channels >= 3 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
-         .Component3Control = format->num_channels >= 4 ? VFCOMP_STORE_SRC : VFCOMP_STORE_1_FP
-      };
-      GEN8_VERTEX_ELEMENT_STATE_pack(NULL, &p[1 + i * 2], &element);
-
-      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_INSTANCING,
-                     .InstancingEnable = instancing_enable[desc->binding],
-                     .VertexElementIndex = i,
-                     /* Vulkan so far doesn't have an instance divisor, so
-                      * this is always 1 (ignored if not instancing). */
-                     .InstanceDataStepRate = 1);
-   }
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_SGVS,
-                  .VertexIDEnable = pipeline->vs_prog_data.uses_vertexid,
-                  .VertexIDComponentNumber = 2,
-                  .VertexIDElementOffset = info->bindingCount,
-                  .InstanceIDEnable = pipeline->vs_prog_data.uses_instanceid,
-                  .InstanceIDComponentNumber = 3,
-                  .InstanceIDElementOffset = info->bindingCount);
 }
 
 static void
-emit_ia_state(struct anv_pipeline *pipeline,
-              const VkPipelineInputAssemblyStateCreateInfo *info,
-              const struct anv_pipeline_create_info *extra)
+populate_vs_prog_key(const struct brw_device_info *devinfo,
+                     struct brw_vs_prog_key *key)
 {
-   static const uint32_t vk_to_gen_primitive_type[] = {
-      [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]        = _3DPRIM_POINTLIST,
-      [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]         = _3DPRIM_LINELIST,
-      [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]        = _3DPRIM_LINESTRIP,
-      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]     = _3DPRIM_TRILIST,
-      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]    = _3DPRIM_TRISTRIP,
-      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]      = _3DPRIM_TRIFAN,
-      [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ]     = _3DPRIM_LINELIST_ADJ,
-      [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ]    = _3DPRIM_LINESTRIP_ADJ,
-      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ] = _3DPRIM_TRILIST_ADJ,
-      [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ] = _3DPRIM_TRISTRIP_ADJ,
-      [VK_PRIMITIVE_TOPOLOGY_PATCH]             = _3DPRIM_PATCHLIST_1
-   };
-   uint32_t topology = vk_to_gen_primitive_type[info->topology];
+   memset(key, 0, sizeof(*key));
 
-   if (extra && extra->use_rectlist)
-      topology = _3DPRIM_RECTLIST;
+   populate_sampler_prog_key(devinfo, &key->tex);
 
-   struct GEN8_3DSTATE_VF vf = {
-      GEN8_3DSTATE_VF_header,
-      .IndexedDrawCutIndexEnable = info->primitiveRestartEnable,
-   };
-   GEN8_3DSTATE_VF_pack(NULL, pipeline->state_vf, &vf);
+   /* XXX: Handle vertex input work-arounds */
 
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_TOPOLOGY,
-                  .PrimitiveTopologyType = topology);
+   /* XXX: Handle sampler_prog_key */
 }
 
 static void
-emit_rs_state(struct anv_pipeline *pipeline,
-              const VkPipelineRasterStateCreateInfo *info,
-              const struct anv_pipeline_create_info *extra)
+populate_gs_prog_key(const struct brw_device_info *devinfo,
+                     struct brw_gs_prog_key *key)
 {
-   static const uint32_t vk_to_gen_cullmode[] = {
-      [VK_CULL_MODE_NONE]                       = CULLMODE_NONE,
-      [VK_CULL_MODE_FRONT]                      = CULLMODE_FRONT,
-      [VK_CULL_MODE_BACK]                       = CULLMODE_BACK,
-      [VK_CULL_MODE_FRONT_AND_BACK]             = CULLMODE_BOTH
-   };
+   memset(key, 0, sizeof(*key));
 
-   static const uint32_t vk_to_gen_fillmode[] = {
-      [VK_FILL_MODE_POINTS]                     = RASTER_POINT,
-      [VK_FILL_MODE_WIREFRAME]                  = RASTER_WIREFRAME,
-      [VK_FILL_MODE_SOLID]                      = RASTER_SOLID
-   };
+   populate_sampler_prog_key(devinfo, &key->tex);
+}
 
-   static const uint32_t vk_to_gen_front_face[] = {
-      [VK_FRONT_FACE_CCW]                       = CounterClockwise,
-      [VK_FRONT_FACE_CW]                        = Clockwise
-   };
+static void
+populate_wm_prog_key(const struct brw_device_info *devinfo,
+                     const VkGraphicsPipelineCreateInfo *info,
+                     struct brw_wm_prog_key *key)
+{
+   ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);
 
-   struct GEN8_3DSTATE_SF sf = {
-      GEN8_3DSTATE_SF_header,
-      .ViewportTransformEnable = !(extra && extra->disable_viewport),
-      .TriangleStripListProvokingVertexSelect = 0,
-      .LineStripListProvokingVertexSelect = 0,
-      .TriangleFanProvokingVertexSelect = 0,
-      .PointWidthSource = pipeline->writes_point_size ? Vertex : State,
-      .PointWidth = 1.0,
-   };
+   memset(key, 0, sizeof(*key));
 
-   /* FINISHME: VkBool32 rasterizerDiscardEnable; */
+   populate_sampler_prog_key(devinfo, &key->tex);
 
-   GEN8_3DSTATE_SF_pack(NULL, pipeline->state_sf, &sf);
+   /* TODO: Fill out key->input_slots_valid */
 
-   struct GEN8_3DSTATE_RASTER raster = {
-      GEN8_3DSTATE_RASTER_header,
-      .FrontWinding = vk_to_gen_front_face[info->frontFace],
-      .CullMode = vk_to_gen_cullmode[info->cullMode],
-      .FrontFaceFillMode = vk_to_gen_fillmode[info->fillMode],
-      .BackFaceFillMode = vk_to_gen_fillmode[info->fillMode],
-      .ScissorRectangleEnable = !(extra && extra->disable_scissor),
-      .ViewportZClipTestEnable = info->depthClipEnable
-   };
+   /* Vulkan doesn't specify a default */
+   key->high_quality_derivatives = false;
+
+   /* XXX: Vulkan doesn't appear to specify this */
+   key->clamp_fragment_color = false;
+
+   /* Vulkan always specifies upper-left coordinates */
+   key->drawable_height = 0;
+   key->render_to_fbo = false;
+
+   key->nr_color_regions = render_pass->subpasses[info->subpass].color_count;
 
-   GEN8_3DSTATE_RASTER_pack(NULL, pipeline->state_raster, &raster);
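+   /* pMultisampleState is allowed to be NULL (for instance, when
+    * rasterization is disabled), so every dereference of it below is
+    * guarded.
+    */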
+   key->replicate_alpha = key->nr_color_regions > 1 &&
+                          info->pMultisampleState &&
+                          info->pMultisampleState->alphaToCoverageEnable;
 
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE,
-                  .ForceVertexURBEntryReadLength = false,
-                  .ForceVertexURBEntryReadOffset = false,
-                  .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
-                  .NumberofSFOutputAttributes =
-                     pipeline->wm_prog_data.num_varying_inputs);
+   if (info->pMultisampleState &&
+       info->pMultisampleState->rasterizationSamples > 1) {
+      /* We should probably pull this out of the shader, but it's fairly
+       * harmless to compute it and then let dead-code take care of it.
+       */
+      key->persample_shading = info->pMultisampleState->sampleShadingEnable;
+      if (key->persample_shading)
+         key->persample_2x = info->pMultisampleState->rasterizationSamples == 2;
 
+      key->compute_pos_offset = info->pMultisampleState->sampleShadingEnable;
+      key->compute_sample_id = info->pMultisampleState->sampleShadingEnable;
+   }
 }
 
 static void
-emit_cb_state(struct anv_pipeline *pipeline,
-              const VkPipelineColorBlendStateCreateInfo *info)
+populate_cs_prog_key(const struct brw_device_info *devinfo,
+                     struct brw_cs_prog_key *key)
 {
-   struct anv_device *device = pipeline->device;
-
-   static const uint32_t vk_to_gen_logic_op[] = {
-      [VK_LOGIC_OP_COPY]                        = LOGICOP_COPY,
-      [VK_LOGIC_OP_CLEAR]                       = LOGICOP_CLEAR,
-      [VK_LOGIC_OP_AND]                         = LOGICOP_AND,
-      [VK_LOGIC_OP_AND_REVERSE]                 = LOGICOP_AND_REVERSE,
-      [VK_LOGIC_OP_AND_INVERTED]                = LOGICOP_AND_INVERTED,
-      [VK_LOGIC_OP_NOOP]                        = LOGICOP_NOOP,
-      [VK_LOGIC_OP_XOR]                         = LOGICOP_XOR,
-      [VK_LOGIC_OP_OR]                          = LOGICOP_OR,
-      [VK_LOGIC_OP_NOR]                         = LOGICOP_NOR,
-      [VK_LOGIC_OP_EQUIV]                       = LOGICOP_EQUIV,
-      [VK_LOGIC_OP_INVERT]                      = LOGICOP_INVERT,
-      [VK_LOGIC_OP_OR_REVERSE]                  = LOGICOP_OR_REVERSE,
-      [VK_LOGIC_OP_COPY_INVERTED]               = LOGICOP_COPY_INVERTED,
-      [VK_LOGIC_OP_OR_INVERTED]                 = LOGICOP_OR_INVERTED,
-      [VK_LOGIC_OP_NAND]                        = LOGICOP_NAND,
-      [VK_LOGIC_OP_SET]                         = LOGICOP_SET,
-   };
+   memset(key, 0, sizeof(*key));
 
-   static const uint32_t vk_to_gen_blend[] = {
-      [VK_BLEND_ZERO]                           = BLENDFACTOR_ZERO,
-      [VK_BLEND_ONE]                            = BLENDFACTOR_ONE,
-      [VK_BLEND_SRC_COLOR]                      = BLENDFACTOR_SRC_COLOR,
-      [VK_BLEND_ONE_MINUS_SRC_COLOR]            = BLENDFACTOR_INV_SRC_COLOR,
-      [VK_BLEND_DEST_COLOR]                     = BLENDFACTOR_DST_COLOR,
-      [VK_BLEND_ONE_MINUS_DEST_COLOR]           = BLENDFACTOR_INV_DST_COLOR,
-      [VK_BLEND_SRC_ALPHA]                      = BLENDFACTOR_SRC_ALPHA,
-      [VK_BLEND_ONE_MINUS_SRC_ALPHA]            = BLENDFACTOR_INV_SRC_ALPHA,
-      [VK_BLEND_DEST_ALPHA]                     = BLENDFACTOR_DST_ALPHA,
-      [VK_BLEND_ONE_MINUS_DEST_ALPHA]           = BLENDFACTOR_INV_DST_ALPHA,
-      [VK_BLEND_CONSTANT_COLOR]                 = BLENDFACTOR_CONST_COLOR,
-      [VK_BLEND_ONE_MINUS_CONSTANT_COLOR]       = BLENDFACTOR_INV_CONST_COLOR,
-      [VK_BLEND_CONSTANT_ALPHA]                 = BLENDFACTOR_CONST_ALPHA,
-      [VK_BLEND_ONE_MINUS_CONSTANT_ALPHA]       = BLENDFACTOR_INV_CONST_ALPHA,
-      [VK_BLEND_SRC_ALPHA_SATURATE]             = BLENDFACTOR_SRC_ALPHA_SATURATE,
-      [VK_BLEND_SRC1_COLOR]                     = BLENDFACTOR_SRC1_COLOR,
-      [VK_BLEND_ONE_MINUS_SRC1_COLOR]           = BLENDFACTOR_INV_SRC1_COLOR,
-      [VK_BLEND_SRC1_ALPHA]                     = BLENDFACTOR_SRC1_ALPHA,
-      [VK_BLEND_ONE_MINUS_SRC1_ALPHA]           = BLENDFACTOR_INV_SRC1_ALPHA,
-   };
+   populate_sampler_prog_key(devinfo, &key->tex);
+}
 
-   static const uint32_t vk_to_gen_blend_op[] = {
-      [VK_BLEND_OP_ADD]                         = BLENDFUNCTION_ADD,
-      [VK_BLEND_OP_SUBTRACT]                    = BLENDFUNCTION_SUBTRACT,
-      [VK_BLEND_OP_REVERSE_SUBTRACT]            = BLENDFUNCTION_REVERSE_SUBTRACT,
-      [VK_BLEND_OP_MIN]                         = BLENDFUNCTION_MIN,
-      [VK_BLEND_OP_MAX]                         = BLENDFUNCTION_MAX,
-   };
+static nir_shader *
+anv_pipeline_compile(struct anv_pipeline *pipeline,
+                     struct anv_shader_module *module,
+                     const char *entrypoint,
+                     gl_shader_stage stage,
+                     struct brw_stage_prog_data *prog_data)
+{
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
 
-   uint32_t num_dwords = 1 + info->attachmentCount * 2;
-   pipeline->blend_state =
-      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);
+   nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
+                                               module, entrypoint, stage);
+   if (nir == NULL)
+      return NULL;
 
-   struct GEN8_BLEND_STATE blend_state = {
-      .AlphaToCoverageEnable = info->alphaToCoverageEnable,
-   };
+   anv_nir_lower_push_constants(nir, compiler->scalar_stage[stage]);
+
+   /* Figure out the number of parameters */
+   prog_data->nr_params = 0;
+
+   if (nir->num_uniforms > 0) {
+      /* If the shader uses any push constants at all, we'll just give
+       * them the maximum possible number of parameters.
+       */
+      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
+   }
+
+   if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
+      prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;
+
+   if (pipeline->layout && pipeline->layout->stage[stage].image_count > 0)
+      prog_data->nr_params += pipeline->layout->stage[stage].image_count *
+                              BRW_IMAGE_PARAM_SIZE;
+
+   if (prog_data->nr_params > 0) {
+      /* XXX: I think we're leaking this */
+      prog_data->param = (const union gl_constant_value **)
+         malloc(prog_data->nr_params * sizeof(union gl_constant_value *));
 
-   uint32_t *state = pipeline->blend_state.map;
-   GEN8_BLEND_STATE_pack(NULL, state, &blend_state);
-
-   for (uint32_t i = 0; i < info->attachmentCount; i++) {
-      const VkPipelineColorBlendAttachmentState *a = &info->pAttachments[i];
-
-      struct GEN8_BLEND_STATE_ENTRY entry = {
-         .LogicOpEnable = info->logicOpEnable,
-         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
-         .ColorBufferBlendEnable = a->blendEnable,
-         .PreBlendSourceOnlyClampEnable = false,
-         .PreBlendColorClampEnable = false,
-         .PostBlendColorClampEnable = false,
-         .SourceBlendFactor = vk_to_gen_blend[a->srcBlendColor],
-         .DestinationBlendFactor = vk_to_gen_blend[a->destBlendColor],
-         .ColorBlendFunction = vk_to_gen_blend_op[a->blendOpColor],
-         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcBlendAlpha],
-         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->destBlendAlpha],
-         .AlphaBlendFunction = vk_to_gen_blend_op[a->blendOpAlpha],
-         .WriteDisableAlpha = !(a->channelWriteMask & VK_CHANNEL_A_BIT),
-         .WriteDisableRed = !(a->channelWriteMask & VK_CHANNEL_R_BIT),
-         .WriteDisableGreen = !(a->channelWriteMask & VK_CHANNEL_G_BIT),
-         .WriteDisableBlue = !(a->channelWriteMask & VK_CHANNEL_B_BIT),
-      };
-
-      GEN8_BLEND_STATE_ENTRY_pack(NULL, state + i * 2 + 1, &entry);
-   }
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_BLEND_STATE_POINTERS,
-                  .BlendStatePointer = pipeline->blend_state.offset,
-                  .BlendStatePointerValid = true);
+      /* We now set the param values to be offsets into an
+       * anv_push_constants structure.  Since the compiler doesn't
+       * actually dereference any of the gl_constant_value pointers in the
+       * params array, it doesn't really matter what we put here.
+       */
+      struct anv_push_constants *null_data = NULL;
+      if (nir->num_uniforms > 0) {
+         /* Fill out the push constants section of the param array */
+         for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
+            prog_data->param[i] = (const union gl_constant_value *)
+               &null_data->client_data[i * sizeof(float)];
+      }
+   }
+
+   /* Set up dynamic offsets */
+   anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);
+
+   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
+   if (pipeline->layout)
+      anv_nir_apply_pipeline_layout(nir, prog_data, pipeline->layout);
+
+   /* All binding table offsets provided by apply_pipeline_layout() are
+    * relative to the start of the binding table (plus MAX_RTS for the
+    * fragment stage).
+    */
+   unsigned bias = stage == MESA_SHADER_FRAGMENT ? MAX_RTS : 0;
+   prog_data->binding_table.size_bytes = 0;
+   prog_data->binding_table.texture_start = bias;
+   prog_data->binding_table.ubo_start = bias;
+   prog_data->binding_table.ssbo_start = bias;
+   prog_data->binding_table.image_start = bias;
+
+   /* Finish the optimization and compilation process */
+   nir = brw_lower_nir(nir, &pipeline->device->info, NULL,
+                       compiler->scalar_stage[stage]);
+
+   /* nir_lower_io will only handle the push constants; we need to set this
+    * to the full number of possible uniforms.
+    */
+   nir->num_uniforms = prog_data->nr_params * 4;
+
+   return nir;
 }
 
-static const uint32_t vk_to_gen_compare_op[] = {
-   [VK_COMPARE_OP_NEVER]                        = COMPAREFUNCTION_NEVER,
-   [VK_COMPARE_OP_LESS]                         = COMPAREFUNCTION_LESS,
-   [VK_COMPARE_OP_EQUAL]                        = COMPAREFUNCTION_EQUAL,
-   [VK_COMPARE_OP_LESS_EQUAL]                   = COMPAREFUNCTION_LEQUAL,
-   [VK_COMPARE_OP_GREATER]                      = COMPAREFUNCTION_GREATER,
-   [VK_COMPARE_OP_NOT_EQUAL]                    = COMPAREFUNCTION_NOTEQUAL,
-   [VK_COMPARE_OP_GREATER_EQUAL]                = COMPAREFUNCTION_GEQUAL,
-   [VK_COMPARE_OP_ALWAYS]                       = COMPAREFUNCTION_ALWAYS,
-};
+static uint32_t
+anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
+                           const void *data, size_t size)
+{
+   struct anv_state state =
+      anv_state_stream_alloc(&pipeline->program_stream, size, 64);
 
-static const uint32_t vk_to_gen_stencil_op[] = {
-   [VK_STENCIL_OP_KEEP]                         = 0,
-   [VK_STENCIL_OP_ZERO]                         = 0,
-   [VK_STENCIL_OP_REPLACE]                      = 0,
-   [VK_STENCIL_OP_INC_CLAMP]                    = 0,
-   [VK_STENCIL_OP_DEC_CLAMP]                    = 0,
-   [VK_STENCIL_OP_INVERT]                       = 0,
-   [VK_STENCIL_OP_INC_WRAP]                     = 0,
-   [VK_STENCIL_OP_DEC_WRAP]                     = 0
-};
+   assert(size < pipeline->program_stream.block_pool->block_size);
+
+   memcpy(state.map, data, size);
+
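+   /* On platforms without an LLC the GPU reads this state through a
+    * non-coherent mapping, so flush the cache lines we just wrote.
+    */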
+   if (!pipeline->device->info.has_llc)
+      anv_state_clflush(state);
+
+   return state.offset;
+}
 
 static void
-emit_ds_state(struct anv_pipeline *pipeline,
-              const VkPipelineDepthStencilStateCreateInfo *info)
+anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
+                                gl_shader_stage stage,
+                                struct brw_stage_prog_data *prog_data)
 {
-   if (info == NULL) {
-      /* We're going to OR this together with the dynamic state.  We need
-       * to make sure it's initialized to something useful.
-       */
-      memset(pipeline->state_wm_depth_stencil, 0,
-             sizeof(pipeline->state_wm_depth_stencil));
-      return;
-   }
-
-   /* VkBool32 depthBoundsEnable;          // optional (depth_bounds_test) */
-
-   struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
-      .DepthTestEnable = info->depthTestEnable,
-      .DepthBufferWriteEnable = info->depthWriteEnable,
-      .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
-      .DoubleSidedStencilEnable = true,
-
-      .StencilTestEnable = info->stencilTestEnable,
-      .StencilFailOp = vk_to_gen_stencil_op[info->front.stencilFailOp],
-      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.stencilPassOp],
-      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.stencilDepthFailOp],
-      .StencilTestFunction = vk_to_gen_compare_op[info->front.stencilCompareOp],
-      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.stencilFailOp],
-      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.stencilPassOp],
-      .BackfaceStencilPassDepthFailOp =vk_to_gen_stencil_op[info->back.stencilDepthFailOp],
-      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.stencilCompareOp],
+   struct brw_device_info *devinfo = &pipeline->device->info;
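+   /* Tessellation is not wired up yet, so TESS_CTRL/TESS_EVAL stay at
+    * zero and contribute no scratch space below.
+    */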
+   uint32_t max_threads[] = {
+      [MESA_SHADER_VERTEX]                  = devinfo->max_vs_threads,
+      [MESA_SHADER_TESS_CTRL]               = 0,
+      [MESA_SHADER_TESS_EVAL]               = 0,
+      [MESA_SHADER_GEOMETRY]                = devinfo->max_gs_threads,
+      [MESA_SHADER_FRAGMENT]                = devinfo->max_wm_threads,
+      [MESA_SHADER_COMPUTE]                 = devinfo->max_cs_threads,
    };
 
-   GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, pipeline->state_wm_depth_stencil, &wm_depth_stencil);
+   pipeline->prog_data[stage] = prog_data;
+   pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
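+
+   /* Per-stage scratch regions are packed back-to-back at 1 kB alignment,
+    * each sized for total_scratch bytes per thread times the stage's
+    * maximum thread count, e.g. (illustrative numbers) 2 kB per thread *
+    * 504 VS threads = 1008 kB.
+    */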
+   pipeline->scratch_start[stage] = pipeline->total_scratch;
+   pipeline->total_scratch =
+      align_u32(pipeline->total_scratch, 1024) +
+      prog_data->total_scratch * max_threads[stage];
 }
 
-VkResult
-anv_pipeline_create(
-    VkDevice                                    _device,
-    const VkGraphicsPipelineCreateInfo*         pCreateInfo,
-    const struct anv_pipeline_create_info *     extra,
-    VkPipeline*                                 pPipeline)
+static VkResult
+anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
+                        const VkGraphicsPipelineCreateInfo *info,
+                        struct anv_shader_module *module,
+                        const char *entrypoint)
 {
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_pipeline *pipeline;
-   VkResult result;
-   uint32_t offset, length;
-
-   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
-   
-   pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
-                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
-   if (pipeline == NULL)
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
+   struct brw_vs_prog_key key;
+
+   populate_vs_prog_key(&pipeline->device->info, &key);
+
+   /* TODO: Look up shader in cache */
+
+   memset(prog_data, 0, sizeof(*prog_data));
+
+   nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                          MESA_SHADER_VERTEX,
+                                          &prog_data->base.base);
+   if (nir == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   pipeline->device = device;
-   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
-   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
+   void *mem_ctx = ralloc_context(NULL);
+
+   if (module->nir == NULL)
+      ralloc_steal(mem_ctx, nir);
+
+   prog_data->inputs_read = nir->info.inputs_read;
+   /* outputs_written is a bitfield indexed by varying slot */
+   pipeline->writes_point_size =
+      (nir->info.outputs_written & (1ull << VARYING_SLOT_PSIZ)) != 0;
 
-   result = anv_reloc_list_init(&pipeline->batch.relocs, device);
-   if (result != VK_SUCCESS) {
-      anv_device_free(device, pipeline);
-      return result;
+   brw_compute_vue_map(&pipeline->device->info,
+                       &prog_data->base.vue_map,
+                       nir->info.outputs_written,
+                       nir->info.separate_shader);
+
+   unsigned code_size;
+   const unsigned *shader_code =
+      brw_compile_vs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+                     NULL, false, -1, &code_size, NULL);
+   if (shader_code == NULL) {
+      ralloc_free(mem_ctx);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
    }
-   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
-   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
 
-   anv_state_stream_init(&pipeline->program_stream,
-                         &device->instruction_block_pool);
+   const uint32_t offset =
+      anv_pipeline_upload_kernel(pipeline, shader_code, code_size);
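+
+   /* The scalar backend produces a SIMD8 VS and the vec4 backend a vec4
+    * VS; only the slot matching the compile's dispatch mode gets a real
+    * kernel, the other is marked NO_KERNEL.
+    */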
+   if (prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
+      pipeline->vs_simd8 = offset;
+      pipeline->vs_vec4 = NO_KERNEL;
+   } else {
+      pipeline->vs_simd8 = NO_KERNEL;
+      pipeline->vs_vec4 = offset;
+   }
 
-   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
-      pipeline->shaders[pCreateInfo->pStages[i].stage] =
-         anv_shader_from_handle(pCreateInfo->pStages[i].shader);
+   ralloc_free(mem_ctx);
+
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX,
+                                   &prog_data->base.base);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
+                        const VkGraphicsPipelineCreateInfo *info,
+                        struct anv_shader_module *module,
+                        const char *entrypoint)
+{
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   struct brw_gs_prog_data *prog_data = &pipeline->gs_prog_data;
+   struct brw_gs_prog_key key;
+
+   populate_gs_prog_key(&pipeline->device->info, &key);
+
+   /* TODO: Look up shader in cache */
+
+   memset(prog_data, 0, sizeof(*prog_data));
+
+   nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                          MESA_SHADER_GEOMETRY,
+                                          &prog_data->base.base);
+   if (nir == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   void *mem_ctx = ralloc_context(NULL);
+
+   if (module->nir == NULL)
+      ralloc_steal(mem_ctx, nir);
+
+   brw_compute_vue_map(&pipeline->device->info,
+                       &prog_data->base.vue_map,
+                       nir->info.outputs_written,
+                       nir->info.separate_shader);
+
+   unsigned code_size;
+   const unsigned *shader_code =
+      brw_compile_gs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+                     NULL, -1, &code_size, NULL);
+   if (shader_code == NULL) {
+      ralloc_free(mem_ctx);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
    }
 
-   if (pCreateInfo->pTessellationState)
-      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO");
-   if (pCreateInfo->pViewportState)
-      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO");
-   if (pCreateInfo->pMultisampleState)
-      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");
+   /* TODO: SIMD8 GS */
+   pipeline->gs_kernel =
+      anv_pipeline_upload_kernel(pipeline, shader_code, code_size);
+   pipeline->gs_vertex_count = nir->info.gs.vertices_in;
 
-   pipeline->use_repclear = extra && extra->use_repclear;
+   ralloc_free(mem_ctx);
 
-   anv_compiler_run(device->compiler, pipeline);
-
-   /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
-    * hard code this to num_attributes - 2. This is because the attributes
-    * include VUE header and position, which aren't counted as varying
-    * inputs. */
-   if (pipeline->vs_simd8 == NO_KERNEL) {
-      pipeline->wm_prog_data.num_varying_inputs =
-         pCreateInfo->pVertexInputState->attributeCount - 2;
-   }
-
-   assert(pCreateInfo->pVertexInputState);
-   emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
-   assert(pCreateInfo->pInputAssemblyState);
-   emit_ia_state(pipeline, pCreateInfo->pInputAssemblyState, extra);
-   assert(pCreateInfo->pRasterState);
-   emit_rs_state(pipeline, pCreateInfo->pRasterState, extra);
-   emit_ds_state(pipeline, pCreateInfo->pDepthStencilState);
-   emit_cb_state(pipeline, pCreateInfo->pColorBlendState);
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_STATISTICS,
-                   .StatisticsEnable = true);
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_HS, .Enable = false);
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_TE, .TEEnable = false);
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
-                  .ConstantBufferOffset = 0,
-                  .ConstantBufferSize = 4);
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
-                  .ConstantBufferOffset = 4,
-                  .ConstantBufferSize = 4);
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
-                  .ConstantBufferOffset = 8,
-                  .ConstantBufferSize = 4);
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM_CHROMAKEY,
-                  .ChromaKeyKillEnable = false);
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE_SWIZ);
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_CLIP,
-                  .ClipEnable = true,
-                  .ViewportXYClipTestEnable = !(extra && extra->disable_viewport),
-                  .MinimumPointWidth = 0.125,
-                  .MaximumPointWidth = 255.875);
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM,
-                  .StatisticsEnable = true,
-                  .LineEndCapAntialiasingRegionWidth = _05pixels,
-                  .LineAntialiasingRegionWidth = _10pixels,
-                  .EarlyDepthStencilControl = NORMAL,
-                  .ForceThreadDispatchEnable = NORMAL,
-                  .PointRasterizationRule = RASTRULE_UPPER_RIGHT,
-                  .BarycentricInterpolationMode =
-                     pipeline->wm_prog_data.barycentric_interp_modes);
-
-   uint32_t samples = 1;
-   uint32_t log2_samples = __builtin_ffs(samples) - 1;
-   bool enable_sampling = samples > 1 ? true : false;
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_MULTISAMPLE,
-                  .PixelPositionOffsetEnable = enable_sampling,
-                  .PixelLocation = CENTER,
-                  .NumberofMultisamples = log2_samples);
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SAMPLE_MASK,
-                  .SampleMask = 0xffff);
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_VS,
-                  .VSURBStartingAddress = pipeline->urb.vs_start,
-                  .VSURBEntryAllocationSize = pipeline->urb.vs_size - 1,
-                  .VSNumberofURBEntries = pipeline->urb.nr_vs_entries);
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_GS,
-                  .GSURBStartingAddress = pipeline->urb.gs_start,
-                  .GSURBEntryAllocationSize = pipeline->urb.gs_size - 1,
-                  .GSNumberofURBEntries = pipeline->urb.nr_gs_entries);
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_HS,
-                  .HSURBStartingAddress = pipeline->urb.vs_start,
-                  .HSURBEntryAllocationSize = 0,
-                  .HSNumberofURBEntries = 0);
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_DS,
-                  .DSURBStartingAddress = pipeline->urb.vs_start,
-                  .DSURBEntryAllocationSize = 0,
-                  .DSNumberofURBEntries = 0);
-
-   const struct brw_gs_prog_data *gs_prog_data = &pipeline->gs_prog_data;
-   offset = 1;
-   length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset;
-
-   if (pipeline->gs_vec4 == NO_KERNEL)
-      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, .Enable = false);
-   else
-      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS,
-                     .SingleProgramFlow = false,
-                     .KernelStartPointer = pipeline->gs_vec4,
-                     .VectorMaskEnable = Vmask,
-                     .SamplerCount = 0,
-                     .BindingTableEntryCount = 0,
-                     .ExpectedVertexCount = pipeline->gs_vertex_count,
-                        
-                     .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_GEOMETRY],
-                     .PerThreadScratchSpace = ffs(gs_prog_data->base.base.total_scratch / 2048),
-
-                     .OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1,
-                     .OutputTopology = gs_prog_data->output_topology,
-                     .VertexURBEntryReadLength = gs_prog_data->base.urb_read_length,
-                     .DispatchGRFStartRegisterForURBData =
-                        gs_prog_data->base.base.dispatch_grf_start_reg,
-
-                     .MaximumNumberofThreads = device->info.max_gs_threads,
-                     .ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords,
-                     //pipeline->gs_prog_data.dispatch_mode |
-                     .StatisticsEnable = true,
-                     .IncludePrimitiveID = gs_prog_data->include_primitive_id,
-                     .ReorderMode = TRAILING,
-                     .Enable = true,
-
-                     .ControlDataFormat = gs_prog_data->control_data_format,
-
-                     /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
-                      * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
-                      * UserClipDistanceCullTestEnableBitmask(v)
-                      */
-
-                     .VertexURBEntryOutputReadOffset = offset,
-                     .VertexURBEntryOutputLength = length);
-
-   const struct brw_vue_prog_data *vue_prog_data = &pipeline->vs_prog_data.base;
-   /* Skip the VUE header and position slots */
-   offset = 1;
-   length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset;
-
-   if (pipeline->vs_simd8 == NO_KERNEL || (extra && extra->disable_vs))
-      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
-                     .FunctionEnable = false,
-                     .VertexURBEntryOutputReadOffset = 1,
-                     /* Even if VS is disabled, SBE still gets the amount of
-                      * vertex data to read from this field. We use attribute
-                      * count - 1, as we don't count the VUE header here. */
-                     .VertexURBEntryOutputLength =
-                        DIV_ROUND_UP(pCreateInfo->pVertexInputState->attributeCount - 1, 2));
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY,
+                                   &prog_data->base.base);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
+                        const VkGraphicsPipelineCreateInfo *info,
+                        struct anv_shader_module *module,
+                        const char *entrypoint)
+{
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   struct brw_wm_prog_data *prog_data = &pipeline->wm_prog_data;
+   struct brw_wm_prog_key key;
+
+   populate_wm_prog_key(&pipeline->device->info, info, &key);
+
+   if (pipeline->use_repclear)
+      key.nr_color_regions = 1;
+
+   /* TODO: Look up shader in cache */
+
+   memset(prog_data, 0, sizeof(*prog_data));
+
+   prog_data->binding_table.render_target_start = 0;
+
+   nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                          MESA_SHADER_FRAGMENT,
+                                          &prog_data->base);
+   if (nir == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   void *mem_ctx = ralloc_context(NULL);
+
+   if (module->nir == NULL)
+      ralloc_steal(mem_ctx, nir);
+
+   unsigned code_size;
+   const unsigned *shader_code =
+      brw_compile_fs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+                     NULL, -1, -1, pipeline->use_repclear, &code_size, NULL);
+   if (shader_code == NULL) {
+      ralloc_free(mem_ctx);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+
+   uint32_t offset = anv_pipeline_upload_kernel(pipeline,
+                                                shader_code, code_size);
+   if (prog_data->no_8)
+      pipeline->ps_simd8 = NO_KERNEL;
    else
-      anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS,
-                     .KernelStartPointer = pipeline->vs_simd8,
-                     .SingleVertexDispatch = Multiple,
-                     .VectorMaskEnable = Dmask,
-                     .SamplerCount = 0,
-                     .BindingTableEntryCount =
-                     vue_prog_data->base.binding_table.size_bytes / 4,
-                     .ThreadDispatchPriority = Normal,
-                     .FloatingPointMode = IEEE754,
-                     .IllegalOpcodeExceptionEnable = false,
-                     .AccessesUAV = false,
-                     .SoftwareExceptionEnable = false,
-
-                     .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_VERTEX],
-                     .PerThreadScratchSpace = ffs(vue_prog_data->base.total_scratch / 2048),
-
-                     .DispatchGRFStartRegisterForURBData =
-                     vue_prog_data->base.dispatch_grf_start_reg,
-                     .VertexURBEntryReadLength = vue_prog_data->urb_read_length,
-                     .VertexURBEntryReadOffset = 0,
-
-                     .MaximumNumberofThreads = device->info.max_vs_threads - 1,
-                     .StatisticsEnable = false,
-                     .SIMD8DispatchEnable = true,
-                     .VertexCacheDisable = false,
-                     .FunctionEnable = true,
-
-                     .VertexURBEntryOutputReadOffset = offset,
-                     .VertexURBEntryOutputLength = length,
-                     .UserClipDistanceClipTestEnableBitmask = 0,
-                     .UserClipDistanceCullTestEnableBitmask = 0);
-
-   const struct brw_wm_prog_data *wm_prog_data = &pipeline->wm_prog_data;
-   uint32_t ksp0, ksp2, grf_start0, grf_start2;
-
-   ksp2 = 0;
-   grf_start2 = 0;
+      pipeline->ps_simd8 = offset;
+
+   if (prog_data->no_8 || prog_data->prog_offset_16) {
+      pipeline->ps_simd16 = offset + prog_data->prog_offset_16;
+   } else {
+      pipeline->ps_simd16 = NO_KERNEL;
+   }
+
+   pipeline->ps_ksp2 = 0;
+   pipeline->ps_grf_start2 = 0;
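+
+   /* Gen8's 3DSTATE_PS has three kernel start pointers; we use KSP0 for
+    * the SIMD8 kernel when one exists (SIMD16 otherwise) and KSP2 for the
+    * SIMD16 kernel when both are present.
+    */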
    if (pipeline->ps_simd8 != NO_KERNEL) {
-      ksp0 = pipeline->ps_simd8;
-      grf_start0 = wm_prog_data->base.dispatch_grf_start_reg;
+      pipeline->ps_ksp0 = pipeline->ps_simd8;
+      pipeline->ps_grf_start0 = prog_data->base.dispatch_grf_start_reg;
       if (pipeline->ps_simd16 != NO_KERNEL) {
-         ksp2 = pipeline->ps_simd16;
-         grf_start2 = wm_prog_data->dispatch_grf_start_reg_16;
+         pipeline->ps_ksp2 = pipeline->ps_simd16;
+         pipeline->ps_grf_start2 = prog_data->dispatch_grf_start_reg_16;
       }
    } else if (pipeline->ps_simd16 != NO_KERNEL) {
-      ksp0 = pipeline->ps_simd16;
-      grf_start0 = wm_prog_data->dispatch_grf_start_reg_16;
-   } else {
-      unreachable("no ps shader");
-   }
-
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS,
-                  .KernelStartPointer0 = ksp0,
-   
-                  .SingleProgramFlow = false,
-                  .VectorMaskEnable = true,
-                  .SamplerCount = 1,
-
-                  .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT],
-                  .PerThreadScratchSpace = ffs(wm_prog_data->base.total_scratch / 2048),
-                  
-                  .MaximumNumberofThreadsPerPSD = 64 - 2,
-                  .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ?
-                     POSOFFSET_SAMPLE: POSOFFSET_NONE,
-                  .PushConstantEnable = wm_prog_data->base.nr_params > 0,
-                  ._8PixelDispatchEnable = pipeline->ps_simd8 != NO_KERNEL,
-                  ._16PixelDispatchEnable = pipeline->ps_simd16 != NO_KERNEL,
-                  ._32PixelDispatchEnable = false,
-
-                  .DispatchGRFStartRegisterForConstantSetupData0 = grf_start0,
-                  .DispatchGRFStartRegisterForConstantSetupData1 = 0,
-                  .DispatchGRFStartRegisterForConstantSetupData2 = grf_start2,
-
-                  .KernelStartPointer1 = 0,
-                  .KernelStartPointer2 = ksp2);
-
-   bool per_sample_ps = false;
-   anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS_EXTRA,
-                  .PixelShaderValid = true,
-                  .PixelShaderKillsPixel = wm_prog_data->uses_kill,
-                  .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode,
-                  .AttributeEnable = wm_prog_data->num_varying_inputs > 0,
-                  .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask,
-                  .PixelShaderIsPerSample = per_sample_ps);
-
-   *pPipeline = anv_pipeline_to_handle(pipeline);
+      pipeline->ps_ksp0 = pipeline->ps_simd16;
+      pipeline->ps_grf_start0 = prog_data->dispatch_grf_start_reg_16;
+   }
+
+   ralloc_free(mem_ctx);
+
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT,
+                                   &prog_data->base);
 
    return VK_SUCCESS;
 }
 
-VkResult anv_DestroyPipeline(
-    VkDevice                                    _device,
-    VkPipeline                                  _pipeline)
+VkResult
+anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
+                        const VkComputePipelineCreateInfo *info,
+                        struct anv_shader_module *module,
+                        const char *entrypoint)
 {
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
+   struct brw_cs_prog_key key;
 
-   anv_compiler_free(pipeline);
-   anv_reloc_list_finish(&pipeline->batch.relocs, pipeline->device);
-   anv_state_stream_finish(&pipeline->program_stream);
-   anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
-   anv_device_free(pipeline->device, pipeline);
+   populate_cs_prog_key(&pipeline->device->info, &key);
+
+   /* TODO: Look up shader in cache */
+
+   memset(prog_data, 0, sizeof(*prog_data));
+
+   nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                          MESA_SHADER_COMPUTE,
+                                          &prog_data->base);
+   if (nir == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   void *mem_ctx = ralloc_context(NULL);
+
+   if (module->nir == NULL)
+      ralloc_steal(mem_ctx, nir);
+
+   unsigned code_size;
+   const unsigned *shader_code =
+      brw_compile_cs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+                     -1, &code_size, NULL);
+   if (shader_code == NULL) {
+      ralloc_free(mem_ctx);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+
+   pipeline->cs_simd = anv_pipeline_upload_kernel(pipeline,
+                                                  shader_code, code_size);
+   ralloc_free(mem_ctx);
+
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE,
+                                   &prog_data->base);
 
    return VK_SUCCESS;
 }
 
-VkResult anv_CreateGraphicsPipelines(
-    VkDevice                                    _device,
-    VkPipelineCache                             pipelineCache,
-    uint32_t                                    count,
-    const VkGraphicsPipelineCreateInfo*         pCreateInfos,
-    VkPipeline*                                 pPipelines)
+static const int gen8_push_size = 32 * 1024;
+
+static void
+gen7_compute_urb_partition(struct anv_pipeline *pipeline)
 {
-   VkResult result = VK_SUCCESS;
+   const struct brw_device_info *devinfo = &pipeline->device->info;
+   bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
+   unsigned vs_size = vs_present ? pipeline->vs_prog_data.base.urb_entry_size : 1;
+   unsigned vs_entry_size_bytes = vs_size * 64;
+   bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
+   unsigned gs_size = gs_present ? pipeline->gs_prog_data.base.urb_entry_size : 1;
+   unsigned gs_entry_size_bytes = gs_size * 64;
+
+   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
+    *
+    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
+    *     Allocation Size is less than 9 512-bit URB entries.
+    *
+    * Similar text exists for GS.
+    */
+   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
+   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
+
+   /* URB allocations must be done in 8k chunks. */
+   unsigned chunk_size_bytes = 8192;
+
+   /* Determine the size of the URB in chunks. */
+   unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes;
+
+   /* Reserve space for push constants */
+   unsigned push_constant_bytes = gen8_push_size;
+   unsigned push_constant_chunks =
+      push_constant_bytes / chunk_size_bytes;
+
+   /* Initially, assign each stage the minimum amount of URB space it needs,
+    * and make a note of how much additional space it "wants" (the amount of
+    * additional space it could actually make use of).
+    */
+
+   /* VS has a lower limit on the number of URB entries */
+   unsigned vs_chunks =
+      ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
+            chunk_size_bytes) / chunk_size_bytes;
+   unsigned vs_wants =
+      ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
+            chunk_size_bytes) / chunk_size_bytes - vs_chunks;
+
+   unsigned gs_chunks = 0;
+   unsigned gs_wants = 0;
+   if (gs_present) {
+      /* There are two constraints on the minimum amount of URB space we can
+       * allocate:
+       *
+       * (1) We need room for at least 2 URB entries, since we always operate
+       * the GS in DUAL_OBJECT mode.
+       *
+       * (2) We can't allocate fewer entries than gs_granularity.
+       */
+      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
+                        chunk_size_bytes) / chunk_size_bytes;
+      gs_wants =
+         ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
+               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
+   }
 
-   unsigned i = 0;
-   for (; i < count; i++) {
-      result = anv_pipeline_create(_device, &pCreateInfos[i],
-                                   NULL, &pPipelines[i]);
-      if (result != VK_SUCCESS) {
-         for (unsigned j = 0; j < i; j++) {
-            anv_DestroyPipeline(_device, pPipelines[j]);
-         }
+   /* There should always be enough URB space to satisfy the minimum
+    * requirements of each stage.
+    */
+   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
+   assert(total_needs <= urb_chunks);
+
+   /* Mete out remaining space (if any) in proportion to "wants". */
+   unsigned total_wants = vs_wants + gs_wants;
+   unsigned remaining_space = urb_chunks - total_needs;
+   if (remaining_space > total_wants)
+      remaining_space = total_wants;
+   if (remaining_space > 0) {
+      unsigned vs_additional = (unsigned)
+         round(vs_wants * (((double) remaining_space) / total_wants));
+      vs_chunks += vs_additional;
+      remaining_space -= vs_additional;
+      gs_chunks += remaining_space;
+   }
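+
+   /* Worked example (illustrative numbers): a 192 kB URB gives urb_chunks
+    * = 24.  If push constants take 4 chunks, vs_chunks = 8 and gs_chunks =
+    * 2, then 10 chunks remain; with vs_wants = 30 and gs_wants = 10, the
+    * VS receives round(30 * 10 / 40) = 8 extra chunks and the GS the
+    * remaining 2.
+    */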
 
-         return result;
+   /* Sanity check that we haven't over-allocated. */
+   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
+
+   /* Now compute the number of entries that fit in the space allocated
+    * to each stage.
+    */
+   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
+   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
+
+   /* Since we rounded up when computing *_wants, this may be slightly more
+    * than the maximum allowed amount, so correct for that.
+    */
+   nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
+   nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);
+
+   /* Ensure that we program a multiple of the granularity. */
+   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
+   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
+
+   /* Finally, sanity check to make sure we have at least the minimum number
+    * of entries needed for each stage.
+    */
+   assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
+   if (gs_present)
+      assert(nr_gs_entries >= 2);
+
+   /* Lay out the URB in the following order:
+    * - push constants
+    * - VS
+    * - GS
+    */
+   pipeline->urb.vs_start = push_constant_chunks;
+   pipeline->urb.vs_size = vs_size;
+   pipeline->urb.nr_vs_entries = nr_vs_entries;
+
+   pipeline->urb.gs_start = push_constant_chunks + vs_chunks;
+   pipeline->urb.gs_size = gs_size;
+   pipeline->urb.nr_gs_entries = nr_gs_entries;
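+
+   /* With the illustrative numbers above: push constants occupy chunks
+    * 0..3, the VS chunks 4..12, and the GS chunks 13..15, giving
+    * nr_vs_entries = 9 * 8192 / 128 = 576 and nr_gs_entries = 192.
+    */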
+}
+
+static void
+anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
+                                const VkGraphicsPipelineCreateInfo *pCreateInfo)
+{
+   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
+   ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
+   struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
+
+   pipeline->dynamic_state = default_dynamic_state;
+
+   if (pCreateInfo->pDynamicState) {
+      /* Remove all of the states that are marked as dynamic */
+      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
+      for (uint32_t s = 0; s < count; s++)
+         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
+   }
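+
+   /* Example (illustrative): if pDynamicStates lists
+    * VK_DYNAMIC_STATE_VIEWPORT (enum value 0), the loop above clears
+    * bit 0 of `states`, so the viewport copy below is skipped and the
+    * application must instead provide viewports via vkCmdSetViewport()
+    * at record time.
+    */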
+
+   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
+
+   dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
+   if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+      typed_memcpy(dynamic->viewport.viewports,
+                   pCreateInfo->pViewportState->pViewports,
+                   pCreateInfo->pViewportState->viewportCount);
+   }
+
+   dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
+   if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+      typed_memcpy(dynamic->scissor.scissors,
+                   pCreateInfo->pViewportState->pScissors,
+                   pCreateInfo->pViewportState->scissorCount);
+   }
+
+   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
+      assert(pCreateInfo->pRasterizationState);
+      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
+   }
+
+   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
+      assert(pCreateInfo->pRasterizationState);
+      dynamic->depth_bias.bias =
+         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
+      dynamic->depth_bias.clamp =
+         pCreateInfo->pRasterizationState->depthBiasClamp;
+      dynamic->depth_bias.slope =
+         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
+   }
+
+   if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
+      assert(pCreateInfo->pColorBlendState);
+      typed_memcpy(dynamic->blend_constants,
+                   pCreateInfo->pColorBlendState->blendConstants, 4);
+   }
+
+   /* If there is no depthstencil attachment, then don't read
+    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
+    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
+    * no need to override the depthstencil defaults in
+    * anv_pipeline::dynamic_state when there is no depthstencil attachment.
+    *
+    * From the Vulkan spec (20 Oct 2015, git-aa308cb):
+    *
+    *    pDepthStencilState [...] may only be NULL if renderPass and subpass
+    *    specify a subpass that has no depth/stencil attachment.
+    */
+   if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
+      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
+         assert(pCreateInfo->pDepthStencilState);
+         dynamic->depth_bounds.min =
+            pCreateInfo->pDepthStencilState->minDepthBounds;
+         dynamic->depth_bounds.max =
+            pCreateInfo->pDepthStencilState->maxDepthBounds;
+      }
+
+      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
+         assert(pCreateInfo->pDepthStencilState);
+         dynamic->stencil_compare_mask.front =
+            pCreateInfo->pDepthStencilState->front.compareMask;
+         dynamic->stencil_compare_mask.back =
+            pCreateInfo->pDepthStencilState->back.compareMask;
+      }
+
+      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
+         assert(pCreateInfo->pDepthStencilState);
+         dynamic->stencil_write_mask.front =
+            pCreateInfo->pDepthStencilState->front.writeMask;
+         dynamic->stencil_write_mask.back =
+            pCreateInfo->pDepthStencilState->back.writeMask;
+      }
+
+      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
+         assert(pCreateInfo->pDepthStencilState);
+         dynamic->stencil_reference.front =
+            pCreateInfo->pDepthStencilState->front.reference;
+         dynamic->stencil_reference.back =
+            pCreateInfo->pDepthStencilState->back.reference;
       }
    }
 
-   return VK_SUCCESS;
+   pipeline->dynamic_state_mask = states;
 }
 
-static VkResult anv_compute_pipeline_create(
-    VkDevice                                    _device,
-    const VkComputePipelineCreateInfo*          pCreateInfo,
-    VkPipeline*                                 pPipeline)
+static void
+anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
 {
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_pipeline *pipeline;
-   VkResult result;
+   struct anv_render_pass *renderpass = NULL;
+   struct anv_subpass *subpass = NULL;
 
-   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
+   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
+    * present, as explained by the Vulkan spec (20 Oct 2015, git-aa308cb),
+    * Section 4.2 "Graphics Pipeline".
+    */
+   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
 
-   pipeline = anv_device_alloc(device, sizeof(*pipeline), 8,
-                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
-   if (pipeline == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   renderpass = anv_render_pass_from_handle(info->renderPass);
+   assert(renderpass);
+
+   if (renderpass != &anv_meta_dummy_renderpass) {
+      assert(info->subpass < renderpass->subpass_count);
+      subpass = &renderpass->subpasses[info->subpass];
+   }
+
+   assert(info->stageCount >= 1);
+   assert(info->pVertexInputState);
+   assert(info->pInputAssemblyState);
+   assert(info->pViewportState);
+   assert(info->pRasterizationState);
+
+   if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
+      assert(info->pDepthStencilState);
+
+   if (subpass && subpass->color_count > 0)
+      assert(info->pColorBlendState);
+
+   for (uint32_t i = 0; i < info->stageCount; ++i) {
+      switch (info->pStages[i].stage) {
+      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+         assert(info->pTessellationState);
+         break;
+      default:
+         break;
+      }
+   }
+}
+
+VkResult
+anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
+                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
+                  const struct anv_graphics_pipeline_create_info *extra,
+                  const VkAllocationCallbacks *alloc)
+{
+   anv_validate {
+      anv_pipeline_validate_create_info(pCreateInfo);
+   }
+
+   if (alloc == NULL)
+      alloc = &device->alloc;
 
    pipeline->device = device;
    pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
 
-   result = anv_reloc_list_init(&pipeline->batch.relocs, device);
-   if (result != VK_SUCCESS) {
-      anv_device_free(device, pipeline);
-      return result;
-   }
+   anv_reloc_list_init(&pipeline->batch_relocs, alloc);
+   /* TODO: Handle allocation failure */
+
+   pipeline->batch.alloc = alloc;
    pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
    pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
+   pipeline->batch.relocs = &pipeline->batch_relocs;
 
    anv_state_stream_init(&pipeline->program_stream,
                          &device->instruction_block_pool);
 
-   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
+   anv_pipeline_init_dynamic_state(pipeline, pCreateInfo);
 
-   pipeline->shaders[VK_SHADER_STAGE_COMPUTE] =
-      anv_shader_from_handle(pCreateInfo->cs.shader);
+   if (pCreateInfo->pTessellationState)
+      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO");
+   if (pCreateInfo->pMultisampleState &&
+       pCreateInfo->pMultisampleState->rasterizationSamples > 1)
+      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");
 
-   pipeline->use_repclear = false;
+   pipeline->use_repclear = extra && extra->use_repclear;
+   pipeline->writes_point_size = false;
 
-   anv_compiler_run(device->compiler, pipeline);
+   /* When we free the pipeline, we detect stages based on the NULL status
+    * of various prog_data pointers.  Make them NULL by default.
+    */
+   memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
+   memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));
 
-   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
+   pipeline->vs_simd8 = NO_KERNEL;
+   pipeline->vs_vec4 = NO_KERNEL;
+   pipeline->gs_kernel = NO_KERNEL;
 
-   anv_batch_emit(&pipeline->batch, GEN8_MEDIA_VFE_STATE,
-                  .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT],
-                  .PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048),
-                  .ScratchSpaceBasePointerHigh = 0,
-                  .StackSize = 0,
+   pipeline->active_stages = 0;
+   pipeline->total_scratch = 0;
 
-                  .MaximumNumberofThreads = device->info.max_cs_threads - 1,
-                  .NumberofURBEntries = 2,
-                  .ResetGatewayTimer = true,
-                  .BypassGatewayControl = true,
-                  .URBEntryAllocationSize = 2,
-                  .CURBEAllocationSize = 0);
+   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
+      ANV_FROM_HANDLE(anv_shader_module, module,
+                      pCreateInfo->pStages[i].module);
+      const char *entrypoint = pCreateInfo->pStages[i].pName;
 
-   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
-   uint32_t group_size = prog_data->local_size[0] *
-      prog_data->local_size[1] * prog_data->local_size[2];
-   pipeline->cs_thread_width_max = DIV_ROUND_UP(group_size, prog_data->simd_size);
-   uint32_t remainder = group_size & (prog_data->simd_size - 1);
+      switch (pCreateInfo->pStages[i].stage) {
+      case VK_SHADER_STAGE_VERTEX_BIT:
+         anv_pipeline_compile_vs(pipeline, pCreateInfo, module, entrypoint);
+         break;
+      case VK_SHADER_STAGE_GEOMETRY_BIT:
+         anv_pipeline_compile_gs(pipeline, pCreateInfo, module, entrypoint);
+         break;
+      case VK_SHADER_STAGE_FRAGMENT_BIT:
+         anv_pipeline_compile_fs(pipeline, pCreateInfo, module, entrypoint);
+         break;
+      default:
+         anv_finishme("Unsupported shader stage");
+      }
+   }
 
-   if (remainder > 0)
-      pipeline->cs_right_mask = ~0u >> (32 - remainder);
-   else
-      pipeline->cs_right_mask = ~0u >> (32 - prog_data->simd_size);
+   if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
+      /* The vertex shader is only optional if disable_vs is set. */
+      assert(extra && extra->disable_vs);
+      memset(&pipeline->vs_prog_data, 0, sizeof(pipeline->vs_prog_data));
+   }
 
+   gen7_compute_urb_partition(pipeline);
 
-   *pPipeline = anv_pipeline_to_handle(pipeline);
+   const VkPipelineVertexInputStateCreateInfo *vi_info =
+      pCreateInfo->pVertexInputState;
+   pipeline->vb_used = 0;
+   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
+      const VkVertexInputBindingDescription *desc =
+         &vi_info->pVertexBindingDescriptions[i];
+
+      pipeline->vb_used |= 1 << desc->binding;
+      pipeline->binding_stride[desc->binding] = desc->stride;
+
+      /* Step rate is programmed per vertex element (attribute), not per
+       * binding.  Set up a map of which bindings step per instance, for
+       * reference by vertex element setup; see the example after this
+       * loop.
+       */
+      switch (desc->inputRate) {
+      default:
+      case VK_VERTEX_INPUT_RATE_VERTEX:
+         pipeline->instancing_enable[desc->binding] = false;
+         break;
+      case VK_VERTEX_INPUT_RATE_INSTANCE:
+         pipeline->instancing_enable[desc->binding] = true;
+         break;
+      }
+   }
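+
+   /* Illustrative example: two bindings where binding 0 holds per-vertex
+    * positions and binding 1 holds per-instance transforms would yield
+    * vb_used = 0x3, instancing_enable[0] = false, and
+    * instancing_enable[1] = true.
+    */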
+
+   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
+      pCreateInfo->pInputAssemblyState;
+   pipeline->primitive_restart = ia_info->primitiveRestartEnable;
+   pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
+
+   if (extra && extra->use_rectlist)
+      pipeline->topology = _3DPRIM_RECTLIST;
 
    return VK_SUCCESS;
 }
 
-VkResult anv_CreateComputePipelines(
+VkResult
+anv_graphics_pipeline_create(
+   VkDevice _device,
+   const VkGraphicsPipelineCreateInfo *pCreateInfo,
+   const struct anv_graphics_pipeline_create_info *extra,
+   const VkAllocationCallbacks *pAllocator,
+   VkPipeline *pPipeline)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   switch (device->info.gen) {
+   case 7:
+      if (device->info.is_haswell)
+         return gen75_graphics_pipeline_create(_device, pCreateInfo, extra,
+                                               pAllocator, pPipeline);
+      else
+         return gen7_graphics_pipeline_create(_device, pCreateInfo, extra,
+                                              pAllocator, pPipeline);
+   case 8:
+      return gen8_graphics_pipeline_create(_device, pCreateInfo, extra,
+                                           pAllocator, pPipeline);
+   case 9:
+      return gen9_graphics_pipeline_create(_device, pCreateInfo, extra,
+                                           pAllocator, pPipeline);
+   default:
+      unreachable("unsupported gen\n");
+   }
+}
+
+VkResult anv_CreateGraphicsPipelines(
     VkDevice                                    _device,
     VkPipelineCache                             pipelineCache,
     uint32_t                                    count,
-    const VkComputePipelineCreateInfo*          pCreateInfos,
+    const VkGraphicsPipelineCreateInfo*         pCreateInfos,
+    const VkAllocationCallbacks*                pAllocator,
     VkPipeline*                                 pPipelines)
 {
    VkResult result = VK_SUCCESS;
 
    unsigned i = 0;
    for (; i < count; i++) {
-      result = anv_compute_pipeline_create(_device, &pCreateInfos[i],
-                                           &pPipelines[i]);
+      result = anv_graphics_pipeline_create(_device, &pCreateInfos[i],
+                                            NULL, pAllocator, &pPipelines[i]);
       if (result != VK_SUCCESS) {
          for (unsigned j = 0; j < i; j++) {
-            anv_DestroyPipeline(_device, pPipelines[j]);
+            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
          }
 
          return result;
@@ -889,62 +1084,51 @@ VkResult anv_CreateComputePipelines(
    return VK_SUCCESS;
 }
 
-// Pipeline layout functions
-
-VkResult anv_CreatePipelineLayout(
+static VkResult anv_compute_pipeline_create(
     VkDevice                                    _device,
-    const VkPipelineLayoutCreateInfo*           pCreateInfo,
-    VkPipelineLayout*                           pPipelineLayout)
+    const VkComputePipelineCreateInfo*          pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipeline*                                 pPipeline)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_pipeline_layout *layout;
 
-   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
-
-   layout = anv_device_alloc(device, sizeof(*layout), 8,
-                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
-   if (layout == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
-   layout->num_sets = pCreateInfo->descriptorSetCount;
-
-   uint32_t surface_start[VK_SHADER_STAGE_NUM] = { 0, };
-   uint32_t sampler_start[VK_SHADER_STAGE_NUM] = { 0, };
-
-   for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
-      layout->stage[s].surface_count = 0;
-      layout->stage[s].sampler_count = 0;
-   }
-
-   for (uint32_t i = 0; i < pCreateInfo->descriptorSetCount; i++) {
-      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
-                      pCreateInfo->pSetLayouts[i]);
-
-      layout->set[i].layout = set_layout;
-      for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
-         layout->set[i].surface_start[s] = surface_start[s];
-         surface_start[s] += set_layout->stage[s].surface_count;
-         layout->set[i].sampler_start[s] = sampler_start[s];
-         sampler_start[s] += set_layout->stage[s].sampler_count;
-
-         layout->stage[s].surface_count += set_layout->stage[s].surface_count;
-         layout->stage[s].sampler_count += set_layout->stage[s].sampler_count;
-      }
+   switch (device->info.gen) {
+   case 7:
+      if (device->info.is_haswell)
+         return gen75_compute_pipeline_create(_device, pCreateInfo,
+                                              pAllocator, pPipeline);
+      else
+         return gen7_compute_pipeline_create(_device, pCreateInfo,
+                                             pAllocator, pPipeline);
+   case 8:
+      return gen8_compute_pipeline_create(_device, pCreateInfo,
+                                          pAllocator, pPipeline);
+   case 9:
+      return gen9_compute_pipeline_create(_device, pCreateInfo,
+                                          pAllocator, pPipeline);
+   default:
+      unreachable("unsupported gen\n");
    }
-
-   *pPipelineLayout = anv_pipeline_layout_to_handle(layout);
-
-   return VK_SUCCESS;
 }
 
-VkResult anv_DestroyPipelineLayout(
+VkResult anv_CreateComputePipelines(
     VkDevice                                    _device,
-    VkPipelineLayout                            _pipelineLayout)
+    VkPipelineCache                             pipelineCache,
+    uint32_t                                    count,
+    const VkComputePipelineCreateInfo*          pCreateInfos,
+    const VkAllocationCallbacks*                pAllocator,
+    VkPipeline*                                 pPipelines)
 {
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);
+   VkResult result = VK_SUCCESS;
 
-   anv_device_free(device, pipeline_layout);
+   unsigned i = 0;
+   for (; i < count; i++) {
+      result = anv_compute_pipeline_create(_device, &pCreateInfos[i],
+                                           pAllocator, &pPipelines[i]);
+      if (result != VK_SUCCESS) {
+         for (unsigned j = 0; j < i; j++) {
+            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
+         }
+
+         return result;
+      }
+   }
 
    return VK_SUCCESS;
 }