anv/pipeline: Allow the user to pass a null MultisampleCreateInfo
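
The driver previously dereferenced pCreateInfo->pMultisampleState
unconditionally.  With this change, populate_wm_prog_key() and
anv_pipeline_init() guard every use behind a NULL check,
anv_pipeline_validate_create_info() drops its assert on the pointer,
and a missing multisample struct is treated as non-multisampled
rendering.  The diff also threads VkAllocationCallbacks through the
pipeline entrypoints to track the current API headers.

A minimal sketch of the application-side case this enables
(hypothetical helper, not part of the patch):

    #include <vulkan/vulkan.h>

    /* Create a pipeline with no multisample state; before this change
     * the driver asserted that pMultisampleState was non-NULL. */
    static VkResult
    create_single_sampled_pipeline(VkDevice device,
                                   const VkGraphicsPipelineCreateInfo *base,
                                   VkPipeline *pipeline)
    {
       VkGraphicsPipelineCreateInfo info = *base;
       info.pMultisampleState = NULL;  /* now legal */
       return vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1,
                                        &info, NULL, pipeline);
    }

The anv_alloc2()/anv_free2() helpers used below follow the usual
parent-fallback pattern (see anv_DestroyPipeline, which picks
pAllocator when present and &device->alloc otherwise).  A rough
reconstruction, under that assumption:

    /* Sketch only: per-call callbacks win; fall back to the parent's. */
    static void *
    anv_alloc2(const VkAllocationCallbacks *parent_alloc,
               const VkAllocationCallbacks *alloc,
               size_t size, size_t align,
               VkSystemAllocationScope scope)
    {
       if (alloc == NULL)
          alloc = parent_alloc;
       return alloc->pfnAllocation(alloc->pUserData, size, align, scope);
    }

---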
diff --git a/src/vulkan/anv_pipeline.c b/src/vulkan/anv_pipeline.c
index 1fce94b7f01925d72203fab9386c28d3aae3acd3..bf983ed8f2ab7107b4cca04b241880f42572ac4a 100644
--- a/src/vulkan/anv_pipeline.c
+++ b/src/vulkan/anv_pipeline.c
@@ -40,6 +40,7 @@
 VkResult anv_CreateShaderModule(
     VkDevice                                    _device,
     const VkShaderModuleCreateInfo*             pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkShaderModule*                             pShaderModule)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
@@ -48,8 +49,9 @@ VkResult anv_CreateShaderModule(
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
    assert(pCreateInfo->flags == 0);
 
-   module = anv_device_alloc(device, sizeof(*module) + pCreateInfo->codeSize, 8,
-                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
+   module = anv_alloc2(&device->alloc, pAllocator,
+                       sizeof(*module) + pCreateInfo->codeSize, 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (module == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
@@ -64,118 +66,60 @@ VkResult anv_CreateShaderModule(
 
 void anv_DestroyShaderModule(
     VkDevice                                    _device,
-    VkShaderModule                              _module)
+    VkShaderModule                              _module,
+    const VkAllocationCallbacks*                pAllocator)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_shader_module, module, _module);
 
-   anv_device_free(device, module);
-}
-
-VkResult anv_CreateShader(
-    VkDevice                                    _device,
-    const VkShaderCreateInfo*                   pCreateInfo,
-    VkShader*                                   pShader)
-{
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->module);
-   struct anv_shader *shader;
-
-   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);
-   assert(pCreateInfo->flags == 0);
-
-   const char *name = pCreateInfo->pName ? pCreateInfo->pName : "main";
-   size_t name_len = strlen(name);
-
-   shader = anv_device_alloc(device, sizeof(*shader) + name_len + 1, 8,
-                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
-   if (shader == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
-   shader->module = module,
-   memcpy(shader->entrypoint, name, name_len + 1);
-
-   *pShader = anv_shader_to_handle(shader);
-
-   return VK_SUCCESS;
-}
-
-void anv_DestroyShader(
-    VkDevice                                    _device,
-    VkShader                                    _shader)
-{
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   ANV_FROM_HANDLE(anv_shader, shader, _shader);
-
-   anv_device_free(device, shader);
+   anv_free2(&device->alloc, pAllocator, module);
 }
 
 #define SPIR_V_MAGIC_NUMBER 0x07230203
 
-static const gl_shader_stage vk_shader_stage_to_mesa_stage[] = {
-   [VK_SHADER_STAGE_VERTEX] = MESA_SHADER_VERTEX,
-   [VK_SHADER_STAGE_TESS_CONTROL] = -1,
-   [VK_SHADER_STAGE_TESS_EVALUATION] = -1,
-   [VK_SHADER_STAGE_GEOMETRY] = MESA_SHADER_GEOMETRY,
-   [VK_SHADER_STAGE_FRAGMENT] = MESA_SHADER_FRAGMENT,
-   [VK_SHADER_STAGE_COMPUTE] = MESA_SHADER_COMPUTE,
-};
-
-static bool
-is_scalar_shader_stage(const struct brw_compiler *compiler, VkShaderStage stage)
-{
-   switch (stage) {
-   case VK_SHADER_STAGE_VERTEX:
-      return compiler->scalar_vs;
-   case VK_SHADER_STAGE_GEOMETRY:
-      return false;
-   case VK_SHADER_STAGE_FRAGMENT:
-   case VK_SHADER_STAGE_COMPUTE:
-      return true;
-   default:
-      unreachable("Unsupported shader stage");
-   }
-}
-
 /* Eventually, this will become part of anv_CreateShader.  Unfortunately,
  * we can't do that yet because we don't have the ability to copy nir.
  */
 static nir_shader *
 anv_shader_compile_to_nir(struct anv_device *device,
-                          struct anv_shader *shader, VkShaderStage vk_stage)
+                          struct anv_shader_module *module,
+                          const char *entrypoint_name,
+                          gl_shader_stage stage)
 {
-   if (strcmp(shader->entrypoint, "main") != 0) {
+   if (strcmp(entrypoint_name, "main") != 0) {
       anv_finishme("Multiple shaders per module not really supported");
    }
 
-   gl_shader_stage stage = vk_shader_stage_to_mesa_stage[vk_stage];
    const struct brw_compiler *compiler =
       device->instance->physicalDevice.compiler;
    const nir_shader_compiler_options *nir_options =
       compiler->glsl_compiler_options[stage].NirOptions;
 
    nir_shader *nir;
-   if (shader->module->nir) {
+   if (module->nir) {
       /* Some things such as our meta clear/blit code will give us a NIR
       * shader directly.  In that case, we ignore the SPIR-V entirely
       * and just use the NIR shader. */
-      nir = shader->module->nir;
+      nir = module->nir;
       nir->options = nir_options;
    } else {
-      uint32_t *spirv = (uint32_t *) shader->module->data;
+      uint32_t *spirv = (uint32_t *) module->data;
       assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
-      assert(shader->module->size % 4 == 0);
+      assert(module->size % 4 == 0);
 
-      nir = spirv_to_nir(spirv, shader->module->size / 4, stage, nir_options);
+      nir = spirv_to_nir(spirv, module->size / 4, stage, nir_options);
    }
    nir_validate_shader(nir);
 
+   /* Vulkan uses the separate-shader linking model */
+   nir->info.separate_shader = true;
+
    /* Make sure the provided shader has exactly one entrypoint and that the
     * name matches the name that came in from the VkShader.
     */
    nir_function_impl *entrypoint = NULL;
    nir_foreach_overload(nir, overload) {
-      if (strcmp(shader->entrypoint, overload->function->name) == 0 &&
+      if (strcmp(entrypoint_name, overload->function->name) == 0 &&
           overload->impl) {
          assert(entrypoint == NULL);
          entrypoint = overload->impl;
@@ -183,8 +127,7 @@ anv_shader_compile_to_nir(struct anv_device *device,
    }
    assert(entrypoint != NULL);
 
-   brw_preprocess_nir(nir, &device->info,
-                      is_scalar_shader_stage(compiler, vk_stage));
+   nir = brw_preprocess_nir(nir, compiler->scalar_stage[stage]);
 
    nir_shader_gather_info(nir, entrypoint);
 
@@ -194,32 +137,29 @@ anv_shader_compile_to_nir(struct anv_device *device,
 VkResult anv_CreatePipelineCache(
     VkDevice                                    device,
     const VkPipelineCacheCreateInfo*            pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkPipelineCache*                            pPipelineCache)
 {
-   pPipelineCache->handle = 1;
+   *pPipelineCache = (VkPipelineCache)1;
 
    stub_return(VK_SUCCESS);
 }
 
 void anv_DestroyPipelineCache(
     VkDevice                                    _device,
-    VkPipelineCache                             _cache)
-{
-}
-
-size_t anv_GetPipelineCacheSize(
-    VkDevice                                    device,
-    VkPipelineCache                             pipelineCache)
+    VkPipelineCache                             _cache,
+    const VkAllocationCallbacks*                pAllocator)
 {
-   stub_return(0);
 }
 
 VkResult anv_GetPipelineCacheData(
     VkDevice                                    device,
     VkPipelineCache                             pipelineCache,
+    size_t*                                     pDataSize,
     void*                                       pData)
 {
-   stub_return(VK_UNSUPPORTED);
+   *pDataSize = 0;
+   stub_return(VK_SUCCESS);
 }
 
 VkResult anv_MergePipelineCaches(
@@ -228,34 +168,37 @@ VkResult anv_MergePipelineCaches(
     uint32_t                                    srcCacheCount,
     const VkPipelineCache*                      pSrcCaches)
 {
-   stub_return(VK_UNSUPPORTED);
+   stub_return(VK_SUCCESS);
 }
 
 void anv_DestroyPipeline(
     VkDevice                                    _device,
-    VkPipeline                                  _pipeline)
+    VkPipeline                                  _pipeline,
+    const VkAllocationCallbacks*                pAllocator)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
 
-   anv_reloc_list_finish(&pipeline->batch_relocs, pipeline->device);
+   anv_reloc_list_finish(&pipeline->batch_relocs,
+                         pAllocator ? pAllocator : &device->alloc);
    anv_state_stream_finish(&pipeline->program_stream);
-   anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
-   anv_device_free(pipeline->device, pipeline);
+   if (pipeline->blend_state.map)
+      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
+   anv_free2(&device->alloc, pAllocator, pipeline);
 }
 
 static const uint32_t vk_to_gen_primitive_type[] = {
-   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]           = _3DPRIM_POINTLIST,
-   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]            = _3DPRIM_LINELIST,
-   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]           = _3DPRIM_LINESTRIP,
-   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]        = _3DPRIM_TRILIST,
-   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]       = _3DPRIM_TRISTRIP,
-   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]         = _3DPRIM_TRIFAN,
-   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ]        = _3DPRIM_LINELIST_ADJ,
-   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ]       = _3DPRIM_LINESTRIP_ADJ,
-   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ]    = _3DPRIM_TRILIST_ADJ,
-   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ]   = _3DPRIM_TRISTRIP_ADJ,
-   [VK_PRIMITIVE_TOPOLOGY_PATCH]                = _3DPRIM_PATCHLIST_1
+   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
+   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
+   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
+/*   [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST]                = _3DPRIM_PATCHLIST_1 */
 };
 
 static void
@@ -282,6 +225,15 @@ populate_vs_prog_key(const struct brw_device_info *devinfo,
    /* XXX: Handle sampler_prog_key */
 }
 
+static void
+populate_gs_prog_key(const struct brw_device_info *devinfo,
+                     struct brw_gs_prog_key *key)
+{
+   memset(key, 0, sizeof(*key));
+
+   populate_sampler_prog_key(devinfo, &key->tex);
+}
+
 static void
 populate_wm_prog_key(const struct brw_device_info *devinfo,
                      const VkGraphicsPipelineCreateInfo *info,
@@ -293,6 +245,8 @@ populate_wm_prog_key(const struct brw_device_info *devinfo,
 
    populate_sampler_prog_key(devinfo, &key->tex);
 
+   /* TODO: Fill out key->input_slots_valid */
+
    /* Vulkan doesn't specify a default */
    key->high_quality_derivatives = false;
 
@@ -306,16 +260,16 @@ populate_wm_prog_key(const struct brw_device_info *devinfo,
    key->nr_color_regions = render_pass->subpasses[info->subpass].color_count;
 
    key->replicate_alpha = key->nr_color_regions > 1 &&
-                          info->pColorBlendState->alphaToCoverageEnable;
+                          info->pMultisampleState &&
+                          info->pMultisampleState->alphaToCoverageEnable;
 
-   if (info->pMultisampleState && info->pMultisampleState->rasterSamples > 1) {
+   if (info->pMultisampleState && info->pMultisampleState->rasterizationSamples > 1) {
       /* We should probably pull this out of the shader, but it's fairly
        * harmless to compute it and then let dead-code take care of it.
        */
-      key->compute_sample_id = true;
       key->persample_shading = info->pMultisampleState->sampleShadingEnable;
       if (key->persample_shading)
-         key->persample_2x = info->pMultisampleState->rasterSamples == 2;
+         key->persample_2x = info->pMultisampleState->rasterizationSamples == 2;
 
       key->compute_pos_offset = info->pMultisampleState->sampleShadingEnable;
       key->compute_sample_id = info->pMultisampleState->sampleShadingEnable;
@@ -333,29 +287,25 @@ populate_cs_prog_key(const struct brw_device_info *devinfo,
 
 static nir_shader *
 anv_pipeline_compile(struct anv_pipeline *pipeline,
-                     struct anv_shader *shader,
-                     VkShaderStage stage,
+                     struct anv_shader_module *module,
+                     const char *entrypoint,
+                     gl_shader_stage stage,
                      struct brw_stage_prog_data *prog_data)
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
 
-   nir_shader *nir = anv_shader_compile_to_nir(pipeline->device, shader, stage);
+   nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
+                                               module, entrypoint, stage);
    if (nir == NULL)
       return NULL;
 
-   bool have_push_constants = false;
-   nir_foreach_variable(var, &nir->uniforms) {
-      if (!glsl_type_is_sampler(var->type)) {
-         have_push_constants = true;
-         break;
-      }
-   }
+   anv_nir_lower_push_constants(nir, compiler->scalar_stage[stage]);
 
    /* Figure out the number of parameters */
    prog_data->nr_params = 0;
 
-   if (have_push_constants) {
+   if (nir->num_uniforms > 0) {
       /* If the shader uses any push constants at all, we'll just give
        * them the maximum possible number
        */
@@ -363,13 +313,16 @@ anv_pipeline_compile(struct anv_pipeline *pipeline,
    }
 
    if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
-      prog_data->nr_params += MAX_DYNAMIC_BUFFERS;
+      prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;
+
+   if (pipeline->layout && pipeline->layout->stage[stage].image_count > 0)
+      prog_data->nr_params += pipeline->layout->stage[stage].image_count *
+                              BRW_IMAGE_PARAM_SIZE;
 
    if (prog_data->nr_params > 0) {
-      prog_data->param = (const gl_constant_value **)
-         anv_device_alloc(pipeline->device,
-                          prog_data->nr_params * sizeof(gl_constant_value *),
-                          8, VK_SYSTEM_ALLOC_TYPE_INTERNAL_SHADER);
+      /* XXX: I think we're leaking this */
+      prog_data->param = (const union gl_constant_value **)
+         malloc(prog_data->nr_params * sizeof(union gl_constant_value *));
 
       /* We now set the param values to be offsets into an
        * anv_push_constants structure.  Since the compiler doesn't
@@ -377,10 +330,10 @@ anv_pipeline_compile(struct anv_pipeline *pipeline,
        * params array, it doesn't really matter what we put here.
        */
       struct anv_push_constants *null_data = NULL;
-      if (have_push_constants) {
+      if (nir->num_uniforms > 0) {
          /* Fill out the push constants section of the param array */
          for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
-            prog_data->param[i] = (const gl_constant_value *)
+            prog_data->param[i] = (const union gl_constant_value *)
                &null_data->client_data[i * sizeof(float)];
       }
    }
@@ -389,25 +342,27 @@ anv_pipeline_compile(struct anv_pipeline *pipeline,
    anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);
 
    /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
-   anv_nir_apply_pipeline_layout(nir, pipeline->layout);
+   if (pipeline->layout)
+      anv_nir_apply_pipeline_layout(nir, prog_data, pipeline->layout);
 
    /* All binding table offsets provided by apply_pipeline_layout() are
    * relative to the start of the binding table (plus MAX_RTS for VS).
     */
-   unsigned bias = stage == VK_SHADER_STAGE_FRAGMENT ? MAX_RTS : 0;
+   unsigned bias = stage == MESA_SHADER_FRAGMENT ? MAX_RTS : 0;
    prog_data->binding_table.size_bytes = 0;
    prog_data->binding_table.texture_start = bias;
    prog_data->binding_table.ubo_start = bias;
+   prog_data->binding_table.ssbo_start = bias;
    prog_data->binding_table.image_start = bias;
 
    /* Finish the optimization and compilation process */
-   brw_postprocess_nir(nir, &pipeline->device->info,
-                       is_scalar_shader_stage(compiler, stage));
+   nir = brw_lower_nir(nir, &pipeline->device->info, NULL,
+                       compiler->scalar_stage[stage]);
 
    /* nir_lower_io will only handle the push constants; we need to set this
     * to the full number of possible uniforms.
     */
-   nir->num_uniforms = prog_data->nr_params;
+   nir->num_uniforms = prog_data->nr_params * 4;
 
    return nir;
 }
@@ -423,25 +378,29 @@ anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
 
    memcpy(state.map, data, size);
 
+   if (!pipeline->device->info.has_llc)
+      anv_state_clflush(state);
+
    return state.offset;
 }
+
 static void
 anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
-                                VkShaderStage stage,
+                                gl_shader_stage stage,
                                 struct brw_stage_prog_data *prog_data)
 {
    struct brw_device_info *devinfo = &pipeline->device->info;
    uint32_t max_threads[] = {
-      [VK_SHADER_STAGE_VERTEX]                  = devinfo->max_vs_threads,
-      [VK_SHADER_STAGE_TESS_CONTROL]            = 0,
-      [VK_SHADER_STAGE_TESS_EVALUATION]         = 0,
-      [VK_SHADER_STAGE_GEOMETRY]                = devinfo->max_gs_threads,
-      [VK_SHADER_STAGE_FRAGMENT]                = devinfo->max_wm_threads,
-      [VK_SHADER_STAGE_COMPUTE]                 = devinfo->max_cs_threads,
+      [MESA_SHADER_VERTEX]                  = devinfo->max_vs_threads,
+      [MESA_SHADER_TESS_CTRL]               = 0,
+      [MESA_SHADER_TESS_EVAL]               = 0,
+      [MESA_SHADER_GEOMETRY]                = devinfo->max_gs_threads,
+      [MESA_SHADER_FRAGMENT]                = devinfo->max_wm_threads,
+      [MESA_SHADER_COMPUTE]                 = devinfo->max_cs_threads,
    };
 
    pipeline->prog_data[stage] = prog_data;
-   pipeline->active_stages |= 1 << stage;
+   pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
    pipeline->scratch_start[stage] = pipeline->total_scratch;
    pipeline->total_scratch =
       align_u32(pipeline->total_scratch, 1024) +
@@ -451,7 +410,8 @@ anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
 static VkResult
 anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
                         const VkGraphicsPipelineCreateInfo *info,
-                        struct anv_shader *shader)
+                        struct anv_shader_module *module,
+                        const char *entrypoint)
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
@@ -464,15 +424,15 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
 
    memset(prog_data, 0, sizeof(*prog_data));
 
-   nir_shader *nir = anv_pipeline_compile(pipeline, shader,
-                                          VK_SHADER_STAGE_VERTEX,
+   nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                          MESA_SHADER_VERTEX,
                                           &prog_data->base.base);
    if (nir == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    void *mem_ctx = ralloc_context(NULL);
 
-   if (shader->module->nir == NULL)
+   if (module->nir == NULL)
       ralloc_steal(mem_ctx, nir);
 
    prog_data->inputs_read = nir->info.inputs_read;
@@ -481,7 +441,7 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
    brw_compute_vue_map(&pipeline->device->info,
                        &prog_data->base.vue_map,
                        nir->info.outputs_written,
-                       false /* XXX: Do SSO? */);
+                       nir->info.separate_shader);
 
    unsigned code_size;
    const unsigned *shader_code =
@@ -504,7 +464,62 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
 
    ralloc_free(mem_ctx);
 
-   anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_VERTEX,
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX,
+                                   &prog_data->base.base);
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
+                        const VkGraphicsPipelineCreateInfo *info,
+                        struct anv_shader_module *module,
+                        const char *entrypoint)
+{
+   const struct brw_compiler *compiler =
+      pipeline->device->instance->physicalDevice.compiler;
+   struct brw_gs_prog_data *prog_data = &pipeline->gs_prog_data;
+   struct brw_gs_prog_key key;
+
+   populate_gs_prog_key(&pipeline->device->info, &key);
+
+   /* TODO: Look up shader in cache */
+
+   memset(prog_data, 0, sizeof(*prog_data));
+
+   nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                          MESA_SHADER_GEOMETRY,
+                                          &prog_data->base.base);
+   if (nir == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   void *mem_ctx = ralloc_context(NULL);
+
+   if (module->nir == NULL)
+      ralloc_steal(mem_ctx, nir);
+
+   brw_compute_vue_map(&pipeline->device->info,
+                       &prog_data->base.vue_map,
+                       nir->info.outputs_written,
+                       nir->info.separate_shader);
+
+   unsigned code_size;
+   const unsigned *shader_code =
+      brw_compile_gs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+                     NULL, -1, &code_size, NULL);
+   if (shader_code == NULL) {
+      ralloc_free(mem_ctx);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+
+   /* TODO: SIMD8 GS */
+   pipeline->gs_kernel =
+      anv_pipeline_upload_kernel(pipeline, shader_code, code_size);
+   pipeline->gs_vertex_count = nir->info.gs.vertices_in;
+
+   ralloc_free(mem_ctx);
+
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY,
                                    &prog_data->base.base);
 
    return VK_SUCCESS;
@@ -513,7 +528,8 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
 static VkResult
 anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                         const VkGraphicsPipelineCreateInfo *info,
-                        struct anv_shader *shader)
+                        struct anv_shader_module *module,
+                        const char *entrypoint)
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
@@ -531,15 +547,15 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
 
    prog_data->binding_table.render_target_start = 0;
 
-   nir_shader *nir = anv_pipeline_compile(pipeline, shader,
-                                          VK_SHADER_STAGE_FRAGMENT,
+   nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                          MESA_SHADER_FRAGMENT,
                                           &prog_data->base);
    if (nir == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    void *mem_ctx = ralloc_context(NULL);
 
-   if (shader->module->nir == NULL)
+   if (module->nir == NULL)
       ralloc_steal(mem_ctx, nir);
 
    unsigned code_size;
@@ -580,7 +596,7 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
 
    ralloc_free(mem_ctx);
 
-   anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_FRAGMENT,
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT,
                                    &prog_data->base);
 
    return VK_SUCCESS;
@@ -589,7 +605,8 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
 VkResult
 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                         const VkComputePipelineCreateInfo *info,
-                        struct anv_shader *shader)
+                        struct anv_shader_module *module,
+                        const char *entrypoint)
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
@@ -602,15 +619,15 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
 
    memset(prog_data, 0, sizeof(*prog_data));
 
-   nir_shader *nir = anv_pipeline_compile(pipeline, shader,
-                                          VK_SHADER_STAGE_COMPUTE,
+   nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+                                          MESA_SHADER_COMPUTE,
                                           &prog_data->base);
    if (nir == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    void *mem_ctx = ralloc_context(NULL);
 
-   if (shader->module->nir == NULL)
+   if (module->nir == NULL)
       ralloc_steal(mem_ctx, nir);
 
    unsigned code_size;
@@ -626,7 +643,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                                                   shader_code, code_size);
    ralloc_free(mem_ctx);
 
-   anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_COMPUTE,
+   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE,
                                    &prog_data->base);
 
    return VK_SUCCESS;
@@ -790,22 +807,24 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
    }
 
    if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
-      assert(pCreateInfo->pRasterState);
-      dynamic->line_width = pCreateInfo->pRasterState->lineWidth;
+      assert(pCreateInfo->pRasterizationState);
+      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
    }
 
    if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
-      assert(pCreateInfo->pRasterState);
-      dynamic->depth_bias.bias = pCreateInfo->pRasterState->depthBias;
-      dynamic->depth_bias.clamp = pCreateInfo->pRasterState->depthBiasClamp;
-      dynamic->depth_bias.slope_scaled =
-         pCreateInfo->pRasterState->slopeScaledDepthBias;
+      assert(pCreateInfo->pRasterizationState);
+      dynamic->depth_bias.bias =
+         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
+      dynamic->depth_bias.clamp =
+         pCreateInfo->pRasterizationState->depthBiasClamp;
+      dynamic->depth_bias.slope =
+         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
    }
 
    if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
       assert(pCreateInfo->pColorBlendState);
       typed_memcpy(dynamic->blend_constants,
-                   pCreateInfo->pColorBlendState->blendConst, 4);
+                   pCreateInfo->pColorBlendState->blendConstants, 4);
    }
 
    /* If there is no depthstencil attachment, then don't read
@@ -831,25 +850,25 @@ anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
       if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
          assert(pCreateInfo->pDepthStencilState);
          dynamic->stencil_compare_mask.front =
-            pCreateInfo->pDepthStencilState->front.stencilCompareMask;
+            pCreateInfo->pDepthStencilState->front.compareMask;
          dynamic->stencil_compare_mask.back =
-            pCreateInfo->pDepthStencilState->back.stencilCompareMask;
+            pCreateInfo->pDepthStencilState->back.compareMask;
       }
 
       if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
          assert(pCreateInfo->pDepthStencilState);
          dynamic->stencil_write_mask.front =
-            pCreateInfo->pDepthStencilState->front.stencilWriteMask;
+            pCreateInfo->pDepthStencilState->front.writeMask;
          dynamic->stencil_write_mask.back =
-            pCreateInfo->pDepthStencilState->back.stencilWriteMask;
+            pCreateInfo->pDepthStencilState->back.writeMask;
       }
 
       if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
          assert(pCreateInfo->pDepthStencilState);
          dynamic->stencil_reference.front =
-            pCreateInfo->pDepthStencilState->front.stencilReference;
+            pCreateInfo->pDepthStencilState->front.reference;
          dynamic->stencil_reference.back =
-            pCreateInfo->pDepthStencilState->back.stencilReference;
+            pCreateInfo->pDepthStencilState->back.reference;
       }
    }
 
@@ -880,8 +899,7 @@ anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
    assert(info->pVertexInputState);
    assert(info->pInputAssemblyState);
    assert(info->pViewportState);
-   assert(info->pRasterState);
-   assert(info->pMultisampleState);
+   assert(info->pRasterizationState);
 
    if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
       assert(info->pDepthStencilState);
@@ -891,8 +909,8 @@ anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
 
    for (uint32_t i = 0; i < info->stageCount; ++i) {
       switch (info->pStages[i].stage) {
-      case VK_SHADER_STAGE_TESS_CONTROL:
-      case VK_SHADER_STAGE_TESS_EVALUATION:
+      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
          assert(info->pTessellationState);
          break;
       default:
@@ -904,22 +922,23 @@ anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
 VkResult
 anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                   const VkGraphicsPipelineCreateInfo *pCreateInfo,
-                  const struct anv_graphics_pipeline_create_info *extra)
+                  const struct anv_graphics_pipeline_create_info *extra,
+                  const VkAllocationCallbacks *alloc)
 {
-   VkResult result;
-
    anv_validate {
       anv_pipeline_validate_create_info(pCreateInfo);
    }
 
+   if (alloc == NULL)
+      alloc = &device->alloc;
+
    pipeline->device = device;
    pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
 
-   result = anv_reloc_list_init(&pipeline->batch_relocs, device);
-   if (result != VK_SUCCESS) {
-      anv_device_free(device, pipeline);
-      return result;
-   }
+   anv_reloc_list_init(&pipeline->batch_relocs, alloc);
+   /* TODO: Handle allocation fail */
+
+   pipeline->batch.alloc = alloc;
    pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
    pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
    pipeline->batch.relocs = &pipeline->batch_relocs;
@@ -932,7 +951,7 @@ anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
    if (pCreateInfo->pTessellationState)
       anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO");
    if (pCreateInfo->pMultisampleState &&
-       pCreateInfo->pMultisampleState->rasterSamples > 1)
+       pCreateInfo->pMultisampleState->rasterizationSamples > 1)
       anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");
 
    pipeline->use_repclear = extra && extra->use_repclear;
@@ -946,20 +965,25 @@ anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
 
    pipeline->vs_simd8 = NO_KERNEL;
    pipeline->vs_vec4 = NO_KERNEL;
-   pipeline->gs_vec4 = NO_KERNEL;
+   pipeline->gs_kernel = NO_KERNEL;
 
    pipeline->active_stages = 0;
    pipeline->total_scratch = 0;
 
    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
-      ANV_FROM_HANDLE(anv_shader, shader, pCreateInfo->pStages[i].shader);
+      ANV_FROM_HANDLE(anv_shader_module, module,
+                      pCreateInfo->pStages[i].module);
+      const char *entrypoint = pCreateInfo->pStages[i].pName;
 
       switch (pCreateInfo->pStages[i].stage) {
-      case VK_SHADER_STAGE_VERTEX:
-         anv_pipeline_compile_vs(pipeline, pCreateInfo, shader);
+      case VK_SHADER_STAGE_VERTEX_BIT:
+         anv_pipeline_compile_vs(pipeline, pCreateInfo, module, entrypoint);
+         break;
+      case VK_SHADER_STAGE_GEOMETRY_BIT:
+         anv_pipeline_compile_gs(pipeline, pCreateInfo, module, entrypoint);
          break;
-      case VK_SHADER_STAGE_FRAGMENT:
-         anv_pipeline_compile_fs(pipeline, pCreateInfo, shader);
+      case VK_SHADER_STAGE_FRAGMENT_BIT:
+         anv_pipeline_compile_fs(pipeline, pCreateInfo, module, entrypoint);
          break;
       default:
          anv_finishme("Unsupported shader stage");
@@ -977,22 +1001,22 @@ anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
    const VkPipelineVertexInputStateCreateInfo *vi_info =
       pCreateInfo->pVertexInputState;
    pipeline->vb_used = 0;
-   for (uint32_t i = 0; i < vi_info->bindingCount; i++) {
+   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
       const VkVertexInputBindingDescription *desc =
          &vi_info->pVertexBindingDescriptions[i];
 
       pipeline->vb_used |= 1 << desc->binding;
-      pipeline->binding_stride[desc->binding] = desc->strideInBytes;
+      pipeline->binding_stride[desc->binding] = desc->stride;
 
       /* Step rate is programmed per vertex element (attribute), not
        * binding. Set up a map of which bindings step per instance, for
        * reference by vertex element setup. */
-      switch (desc->stepRate) {
+      switch (desc->inputRate) {
       default:
-      case VK_VERTEX_INPUT_STEP_RATE_VERTEX:
+      case VK_VERTEX_INPUT_RATE_VERTEX:
          pipeline->instancing_enable[desc->binding] = false;
          break;
-      case VK_VERTEX_INPUT_STEP_RATE_INSTANCE:
+      case VK_VERTEX_INPUT_RATE_INSTANCE:
          pipeline->instancing_enable[desc->binding] = true;
          break;
       }
@@ -1014,15 +1038,21 @@ anv_graphics_pipeline_create(
    VkDevice _device,
    const VkGraphicsPipelineCreateInfo *pCreateInfo,
    const struct anv_graphics_pipeline_create_info *extra,
+   const VkAllocationCallbacks *pAllocator,
    VkPipeline *pPipeline)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
 
    switch (device->info.gen) {
    case 7:
-      return gen7_graphics_pipeline_create(_device, pCreateInfo, extra, pPipeline);
+      if (device->info.is_haswell)
+         return gen75_graphics_pipeline_create(_device, pCreateInfo, extra, pAllocator, pPipeline);
+      else
+         return gen7_graphics_pipeline_create(_device, pCreateInfo, extra, pAllocator, pPipeline);
    case 8:
-      return gen8_graphics_pipeline_create(_device, pCreateInfo, extra, pPipeline);
+      return gen8_graphics_pipeline_create(_device, pCreateInfo, extra, pAllocator, pPipeline);
+   case 9:
+      return gen9_graphics_pipeline_create(_device, pCreateInfo, extra, pAllocator, pPipeline);
    default:
       unreachable("unsupported gen\n");
    }
@@ -1033,6 +1063,7 @@ VkResult anv_CreateGraphicsPipelines(
     VkPipelineCache                             pipelineCache,
     uint32_t                                    count,
     const VkGraphicsPipelineCreateInfo*         pCreateInfos,
+    const VkAllocationCallbacks*                pAllocator,
     VkPipeline*                                 pPipelines)
 {
    VkResult result = VK_SUCCESS;
@@ -1040,10 +1071,10 @@ VkResult anv_CreateGraphicsPipelines(
    unsigned i = 0;
    for (; i < count; i++) {
       result = anv_graphics_pipeline_create(_device, &pCreateInfos[i],
-                                            NULL, &pPipelines[i]);
+                                            NULL, pAllocator, &pPipelines[i]);
       if (result != VK_SUCCESS) {
          for (unsigned j = 0; j < i; j++) {
-            anv_DestroyPipeline(_device, pPipelines[j]);
+            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
          }
 
          return result;
@@ -1056,15 +1087,21 @@ VkResult anv_CreateGraphicsPipelines(
 static VkResult anv_compute_pipeline_create(
     VkDevice                                    _device,
     const VkComputePipelineCreateInfo*          pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkPipeline*                                 pPipeline)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
 
    switch (device->info.gen) {
    case 7:
-      return gen7_compute_pipeline_create(_device, pCreateInfo, pPipeline);
+      if (device->info.is_haswell)
+         return gen75_compute_pipeline_create(_device, pCreateInfo, pAllocator, pPipeline);
+      else
+         return gen7_compute_pipeline_create(_device, pCreateInfo, pAllocator, pPipeline);
    case 8:
-      return gen8_compute_pipeline_create(_device, pCreateInfo, pPipeline);
+      return gen8_compute_pipeline_create(_device, pCreateInfo, pAllocator, pPipeline);
+   case 9:
+      return gen9_compute_pipeline_create(_device, pCreateInfo, pAllocator, pPipeline);
    default:
       unreachable("unsupported gen\n");
    }
@@ -1075,6 +1112,7 @@ VkResult anv_CreateComputePipelines(
     VkPipelineCache                             pipelineCache,
     uint32_t                                    count,
     const VkComputePipelineCreateInfo*          pCreateInfos,
+    const VkAllocationCallbacks*                pAllocator,
     VkPipeline*                                 pPipelines)
 {
    VkResult result = VK_SUCCESS;
@@ -1082,10 +1120,10 @@ VkResult anv_CreateComputePipelines(
    unsigned i = 0;
    for (; i < count; i++) {
       result = anv_compute_pipeline_create(_device, &pCreateInfos[i],
-                                           &pPipelines[i]);
+                                           pAllocator, &pPipelines[i]);
       if (result != VK_SUCCESS) {
          for (unsigned j = 0; j < i; j++) {
-            anv_DestroyPipeline(_device, pPipelines[j]);
+            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
          }
 
          return result;
@@ -1094,121 +1132,3 @@ VkResult anv_CreateComputePipelines(
 
    return VK_SUCCESS;
 }
-
-// Pipeline layout functions
-
-VkResult anv_CreatePipelineLayout(
-    VkDevice                                    _device,
-    const VkPipelineLayoutCreateInfo*           pCreateInfo,
-    VkPipelineLayout*                           pPipelineLayout)
-{
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_pipeline_layout l, *layout;
-
-   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
-
-   l.num_sets = pCreateInfo->descriptorSetCount;
-
-   unsigned dynamic_offset_count = 0;
-
-   memset(l.stage, 0, sizeof(l.stage));
-   for (uint32_t set = 0; set < pCreateInfo->descriptorSetCount; set++) {
-      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
-                      pCreateInfo->pSetLayouts[set]);
-      l.set[set].layout = set_layout;
-
-      l.set[set].dynamic_offset_start = dynamic_offset_count;
-      for (uint32_t b = 0; b < set_layout->binding_count; b++) {
-         if (set_layout->binding[b].dynamic_offset_index >= 0)
-            dynamic_offset_count += set_layout->binding[b].array_size;
-      }
-
-      for (VkShaderStage s = 0; s < VK_SHADER_STAGE_NUM; s++) {
-         l.set[set].stage[s].surface_start = l.stage[s].surface_count;
-         l.set[set].stage[s].sampler_start = l.stage[s].sampler_count;
-
-         for (uint32_t b = 0; b < set_layout->binding_count; b++) {
-            unsigned array_size = set_layout->binding[b].array_size;
-
-            if (set_layout->binding[b].stage[s].surface_index >= 0) {
-               l.stage[s].surface_count += array_size;
-
-               if (set_layout->binding[b].dynamic_offset_index >= 0)
-                  l.stage[s].has_dynamic_offsets = true;
-            }
-
-            if (set_layout->binding[b].stage[s].sampler_index >= 0)
-               l.stage[s].sampler_count += array_size;
-         }
-      }
-   }
-
-   unsigned num_bindings = 0;
-   for (VkShaderStage s = 0; s < VK_SHADER_STAGE_NUM; s++)
-      num_bindings += l.stage[s].surface_count + l.stage[s].sampler_count;
-
-   size_t size = sizeof(*layout) + num_bindings * sizeof(layout->entries[0]);
-
-   layout = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
-   if (layout == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
-   /* Now we can actually build our surface and sampler maps */
-   struct anv_pipeline_binding *entry = layout->entries;
-   for (VkShaderStage s = 0; s < VK_SHADER_STAGE_NUM; s++) {
-      l.stage[s].surface_to_descriptor = entry;
-      entry += l.stage[s].surface_count;
-      l.stage[s].sampler_to_descriptor = entry;
-      entry += l.stage[s].sampler_count;
-
-      int surface = 0;
-      int sampler = 0;
-      for (uint32_t set = 0; set < pCreateInfo->descriptorSetCount; set++) {
-         struct anv_descriptor_set_layout *set_layout = l.set[set].layout;
-
-         unsigned set_offset = 0;
-         for (uint32_t b = 0; b < set_layout->binding_count; b++) {
-            unsigned array_size = set_layout->binding[b].array_size;
-
-            if (set_layout->binding[b].stage[s].surface_index >= 0) {
-               assert(surface == l.set[set].stage[s].surface_start +
-                                 set_layout->binding[b].stage[s].surface_index);
-               for (unsigned i = 0; i < array_size; i++) {
-                  l.stage[s].surface_to_descriptor[surface + i].set = set;
-                  l.stage[s].surface_to_descriptor[surface + i].offset = set_offset + i;
-               }
-               surface += array_size;
-            }
-
-            if (set_layout->binding[b].stage[s].sampler_index >= 0) {
-               assert(sampler == l.set[set].stage[s].sampler_start +
-                                 set_layout->binding[b].stage[s].sampler_index);
-               for (unsigned i = 0; i < array_size; i++) {
-                  l.stage[s].sampler_to_descriptor[sampler + i].set = set;
-                  l.stage[s].sampler_to_descriptor[sampler + i].offset = set_offset + i;
-               }
-               sampler += array_size;
-            }
-
-            set_offset += array_size;
-         }
-      }
-   }
-
-   /* Finally, we're done setting it up, copy into the allocated version */
-   *layout = l;
-
-   *pPipelineLayout = anv_pipeline_layout_to_handle(layout);
-
-   return VK_SUCCESS;
-}
-
-void anv_DestroyPipelineLayout(
-    VkDevice                                    _device,
-    VkPipelineLayout                            _pipelineLayout)
-{
-   ANV_FROM_HANDLE(anv_device, device, _device);
-   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);
-
-   anv_device_free(device, pipeline_layout);
-}