anv/pipeline: remove unnecessary null-pointer check
src/intel/vulkan/genX_pipeline.c
index 8cb8de06cc9a11206d840a14e54b42bf39614f8c..e52e5d8304ac50ec34b3a977d713bf43b302f066 100644
@@ -28,6 +28,7 @@
 
 #include "common/gen_l3_config.h"
 #include "common/gen_sample_positions.h"
+#include "vk_util.h"
 #include "vk_format_info.h"
 
 static uint32_t
@@ -42,9 +43,35 @@ vertex_element_comp_control(enum isl_format format, unsigned comp)
    default: unreachable("Invalid component");
    }
 
+   /*
+    * Take into account hardware restrictions when dealing with 64-bit floats.
+    *
+    * From Broadwell spec, command reference structures, page 586:
+    *  "When SourceElementFormat is set to one of the *64*_PASSTHRU formats,
+    *   64-bit components are stored in the URB without any conversion. In
+    *   this case, vertex elements must be written as 128 or 256 bits, with
+    *   VFCOMP_STORE_0 being used to pad the output as required. E.g., if
+    *   R64_PASSTHRU is used to copy a 64-bit Red component into the URB,
+    *   Component 1 must be specified as VFCOMP_STORE_0 (with Components 2,3
+    *   set to VFCOMP_NOSTORE) in order to output a 128-bit vertex element, or
+    *   Components 1-3 must be specified as VFCOMP_STORE_0 in order to output
+    *   a 256-bit vertex element. Likewise, use of R64G64B64_PASSTHRU requires
+    *   Component 3 to be specified as VFCOMP_STORE_0 in order to output a
+    *   256-bit vertex element."
+    */
    if (bits) {
       return VFCOMP_STORE_SRC;
-   } else if (comp < 3) {
+   } else if (comp >= 2 &&
+              !isl_format_layouts[format].channels.b.bits &&
+              isl_format_layouts[format].channels.r.type == ISL_RAW) {
+      /* When emitting 64-bit attributes, we need to write either 128 or 256
+       * bit chunks, using VFCOMP_NOSTORE when not writing the chunk, and
+       * VFCOMP_STORE_0 to pad the written chunk */
+      return VFCOMP_NOSTORE;
+   } else if (comp < 3 ||
+              isl_format_layouts[format].channels.r.type == ISL_RAW) {
+      /* Note we need to pad with value 0, not 1, due to hardware restrictions
+       * (see comment above) */
       return VFCOMP_STORE_0;
    } else if (isl_format_layouts[format].channels.r.type == ISL_UINT ||
             isl_format_layouts[format].channels.r.type == ISL_SINT) {
@@ -64,35 +91,59 @@ emit_vertex_input(struct anv_pipeline *pipeline,
 
    /* Pull inputs_read out of the VS prog data */
    const uint64_t inputs_read = vs_prog_data->inputs_read;
+   const uint64_t double_inputs_read =
+      vs_prog_data->double_inputs_read & inputs_read;
    assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
    const uint32_t elements = inputs_read >> VERT_ATTRIB_GENERIC0;
-
-#if GEN_GEN >= 8
-   /* On BDW+, we only need to allocate space for base ids.  Setting up
-    * the actual vertex and instance id is a separate packet.
-    */
-   const bool needs_svgs_elem = vs_prog_data->uses_basevertex ||
-                                vs_prog_data->uses_baseinstance;
-#else
-   /* On Haswell and prior, vertex and instance id are created by using the
-    * ComponentControl fields, so we need an element for any of them.
-    */
+   const uint32_t elements_double = double_inputs_read >> VERT_ATTRIB_GENERIC0;
    const bool needs_svgs_elem = vs_prog_data->uses_vertexid ||
                                 vs_prog_data->uses_instanceid ||
-                                vs_prog_data->uses_basevertex ||
+                                vs_prog_data->uses_firstvertex ||
                                 vs_prog_data->uses_baseinstance;
-#endif
 
-   uint32_t elem_count = __builtin_popcount(elements) + needs_svgs_elem;
-   if (elem_count == 0)
+   uint32_t elem_count = __builtin_popcount(elements) -
+      __builtin_popcount(elements_double) / 2;
+
+   const uint32_t total_elems =
+      elem_count + needs_svgs_elem + vs_prog_data->uses_drawid;
+   if (total_elems == 0)
       return;
 
    uint32_t *p;
 
-   const uint32_t num_dwords = 1 + elem_count * 2;
+   const uint32_t num_dwords = 1 + total_elems * 2;
    p = anv_batch_emitn(&pipeline->batch, num_dwords,
                        GENX(3DSTATE_VERTEX_ELEMENTS));
-   memset(p + 1, 0, (num_dwords - 1) * 4);
+   if (!p)
+      return;
+
+   for (uint32_t i = 0; i < total_elems; i++) {
+      /* The SKL docs for VERTEX_ELEMENT_STATE say:
+       *
+       *    "All elements must be valid from Element[0] to the last valid
+       *    element. (I.e. if Element[2] is valid then Element[1] and
+       *    Element[0] must also be valid)."
+       *
+       * The SKL docs for 3D_Vertex_Component_Control say:
+       *
+       *    "Don't store this component. (Not valid for Component 0, but can
+       *    be used for Component 1-3)."
+       *
+       * So we can't just leave a vertex element blank and hope for the best.
+       * We have to tell the VF hardware to put something in it; so we just
+       * store a bunch of zeros.
+       *
+       * TODO: Compact vertex elements so we never end up with holes.
+       */
+      struct GENX(VERTEX_ELEMENT_STATE) element = {
+         .Valid = true,
+         .Component0Control = VFCOMP_STORE_0,
+         .Component1Control = VFCOMP_STORE_0,
+         .Component2Control = VFCOMP_STORE_0,
+         .Component3Control = VFCOMP_STORE_0,
+      };
+      GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + i * 2], &element);
+   }
 
    for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
       const VkVertexInputAttributeDescription *desc =
@@ -102,12 +153,15 @@ emit_vertex_input(struct anv_pipeline *pipeline,
                                                   VK_IMAGE_ASPECT_COLOR_BIT,
                                                   VK_IMAGE_TILING_LINEAR);
 
-      assert(desc->binding < 32);
+      assert(desc->binding < MAX_VBS);
 
       if ((elements & (1 << desc->location)) == 0)
          continue; /* Binding unused */
 
-      uint32_t slot = __builtin_popcount(elements & ((1 << desc->location) - 1));
+      uint32_t slot =
+         __builtin_popcount(elements & ((1 << desc->location) - 1)) -
+         DIV_ROUND_UP(__builtin_popcount(elements_double &
+                                        ((1 << desc->location) -1)), 2);
 
       struct GENX(VERTEX_ELEMENT_STATE) element = {
          .VertexBufferIndex = desc->binding,
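
The elements/elements_double arithmetic is easiest to see with numbers.  The
snippet below is a standalone sketch (not driver code) assuming a hypothetical
layout: a vec4 at location 0, a dvec4 occupying locations 1-2 (it sets two bits
in both masks but needs only one vertex element), and a vec4 at location 3.  It
reuses the same popcount/DIV_ROUND_UP formulas as the patch:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
       const unsigned elements        = 0xf; /* locations 0,1,2,3 in use   */
       const unsigned elements_double = 0x6; /* locations 1,2 hold doubles */

       /* One vertex element per attribute: 4 bits minus one per double pair */
       const unsigned elem_count = __builtin_popcount(elements) -
                                   __builtin_popcount(elements_double) / 2;
       printf("elem_count = %u\n", elem_count); /* prints 3 */

       /* Slot of each real attribute location (0, 1 and 3) */
       const unsigned attrib_locations[] = { 0, 1, 3 };
       for (unsigned i = 0; i < 3; i++) {
          const unsigned location = attrib_locations[i];
          const unsigned slot =
             __builtin_popcount(elements & ((1 << location) - 1)) -
             DIV_ROUND_UP(__builtin_popcount(elements_double &
                                             ((1 << location) - 1)), 2);
          printf("location %u -> slot %u\n", location, slot); /* 0->0, 1->1, 3->2 */
       }
       return 0;
    }

The three attributes land in consecutive slots 0, 1 and 2, which is exactly the
compaction the subtraction is there to achieve.
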
@@ -128,16 +182,15 @@ emit_vertex_input(struct anv_pipeline *pipeline,
        * VERTEX_BUFFER_STATE which we emit later.
        */
       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
-         vfi.InstancingEnable = pipeline->instancing_enable[desc->binding];
+         vfi.InstancingEnable = pipeline->vb[desc->binding].instanced;
          vfi.VertexElementIndex = slot;
-         /* Vulkan so far doesn't have an instance divisor, so
-          * this is always 1 (ignored if not instancing). */
-         vfi.InstanceDataStepRate = 1;
+         vfi.InstanceDataStepRate =
+            pipeline->vb[desc->binding].instance_divisor;
       }
 #endif
    }
 
-   const uint32_t id_slot = __builtin_popcount(elements);
+   const uint32_t id_slot = elem_count;
    if (needs_svgs_elem) {
       /* From the Broadwell PRM for the 3D_Vertex_Component_Control enum:
        *    "Within a VERTEX_ELEMENT_STATE structure, if a Component
@@ -148,12 +201,12 @@ emit_vertex_input(struct anv_pipeline *pipeline,
        * This means, that if we have BaseInstance, we need BaseVertex as
        * well.  Just do all or nothing.
        */
-      uint32_t base_ctrl = (vs_prog_data->uses_basevertex ||
+      uint32_t base_ctrl = (vs_prog_data->uses_firstvertex ||
                             vs_prog_data->uses_baseinstance) ?
                            VFCOMP_STORE_SRC : VFCOMP_STORE_0;
 
       struct GENX(VERTEX_ELEMENT_STATE) element = {
-         .VertexBufferIndex = 32, /* Reserved for this */
+         .VertexBufferIndex = ANV_SVGS_VB_INDEX,
          .Valid = true,
          .SourceElementFormat = ISL_FORMAT_R32G32_UINT,
          .Component0Control = base_ctrl,
@@ -179,129 +232,53 @@ emit_vertex_input(struct anv_pipeline *pipeline,
       sgvs.InstanceIDElementOffset     = id_slot;
    }
 #endif
+
+   const uint32_t drawid_slot = elem_count + needs_svgs_elem;
+   if (vs_prog_data->uses_drawid) {
+      struct GENX(VERTEX_ELEMENT_STATE) element = {
+         .VertexBufferIndex = ANV_DRAWID_VB_INDEX,
+         .Valid = true,
+         .SourceElementFormat = ISL_FORMAT_R32_UINT,
+         .Component0Control = VFCOMP_STORE_SRC,
+         .Component1Control = VFCOMP_STORE_0,
+         .Component2Control = VFCOMP_STORE_0,
+         .Component3Control = VFCOMP_STORE_0,
+      };
+      GENX(VERTEX_ELEMENT_STATE_pack)(NULL,
+                                      &p[1 + drawid_slot * 2],
+                                      &element);
+
+#if GEN_GEN >= 8
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
+         vfi.VertexElementIndex = drawid_slot;
+      }
+#endif
+   }
 }
 
 void
 genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
+                     const struct gen_l3_config *l3_config,
                      VkShaderStageFlags active_stages,
-                     unsigned vs_size, unsigned gs_size,
-                     const struct gen_l3_config *l3_config)
+                     const unsigned entry_size[4])
 {
-   if (!(active_stages & VK_SHADER_STAGE_VERTEX_BIT))
-      vs_size = 1;
-
-   if (!(active_stages & VK_SHADER_STAGE_GEOMETRY_BIT))
-      gs_size = 1;
-
-   unsigned vs_entry_size_bytes = vs_size * 64;
-   unsigned gs_entry_size_bytes = gs_size * 64;
-
-   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
-    *
-    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
-    *     Allocation Size is less than 9 512-bit URB entries.
-    *
-    * Similar text exists for GS.
-    */
-   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
-   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
-
-   /* URB allocations must be done in 8k chunks. */
-   unsigned chunk_size_bytes = 8192;
-
-   /* Determine the size of the URB in chunks. */
-   const unsigned total_urb_size =
-      gen_get_l3_config_urb_size(&device->info, l3_config);
-   const unsigned urb_chunks = total_urb_size * 1024 / chunk_size_bytes;
-
-   /* Reserve space for push constants */
-   unsigned push_constant_kb;
-   if (device->info.gen >= 8)
-      push_constant_kb = 32;
-   else if (device->info.is_haswell)
-      push_constant_kb = device->info.gt == 3 ? 32 : 16;
-   else
-      push_constant_kb = 16;
-
-   unsigned push_constant_bytes = push_constant_kb * 1024;
-   unsigned push_constant_chunks =
-      push_constant_bytes / chunk_size_bytes;
-
-   /* Initially, assign each stage the minimum amount of URB space it needs,
-    * and make a note of how much additional space it "wants" (the amount of
-    * additional space it could actually make use of).
-    */
-
-   /* VS has a lower limit on the number of URB entries */
-   unsigned vs_chunks =
-      ALIGN(device->info.urb.min_vs_entries * vs_entry_size_bytes,
-            chunk_size_bytes) / chunk_size_bytes;
-   unsigned vs_wants =
-      ALIGN(device->info.urb.max_vs_entries * vs_entry_size_bytes,
-            chunk_size_bytes) / chunk_size_bytes - vs_chunks;
-
-   unsigned gs_chunks = 0;
-   unsigned gs_wants = 0;
-   if (active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) {
-      /* There are two constraints on the minimum amount of URB space we can
-       * allocate:
-       *
-       * (1) We need room for at least 2 URB entries, since we always operate
-       * the GS in DUAL_OBJECT mode.
-       *
-       * (2) We can't allocate less than nr_gs_entries_granularity.
-       */
-      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
-                        chunk_size_bytes) / chunk_size_bytes;
-      gs_wants =
-         ALIGN(device->info.urb.max_gs_entries * gs_entry_size_bytes,
-               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
-   }
-
-   /* There should always be enough URB space to satisfy the minimum
-    * requirements of each stage.
-    */
-   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
-   assert(total_needs <= urb_chunks);
-
-   /* Mete out remaining space (if any) in proportion to "wants". */
-   unsigned total_wants = vs_wants + gs_wants;
-   unsigned remaining_space = urb_chunks - total_needs;
-   if (remaining_space > total_wants)
-      remaining_space = total_wants;
-   if (remaining_space > 0) {
-      unsigned vs_additional = (unsigned)
-         round(vs_wants * (((double) remaining_space) / total_wants));
-      vs_chunks += vs_additional;
-      remaining_space -= vs_additional;
-      gs_chunks += remaining_space;
-   }
-
-   /* Sanity check that we haven't over-allocated. */
-   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
-
-   /* Finally, compute the number of entries that can fit in the space
-    * allocated to each stage.
-    */
-   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
-   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
+   const struct gen_device_info *devinfo = &device->info;
+#if GEN_IS_HASWELL
+   const unsigned push_constant_kb = devinfo->gt == 3 ? 32 : 16;
+#else
+   const unsigned push_constant_kb = GEN_GEN >= 8 ? 32 : 16;
+#endif
 
-   /* Since we rounded up when computing *_wants, this may be slightly more
-    * than the maximum allowed amount, so correct for that.
-    */
-   nr_vs_entries = MIN2(nr_vs_entries, device->info.urb.max_vs_entries);
-   nr_gs_entries = MIN2(nr_gs_entries, device->info.urb.max_gs_entries);
+   const unsigned urb_size_kb = gen_get_l3_config_urb_size(devinfo, l3_config);
 
-   /* Ensure that we program a multiple of the granularity. */
-   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
-   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
-
-   /* Finally, sanity check to make sure we have at least the minimum number
-    * of entries needed for each stage.
-    */
-   assert(nr_vs_entries >= device->info.urb.min_vs_entries);
-   if (active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
-      assert(nr_gs_entries >= 2);
+   unsigned entries[4];
+   unsigned start[4];
+   gen_get_urb_config(devinfo,
+                      1024 * push_constant_kb, 1024 * urb_size_kb,
+                      active_stages &
+                         VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+                      active_stages & VK_SHADER_STAGE_GEOMETRY_BIT,
+                      entry_size, entries, start);
 
 #if GEN_GEN == 7 && !GEN_IS_HASWELL
    /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
@@ -319,54 +296,37 @@ genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
    }
 #endif
 
-   /* Lay out the URB in the following order:
-    * - push constants
-    * - VS
-    * - GS
-    */
-   anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) {
-      urb.VSURBStartingAddress      = push_constant_chunks;
-      urb.VSURBEntryAllocationSize  = vs_size - 1;
-      urb.VSNumberofURBEntries      = nr_vs_entries;
-   }
-
-   anv_batch_emit(batch, GENX(3DSTATE_URB_HS), urb) {
-      urb.HSURBStartingAddress      = push_constant_chunks;
-   }
-
-   anv_batch_emit(batch, GENX(3DSTATE_URB_DS), urb) {
-      urb.DSURBStartingAddress      = push_constant_chunks;
-   }
-
-   anv_batch_emit(batch, GENX(3DSTATE_URB_GS), urb) {
-      urb.GSURBStartingAddress      = push_constant_chunks + vs_chunks;
-      urb.GSURBEntryAllocationSize  = gs_size - 1;
-      urb.GSNumberofURBEntries      = nr_gs_entries;
+   for (int i = 0; i <= MESA_SHADER_GEOMETRY; i++) {
+      anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) {
+         urb._3DCommandSubOpcode      += i;
+         urb.VSURBStartingAddress      = start[i];
+         urb.VSURBEntryAllocationSize  = entry_size[i] - 1;
+         urb.VSNumberofURBEntries      = entries[i];
+      }
    }
 }
 
-static inline void
+static void
 emit_urb_setup(struct anv_pipeline *pipeline)
 {
-   unsigned vs_entry_size =
-      (pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT) ?
-      get_vs_prog_data(pipeline)->base.urb_entry_size : 0;
-   unsigned gs_entry_size =
-      (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) ?
-      get_gs_prog_data(pipeline)->base.urb_entry_size : 0;
+   unsigned entry_size[4];
+   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
+      const struct brw_vue_prog_data *prog_data =
+         !anv_pipeline_has_stage(pipeline, i) ? NULL :
+         (const struct brw_vue_prog_data *) pipeline->shaders[i]->prog_data;
+
+      entry_size[i] = prog_data ? prog_data->urb_entry_size : 1;
+   }
 
    genX(emit_urb_setup)(pipeline->device, &pipeline->batch,
-                        pipeline->active_stages, vs_entry_size, gs_entry_size,
-                        pipeline->urb.l3_config);
+                        pipeline->urb.l3_config,
+                        pipeline->active_stages, entry_size);
 }
 
 static void
 emit_3dstate_sbe(struct anv_pipeline *pipeline)
 {
-   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
-   const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
-   const struct brw_vue_map *fs_input_map;
 
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), sbe);
@@ -376,10 +336,8 @@ emit_3dstate_sbe(struct anv_pipeline *pipeline)
       return;
    }
 
-   if (gs_prog_data)
-      fs_input_map = &gs_prog_data->base.vue_map;
-   else
-      fs_input_map = &vs_prog_data->base.vue_map;
+   const struct brw_vue_map *fs_input_map =
+      &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;
 
    struct GENX(3DSTATE_SBE) sbe = {
       GENX(3DSTATE_SBE_header),
@@ -403,6 +361,8 @@ emit_3dstate_sbe(struct anv_pipeline *pipeline)
 #  define swiz sbe
 #endif
 
+   /* Skip the VUE header and position slots by default */
+   unsigned urb_entry_read_offset = 1;
    int max_source_attr = 0;
    for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
       int input_index = wm_prog_data->urb_setup[attr];
@@ -410,6 +370,12 @@ emit_3dstate_sbe(struct anv_pipeline *pipeline)
       if (input_index < 0)
          continue;
 
+      /* gl_Layer is stored in the VUE header */
+      if (attr == VARYING_SLOT_LAYER) {
+         urb_entry_read_offset = 0;
+         continue;
+      }
+
       if (attr == VARYING_SLOT_PNTC) {
          sbe.PointSpriteTextureCoordinateEnable = 1 << input_index;
          continue;
@@ -434,25 +400,33 @@ emit_3dstate_sbe(struct anv_pipeline *pipeline)
          swiz.Attribute[input_index].ComponentOverrideZ = true;
          swiz.Attribute[input_index].ComponentOverrideW = true;
       } else {
-         assert(slot >= 2);
-         const int source_attr = slot - 2;
-         max_source_attr = MAX2(max_source_attr, source_attr);
          /* We have to subtract two slots to account for the URB entry output
           * read offset in the VS and GS stages.
           */
+         const int source_attr = slot - 2 * urb_entry_read_offset;
+         assert(source_attr >= 0 && source_attr < 32);
+         max_source_attr = MAX2(max_source_attr, source_attr);
          swiz.Attribute[input_index].SourceAttribute = source_attr;
       }
    }
 
-   sbe.VertexURBEntryReadOffset = 1; /* Skip the VUE header and position slots */
+   sbe.VertexURBEntryReadOffset = urb_entry_read_offset;
    sbe.VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2);
+#if GEN_GEN >= 8
+   sbe.ForceVertexURBEntryReadOffset = true;
+   sbe.ForceVertexURBEntryReadLength = true;
+#endif
 
    uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
                                         GENX(3DSTATE_SBE_length));
+   if (!dw)
+      return;
    GENX(3DSTATE_SBE_pack)(&pipeline->batch, dw, &sbe);
 
 #if GEN_GEN >= 8
    dw = anv_batch_emit_dwords(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ_length));
+   if (!dw)
+      return;
    GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz);
 #endif
 }
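
A short worked example of the read offset/length arithmetic above: with the
default urb_entry_read_offset of 1, the hardware skips one 256-bit unit (the
VUE header plus position, i.e. two varying slots), so a varying stored in VUE
slot 2 becomes SourceAttribute 2 - 2 * 1 = 0.  If the fragment shader reads
gl_Layer, the offset drops to 0 and that same varying becomes SourceAttribute 2.
VertexURBEntryReadLength is counted in the same two-slot units, so a maximum
SourceAttribute of 4 gives a read length of DIV_ROUND_UP(4 + 1, 2) = 3.
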
@@ -491,8 +465,16 @@ emit_rs_state(struct anv_pipeline *pipeline,
    sf.TriangleStripListProvokingVertexSelect = 0;
    sf.LineStripListProvokingVertexSelect = 0;
    sf.TriangleFanProvokingVertexSelect = 1;
-   sf.PointWidthSource = Vertex;
-   sf.PointWidth = 1.0;
+
+   const struct brw_vue_prog_data *last_vue_prog_data =
+      anv_pipeline_get_last_vue_prog_data(pipeline);
+
+   if (last_vue_prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) {
+      sf.PointWidthSource = Vertex;
+   } else {
+      sf.PointWidthSource = State;
+      sf.PointWidth = 1.0;
+   }
 
 #if GEN_GEN >= 8
    struct GENX(3DSTATE_RASTER) raster = {
@@ -507,6 +489,10 @@ emit_rs_state(struct anv_pipeline *pipeline,
     */
 #if GEN_GEN >= 8
    raster.DXMultisampleRasterizationEnable = true;
+   /* NOTE: 3DSTATE_RASTER::ForcedSampleCount affects the BDW and SKL PMA fix
+    * computations.  If we ever set this bit to a different value, they will
+    * need to be updated accordingly.
+    */
    raster.ForcedSampleCount = FSC_NUMRASTSAMPLES_0;
    raster.ForceMultisampling = false;
 #else
@@ -537,9 +523,9 @@ emit_rs_state(struct anv_pipeline *pipeline,
    /* Gen7 requires that we provide the depth format in 3DSTATE_SF so that it
     * can get the depth offsets correct.
     */
-   if (subpass->depth_stencil_attachment < pass->attachment_count) {
+   if (subpass->depth_stencil_attachment) {
       VkFormat vk_format =
-         pass->attachments[subpass->depth_stencil_attachment].format;
+         pass->attachments[subpass->depth_stencil_attachment->attachment].format;
       assert(vk_format_is_depth_or_stencil(vk_format));
       if (vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_DEPTH_BIT) {
          enum isl_format isl_format =
@@ -591,6 +577,7 @@ emit_ms_state(struct anv_pipeline *pipeline,
    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
       ms.NumberofMultisamples       = log2_samples;
 
+      ms.PixelLocation              = CENTER;
 #if GEN_GEN >= 8
       /* The PRM says that this bit is valid only for DX9:
        *
@@ -598,9 +585,7 @@ emit_ms_state(struct anv_pipeline *pipeline,
        *    should not have any effect by setting or not setting this bit.
        */
       ms.PixelPositionOffsetEnable  = false;
-      ms.PixelLocation              = CENTER;
 #else
-      ms.PixelLocation              = PIXLOC_CENTER;
 
       switch (samples) {
       case 1:
@@ -697,9 +682,140 @@ static const uint32_t vk_to_gen_stencil_op[] = {
    [VK_STENCIL_OP_DECREMENT_AND_WRAP]           = STENCILOP_DECR,
 };
 
+/* This function sanitizes the VkStencilOpState by looking at the compare ops
+ * and trying to determine whether or not a given stencil op can ever actually
+ * occur.  Stencil ops which can never occur are set to VK_STENCIL_OP_KEEP.
+ * This function returns true if, after sanitation, any of the stencil ops are
+ * set to something other than VK_STENCIL_OP_KEEP.
+ */
+static bool
+sanitize_stencil_face(VkStencilOpState *face,
+                      VkCompareOp depthCompareOp)
+{
+   /* If compareOp is ALWAYS then the stencil test will never fail and failOp
+    * will never happen.  Set failOp to KEEP in this case.
+    */
+   if (face->compareOp == VK_COMPARE_OP_ALWAYS)
+      face->failOp = VK_STENCIL_OP_KEEP;
+
+   /* If compareOp is NEVER or depthCompareOp is NEVER then one of the depth
+    * or stencil tests will fail and passOp will never happen.
+    */
+   if (face->compareOp == VK_COMPARE_OP_NEVER ||
+       depthCompareOp == VK_COMPARE_OP_NEVER)
+      face->passOp = VK_STENCIL_OP_KEEP;
+
+   /* If compareOp is NEVER or depthCompareOp is ALWAYS then either the
+    * stencil test will fail or the depth test will pass.  In either case,
+    * depthFailOp will never happen.
+    */
+   if (face->compareOp == VK_COMPARE_OP_NEVER ||
+       depthCompareOp == VK_COMPARE_OP_ALWAYS)
+      face->depthFailOp = VK_STENCIL_OP_KEEP;
+
+   return face->failOp != VK_STENCIL_OP_KEEP ||
+          face->depthFailOp != VK_STENCIL_OP_KEEP ||
+          face->passOp != VK_STENCIL_OP_KEEP;
+}
+
+/* Intel hardware is fairly sensitive to whether or not depth/stencil writes
+ * are enabled.  In the presence of discards, it's fairly easy to get into the
+ * non-promoted case which means a fairly big performance hit.  From the Iron
+ * Lake PRM, Vol 2, pt. 1, section 8.4.3.2, "Early Depth Test Cases":
+ *
+ *    "Non-promoted depth (N) is active whenever the depth test can be done
+ *    early but it cannot determine whether or not to write source depth to
+ *    the depth buffer, therefore the depth write must be performed post pixel
+ *    shader. This includes cases where the pixel shader can kill pixels,
+ *    including via sampler chroma key, as well as cases where the alpha test
+ *    function is enabled, which kills pixels based on a programmable alpha
+ *    test. In this case, even if the depth test fails, the pixel cannot be
+ *    killed if a stencil write is indicated. Whether or not the stencil write
+ *    happens depends on whether or not the pixel is killed later. In these
+ *    cases if stencil test fails and stencil writes are off, the pixels can
+ *    also be killed early. If stencil writes are enabled, the pixels must be
+ *    treated as Computed depth (described above)."
+ *
+ * The same thing as mentioned in the stencil case can happen in the depth
+ * case as well if it thinks it writes depth but, thanks to the depth test
+ * being GL_EQUAL, the write doesn't actually matter.  A little extra work
+ * up-front to try and disable depth and stencil writes can make a big
+ * difference.
+ *
+ * Unfortunately, the way depth and stencil testing is specified, there are
+ * many cases where, regardless of depth/stencil writes being enabled, nothing
+ * actually gets written due to some other bit of state being set.  This
+ * function attempts to "sanitize" the depth stencil state and disable writes
+ * and sometimes even testing whenever possible.
+ */
+static void
+sanitize_ds_state(VkPipelineDepthStencilStateCreateInfo *state,
+                  bool *stencilWriteEnable,
+                  VkImageAspectFlags ds_aspects)
+{
+   *stencilWriteEnable = state->stencilTestEnable;
+
+   /* If the depth test is disabled, we won't be writing anything. Make sure we
+    * treat the test as always passing later on as well.
+    *
+    * Also, the Vulkan spec requires that if either depth or stencil is not
+    * present, the pipeline is to act as if the test silently passes. In that
+    * case we won't write either.
+    */
+   if (!state->depthTestEnable || !(ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
+      state->depthWriteEnable = false;
+      state->depthCompareOp = VK_COMPARE_OP_ALWAYS;
+   }
+
+   if (!(ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
+      *stencilWriteEnable = false;
+      state->front.compareOp = VK_COMPARE_OP_ALWAYS;
+      state->back.compareOp = VK_COMPARE_OP_ALWAYS;
+   }
+
+   /* If the stencil test is enabled and always fails, then we will never get
+    * to the depth test so we can just disable the depth test entirely.
+    */
+   if (state->stencilTestEnable &&
+       state->front.compareOp == VK_COMPARE_OP_NEVER &&
+       state->back.compareOp == VK_COMPARE_OP_NEVER) {
+      state->depthTestEnable = false;
+      state->depthWriteEnable = false;
+   }
+
+   /* If depthCompareOp is EQUAL then the value we would be writing to the
+    * depth buffer is the same as the value that's already there so there's no
+    * point in writing it.
+    */
+   if (state->depthCompareOp == VK_COMPARE_OP_EQUAL)
+      state->depthWriteEnable = false;
+
+   /* If the stencil ops are such that we don't actually ever modify the
+    * stencil buffer, we should disable writes.
+    */
+   if (!sanitize_stencil_face(&state->front, state->depthCompareOp) &&
+       !sanitize_stencil_face(&state->back, state->depthCompareOp))
+      *stencilWriteEnable = false;
+
+   /* If the depth test always passes and we never write out depth, that's the
+    * same as if the depth test is disabled entirely.
+    */
+   if (state->depthCompareOp == VK_COMPARE_OP_ALWAYS &&
+       !state->depthWriteEnable)
+      state->depthTestEnable = false;
+
+   /* If the stencil test always passes and we never write out stencil, that's
+    * the same as if the stencil test is disabled entirely.
+    */
+   if (state->front.compareOp == VK_COMPARE_OP_ALWAYS &&
+       state->back.compareOp == VK_COMPARE_OP_ALWAYS &&
+       !*stencilWriteEnable)
+      state->stencilTestEnable = false;
+}
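
Two quick examples of what this pass does: a pipeline with depthTestEnable =
true, depthCompareOp = VK_COMPARE_OP_EQUAL and depthWriteEnable = true keeps
the test but loses the write, since an EQUAL test could only ever rewrite the
value already in the buffer; and a stencil state whose front and back compareOp
are VK_COMPARE_OP_ALWAYS with all ops VK_STENCIL_OP_KEEP first has
stencilWriteEnable forced to false and then, by the final rule,
stencilTestEnable disabled as well.
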
+
 static void
 emit_ds_state(struct anv_pipeline *pipeline,
-              const VkPipelineDepthStencilStateCreateInfo *info,
+              const VkPipelineDepthStencilStateCreateInfo *pCreateInfo,
               const struct anv_render_pass *pass,
               const struct anv_subpass *subpass)
 {
@@ -711,14 +827,31 @@ emit_ds_state(struct anv_pipeline *pipeline,
 #  define depth_stencil_dw pipeline->gen9.wm_depth_stencil
 #endif
 
-   if (info == NULL) {
+   if (pCreateInfo == NULL) {
       /* We're going to OR this together with the dynamic state.  We need
        * to make sure it's initialized to something useful.
        */
+      pipeline->writes_stencil = false;
+      pipeline->stencil_test_enable = false;
+      pipeline->writes_depth = false;
+      pipeline->depth_test_enable = false;
       memset(depth_stencil_dw, 0, sizeof(depth_stencil_dw));
       return;
    }
 
+   VkImageAspectFlags ds_aspects = 0;
+   if (subpass->depth_stencil_attachment) {
+      VkFormat depth_stencil_format =
+         pass->attachments[subpass->depth_stencil_attachment->attachment].format;
+      ds_aspects = vk_format_aspects(depth_stencil_format);
+   }
+
+   VkPipelineDepthStencilStateCreateInfo info = *pCreateInfo;
+   sanitize_ds_state(&info, &pipeline->writes_stencil, ds_aspects);
+   pipeline->stencil_test_enable = info.stencilTestEnable;
+   pipeline->writes_depth = info.depthWriteEnable;
+   pipeline->depth_test_enable = info.depthTestEnable;
+
    /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */
 
 #if GEN_GEN <= 7
@@ -726,52 +859,22 @@ emit_ds_state(struct anv_pipeline *pipeline,
 #else
    struct GENX(3DSTATE_WM_DEPTH_STENCIL) depth_stencil = {
 #endif
-      .DepthTestEnable = info->depthTestEnable,
-      .DepthBufferWriteEnable = info->depthWriteEnable,
-      .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp],
+      .DepthTestEnable = info.depthTestEnable,
+      .DepthBufferWriteEnable = info.depthWriteEnable,
+      .DepthTestFunction = vk_to_gen_compare_op[info.depthCompareOp],
       .DoubleSidedStencilEnable = true,
 
-      .StencilTestEnable = info->stencilTestEnable,
-      .StencilBufferWriteEnable = info->stencilTestEnable,
-      .StencilFailOp = vk_to_gen_stencil_op[info->front.failOp],
-      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info->front.passOp],
-      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info->front.depthFailOp],
-      .StencilTestFunction = vk_to_gen_compare_op[info->front.compareOp],
-      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info->back.failOp],
-      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info->back.passOp],
-      .BackfaceStencilPassDepthFailOp =vk_to_gen_stencil_op[info->back.depthFailOp],
-      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.compareOp],
+      .StencilTestEnable = info.stencilTestEnable,
+      .StencilFailOp = vk_to_gen_stencil_op[info.front.failOp],
+      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info.front.passOp],
+      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info.front.depthFailOp],
+      .StencilTestFunction = vk_to_gen_compare_op[info.front.compareOp],
+      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info.back.failOp],
+      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info.back.passOp],
+      .BackfaceStencilPassDepthFailOp =vk_to_gen_stencil_op[info.back.depthFailOp],
+      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info.back.compareOp],
    };
 
-   VkImageAspectFlags aspects = 0;
-   if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
-      VkFormat depth_stencil_format =
-         pass->attachments[subpass->depth_stencil_attachment].format;
-      aspects = vk_format_aspects(depth_stencil_format);
-   }
-
-   /* The Vulkan spec requires that if either depth or stencil is not present,
-    * the pipeline is to act as if the test silently passes.
-    */
-   if (!(aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
-      depth_stencil.DepthBufferWriteEnable = false;
-      depth_stencil.DepthTestFunction = PREFILTEROPALWAYS;
-   }
-
-   if (!(aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
-      depth_stencil.StencilBufferWriteEnable = false;
-      depth_stencil.StencilTestFunction = PREFILTEROPALWAYS;
-      depth_stencil.BackfaceStencilTestFunction = PREFILTEROPALWAYS;
-   }
-
-   /* From the Broadwell PRM:
-    *
-    *    "If Depth_Test_Enable = 1 AND Depth_Test_func = EQUAL, the
-    *    Depth_Write_Enable must be set to 0."
-    */
-   if (info->depthTestEnable && info->depthCompareOp == VK_COMPARE_OP_EQUAL)
-      depth_stencil.DepthBufferWriteEnable = false;
-
 #if GEN_GEN <= 7
    GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);
 #else
@@ -779,35 +882,30 @@ emit_ds_state(struct anv_pipeline *pipeline,
 #endif
 }
 
+MAYBE_UNUSED static bool
+is_dual_src_blend_factor(VkBlendFactor factor)
+{
+   return factor == VK_BLEND_FACTOR_SRC1_COLOR ||
+          factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR ||
+          factor == VK_BLEND_FACTOR_SRC1_ALPHA ||
+          factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
+}
+
 static void
 emit_cb_state(struct anv_pipeline *pipeline,
               const VkPipelineColorBlendStateCreateInfo *info,
               const VkPipelineMultisampleStateCreateInfo *ms_info)
 {
    struct anv_device *device = pipeline->device;
-
-   const uint32_t num_dwords = GENX(BLEND_STATE_length);
-   pipeline->blend_state =
-      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);
+   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 
    struct GENX(BLEND_STATE) blend_state = {
 #if GEN_GEN >= 8
       .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
       .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
-#else
-      /* Make sure it gets zeroed */
-      .Entry = { { 0, }, },
 #endif
    };
 
-   /* Default everything to disabled */
-   for (uint32_t i = 0; i < 8; i++) {
-      blend_state.Entry[i].WriteDisableAlpha = true;
-      blend_state.Entry[i].WriteDisableRed = true;
-      blend_state.Entry[i].WriteDisableGreen = true;
-      blend_state.Entry[i].WriteDisableBlue = true;
-   }
-
    uint32_t surface_count = 0;
    struct anv_pipeline_bind_map *map;
    if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
@@ -815,7 +913,17 @@ emit_cb_state(struct anv_pipeline *pipeline,
       surface_count = map->surface_count;
    }
 
+   const uint32_t num_dwords = GENX(BLEND_STATE_length) +
+      GENX(BLEND_STATE_ENTRY_length) * surface_count;
+   pipeline->blend_state =
+      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);
+
    bool has_writeable_rt = false;
+   uint32_t *state_pos = pipeline->blend_state.map;
+   state_pos += GENX(BLEND_STATE_length);
+#if GEN_GEN >= 8
+   struct GENX(BLEND_STATE_ENTRY) bs0 = { 0 };
+#endif
    for (unsigned i = 0; i < surface_count; i++) {
       struct anv_pipeline_binding *binding = &map->surface_to_descriptor[i];
 
@@ -826,14 +934,24 @@ emit_cb_state(struct anv_pipeline *pipeline,
       /* We can have at most 8 attachments */
       assert(i < 8);
 
-      if (binding->index >= info->attachmentCount)
+      if (info == NULL || binding->index >= info->attachmentCount) {
+         /* Default everything to disabled */
+         struct GENX(BLEND_STATE_ENTRY) entry = {
+            .WriteDisableAlpha = true,
+            .WriteDisableRed = true,
+            .WriteDisableGreen = true,
+            .WriteDisableBlue = true,
+         };
+         GENX(BLEND_STATE_ENTRY_pack)(NULL, state_pos, &entry);
+         state_pos += GENX(BLEND_STATE_ENTRY_length);
          continue;
+      }
 
       assert(binding->binding == 0);
       const VkPipelineColorBlendAttachmentState *a =
          &info->pAttachments[binding->index];
 
-      blend_state.Entry[i] = (struct GENX(BLEND_STATE_ENTRY)) {
+      struct GENX(BLEND_STATE_ENTRY) entry = {
 #if GEN_GEN < 8
          .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
          .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
@@ -862,10 +980,36 @@ emit_cb_state(struct anv_pipeline *pipeline,
 #if GEN_GEN >= 8
          blend_state.IndependentAlphaBlendEnable = true;
 #else
-         blend_state.Entry[i].IndependentAlphaBlendEnable = true;
+         entry.IndependentAlphaBlendEnable = true;
 #endif
       }
 
+      /* The Dual Source Blending documentation says:
+       *
+       * "If SRC1 is included in a src/dst blend factor and
+       * a DualSource RT Write message is not used, results
+       * are UNDEFINED. (This reflects the same restriction in DX APIs,
+       * where undefined results are produced if “o1” is not written
+       * by a PS – there are no default values defined)."
+       *
+       * There is no way to gracefully fix this undefined situation
+       * so we just disable the blending to prevent possible issues.
+       */
+      if (!wm_prog_data->dual_src_blend &&
+          (is_dual_src_blend_factor(a->srcColorBlendFactor) ||
+           is_dual_src_blend_factor(a->dstColorBlendFactor) ||
+           is_dual_src_blend_factor(a->srcAlphaBlendFactor) ||
+           is_dual_src_blend_factor(a->dstAlphaBlendFactor))) {
+         vk_debug_report(&device->instance->debug_report_callbacks,
+                         VK_DEBUG_REPORT_WARNING_BIT_EXT,
+                         VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+                         (uint64_t)(uintptr_t)device,
+                         0, 0, "anv",
+                         "Enabled dual-src blend factors without writing both targets "
+                         "in the shader.  Disabling blending to avoid GPU hangs.");
+         entry.ColorBufferBlendEnable = false;
+      }
+
       if (a->colorWriteMask != 0)
          has_writeable_rt = true;
 
@@ -877,26 +1021,31 @@ emit_cb_state(struct anv_pipeline *pipeline,
        */
       if (a->colorBlendOp == VK_BLEND_OP_MIN ||
           a->colorBlendOp == VK_BLEND_OP_MAX) {
-         blend_state.Entry[i].SourceBlendFactor = BLENDFACTOR_ONE;
-         blend_state.Entry[i].DestinationBlendFactor = BLENDFACTOR_ONE;
+         entry.SourceBlendFactor = BLENDFACTOR_ONE;
+         entry.DestinationBlendFactor = BLENDFACTOR_ONE;
       }
       if (a->alphaBlendOp == VK_BLEND_OP_MIN ||
           a->alphaBlendOp == VK_BLEND_OP_MAX) {
-         blend_state.Entry[i].SourceAlphaBlendFactor = BLENDFACTOR_ONE;
-         blend_state.Entry[i].DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
+         entry.SourceAlphaBlendFactor = BLENDFACTOR_ONE;
+         entry.DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
       }
+      GENX(BLEND_STATE_ENTRY_pack)(NULL, state_pos, &entry);
+      state_pos += GENX(BLEND_STATE_ENTRY_length);
+#if GEN_GEN >= 8
+      if (i == 0)
+         bs0 = entry;
+#endif
    }
 
 #if GEN_GEN >= 8
-   struct GENX(BLEND_STATE_ENTRY) *bs0 = &blend_state.Entry[0];
    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_BLEND), blend) {
       blend.AlphaToCoverageEnable         = blend_state.AlphaToCoverageEnable;
       blend.HasWriteableRT                = has_writeable_rt;
-      blend.ColorBufferBlendEnable        = bs0->ColorBufferBlendEnable;
-      blend.SourceAlphaBlendFactor        = bs0->SourceAlphaBlendFactor;
-      blend.DestinationAlphaBlendFactor   = bs0->DestinationAlphaBlendFactor;
-      blend.SourceBlendFactor             = bs0->SourceBlendFactor;
-      blend.DestinationBlendFactor        = bs0->DestinationBlendFactor;
+      blend.ColorBufferBlendEnable        = bs0.ColorBufferBlendEnable;
+      blend.SourceAlphaBlendFactor        = bs0.SourceAlphaBlendFactor;
+      blend.DestinationAlphaBlendFactor   = bs0.DestinationAlphaBlendFactor;
+      blend.SourceBlendFactor             = bs0.SourceBlendFactor;
+      blend.DestinationBlendFactor        = bs0.DestinationBlendFactor;
       blend.AlphaTestEnable               = false;
       blend.IndependentAlphaBlendEnable   =
          blend_state.IndependentAlphaBlendEnable;
@@ -906,8 +1055,7 @@ emit_cb_state(struct anv_pipeline *pipeline,
 #endif
 
    GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state);
-   if (!device->info.has_llc)
-      anv_state_clflush(pipeline->blend_state);
+   anv_state_flush(device, pipeline->blend_state);
 
    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
       bsp.BlendStatePointer      = pipeline->blend_state.offset;
@@ -926,6 +1074,7 @@ emit_3dstate_clip(struct anv_pipeline *pipeline,
    (void) wm_prog_data;
    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
       clip.ClipEnable               = true;
+      clip.StatisticsEnable         = true;
       clip.EarlyCullEnable          = true;
       clip.APIMode                  = APIMODE_D3D,
       clip.ViewportXYClipTestEnable = true;
@@ -938,15 +1087,41 @@ emit_3dstate_clip(struct anv_pipeline *pipeline,
 
       clip.MinimumPointWidth = 0.125;
       clip.MaximumPointWidth = 255.875;
-      clip.MaximumVPIndex    = (vp_info ? vp_info->viewportCount : 1) - 1;
+
+      const struct brw_vue_prog_data *last =
+         anv_pipeline_get_last_vue_prog_data(pipeline);
+
+      /* From the Vulkan 1.0.45 spec:
+       *
+       *    "If the last active vertex processing stage shader entry point's
+       *    interface does not include a variable decorated with
+       *    ViewportIndex, then the first viewport is used."
+       */
+      if (vp_info && (last->vue_map.slots_valid & VARYING_BIT_VIEWPORT)) {
+         clip.MaximumVPIndex = vp_info->viewportCount - 1;
+      } else {
+         clip.MaximumVPIndex = 0;
+      }
+
+      /* From the Vulkan 1.0.45 spec:
+       *
+       *    "If the last active vertex processing stage shader entry point's
+       *    interface does not include a variable decorated with Layer, then
+       *    the first layer is used."
+       */
+      clip.ForceZeroRTAIndexEnable =
+         !(last->vue_map.slots_valid & VARYING_BIT_LAYER);
 
 #if GEN_GEN == 7
       clip.FrontWinding            = vk_to_gen_front_face[rs_info->frontFace];
       clip.CullMode                = vk_to_gen_cullmode[rs_info->cullMode];
       clip.ViewportZClipTestEnable = !pipeline->depth_clamp_enable;
+      clip.UserClipDistanceClipTestEnableBitmask = last->clip_distance_mask;
+      clip.UserClipDistanceCullTestEnableBitmask = last->cull_distance_mask;
 #else
       clip.NonPerspectiveBarycentricEnable = wm_prog_data ?
-         (wm_prog_data->barycentric_interp_modes & 0x38) != 0 : 0;
+         (wm_prog_data->barycentric_interp_modes &
+          BRW_BARYCENTRIC_NONPERSPECTIVE_BITS) != 0 : 0;
 #endif
    }
 }
@@ -960,19 +1135,25 @@ emit_3dstate_streamout(struct anv_pipeline *pipeline,
    }
 }
 
-static inline uint32_t
+static uint32_t
 get_sampler_count(const struct anv_shader_bin *bin)
 {
-   return DIV_ROUND_UP(bin->bind_map.sampler_count, 4);
+   uint32_t count_by_4 = DIV_ROUND_UP(bin->bind_map.sampler_count, 4);
+
+   /* We can potentially have way more than 32 samplers and that's ok.
+    * However, the 3DSTATE_XS packets only have 3 bits to specify how
+    * many to pre-fetch and all values above 4 are marked reserved.
+    */
+   return MIN2(count_by_4, 4);
 }
 
-static inline uint32_t
+static uint32_t
 get_binding_table_entry_count(const struct anv_shader_bin *bin)
 {
    return DIV_ROUND_UP(bin->bind_map.surface_count, 32);
 }
 
-static inline struct anv_address
+static struct anv_address
 get_scratch_address(struct anv_pipeline *pipeline,
                     gl_shader_stage stage,
                     const struct anv_shader_bin *bin)
@@ -985,28 +1166,12 @@ get_scratch_address(struct anv_pipeline *pipeline,
    };
 }
 
-static inline uint32_t
+static uint32_t
 get_scratch_space(const struct anv_shader_bin *bin)
 {
    return ffs(bin->prog_data->total_scratch / 2048);
 }
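
A rough worked example, assuming the PerThreadScratchSpace field encodes the
per-thread allocation as a power of two starting at 1 kB (0 -> 1 kB, 1 -> 2 kB,
and so on): total_scratch = 2048 gives ffs(2048 / 2048) = 1, i.e. 2 kB per
thread, and total_scratch = 8192 gives ffs(4) = 3, i.e. 8 kB per thread.
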
 
-static inline uint32_t
-get_urb_output_offset()
-{
-   /* Skip the VUE header and position slots */
-   return 1;
-}
-
-static inline uint32_t
-get_urb_output_length(const struct anv_shader_bin *bin)
-{
-   const struct brw_vue_prog_data *prog_data =
-      (const struct brw_vue_prog_data *)bin->prog_data;
-
-   return (prog_data->vue_map.num_slots + 1) / 2 - get_urb_output_offset();
-}
-
 static void
 emit_3dstate_vs(struct anv_pipeline *pipeline)
 {
@@ -1018,7 +1183,7 @@ emit_3dstate_vs(struct anv_pipeline *pipeline)
    assert(anv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX));
 
    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
-      vs.FunctionEnable       = true;
+      vs.Enable               = true;
       vs.StatisticsEnable     = true;
       vs.KernelStartPointer   = vs_bin->kernel.offset;
 #if GEN_GEN >= 8
@@ -1027,15 +1192,47 @@ emit_3dstate_vs(struct anv_pipeline *pipeline)
 #endif
 
       assert(!vs_prog_data->base.base.use_alt_mode);
+#if GEN_GEN < 11
       vs.SingleVertexDispatch       = false;
+#endif
       vs.VectorMaskEnable           = false;
-      vs.SamplerCount               = get_sampler_count(vs_bin);
-      vs.BindingTableEntryCount     = get_binding_table_entry_count(vs_bin);
+      /* WA_1606682166:
+       * Incorrect TDL's SSP address shift in SARB for 16:6 & 18:8 modes.
+       * Disable the Sampler state prefetch functionality in the SARB by
+       * programming 0xB000[30] to '1'.
+       */
+      vs.SamplerCount               = GEN_GEN == 11 ? 0 : get_sampler_count(vs_bin);
+      /* Gen 11 workarounds table #2056 WABTPPrefetchDisable suggests
+       * disabling prefetching of binding tables on A0 and B0 steppings.
+       * TODO: Revisit this WA on newer steppings.
+       */
+      vs.BindingTableEntryCount     = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(vs_bin);
       vs.FloatingPointMode          = IEEE754;
       vs.IllegalOpcodeExceptionEnable = false;
       vs.SoftwareExceptionEnable    = false;
       vs.MaximumNumberofThreads     = devinfo->max_vs_threads - 1;
-      vs.VertexCacheDisable         = false;
+
+      if (GEN_GEN == 9 && devinfo->gt == 4 &&
+          anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
+         /* On Sky Lake GT4, we have experienced some hangs related to the VS
+          * cache and tessellation.  It is unknown exactly what is happening
+          * but the Haswell docs for the "VS Reference Count Full Force Miss
+          * Enable" field of the "Thread Mode" register refer to a HSW bug in
+          * which the VUE handle reference count would overflow resulting in
+          * internal reference counting bugs.  My (Jason's) best guess is that
+          * this bug cropped back up on SKL GT4 when we suddenly had more
+          * threads in play than any previous gen9 hardware.
+          *
+          * What we do know for sure is that setting this bit when
+          * tessellation shaders are in use fixes a GPU hang in Batman: Arkham
+          * City when playing with DXVK (https://bugs.freedesktop.org/107280).
+          * Disabling the vertex cache with tessellation shaders should only
+          * have a minor performance impact as the tessellation shaders are
+          * likely generating and processing far more geometry than the vertex
+          * stage.
+          */
+         vs.VertexCacheDisable = true;
+      }
 
       vs.VertexURBEntryReadLength      = vs_prog_data->base.urb_read_length;
       vs.VertexURBEntryReadOffset      = 0;
@@ -1043,12 +1240,10 @@ emit_3dstate_vs(struct anv_pipeline *pipeline)
          vs_prog_data->base.base.dispatch_grf_start_reg;
 
 #if GEN_GEN >= 8
-      vs.VertexURBEntryOutputReadOffset = get_urb_output_offset();
-      vs.VertexURBEntryOutputLength     = get_urb_output_length(vs_bin);
-
-     /* TODO */
-      vs.UserClipDistanceClipTestEnableBitmask = 0;
-      vs.UserClipDistanceCullTestEnableBitmask = 0;
+      vs.UserClipDistanceClipTestEnableBitmask =
+         vs_prog_data->base.clip_distance_mask;
+      vs.UserClipDistanceCullTestEnableBitmask =
+         vs_prog_data->base.cull_distance_mask;
 #endif
 
       vs.PerThreadScratchSpace   = get_scratch_space(vs_bin);
@@ -1057,6 +1252,118 @@ emit_3dstate_vs(struct anv_pipeline *pipeline)
    }
 }
 
+static void
+emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
+                      const VkPipelineTessellationStateCreateInfo *tess_info)
+{
+   if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs);
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te);
+      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds);
+      return;
+   }
+
+   const struct gen_device_info *devinfo = &pipeline->device->info;
+   const struct anv_shader_bin *tcs_bin =
+      pipeline->shaders[MESA_SHADER_TESS_CTRL];
+   const struct anv_shader_bin *tes_bin =
+      pipeline->shaders[MESA_SHADER_TESS_EVAL];
+
+   const struct brw_tcs_prog_data *tcs_prog_data = get_tcs_prog_data(pipeline);
+   const struct brw_tes_prog_data *tes_prog_data = get_tes_prog_data(pipeline);
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs) {
+      hs.Enable = true;
+      hs.StatisticsEnable = true;
+      hs.KernelStartPointer = tcs_bin->kernel.offset;
+      /* WA_1606682166 */
+      hs.SamplerCount = GEN_GEN == 11 ? 0 : get_sampler_count(tcs_bin);
+      /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */
+      hs.BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(tcs_bin);
+      hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
+      hs.IncludeVertexHandles = true;
+      hs.InstanceCount = tcs_prog_data->instances - 1;
+
+      hs.VertexURBEntryReadLength = 0;
+      hs.VertexURBEntryReadOffset = 0;
+      hs.DispatchGRFStartRegisterForURBData =
+         tcs_prog_data->base.base.dispatch_grf_start_reg;
+
+      hs.PerThreadScratchSpace = get_scratch_space(tcs_bin);
+      hs.ScratchSpaceBasePointer =
+         get_scratch_address(pipeline, MESA_SHADER_TESS_CTRL, tcs_bin);
+   }
+
+   const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
+      tess_info ? vk_find_struct_const(tess_info, PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO) : NULL;
+
+   VkTessellationDomainOrigin uv_origin =
+      domain_origin_state ? domain_origin_state->domainOrigin :
+                            VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT;
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te) {
+      te.Partitioning = tes_prog_data->partitioning;
+
+      if (uv_origin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT) {
+         te.OutputTopology = tes_prog_data->output_topology;
+      } else {
+         /* When the origin is upper-left, we have to flip the winding order */
+         if (tes_prog_data->output_topology == OUTPUT_TRI_CCW) {
+            te.OutputTopology = OUTPUT_TRI_CW;
+         } else if (tes_prog_data->output_topology == OUTPUT_TRI_CW) {
+            te.OutputTopology = OUTPUT_TRI_CCW;
+         } else {
+            te.OutputTopology = tes_prog_data->output_topology;
+         }
+      }
+
+      te.TEDomain = tes_prog_data->domain;
+      te.TEEnable = true;
+      te.MaximumTessellationFactorOdd = 63.0;
+      te.MaximumTessellationFactorNotOdd = 64.0;
+   }
+
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds) {
+      ds.Enable = true;
+      ds.StatisticsEnable = true;
+      ds.KernelStartPointer = tes_bin->kernel.offset;
+      /* WA_1606682166 */
+      ds.SamplerCount = GEN_GEN == 11 ? 0 : get_sampler_count(tes_bin);
+      /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */
+      ds.BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(tes_bin);
+      ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
+
+      ds.ComputeWCoordinateEnable =
+         tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
+
+      ds.PatchURBEntryReadLength = tes_prog_data->base.urb_read_length;
+      ds.PatchURBEntryReadOffset = 0;
+      ds.DispatchGRFStartRegisterForURBData =
+         tes_prog_data->base.base.dispatch_grf_start_reg;
+
+#if GEN_GEN >= 8
+#if GEN_GEN < 11
+      ds.DispatchMode =
+         tes_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8 ?
+            DISPATCH_MODE_SIMD8_SINGLE_PATCH :
+            DISPATCH_MODE_SIMD4X2;
+#else
+      assert(tes_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8);
+      ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
+#endif
+
+      ds.UserClipDistanceClipTestEnableBitmask =
+         tes_prog_data->base.clip_distance_mask;
+      ds.UserClipDistanceCullTestEnableBitmask =
+         tes_prog_data->base.cull_distance_mask;
+#endif
+
+      ds.PerThreadScratchSpace = get_scratch_space(tes_bin);
+      ds.ScratchSpaceBasePointer =
+         get_scratch_address(pipeline, MESA_SHADER_TESS_EVAL, tes_bin);
+   }
+}
+
 static void
 emit_3dstate_gs(struct anv_pipeline *pipeline)
 {
@@ -1072,15 +1379,17 @@ emit_3dstate_gs(struct anv_pipeline *pipeline)
    const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
 
    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
-      gs.FunctionEnable          = true;
+      gs.Enable                  = true;
       gs.StatisticsEnable        = true;
       gs.KernelStartPointer      = gs_bin->kernel.offset;
       gs.DispatchMode            = gs_prog_data->base.dispatch_mode;
 
       gs.SingleProgramFlow       = false;
       gs.VectorMaskEnable        = false;
-      gs.SamplerCount            = get_sampler_count(gs_bin);
-      gs.BindingTableEntryCount  = get_binding_table_entry_count(gs_bin);
+      /* WA_1606682166 */
+      gs.SamplerCount            = GEN_GEN == 11 ? 0 : get_sampler_count(gs_bin);
+      /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */
+      gs.BindingTableEntryCount  = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(gs_bin);
       gs.IncludeVertexHandles    = gs_prog_data->base.include_vue_handles;
       gs.IncludePrimitiveID      = gs_prog_data->include_primitive_id;
 
@@ -1097,11 +1406,7 @@ emit_3dstate_gs(struct anv_pipeline *pipeline)
       gs.ControlDataFormat       = gs_prog_data->control_data_format;
       gs.ControlDataHeaderSize   = gs_prog_data->control_data_header_size_hwords;
       gs.InstanceControl         = MAX2(gs_prog_data->invocations, 1) - 1;
-#if GEN_GEN >= 8 || GEN_IS_HASWELL
       gs.ReorderMode             = TRAILING;
-#else
-      gs.ReorderEnable           = true;
-#endif
 
 #if GEN_GEN >= 8
       gs.ExpectedVertexCount     = gs_prog_data->vertices_in;
@@ -1116,12 +1421,10 @@ emit_3dstate_gs(struct anv_pipeline *pipeline)
          gs_prog_data->base.base.dispatch_grf_start_reg;
 
 #if GEN_GEN >= 8
-      gs.VertexURBEntryOutputReadOffset = get_urb_output_offset();
-      gs.VertexURBEntryOutputLength     = get_urb_output_length(gs_bin);
-
-     /* TODO */
-      gs.UserClipDistanceClipTestEnableBitmask = 0;
-      gs.UserClipDistanceCullTestEnableBitmask = 0;
+      gs.UserClipDistanceClipTestEnableBitmask =
+         gs_prog_data->base.clip_distance_mask;
+      gs.UserClipDistanceCullTestEnableBitmask =
+         gs_prog_data->base.cull_distance_mask;
 #endif
 
       gs.PerThreadScratchSpace   = get_scratch_space(gs_bin);
@@ -1130,8 +1433,35 @@ emit_3dstate_gs(struct anv_pipeline *pipeline)
    }
 }
 
+static bool
+has_color_buffer_write_enabled(const struct anv_pipeline *pipeline,
+                               const VkPipelineColorBlendStateCreateInfo *blend)
+{
+   const struct anv_shader_bin *shader_bin =
+      pipeline->shaders[MESA_SHADER_FRAGMENT];
+   if (!shader_bin)
+      return false;
+
+   const struct anv_pipeline_bind_map *bind_map = &shader_bin->bind_map;
+   for (int i = 0; i < bind_map->surface_count; i++) {
+      struct anv_pipeline_binding *binding = &bind_map->surface_to_descriptor[i];
+
+      if (binding->set != ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS)
+         continue;
+
+      if (binding->index == UINT32_MAX)
+         continue;
+
+      if (blend && blend->pAttachments[binding->index].colorWriteMask != 0)
+         return true;
+   }
+
+   return false;
+}
+
 static void
-emit_3dstate_wm(struct anv_pipeline *pipeline,
+emit_3dstate_wm(struct anv_pipeline *pipeline, struct anv_subpass *subpass,
+                const VkPipelineColorBlendStateCreateInfo *blend,
                 const VkPipelineMultisampleStateCreateInfo *multisample)
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
@@ -1154,19 +1484,53 @@ emit_3dstate_wm(struct anv_pipeline *pipeline,
             wm.EarlyDepthStencilControl         = EDSC_NORMAL;
          }
 
+#if GEN_GEN >= 8
+         /* Gen8 hardware tries to compute ThreadDispatchEnable for us but
+          * doesn't take into account KillPixels when no depth or stencil
+          * writes are enabled.  In order for occlusion queries to work
+          * correctly with no attachments, we need to force-enable PS thread
+          * dispatch.
+          *
+          * The BDW docs are pretty clear that this bit isn't validated
+          * and probably shouldn't be used in production:
+          *
+          *    "This must always be set to Normal. This field should not be
+          *    tested for functional validation."
+          *
+          * Unfortunately, the other mechanism we have for doing this is
+          * 3DSTATE_PS_EXTRA::PixelShaderHasUAV, which causes hangs on BDW.
+          * Given two bad options, we choose the one which works.
+          */
+         if ((wm_prog_data->has_side_effects || wm_prog_data->uses_kill) &&
+             !has_color_buffer_write_enabled(pipeline, blend))
+            wm.ForceThreadDispatchEnable = ForceON;
+#endif
+
          wm.BarycentricInterpolationMode =
             wm_prog_data->barycentric_interp_modes;
 
 #if GEN_GEN < 8
-         /* FIXME: This needs a lot more work, cf gen7 upload_wm_state(). */
-         wm.ThreadDispatchEnable          = true;
-
-         wm.PixelShaderKillsPixel         = wm_prog_data->uses_kill;
          wm.PixelShaderComputedDepthMode  = wm_prog_data->computed_depth_mode;
          wm.PixelShaderUsesSourceDepth    = wm_prog_data->uses_src_depth;
          wm.PixelShaderUsesSourceW        = wm_prog_data->uses_src_w;
          wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
 
+         /* If the subpass has a depth or stencil self-dependency, then we
+          * need to force the hardware to do the depth/stencil write *after*
+          * fragment shader execution.  Otherwise, the writes may hit memory
+          * before we get around to fetching from the input attachment and we
+          * may get the depth or stencil value from the current draw rather
+          * than the previous one.
+          */
+         wm.PixelShaderKillsPixel         = subpass->has_ds_self_dep ||
+                                            wm_prog_data->uses_kill;
+
+         if (wm.PixelShaderComputedDepthMode != PSCDEPTH_OFF ||
+             wm_prog_data->has_side_effects ||
+             wm.PixelShaderKillsPixel ||
+             has_color_buffer_write_enabled(pipeline, blend))
+            wm.ThreadDispatchEnable = true;
+
          if (samples > 1) {
             wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
             if (wm_prog_data->persample_dispatch) {
@@ -1184,7 +1548,9 @@ emit_3dstate_wm(struct anv_pipeline *pipeline,
 }
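For reference, the depth/stencil self-dependency mentioned in the comment above typically comes from a render pass like the one sketched here. This is a hedged, application-side example: the dependency itself is standard Vulkan, and the driver is assumed to derive subpass->has_ds_self_dep from a subpass that both writes its depth/stencil attachment and reads it back as an input attachment.

/* Hypothetical subpass self-dependency on the depth/stencil attachment:
 * subpass 0 writes depth during late fragment tests and reads it back as an
 * input attachment in the fragment shader of the same subpass.
 */
const VkSubpassDependency ds_self_dep = {
   .srcSubpass      = 0,
   .dstSubpass      = 0,
   .srcStageMask    = VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
   .dstStageMask    = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
   .srcAccessMask   = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
   .dstAccessMask   = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
   .dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT,
};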
 
 static void
-emit_3dstate_ps(struct anv_pipeline *pipeline)
+emit_3dstate_ps(struct anv_pipeline *pipeline,
+                const VkPipelineColorBlendStateCreateInfo *blend,
+                const VkPipelineMultisampleStateCreateInfo *multisample)
 {
    MAYBE_UNUSED const struct gen_device_info *devinfo = &pipeline->device->info;
    const struct anv_shader_bin *fs_bin =
@@ -1204,26 +1570,68 @@ emit_3dstate_ps(struct anv_pipeline *pipeline)
 
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 
+#if GEN_GEN < 8
+   /* The hardware wedges if you have this bit set but don't turn on any dual
+    * source blend factors.
+    */
+   bool dual_src_blend = false;
+   if (wm_prog_data->dual_src_blend && blend) {
+      for (uint32_t i = 0; i < blend->attachmentCount; i++) {
+         const VkPipelineColorBlendAttachmentState *bstate =
+            &blend->pAttachments[i];
+
+         if (bstate->blendEnable &&
+             (is_dual_src_blend_factor(bstate->srcColorBlendFactor) ||
+              is_dual_src_blend_factor(bstate->dstColorBlendFactor) ||
+              is_dual_src_blend_factor(bstate->srcAlphaBlendFactor) ||
+              is_dual_src_blend_factor(bstate->dstAlphaBlendFactor))) {
+            dual_src_blend = true;
+            break;
+         }
+      }
+   }
+#endif
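As an illustration of what trips the check above, here is a hypothetical application-side blend attachment using second-source factors; the VK_BLEND_FACTOR_*SRC1* factors are assumed to be what is_dual_src_blend_factor() matches, so with blendEnable set this state would make dual_src_blend true.

/* Hypothetical attachment state using dual-source blending; any of the
 * four SRC1 factors on an enabled attachment is enough.
 */
const VkPipelineColorBlendAttachmentState dual_src_att = {
   .blendEnable         = VK_TRUE,
   .srcColorBlendFactor = VK_BLEND_FACTOR_SRC1_COLOR,
   .dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR,
   .colorBlendOp        = VK_BLEND_OP_ADD,
   .srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE,
   .dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO,
   .alphaBlendOp        = VK_BLEND_OP_ADD,
   .colorWriteMask      = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
                          VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
};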
+
    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
-      ps.KernelStartPointer0        = fs_bin->kernel.offset;
-      ps.KernelStartPointer1        = 0;
-      ps.KernelStartPointer2        = fs_bin->kernel.offset +
-                                      wm_prog_data->prog_offset_2;
       ps._8PixelDispatchEnable      = wm_prog_data->dispatch_8;
       ps._16PixelDispatchEnable     = wm_prog_data->dispatch_16;
-      ps._32PixelDispatchEnable     = false;
+      ps._32PixelDispatchEnable     = wm_prog_data->dispatch_32;
+
+      /* From the Sky Lake PRM 3DSTATE_PS::32 Pixel Dispatch Enable:
+       *
+       *    "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, SIMD32
+       *    Dispatch must not be enabled for PER_PIXEL dispatch mode."
+       *
+       * Since 16x MSAA is first introduced on SKL, we don't need to apply
+       * the workaround on any older hardware.
+       */
+      if (GEN_GEN >= 9 && !wm_prog_data->persample_dispatch &&
+          multisample && multisample->rasterizationSamples == 16) {
+         assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
+         ps._32PixelDispatchEnable = false;
+      }
+
+      ps.KernelStartPointer0 = fs_bin->kernel.offset +
+                               brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
+      ps.KernelStartPointer1 = fs_bin->kernel.offset +
+                               brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
+      ps.KernelStartPointer2 = fs_bin->kernel.offset +
+                               brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
 
       ps.SingleProgramFlow          = false;
       ps.VectorMaskEnable           = true;
-      ps.SamplerCount               = get_sampler_count(fs_bin);
-      ps.BindingTableEntryCount     = get_binding_table_entry_count(fs_bin);
-      ps.PushConstantEnable         = wm_prog_data->base.nr_params > 0;
+      /* WA_1606682166 */
+      ps.SamplerCount               = GEN_GEN == 11 ? 0 : get_sampler_count(fs_bin);
+      /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */
+      ps.BindingTableEntryCount     = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(fs_bin);
+      ps.PushConstantEnable         = wm_prog_data->base.nr_params > 0 ||
+                                      wm_prog_data->base.ubo_ranges[0].length;
       ps.PositionXYOffsetSelect     = wm_prog_data->uses_pos_offset ?
                                       POSOFFSET_SAMPLE: POSOFFSET_NONE;
 #if GEN_GEN < 8
       ps.AttributeEnable            = wm_prog_data->num_varying_inputs > 0;
       ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
-      ps.DualSourceBlendEnable      = wm_prog_data->dual_src_blend;
+      ps.DualSourceBlendEnable      = dual_src_blend;
 #endif
 
 #if GEN_IS_HASWELL
@@ -1242,10 +1650,11 @@ emit_3dstate_ps(struct anv_pipeline *pipeline)
 #endif
 
       ps.DispatchGRFStartRegisterForConstantSetupData0 =
-         wm_prog_data->base.dispatch_grf_start_reg;
-      ps.DispatchGRFStartRegisterForConstantSetupData1 = 0;
+         brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
+      ps.DispatchGRFStartRegisterForConstantSetupData1 =
+         brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
       ps.DispatchGRFStartRegisterForConstantSetupData2 =
-         wm_prog_data->dispatch_grf_start_reg_2;
+         brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
 
       ps.PerThreadScratchSpace   = get_scratch_space(fs_bin);
       ps.ScratchSpaceBasePointer =
@@ -1255,7 +1664,9 @@ emit_3dstate_ps(struct anv_pipeline *pipeline)
 
 #if GEN_GEN >= 8
 static void
-emit_3dstate_ps_extra(struct anv_pipeline *pipeline)
+emit_3dstate_ps_extra(struct anv_pipeline *pipeline,
+                      struct anv_subpass *subpass,
+                      const VkPipelineColorBlendStateCreateInfo *blend)
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 
@@ -1269,15 +1680,30 @@ emit_3dstate_ps_extra(struct anv_pipeline *pipeline)
       ps.AttributeEnable               = wm_prog_data->num_varying_inputs > 0;
       ps.oMaskPresenttoRenderTarget    = wm_prog_data->uses_omask;
       ps.PixelShaderIsPerSample        = wm_prog_data->persample_dispatch;
-      ps.PixelShaderKillsPixel         = wm_prog_data->uses_kill;
       ps.PixelShaderComputedDepthMode  = wm_prog_data->computed_depth_mode;
       ps.PixelShaderUsesSourceDepth    = wm_prog_data->uses_src_depth;
       ps.PixelShaderUsesSourceW        = wm_prog_data->uses_src_w;
 
+      /* If the subpass has a depth or stencil self-dependency, then we need
+       * to force the hardware to do the depth/stencil write *after* fragment
+       * shader execution.  Otherwise, the writes may hit memory before we get
+       * around to fetching from the input attachment and we may get the depth
+       * or stencil value from the current draw rather than the previous one.
+       */
+      ps.PixelShaderKillsPixel         = subpass->has_ds_self_dep ||
+                                         wm_prog_data->uses_kill;
+
 #if GEN_GEN >= 9
+      ps.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
       ps.PixelShaderPullsBary    = wm_prog_data->pulls_bary;
-      ps.InputCoverageMaskState  = wm_prog_data->uses_sample_mask ?
-                                   ICMS_INNER_CONSERVATIVE : ICMS_NONE;
+
+      ps.InputCoverageMaskState  = ICMS_NONE;
+      if (wm_prog_data->uses_sample_mask) {
+         if (wm_prog_data->post_depth_coverage)
+            ps.InputCoverageMaskState  = ICMS_DEPTH_COVERAGE;
+         else
+            ps.InputCoverageMaskState  = ICMS_INNER_CONSERVATIVE;
+      }
 #else
       ps.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
 #endif
@@ -1293,6 +1719,46 @@ emit_3dstate_vf_topology(struct anv_pipeline *pipeline)
 }
 #endif
 
+static void
+emit_3dstate_vf_statistics(struct anv_pipeline *pipeline)
+{
+   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_STATISTICS), vfs) {
+      vfs.StatisticsEnable = true;
+   }
+}
+
+static void
+compute_kill_pixel(struct anv_pipeline *pipeline,
+                   const VkPipelineMultisampleStateCreateInfo *ms_info,
+                   const struct anv_subpass *subpass)
+{
+   if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
+      pipeline->kill_pixel = false;
+      return;
+   }
+
+   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
+
+   /* This computes the KillPixel portion of the computation for whether or
+    * not we want to enable the PMA fix on gen8 or gen9.  It's given by this
+    * chunk of the giant formula:
+    *
+    *    (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
+    *     3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
+    *     3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
+    *     3DSTATE_PS_BLEND::AlphaTestEnable ||
+    *     3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
+    *
+    * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable is always false and so is
+    * 3DSTATE_PS_BLEND::AlphaTestEnable since Vulkan doesn't have a concept
+    * of an alpha test.
+    */
+   pipeline->kill_pixel =
+      subpass->has_ds_self_dep || wm_prog_data->uses_kill ||
+      wm_prog_data->uses_omask ||
+      (ms_info && ms_info->alphaToCoverageEnable);
+}
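A small worked case of the formula: with the hypothetical multisample state below, alphaToCoverageEnable alone makes compute_kill_pixel() set pipeline->kill_pixel to true, even if the fragment shader never discards and writes no sample mask.

/* Hypothetical multisample state: alpha-to-coverage enabled.  This single
 * field satisfies the (ms_info && ms_info->alphaToCoverageEnable) term of
 * the kill_pixel computation that feeds the gen8/gen9 PMA-fix decision.
 */
const VkPipelineMultisampleStateCreateInfo a2c_ms = {
   .sType                 = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
   .rasterizationSamples  = VK_SAMPLE_COUNT_4_BIT,
   .alphaToCoverageEnable = VK_TRUE,
};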
+
 static VkResult
 genX(graphics_pipeline_create)(
     VkDevice                                    _device,
@@ -1309,6 +1775,10 @@ genX(graphics_pipeline_create)(
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
 
+   /* Use the default pipeline cache if none is specified */
+   if (cache == NULL && device->instance->pipeline_cache_enabled)
+      cache = &device->default_pipeline_cache;
+
    pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (pipeline == NULL)
@@ -1330,6 +1800,7 @@ genX(graphics_pipeline_create)(
    emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass);
    emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
                            pCreateInfo->pMultisampleState);
+   compute_kill_pixel(pipeline, pCreateInfo->pMultisampleState, subpass);
 
    emit_urb_setup(pipeline);
 
@@ -1352,23 +1823,27 @@ genX(graphics_pipeline_create)(
     * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
     * Stall" bit set.
     */
-   if (!brw->is_haswell && !brw->is_baytrail)
+   if (!device->info.is_haswell && !device->info.is_baytrail)
       gen7_emit_vs_workaround_flush(brw);
 #endif
 
    emit_3dstate_vs(pipeline);
+   emit_3dstate_hs_te_ds(pipeline, pCreateInfo->pTessellationState);
    emit_3dstate_gs(pipeline);
    emit_3dstate_sbe(pipeline);
-   emit_3dstate_wm(pipeline, pCreateInfo->pMultisampleState);
-   emit_3dstate_ps(pipeline);
+   emit_3dstate_wm(pipeline, subpass, pCreateInfo->pColorBlendState,
+                   pCreateInfo->pMultisampleState);
+   emit_3dstate_ps(pipeline, pCreateInfo->pColorBlendState,
+                   pCreateInfo->pMultisampleState);
 #if GEN_GEN >= 8
-   emit_3dstate_ps_extra(pipeline);
+   emit_3dstate_ps_extra(pipeline, subpass, pCreateInfo->pColorBlendState);
    emit_3dstate_vf_topology(pipeline);
 #endif
+   emit_3dstate_vf_statistics(pipeline);
 
    *pPipeline = anv_pipeline_to_handle(pipeline);
 
-   return VK_SUCCESS;
+   return pipeline->batch.status;
 }
 
 static VkResult
@@ -1388,13 +1863,16 @@ compute_pipeline_create(
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
 
+   /* Use the default pipeline cache if none is specified */
+   if (cache == NULL && device->instance->pipeline_cache_enabled)
+      cache = &device->default_pipeline_cache;
+
    pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (pipeline == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    pipeline->device = device;
-   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
 
    pipeline->blend_state.map = NULL;
 
@@ -1407,17 +1885,17 @@ compute_pipeline_create(
    pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
    pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
    pipeline->batch.relocs = &pipeline->batch_relocs;
+   pipeline->batch.status = VK_SUCCESS;
 
    /* When we free the pipeline, we detect stages based on the NULL status
     * of various prog_data pointers.  Make them NULL by default.
     */
    memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
 
-   pipeline->active_stages = 0;
-
    pipeline->needs_data_cache = false;
 
    assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
+   pipeline->active_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
    ANV_FROM_HANDLE(anv_shader_module, module,  pCreateInfo->stage.module);
    result = anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module,
                                     pCreateInfo->stage.pName,
@@ -1458,7 +1936,9 @@ compute_pipeline_create(
       vfe.MaximumNumberofThreads =
          devinfo->max_cs_threads * subslices - 1;
       vfe.NumberofURBEntries     = GEN_GEN <= 7 ? 0 : 2;
+#if GEN_GEN < 11
       vfe.ResetGatewayTimer      = true;
+#endif
 #if GEN_GEN <= 8
       vfe.BypassGatewayControl   = true;
 #endif
@@ -1472,9 +1952,14 @@ compute_pipeline_create(
 
    struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
       .KernelStartPointer     = cs_bin->kernel.offset,
-
-      .SamplerCount           = get_sampler_count(cs_bin),
-      .BindingTableEntryCount = get_binding_table_entry_count(cs_bin),
+      /* WA_1606682166 */
+      .SamplerCount           = GEN_GEN == 11 ? 0 : get_sampler_count(cs_bin),
+      /* Gen 11 workarounds table #2056 WABTPPrefetchDisable
+       *
+       * We add 1 because the CS indirect parameters buffer isn't accounted
+       * for in bind_map.surface_count.
+       */
+      .BindingTableEntryCount = GEN_GEN == 11 ? 0 : 1 + MIN2(cs_bin->bind_map.surface_count, 30),
       .BarrierEnable          = cs_prog_data->uses_barrier,
       .SharedLocalMemorySize  =
          encode_slm_size(GEN_GEN, cs_prog_data->base.total_shared),
@@ -1496,7 +1981,7 @@ compute_pipeline_create(
 
    *pPipeline = anv_pipeline_to_handle(pipeline);
 
-   return VK_SUCCESS;
+   return pipeline->batch.status;
 }
 
 VkResult genX(CreateGraphicsPipelines)(
@@ -1511,22 +1996,23 @@ VkResult genX(CreateGraphicsPipelines)(
 
    VkResult result = VK_SUCCESS;
 
-   unsigned i = 0;
-   for (; i < count; i++) {
+   unsigned i;
+   for (i = 0; i < count; i++) {
       result = genX(graphics_pipeline_create)(_device,
                                               pipeline_cache,
                                               &pCreateInfos[i],
                                               pAllocator, &pPipelines[i]);
-      if (result != VK_SUCCESS) {
-         for (unsigned j = 0; j < i; j++) {
-            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
-         }
 
-         return result;
-      }
+      /* Bail out on the first error as it is not obvious which error should
+       * be reported when more than one creation fails. */
+      if (result != VK_SUCCESS)
+         break;
    }
 
-   return VK_SUCCESS;
+   for (; i < count; i++)
+      pPipelines[i] = VK_NULL_HANDLE;
+
+   return result;
 }
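With the new early-out behavior, a caller sees the first error code and VK_NULL_HANDLE for every pipeline at or after the failing index, while earlier pipelines remain valid and still owned by the caller. A hedged caller-side sketch follows; device, cache, and create_infos are assumed to be set up elsewhere.

/* Caller-side sketch: if, say, the second of three creations fails, res is
 * that error, pipelines[1] and pipelines[2] come back as VK_NULL_HANDLE,
 * and pipelines[0] is a valid handle the caller must still destroy.
 */
VkPipeline pipelines[3];
VkResult res = vkCreateGraphicsPipelines(device, cache, 3, create_infos,
                                         NULL, pipelines);
if (res != VK_SUCCESS) {
   for (uint32_t i = 0; i < 3; i++) {
      if (pipelines[i] != VK_NULL_HANDLE)
         vkDestroyPipeline(device, pipelines[i], NULL);
   }
}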
 
 VkResult genX(CreateComputePipelines)(
@@ -1541,19 +2027,20 @@ VkResult genX(CreateComputePipelines)(
 
    VkResult result = VK_SUCCESS;
 
-   unsigned i = 0;
-   for (; i < count; i++) {
+   unsigned i;
+   for (i = 0; i < count; i++) {
       result = compute_pipeline_create(_device, pipeline_cache,
                                        &pCreateInfos[i],
                                        pAllocator, &pPipelines[i]);
-      if (result != VK_SUCCESS) {
-         for (unsigned j = 0; j < i; j++) {
-            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
-         }
 
-         return result;
-      }
+      /* Bail out on the first error as it is not obvious which error should
+       * be reported when more than one creation fails. */
+      if (result != VK_SUCCESS)
+         break;
    }
 
-   return VK_SUCCESS;
+   for (; i < count; i++)
+      pPipelines[i] = VK_NULL_HANDLE;
+
+   return result;
 }