intel/disasm: Label support in shader disassembly for UIP/JIP
diff --git a/src/mesa/drivers/dri/i965/brw_misc_state.c b/src/mesa/drivers/dri/i965/brw_misc_state.c
index 78a46cb050dde3fb81644ee3e7e7feb08f60dbfb..1291470d4795a79b21686b97f36c608e1d311762 100644
--- a/src/mesa/drivers/dri/i965/brw_misc_state.c
+++ b/src/mesa/drivers/dri/i965/brw_misc_state.c
 #include "brw_context.h"
 #include "brw_state.h"
 #include "brw_defines.h"
+#include "compiler/brw_eu_defines.h"
 
+#include "main/framebuffer.h"
 #include "main/fbobject.h"
+#include "main/format_utils.h"
 #include "main/glformats.h"
 
-/* Constant single cliprect for framebuffer object or DRI2 drawing */
-static void upload_drawing_rect(struct brw_context *brw)
-{
-   struct gl_context *ctx = &brw->ctx;
-
-   BEGIN_BATCH(4);
-   OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
-   OUT_BATCH(0); /* xmin, ymin */
-   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
-           ((ctx->DrawBuffer->Height - 1) << 16));
-   OUT_BATCH(0);
-   ADVANCE_BATCH();
-}
-
-const struct brw_tracked_state brw_drawing_rect = {
-   .dirty = {
-      .mesa = _NEW_BUFFERS,
-      .brw = BRW_NEW_CONTEXT,
-   },
-   .emit = upload_drawing_rect
-};
-
 /**
  * Upload pointers to the per-stage state.
  *
  * The state pointers in this packet are all relative to the general state
  * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
  */
-static void upload_pipelined_state_pointers(struct brw_context *brw )
+static void
+upload_pipelined_state_pointers(struct brw_context *brw)
 {
-   if (brw->gen == 5) {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
+   if (devinfo->gen == 5) {
       /* Errata workaround: flush before changing the clip unit's max thread count. */
       BEGIN_BATCH(1);
       OUT_BATCH(MI_FLUSH);
@@ -81,27 +65,22 @@ static void upload_pipelined_state_pointers(struct brw_context *brw )
 
    BEGIN_BATCH(7);
    OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
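+   /* Bit 0 of the GS and clip state pointers below is the unit's enable bit. */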
-   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-            brw->vs.base.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->vs.base.state_offset);
    if (brw->ff_gs.prog_active)
-      OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-               brw->ff_gs.state_offset | 1);
+      OUT_RELOC(brw->batch.state.bo, 0, brw->ff_gs.state_offset | 1);
    else
       OUT_BATCH(0);
-   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-            brw->clip.state_offset | 1);
-   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-            brw->sf.state_offset);
-   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-            brw->wm.base.state_offset);
-   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-            brw->cc.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->clip.state_offset | 1);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->sf.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->wm.base.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->cc.state_offset);
    ADVANCE_BATCH();
 
    brw->ctx.NewDriverState |= BRW_NEW_PSP;
 }
 
-static void upload_psp_urb_cbs(struct brw_context *brw )
+static void
+upload_psp_urb_cbs(struct brw_context *brw)
 {
    upload_pipelined_state_pointers(brw);
    brw_upload_urb_fence(brw);
@@ -112,6 +91,7 @@ const struct brw_tracked_state brw_psp_urb_cbs = {
    .dirty = {
       .mesa = 0,
       .brw = BRW_NEW_BATCH |
+             BRW_NEW_BLORP |
              BRW_NEW_FF_GS_PROG_DATA |
              BRW_NEW_GEN4_UNIT_STATE |
              BRW_NEW_STATE_BASE_ADDRESS |
@@ -142,95 +122,88 @@ brw_depthbuffer_format(struct brw_context *brw)
    return brw_depth_format(brw, drb->mt->format);
 }
 
-/**
- * Returns the mask of how many bits of x and y must be handled through the
- * depthbuffer's draw offset x and y fields.
- *
- * The draw offset x/y field of the depthbuffer packet is unfortunately shared
- * between the depth, hiz, and stencil buffers.  Because it can be hard to get
- * all 3 to agree on this value, we want to do as much drawing offset
- * adjustment as possible by moving the base offset of the 3 buffers, which is
- * restricted to tile boundaries.
- *
- * For each buffer, the remainder must be applied through the x/y draw offset.
- * This returns the worst-case mask of the low bits that have to go into the
- * packet.  If the 3 buffers don't agree on the drawing offset ANDed with this
- * mask, then we're in trouble.
- */
-void
-brw_get_depthstencil_tile_masks(struct intel_mipmap_tree *depth_mt,
-                                uint32_t depth_level,
-                                uint32_t depth_layer,
-                                struct intel_mipmap_tree *stencil_mt,
-                                uint32_t *out_tile_mask_x,
-                                uint32_t *out_tile_mask_y)
+static struct intel_mipmap_tree *
+get_stencil_miptree(struct intel_renderbuffer *irb)
 {
-   uint32_t tile_mask_x = 0, tile_mask_y = 0;
+   if (!irb)
+      return NULL;
+   if (irb->mt->stencil_mt)
+      return irb->mt->stencil_mt;
+   return intel_renderbuffer_get_mt(irb);
+}
 
-   if (depth_mt) {
-      intel_miptree_get_tile_masks(depth_mt, &tile_mask_x, &tile_mask_y, false);
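+/* Move the (pre-gen6) depth/stencil miptree of \p irb to a temporary if its
+ * intra-tile draw offset violates the hardware's alignment restrictions.
+ * Returns true if the buffer had to be rebased.
+ */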
+static bool
+rebase_depth_stencil(struct brw_context *brw, struct intel_renderbuffer *irb,
+                     bool invalidate)
+{
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   struct gl_context *ctx = &brw->ctx;
+   uint32_t tile_mask_x = 0, tile_mask_y = 0;
 
-      if (intel_miptree_level_has_hiz(depth_mt, depth_level)) {
-         uint32_t hiz_tile_mask_x, hiz_tile_mask_y;
-         intel_miptree_get_tile_masks(depth_mt->hiz_buf->mt,
-                                      &hiz_tile_mask_x, &hiz_tile_mask_y,
-                                      false);
+   intel_get_tile_masks(irb->mt->surf.tiling, irb->mt->cpp,
+                        &tile_mask_x, &tile_mask_y);
+   assert(!intel_miptree_level_has_hiz(irb->mt, irb->mt_level));
 
-         /* Each HiZ row represents 2 rows of pixels */
-         hiz_tile_mask_y = hiz_tile_mask_y << 1 | 1;
+   uint32_t tile_x = irb->draw_x & tile_mask_x;
+   uint32_t tile_y = irb->draw_y & tile_mask_y;
 
-         tile_mask_x |= hiz_tile_mask_x;
-         tile_mask_y |= hiz_tile_mask_y;
-      }
+   /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
+    * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
+    * Coordinate Offset X/Y":
+    *
+    *   "The 3 LSBs of both offsets must be zero to ensure correct
+    *   alignment"
+    */
+   bool rebase = tile_x & 7 || tile_y & 7;
+
+   /* We didn't even have intra-tile offsets before g45. */
+   rebase |= (!devinfo->has_surface_tile_offset && (tile_x || tile_y));
+
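+   /* Worked example (assuming a 4 Bpp X-tiled miptree): an X tile is 512
+    * bytes by 8 rows, i.e. 128x8 pixels, so tile_mask_x = 127 and
+    * tile_mask_y = 7.  A draw_x of 132 then leaves tile_x = 4, which fails
+    * the 8-pixel alignment check above and forces a rebase.
+    */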
+   if (rebase) {
+      perf_debug("HW workaround: blitting depth level %d to a temporary "
+                 "to fix alignment (depth tile offset %d,%d)\n",
+                 irb->mt_level, tile_x, tile_y);
+      intel_renderbuffer_move_to_temp(brw, irb, invalidate);
+
+      /* The miptree now contains only a single slice. */
+      brw->depthstencil.tile_x = 0;
+      brw->depthstencil.tile_y = 0;
+      brw->depthstencil.depth_offset = 0;
+      return true;
    }
 
-   if (stencil_mt) {
-      if (stencil_mt->stencil_mt)
-        stencil_mt = stencil_mt->stencil_mt;
-
-      if (stencil_mt->format == MESA_FORMAT_S_UINT8) {
-         /* Separate stencil buffer uses 64x64 tiles. */
-         tile_mask_x |= 63;
-         tile_mask_y |= 63;
-      } else {
-         uint32_t stencil_tile_mask_x, stencil_tile_mask_y;
-         intel_miptree_get_tile_masks(stencil_mt,
-                                      &stencil_tile_mask_x,
-                                      &stencil_tile_mask_y, false);
-
-         tile_mask_x |= stencil_tile_mask_x;
-         tile_mask_y |= stencil_tile_mask_y;
-      }
-   }
+   /* While we just tried to get everything aligned, we may have failed to do
+    * so in the case of rendering to array or 3D textures, where nonzero faces
+    * will still have an offset post-rebase.  At least give an informative
+    * warning.
+    */
+   WARN_ONCE((tile_x & 7) || (tile_y & 7),
+             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
+             "Truncating offset (%u:%u), bad rendering may occur.\n",
+             tile_x, tile_y);
+   tile_x &= ~7;
+   tile_y &= ~7;
 
-   *out_tile_mask_x = tile_mask_x;
-   *out_tile_mask_y = tile_mask_y;
-}
+   brw->depthstencil.tile_x = tile_x;
+   brw->depthstencil.tile_y = tile_y;
+   brw->depthstencil.depth_offset = intel_miptree_get_aligned_offset(
+                                       irb->mt,
+                                       irb->draw_x & ~tile_mask_x,
+                                       irb->draw_y & ~tile_mask_y);
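+   /* Worked example: with tile_mask_x = 127, a draw_x of 264 splits into an
+    * aligned base at pixel 256 (folded into depth_offset) plus tile_x = 8,
+    * which satisfies the 8-pixel alignment requirement.
+    */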
 
-static struct intel_mipmap_tree *
-get_stencil_miptree(struct intel_renderbuffer *irb)
-{
-   if (!irb)
-      return NULL;
-   if (irb->mt->stencil_mt)
-      return irb->mt->stencil_mt;
-   return irb->mt;
+   return false;
 }
 
 void
 brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                       GLbitfield clear_mask)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
    struct gl_context *ctx = &brw->ctx;
    struct gl_framebuffer *fb = ctx->DrawBuffer;
-   bool rebase_depth = false;
-   bool rebase_stencil = false;
    struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
    struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
    struct intel_mipmap_tree *depth_mt = NULL;
-   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
-   uint32_t tile_x = 0, tile_y = 0, stencil_tile_x = 0, stencil_tile_y = 0;
-   uint32_t stencil_draw_x = 0, stencil_draw_y = 0;
    bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
    bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;
 
@@ -242,70 +215,22 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
    brw->depthstencil.tile_x = 0;
    brw->depthstencil.tile_y = 0;
    brw->depthstencil.depth_offset = 0;
-   brw->depthstencil.stencil_offset = 0;
-   brw->depthstencil.hiz_offset = 0;
-   brw->depthstencil.depth_mt = NULL;
-   brw->depthstencil.stencil_mt = NULL;
-   if (depth_irb)
-      brw->depthstencil.depth_mt = depth_mt;
-   if (stencil_irb)
-      brw->depthstencil.stencil_mt = get_stencil_miptree(stencil_irb);
 
    /* Gen6+ doesn't require the workarounds, since we always program the
     * surface state at the start of the whole surface.
     */
-   if (brw->gen >= 6)
+   if (devinfo->gen >= 6)
       return;
 
    /* Check if depth buffer is in depth/stencil format.  If so, then it's only
-    * safe to invalidate it if we're also clearing stencil, and both depth_irb
-    * and stencil_irb point to the same miptree.
-    *
-    * Note: it's not sufficient to check for the case where
-    * _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL,
-    * because this fails to catch depth/stencil buffers on hardware that uses
-    * separate stencil.  To catch that case, we check whether
-    * depth_mt->stencil_mt is non-NULL.
+    * safe to invalidate it if we're also clearing stencil.
     */
    if (depth_irb && invalidate_depth &&
-       (_mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL ||
-        depth_mt->stencil_mt)) {
-      invalidate_depth = invalidate_stencil && depth_irb && stencil_irb
-         && depth_irb->mt == stencil_irb->mt;
-   }
-
-   uint32_t tile_mask_x, tile_mask_y;
-   brw_get_depthstencil_tile_masks(depth_mt,
-                                   depth_mt ? depth_irb->mt_level : 0,
-                                   depth_mt ? depth_irb->mt_layer : 0,
-                                   stencil_mt,
-                                   &tile_mask_x, &tile_mask_y);
+      _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL)
+      invalidate_depth = invalidate_stencil && stencil_irb;
 
    if (depth_irb) {
-      tile_x = depth_irb->draw_x & tile_mask_x;
-      tile_y = depth_irb->draw_y & tile_mask_y;
-
-      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
-       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
-       * Coordinate Offset X/Y":
-       *
-       *   "The 3 LSBs of both offsets must be zero to ensure correct
-       *   alignment"
-       */
-      if (tile_x & 7 || tile_y & 7)
-         rebase_depth = true;
-
-      /* We didn't even have intra-tile offsets before g45. */
-      if (!brw->has_surface_tile_offset) {
-         if (tile_x || tile_y)
-            rebase_depth = true;
-      }
-
-      if (rebase_depth) {
-         perf_debug("HW workaround: blitting depth level %d to a temporary "
-                    "to fix alignment (depth tile offset %d,%d)\n",
-                    depth_irb->mt_level, tile_x, tile_y);
-         intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
+      if (rebase_depth_stencil(brw, depth_irb, invalidate_depth)) {
          /* In the case of stencil_irb being the same packed depth/stencil
           * texture but not the same rb, make it point at our rebased mt, too.
           */
@@ -315,294 +240,65 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
             intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
             intel_renderbuffer_set_draw_offset(stencil_irb);
          }
-
-         stencil_mt = get_stencil_miptree(stencil_irb);
-
-         tile_x = depth_irb->draw_x & tile_mask_x;
-         tile_y = depth_irb->draw_y & tile_mask_y;
       }
 
       if (stencil_irb) {
-         stencil_mt = get_stencil_miptree(stencil_irb);
-         intel_miptree_get_image_offset(stencil_mt,
-                                        stencil_irb->mt_level,
-                                        stencil_irb->mt_layer,
-                                        &stencil_draw_x, &stencil_draw_y);
-         int stencil_tile_x = stencil_draw_x & tile_mask_x;
-         int stencil_tile_y = stencil_draw_y & tile_mask_y;
-
-         /* If stencil doesn't match depth, then we'll need to rebase stencil
-          * as well.  (if we hadn't decided to rebase stencil before, the
-          * post-stencil depth test will also rebase depth to try to match it
-          * up).
-          */
-         if (tile_x != stencil_tile_x ||
-             tile_y != stencil_tile_y) {
-            rebase_stencil = true;
-         }
-      }
-   }
-
-   /* If we have (just) stencil, check it for ignored low bits as well */
-   if (stencil_irb) {
-      intel_miptree_get_image_offset(stencil_mt,
-                                     stencil_irb->mt_level,
-                                     stencil_irb->mt_layer,
-                                     &stencil_draw_x, &stencil_draw_y);
-      stencil_tile_x = stencil_draw_x & tile_mask_x;
-      stencil_tile_y = stencil_draw_y & tile_mask_y;
-
-      if (stencil_tile_x & 7 || stencil_tile_y & 7)
-         rebase_stencil = true;
-
-      if (!brw->has_surface_tile_offset) {
-         if (stencil_tile_x || stencil_tile_y)
-            rebase_stencil = true;
+         assert(stencil_irb->mt == depth_irb->mt);
+         assert(stencil_irb->mt_level == depth_irb->mt_level);
+         assert(stencil_irb->mt_layer == depth_irb->mt_layer);
       }
    }
 
-   if (rebase_stencil) {
-      perf_debug("HW workaround: blitting stencil level %d to a temporary "
-                 "to fix alignment (stencil tile offset %d,%d)\n",
-                 stencil_irb->mt_level, stencil_tile_x, stencil_tile_y);
-
-      intel_renderbuffer_move_to_temp(brw, stencil_irb, invalidate_stencil);
-      stencil_mt = get_stencil_miptree(stencil_irb);
-
-      intel_miptree_get_image_offset(stencil_mt,
-                                     stencil_irb->mt_level,
-                                     stencil_irb->mt_layer,
-                                     &stencil_draw_x, &stencil_draw_y);
-      stencil_tile_x = stencil_draw_x & tile_mask_x;
-      stencil_tile_y = stencil_draw_y & tile_mask_y;
-
-      if (depth_irb && depth_irb->mt == stencil_irb->mt) {
-         intel_miptree_reference(&depth_irb->mt, stencil_irb->mt);
-         intel_renderbuffer_set_draw_offset(depth_irb);
-      } else if (depth_irb && !rebase_depth) {
-         if (tile_x != stencil_tile_x ||
-             tile_y != stencil_tile_y) {
-            perf_debug("HW workaround: blitting depth level %d to a temporary "
-                       "to match stencil level %d alignment (depth tile offset "
-                       "%d,%d, stencil offset %d,%d)\n",
-                       depth_irb->mt_level,
-                       stencil_irb->mt_level,
-                       tile_x, tile_y,
-                       stencil_tile_x, stencil_tile_y);
-
-            intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
-
-            tile_x = depth_irb->draw_x & tile_mask_x;
-            tile_y = depth_irb->draw_y & tile_mask_y;
-
-            if (stencil_irb && stencil_irb->mt == depth_mt) {
-               intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
-               intel_renderbuffer_set_draw_offset(stencil_irb);
-            }
-
-            WARN_ONCE(stencil_tile_x != tile_x ||
-                      stencil_tile_y != tile_y,
-                      "Rebased stencil tile offset (%d,%d) doesn't match depth "
-                      "tile offset (%d,%d).\n",
-                      stencil_tile_x, stencil_tile_y,
-                      tile_x, tile_y);
-         }
-      }
-   }
-
-   if (!depth_irb) {
-      tile_x = stencil_tile_x;
-      tile_y = stencil_tile_y;
-   }
-
-   /* While we just tried to get everything aligned, we may have failed to do
-    * so in the case of rendering to array or 3D textures, where nonzero faces
-    * will still have an offset post-rebase.  At least give an informative
-    * warning.
-    */
-   WARN_ONCE((tile_x & 7) || (tile_y & 7),
-             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
-             "Truncating offset, bad rendering may occur.\n");
-   tile_x &= ~7;
-   tile_y &= ~7;
-
-   /* Now, after rebasing, save off the new depthstencil state so the hardware
-    * packets can just dereference that without re-calculating tile offsets.
-    */
-   brw->depthstencil.tile_x = tile_x;
-   brw->depthstencil.tile_y = tile_y;
-   if (depth_irb) {
-      depth_mt = depth_irb->mt;
-      brw->depthstencil.depth_mt = depth_mt;
-      brw->depthstencil.depth_offset =
-         intel_miptree_get_aligned_offset(depth_mt,
-                                          depth_irb->draw_x & ~tile_mask_x,
-                                          depth_irb->draw_y & ~tile_mask_y,
-                                          false);
-      if (intel_renderbuffer_has_hiz(depth_irb)) {
-         brw->depthstencil.hiz_offset =
-            intel_miptree_get_aligned_offset(depth_mt,
-                                             depth_irb->draw_x & ~tile_mask_x,
-                                             (depth_irb->draw_y & ~tile_mask_y) / 2,
-                                             false);
-      }
-   }
-   if (stencil_irb) {
-      stencil_mt = get_stencil_miptree(stencil_irb);
-
-      brw->depthstencil.stencil_mt = stencil_mt;
-      if (stencil_mt->format == MESA_FORMAT_S_UINT8) {
-         /* Note: we can't compute the stencil offset using
-          * intel_region_get_aligned_offset(), because stencil_region claims
-          * that the region is untiled even though it's W tiled.
-          */
-         brw->depthstencil.stencil_offset =
-            (stencil_draw_y & ~tile_mask_y) * stencil_mt->pitch +
-            (stencil_draw_x & ~tile_mask_x) * 64;
-      }
-   }
+   /* If there is no depth attachment, check whether stencil needs a rebase. */
+   if (!depth_irb && stencil_irb)
+      rebase_depth_stencil(brw, stencil_irb, invalidate_stencil);
 }
 
-void
-brw_emit_depthbuffer(struct brw_context *brw)
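+/* Emits 3DSTATE_DEPTH_BUFFER for gen4/gen5, the only callers left after this
+ * rework; those platforms use neither HiZ nor separate stencil, so no
+ * 3DSTATE_HIER_DEPTH_BUFFER or 3DSTATE_STENCIL_BUFFER packets are needed.
+ */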
+static void
+brw_emit_depth_stencil_hiz(struct brw_context *brw,
+                           struct intel_renderbuffer *depth_irb,
+                           struct intel_mipmap_tree *depth_mt,
+                           struct intel_renderbuffer *stencil_irb,
+                           struct intel_mipmap_tree *stencil_mt)
 {
-   struct gl_context *ctx = &brw->ctx;
-   struct gl_framebuffer *fb = ctx->DrawBuffer;
-   /* _NEW_BUFFERS */
-   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
-   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
-   struct intel_mipmap_tree *depth_mt = brw->depthstencil.depth_mt;
-   struct intel_mipmap_tree *stencil_mt = brw->depthstencil.stencil_mt;
    uint32_t tile_x = brw->depthstencil.tile_x;
    uint32_t tile_y = brw->depthstencil.tile_y;
-   bool hiz = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
-   bool separate_stencil = false;
    uint32_t depth_surface_type = BRW_SURFACE_NULL;
    uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
    uint32_t depth_offset = 0;
    uint32_t width = 1, height = 1;
-
-   if (stencil_mt) {
-      separate_stencil = stencil_mt->format == MESA_FORMAT_S_UINT8;
-
-      /* Gen7 supports only separate stencil */
-      assert(separate_stencil || brw->gen < 7);
-   }
+   bool tiled_surface = true;
 
    /* If there's a packed depth/stencil bound to stencil only, we need to
     * emit the packed depth/stencil buffer packet.
     */
-   if (!depth_irb && stencil_irb && !separate_stencil) {
+   if (!depth_irb && stencil_irb) {
       depth_irb = stencil_irb;
       depth_mt = stencil_mt;
    }
 
    if (depth_irb && depth_mt) {
-      /* When 3DSTATE_DEPTH_BUFFER.Separate_Stencil_Enable is set, then
-       * 3DSTATE_DEPTH_BUFFER.Surface_Format is not permitted to be a packed
-       * depthstencil format.
-       *
-       * Gens prior to 7 require that HiZ_Enable and Separate_Stencil_Enable be
-       * set to the same value. Gens after 7 implicitly always set
-       * Separate_Stencil_Enable; software cannot disable it.
-       */
-      if ((brw->gen < 7 && hiz) || brw->gen >= 7) {
-         assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
-      }
-
-      /* Prior to Gen7, if using separate stencil, hiz must be enabled. */
-      assert(brw->gen >= 7 || !separate_stencil || hiz);
-
-      assert(brw->gen < 6 || depth_mt->tiling == I915_TILING_Y);
-      assert(!hiz || depth_mt->tiling == I915_TILING_Y);
-
       depthbuffer_format = brw_depthbuffer_format(brw);
       depth_surface_type = BRW_SURFACE_2D;
       depth_offset = brw->depthstencil.depth_offset;
       width = depth_irb->Base.Base.Width;
       height = depth_irb->Base.Base.Height;
-   } else if (separate_stencil) {
-      /*
-       * There exists a separate stencil buffer but no depth buffer.
-       *
-       * The stencil buffer inherits most of its fields from
-       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
-       * height.
-       *
-       * The tiled bit must be set. From the Sandybridge PRM, Volume 2, Part 1,
-       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
-       *     [DevGT+]: This field must be set to TRUE.
-       */
-      assert(brw->has_separate_stencil);
-
-      depth_surface_type = BRW_SURFACE_2D;
-      width = stencil_irb->Base.Base.Width;
-      height = stencil_irb->Base.Base.Height;
+      tiled_surface = depth_mt->surf.tiling != ISL_TILING_LINEAR;
    }
 
-   if (depth_mt)
-      brw_render_cache_set_check_flush(brw, depth_mt->bo);
-   if (stencil_mt)
-      brw_render_cache_set_check_flush(brw, stencil_mt->bo);
-
-   brw->vtbl.emit_depth_stencil_hiz(brw, depth_mt, depth_offset,
-                                    depthbuffer_format, depth_surface_type,
-                                    stencil_mt, hiz, separate_stencil,
-                                    width, height, tile_x, tile_y);
-}
-
-void
-brw_emit_depth_stencil_hiz(struct brw_context *brw,
-                           struct intel_mipmap_tree *depth_mt,
-                           uint32_t depth_offset, uint32_t depthbuffer_format,
-                           uint32_t depth_surface_type,
-                           struct intel_mipmap_tree *stencil_mt,
-                           bool hiz, bool separate_stencil,
-                           uint32_t width, uint32_t height,
-                           uint32_t tile_x, uint32_t tile_y)
-{
-   /* Enable the hiz bit if we're doing separate stencil, because it and the
-    * separate stencil bit must have the same value. From Section 2.11.5.6.1.1
-    * 3DSTATE_DEPTH_BUFFER, Bit 1.21 "Separate Stencil Enable":
-    *     [DevIL]: If this field is enabled, Hierarchical Depth Buffer
-    *     Enable must also be enabled.
-    *
-    *     [DevGT]: This field must be set to the same value (enabled or
-    *     disabled) as Hierarchical Depth Buffer Enable
-    */
-   bool enable_hiz_ss = hiz || separate_stencil;
-
-
-   /* 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER are both
-    * non-pipelined state that will need the PIPE_CONTROL workaround.
-    */
-   if (brw->gen == 6) {
-      intel_emit_depth_stall_flushes(brw);
-   }
-
-   unsigned int len;
-   if (brw->gen >= 6)
-      len = 7;
-   else if (brw->is_g4x || brw->gen == 5)
-      len = 6;
-   else
-      len = 5;
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   const unsigned len = (devinfo->is_g4x || devinfo->gen == 5) ? 6 : 5;
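+   /* 3DSTATE_DEPTH_BUFFER is 5 dwords on original gen4; g4x and gen5 add a
+    * sixth dword for the intra-tile draw offset emitted below.
+    */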
 
    BEGIN_BATCH(len);
    OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
-   OUT_BATCH((depth_mt ? depth_mt->pitch - 1 : 0) |
+   OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch_B - 1 : 0) |
              (depthbuffer_format << 18) |
-             ((enable_hiz_ss ? 1 : 0) << 21) | /* separate stencil enable */
-             ((enable_hiz_ss ? 1 : 0) << 22) | /* hiz enable */
              (BRW_TILEWALK_YMAJOR << 26) |
-             ((depth_mt ? depth_mt->tiling != I915_TILING_NONE : 1)
-              << 27) |
+             (tiled_surface << 27) |
              (depth_surface_type << 29));
 
    if (depth_mt) {
-      OUT_RELOC(depth_mt->bo,
-               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
-               depth_offset);
+      OUT_RELOC(depth_mt->bo, RELOC_WRITE, depth_offset);
    } else {
       OUT_BATCH(0);
    }
@@ -611,274 +307,403 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw,
              ((height + tile_y - 1) << 19));
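    /* The dimensions above include tile_x/tile_y, so the draw rectangle still
     * covers the whole renderbuffer after the base address was moved back to
     * a tile boundary.
     */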
    OUT_BATCH(0);
 
-   if (brw->is_g4x || brw->gen >= 5)
+   if (devinfo->is_g4x || devinfo->gen >= 5)
       OUT_BATCH(tile_x | (tile_y << 16));
    else
       assert(tile_x == 0 && tile_y == 0);
 
-   if (brw->gen >= 6)
+   if (devinfo->gen >= 6)
       OUT_BATCH(0);
 
    ADVANCE_BATCH();
+}
 
-   if (hiz || separate_stencil) {
-      /*
-       * In the 3DSTATE_DEPTH_BUFFER batch emitted above, the 'separate
-       * stencil enable' and 'hiz enable' bits were set. Therefore we must
-       * emit 3DSTATE_HIER_DEPTH_BUFFER and 3DSTATE_STENCIL_BUFFER. Even if
-       * there is no stencil buffer, 3DSTATE_STENCIL_BUFFER must be emitted;
-       * failure to do so causes hangs on gen5 and a stall on gen6.
-       */
+void
+brw_emit_depthbuffer(struct brw_context *brw)
+{
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   struct gl_context *ctx = &brw->ctx;
+   struct gl_framebuffer *fb = ctx->DrawBuffer;
+   /* _NEW_BUFFERS */
+   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
+   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
+   struct intel_mipmap_tree *depth_mt = intel_renderbuffer_get_mt(depth_irb);
+   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
 
-      /* Emit hiz buffer. */
-      if (hiz) {
-         struct intel_mipmap_tree *hiz_mt = depth_mt->hiz_buf->mt;
-        BEGIN_BATCH(3);
-        OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
-        OUT_BATCH(hiz_mt->pitch - 1);
-        OUT_RELOC(hiz_mt->bo,
-                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
-                  brw->depthstencil.hiz_offset);
-        ADVANCE_BATCH();
-      } else {
-        BEGIN_BATCH(3);
-        OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
-        OUT_BATCH(0);
-        OUT_BATCH(0);
-        ADVANCE_BATCH();
-      }
+   if (depth_mt)
+      brw_cache_flush_for_depth(brw, depth_mt->bo);
+   if (stencil_mt)
+      brw_cache_flush_for_depth(brw, stencil_mt->bo);
 
-      /* Emit stencil buffer. */
-      if (separate_stencil) {
-        BEGIN_BATCH(3);
-        OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
-         /* The stencil buffer has quirky pitch requirements.  From Vol 2a,
-          * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
-          *    The pitch must be set to 2x the value computed based on width, as
-          *    the stencil buffer is stored with two rows interleaved.
-          */
-        OUT_BATCH(2 * stencil_mt->pitch - 1);
-        OUT_RELOC(stencil_mt->bo,
-                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
-                  brw->depthstencil.stencil_offset);
-        ADVANCE_BATCH();
-      } else {
-        BEGIN_BATCH(3);
-        OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
-        OUT_BATCH(0);
-        OUT_BATCH(0);
-        ADVANCE_BATCH();
-      }
+   if (devinfo->gen < 6) {
+      brw_emit_depth_stencil_hiz(brw, depth_irb, depth_mt,
+                                 stencil_irb, stencil_mt);
+      return;
    }
 
-   /*
-    * On Gen >= 6, emit clear params for safety. If using hiz, then clear
-    * params must be emitted.
-    *
-    * From Section 2.11.5.6.4.1 3DSTATE_CLEAR_PARAMS:
-    *     3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE packet
-    *     when HiZ is enabled and the DEPTH_BUFFER_STATE changes.
-    */
-   if (brw->gen >= 6 || hiz) {
-      BEGIN_BATCH(2);
-      OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
-               GEN5_DEPTH_CLEAR_VALID |
-               (2 - 2));
-      OUT_BATCH(depth_mt ? depth_mt->depth_clear_value : 0);
-      ADVANCE_BATCH();
+   /* Skip repeated NULL depth/stencil emits (think 2D rendering). */
+   if (!depth_mt && !stencil_mt && brw->no_depth_or_stencil) {
+      assert(brw->hw_ctx);
+      return;
    }
-}
 
-const struct brw_tracked_state brw_depthbuffer = {
-   .dirty = {
-      .mesa = _NEW_BUFFERS,
-      .brw = BRW_NEW_BATCH,
-   },
-   .emit = brw_emit_depthbuffer,
-};
+   brw_emit_depth_stall_flushes(brw);
 
+   const unsigned ds_dwords = brw->isl_dev.ds.size / 4;
+   intel_batchbuffer_begin(brw, ds_dwords);
+   uint32_t *ds_map = brw->batch.map_next;
+   const uint32_t ds_offset = (char *)ds_map - (char *)brw->batch.batch.map;
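+   /* ds_offset is the offset of this packet within the batch buffer; the
+    * brw_batch_reloc() calls below patch the address dwords that
+    * isl_emit_depth_stencil_hiz_s() writes at fixed offsets (isl_dev.ds.*)
+    * inside the packet.
+    */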
 
+   struct isl_view view = {
+      /* Some nice defaults */
+      .base_level = 0,
+      .levels = 1,
+      .base_array_layer = 0,
+      .array_len = 1,
+      .swizzle = ISL_SWIZZLE_IDENTITY,
+   };
 
-/***********************************************************************
- * Polygon stipple packet
- */
+   struct isl_depth_stencil_hiz_emit_info info = {
+      .view = &view,
+   };
 
-static void upload_polygon_stipple(struct brw_context *brw)
-{
-   struct gl_context *ctx = &brw->ctx;
-   GLuint i;
+   if (depth_mt) {
+      view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
+      info.depth_surf = &depth_mt->surf;
+
+      info.depth_address =
+         brw_batch_reloc(&brw->batch,
+                         ds_offset + brw->isl_dev.ds.depth_offset,
+                         depth_mt->bo, depth_mt->offset, RELOC_WRITE);
+
+      info.mocs = brw_get_bo_mocs(devinfo, depth_mt->bo);
+      view.base_level = depth_irb->mt_level - depth_irb->mt->first_level;
+      view.base_array_layer = depth_irb->mt_layer;
+      view.array_len = MAX2(depth_irb->layer_count, 1);
+      view.format = depth_mt->surf.format;
+
+      info.hiz_usage = depth_mt->aux_usage;
+      if (!intel_renderbuffer_has_hiz(depth_irb)) {
+         /* Just because a miptree has ISL_AUX_USAGE_HIZ does not mean that
+          * all miplevels of that miptree are guaranteed to support HiZ.  See
+          * intel_miptree_level_enable_hiz for details.
+          */
+         info.hiz_usage = ISL_AUX_USAGE_NONE;
+      }
 
-   /* _NEW_POLYGON */
-   if (!ctx->Polygon.StippleFlag)
-      return;
+      if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
+         info.hiz_surf = &depth_mt->aux_buf->surf;
+
+         uint32_t hiz_offset = 0;
+         if (devinfo->gen == 6) {
+            /* HiZ surfaces on Sandy Bridge technically don't support
+             * mip-mapping.  However, we can fake it by offsetting to the
+             * first slice of LOD0 in the HiZ surface.
+             */
+            isl_surf_get_image_offset_B_tile_sa(&depth_mt->aux_buf->surf,
+                                                view.base_level, 0, 0,
+                                                &hiz_offset, NULL, NULL);
+         }
 
-   BEGIN_BATCH(33);
-   OUT_BATCH(_3DSTATE_POLY_STIPPLE_PATTERN << 16 | (33 - 2));
+         info.hiz_address =
+            brw_batch_reloc(&brw->batch,
+                            ds_offset + brw->isl_dev.ds.hiz_offset,
+                            depth_mt->aux_buf->bo,
+                            depth_mt->aux_buf->offset + hiz_offset,
+                            RELOC_WRITE);
+      }
 
-   /* Polygon stipple is provided in OpenGL order, i.e. bottom
-    * row first.  If we're rendering to a window (i.e. the
-    * default frame buffer object, 0), then we need to invert
-    * it to match our pixel layout.  But if we're rendering
-    * to a FBO (i.e. any named frame buffer object), we *don't*
-    * need to invert - we already match the layout.
-    */
-   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
-      for (i = 0; i < 32; i++)
-         OUT_BATCH(ctx->PolygonStipple[31 - i]); /* invert */
-   }
-   else {
-      for (i = 0; i < 32; i++)
-        OUT_BATCH(ctx->PolygonStipple[i]);
+      info.depth_clear_value = depth_mt->fast_clear_color.f32[0];
    }
-   ADVANCE_BATCH();
-}
-
-const struct brw_tracked_state brw_polygon_stipple = {
-   .dirty = {
-      .mesa = _NEW_POLYGON |
-              _NEW_POLYGONSTIPPLE,
-      .brw = BRW_NEW_CONTEXT,
-   },
-   .emit = upload_polygon_stipple
-};
 
+   if (stencil_mt) {
+      view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
+      info.stencil_surf = &stencil_mt->surf;
+
+      if (!depth_mt) {
+         info.mocs = brw_get_bo_mocs(devinfo, stencil_mt->bo);
+         view.base_level = stencil_irb->mt_level - stencil_irb->mt->first_level;
+         view.base_array_layer = stencil_irb->mt_layer;
+         view.array_len = MAX2(stencil_irb->layer_count, 1);
+         view.format = stencil_mt->surf.format;
+      }
 
-/***********************************************************************
- * Polygon stipple offset packet
- */
+      uint32_t stencil_offset = 0;
+      if (devinfo->gen == 6) {
+         /* Stencil surfaces on Sandy Bridge technically don't support
+          * mip-mapping.  However, we can fake it by offsetting to the
+          * first slice of LOD0 in the stencil surface.
+          */
+         isl_surf_get_image_offset_B_tile_sa(&stencil_mt->surf,
+                                             view.base_level, 0, 0,
+                                             &stencil_offset, NULL, NULL);
+      }
 
-static void upload_polygon_stipple_offset(struct brw_context *brw)
-{
-   struct gl_context *ctx = &brw->ctx;
+      info.stencil_address =
+         brw_batch_reloc(&brw->batch,
+                         ds_offset + brw->isl_dev.ds.stencil_offset,
+                         stencil_mt->bo,
+                         stencil_mt->offset + stencil_offset,
+                         RELOC_WRITE);
+   }
 
-   /* _NEW_POLYGON */
-   if (!ctx->Polygon.StippleFlag)
-      return;
+   isl_emit_depth_stencil_hiz_s(&brw->isl_dev, ds_map, &info);
 
-   BEGIN_BATCH(2);
-   OUT_BATCH(_3DSTATE_POLY_STIPPLE_OFFSET << 16 | (2-2));
+   brw->batch.map_next += ds_dwords;
+   intel_batchbuffer_advance(brw);
 
-   /* _NEW_BUFFERS
-    *
-    * If we're drawing to a system window we have to invert the Y axis
-    * in order to match the OpenGL pixel coordinate system, and our
-    * offset must be matched to the window position.  If we're drawing
-    * to a user-created FBO then our native pixel coordinate system
-    * works just fine, and there's no window system to worry about.
-    */
-   if (_mesa_is_winsys_fbo(ctx->DrawBuffer))
-      OUT_BATCH((32 - (ctx->DrawBuffer->Height & 31)) & 31);
-   else
-      OUT_BATCH(0);
-   ADVANCE_BATCH();
+   brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
 }
 
-const struct brw_tracked_state brw_polygon_stipple_offset = {
+const struct brw_tracked_state brw_depthbuffer = {
    .dirty = {
-      .mesa = _NEW_BUFFERS |
-              _NEW_POLYGON,
-      .brw = BRW_NEW_CONTEXT,
+      .mesa = _NEW_BUFFERS,
+      .brw = BRW_NEW_AUX_STATE |
+             BRW_NEW_BATCH |
+             BRW_NEW_BLORP,
    },
-   .emit = upload_polygon_stipple_offset
+   .emit = brw_emit_depthbuffer,
 };
 
-/**********************************************************************
- * AA Line parameters
- */
-static void upload_aa_line_parameters(struct brw_context *brw)
+void
+brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
 {
-   struct gl_context *ctx = &brw->ctx;
-
-   if (!ctx->Line.SmoothFlag)
-      return;
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;
+   const uint32_t _3DSTATE_PIPELINE_SELECT =
+      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;
 
-   /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
-   if (brw->gen == 4 && !brw->is_g4x)
-      return;
+   if (devinfo->gen >= 8 && devinfo->gen < 10) {
+      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
+       *
+       *   Software must clear the COLOR_CALC_STATE Valid field in
+       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
+       *   with Pipeline Select set to GPGPU.
+       *
+       * The internal hardware docs recommend the same workaround for Gen9
+       * hardware too.
+       */
+      if (pipeline == BRW_COMPUTE_PIPELINE) {
+         BEGIN_BATCH(2);
+         OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
+         OUT_BATCH(0);
+         ADVANCE_BATCH();
 
-   BEGIN_BATCH(3);
-   OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
-   /* use legacy aa line coverage computation */
-   OUT_BATCH(0);
-   OUT_BATCH(0);
-   ADVANCE_BATCH();
-}
+         brw->ctx.NewDriverState |= BRW_NEW_CC_STATE;
+      }
+   }
 
-const struct brw_tracked_state brw_aa_line_parameters = {
-   .dirty = {
-      .mesa = _NEW_LINE,
-      .brw = BRW_NEW_CONTEXT,
-   },
-   .emit = upload_aa_line_parameters
-};
+   if (devinfo->gen == 9 && pipeline == BRW_RENDER_PIPELINE) {
+      /* We seem to have issues with geometry flickering when 3D and compute
+       * are combined in the same batch and this appears to fix it.
+       */
+      const uint32_t subslices = MAX2(brw->screen->subslice_total, 1);
+      const uint32_t maxNumberofThreads =
+         devinfo->max_cs_threads * subslices - 1;
 
-/***********************************************************************
- * Line stipple packet
- */
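+      /* Dword layout sketch (MEDIA_VFE_STATE, per the gen9 docs): DW1-2 are
+       * the scratch space pointer, DW3 holds Maximum Number of Threads
+       * (31:16) and Number of URB Entries (15:8), and DW5 holds the URB
+       * Entry Allocation Size (31:16).
+       */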
+      BEGIN_BATCH(9);
+      OUT_BATCH(MEDIA_VFE_STATE << 16 | (9 - 2));
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      OUT_BATCH(2 << 8 | maxNumberofThreads << 16);
+      OUT_BATCH(0);
+      OUT_BATCH(2 << 16);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      ADVANCE_BATCH();
+   }
 
-static void upload_line_stipple(struct brw_context *brw)
-{
-   struct gl_context *ctx = &brw->ctx;
-   GLfloat tmp;
-   GLint tmpi;
+   if (devinfo->gen >= 6) {
+      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
+       * PIPELINE_SELECT [DevBWR+]":
+       *
+       *   Project: DEVSNB+
+       *
+       *   Software must ensure all the write caches are flushed through a
+       *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
+       *   command to invalidate read only caches prior to programming
+       *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
+       */
+      const unsigned dc_flush =
+         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;
 
-   if (!ctx->Line.StippleFlag)
-      return;
+      brw_emit_pipe_control_flush(brw,
+                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
+                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                  dc_flush |
+                                  PIPE_CONTROL_CS_STALL);
 
-   BEGIN_BATCH(3);
-   OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
-   OUT_BATCH(ctx->Line.StipplePattern);
+      brw_emit_pipe_control_flush(brw,
+                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
+                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE);
 
-   if (brw->gen >= 7) {
-      /* in U1.16 */
-      tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
-      tmpi = tmp * (1<<16);
-      OUT_BATCH(tmpi << 15 | ctx->Line.StippleFactor);
-   }
-   else {
-      /* in U1.13 */
-      tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
-      tmpi = tmp * (1<<13);
-      OUT_BATCH(tmpi << 16 | ctx->Line.StippleFactor);
+   } else {
+      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
+       * PIPELINE_SELECT [DevBWR+]":
+       *
+       *   Project: PRE-DEVSNB
+       *
+       *   Software must ensure the current pipeline is flushed via an
+       *   MI_FLUSH or PIPE_CONTROL prior to the execution of PIPELINE_SELECT.
+       */
+      BEGIN_BATCH(1);
+      OUT_BATCH(MI_FLUSH);
+      ADVANCE_BATCH();
    }
 
+   /* Select the pipeline */
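+   /* On gen9+ the pipeline field is write-masked: bits 9:8 (the 3 << 8
+    * below) must be set for the new value in bits 1:0 to take effect.
+    */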
+   BEGIN_BATCH(1);
+   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 |
+             (devinfo->gen >= 9 ? (3 << 8) : 0) |
+             (pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
    ADVANCE_BATCH();
+
+   if (devinfo->gen == 7 && !devinfo->is_haswell &&
+       pipeline == BRW_RENDER_PIPELINE) {
+      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
+       * PIPELINE_SELECT [DevBWR+]":
+       *
+       *   Project: DEVIVB, DEVHSW:GT3:A0
+       *
+       *   Software must send a pipe_control with a CS stall and a post sync
+       *   operation and then a dummy DRAW after every MI_SET_CONTEXT and
+       *   after any PIPELINE_SELECT that is enabling 3D mode.
+       */
+      gen7_emit_cs_stall_flush(brw);
+
+      BEGIN_BATCH(7);
+      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
+      OUT_BATCH(_3DPRIM_POINTLIST);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      ADVANCE_BATCH();
+   }
+
+   if (devinfo->is_geminilake) {
+      /* Project: DevGLK
+       *
+       * "This chicken bit works around a hardware issue with barrier logic
+       *  encountered when switching between GPGPU and 3D pipelines.  To
+       *  workaround the issue, this mode bit should be set after a pipeline
+       *  is selected."
+       */
+      const unsigned barrier_mode =
+         pipeline == BRW_RENDER_PIPELINE ? GLK_SCEC_BARRIER_MODE_3D_HULL
+                                         : GLK_SCEC_BARRIER_MODE_GPGPU;
+      brw_load_register_imm32(brw, SLICE_COMMON_ECO_CHICKEN1,
+                              barrier_mode | GLK_SCEC_BARRIER_MODE_MASK);
+   }
 }
 
-const struct brw_tracked_state brw_line_stipple = {
-   .dirty = {
-      .mesa = _NEW_LINE,
-      .brw = BRW_NEW_CONTEXT,
-   },
-   .emit = upload_line_stipple
-};
+/**
+ * Update the pixel hashing modes that determine the balancing of PS threads
+ * across subslices and slices.
+ *
+ * \param width Width bound of the rendering area (already scaled down if \p
+ *              scale is greater than 1).
+ * \param height Height bound of the rendering area (already scaled down if \p
+ *               scale is greater than 1).
+ * \param scale The number of framebuffer samples that could potentially be
+ *              affected by an individual channel of the PS thread.  This is
+ *              typically one for single-sampled rendering, but for operations
+ *              like CCS resolves and fast clears a single PS invocation may
+ *              update a huge number of pixels, in which case a finer
+ *              balancing is desirable in order to maximally utilize the
+ *              bandwidth available.  UINT_MAX can be used as shorthand for
+ *              "finest hashing mode available".
+ */
+void
+brw_emit_hashing_mode(struct brw_context *brw, unsigned width,
+                      unsigned height, unsigned scale)
+{
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
+   if (devinfo->gen == 9) {
+      const uint32_t slice_hashing[] = {
+         /* Because all Gen9 platforms with more than one slice require
+          * three-way subslice hashing, a single "normal" 16x16 slice hashing
+          * block is guaranteed to suffer from substantial imbalance, with one
+          * subslice receiving twice as much work as the other two in the
+          * slice.
+          *
+          * The performance impact of that would be particularly severe when
+          * three-way hashing is also in use for slice balancing (which is the
+          * case for all Gen9 GT4 platforms), because one of the slices
+          * receives one every three 16x16 blocks in either direction, which
+          * is roughly the periodicity of the underlying subslice imbalance
+          * pattern ("roughly" because in reality the hardware's
+          * implementation of three-way hashing doesn't do exact modulo 3
+          * arithmetic, which somewhat decreases the magnitude of this effect
+          * in practice).  This leads to a systematic subslice imbalance
+          * within that slice regardless of the size of the primitive.  The
+          * 32x32 hashing mode guarantees that the subslice imbalance within a
+          * single slice hashing block is minimal, largely eliminating this
+          * effect.
+          */
+         GEN9_SLICE_HASHING_32x32,
+         /* Finest slice hashing mode available. */
+         GEN9_SLICE_HASHING_NORMAL
+      };
+      const uint32_t subslice_hashing[] = {
+         /* The 16x16 subslice hashing mode is used on non-LLC platforms to
+          * match the performance of previous Mesa versions.  16x16 has a
+          * slight cache locality benefit especially visible in the sampler L1
+          * cache efficiency of low-bandwidth platforms, but it comes at the
+          * cost of greater subslice imbalance for primitives of dimensions
+          * approximately intermediate between 16x4 and 16x16.
+          */
+         (devinfo->has_llc ? GEN9_SUBSLICE_HASHING_16x4 :
+                             GEN9_SUBSLICE_HASHING_16x16),
+         /* Finest subslice hashing mode available. */
+         GEN9_SUBSLICE_HASHING_8x4
+      };
+      /* Dimensions of the smallest hashing block of a given hashing mode.  If
+       * the rendering area is smaller than this there can't possibly be any
+       * benefit from switching to this mode, so we optimize out the
+       * transition.
+       */
+      const unsigned min_size[][2] = {
+         { 16, 4 },
+         { 8, 4 }
+      };
+      const unsigned idx = scale > 1;
 
+      if (width > min_size[idx][0] || height > min_size[idx][1]) {
+         const uint32_t gt_mode =
+            (devinfo->num_slices == 1 ? 0 :
+             GEN9_SLICE_HASHING_MASK_BITS | slice_hashing[idx]) |
+            GEN9_SUBSLICE_HASHING_MASK_BITS | subslice_hashing[idx];
 
-/***********************************************************************
+         brw_emit_pipe_control_flush(brw,
+                                     PIPE_CONTROL_STALL_AT_SCOREBOARD |
+                                     PIPE_CONTROL_CS_STALL);
+
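+         /* GT_MODE is a masked register: the *_MASK_BITS halves of gt_mode
+          * select which hashing fields the write below actually updates.
+          */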
+         brw_load_register_imm32(brw, GEN7_GT_MODE, gt_mode);
+
+         brw->current_hash_scale = scale;
+      }
+   }
+}
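+
+/* Usage sketch (hypothetical caller): a fast clear or CCS resolve, where one
+ * PS invocation touches many samples, would request the finest mode with
+ *
+ *    brw_emit_hashing_mode(brw, UINT_MAX, UINT_MAX, UINT_MAX);
+ *
+ * while ordinary draws restore the default with scale = 1.
+ */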
+
+/**
  * Misc invariant state packets
  */
-
 void
 brw_upload_invariant_state(struct brw_context *brw)
 {
-   const bool is_965 = brw->gen == 4 && !brw->is_g4x;
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;
 
-   /* Select the 3D pipeline (as opposed to media) */
-   const uint32_t _3DSTATE_PIPELINE_SELECT =
-      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;
-   BEGIN_BATCH(1);
-   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 | (brw->gen >= 9 ? (3 << 8) : 0));
-   ADVANCE_BATCH();
+   brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
+   brw->last_pipeline = BRW_RENDER_PIPELINE;
 
-   if (brw->gen < 6) {
-      /* Disable depth offset clamping. */
-      BEGIN_BATCH(2);
-      OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
-      OUT_BATCH_F(0.0);
-      ADVANCE_BATCH();
-   }
-
-   if (brw->gen >= 8) {
+   if (devinfo->gen >= 8) {
       BEGIN_BATCH(3);
       OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
       OUT_BATCH(0);
@@ -891,21 +716,17 @@ brw_upload_invariant_state(struct brw_context *brw)
       ADVANCE_BATCH();
    }
 
-   const uint32_t _3DSTATE_VF_STATISTICS =
-      is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
-   BEGIN_BATCH(1);
-   OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 | 1);
-   ADVANCE_BATCH();
+   /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
+   if (!is_965) {
+      BEGIN_BATCH(3);
+      OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
+      /* use legacy aa line coverage computation */
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      ADVANCE_BATCH();
+   }
 }
 
-const struct brw_tracked_state brw_invariant_state = {
-   .dirty = {
-      .mesa = 0,
-      .brw = BRW_NEW_CONTEXT,
-   },
-   .emit = brw_upload_invariant_state
-};
-
 /**
  * Define the base addresses which some state is referenced from.
  *
@@ -916,8 +737,14 @@ const struct brw_tracked_state brw_invariant_state = {
  * surface state objects, but not the surfaces that the surface state
  * objects point to.
  */
-static void upload_state_base_address( struct brw_context *brw )
+void
+brw_upload_state_base_address(struct brw_context *brw)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
+   if (brw->batch.state_base_address_emitted)
+      return;
+
    /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
     * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
     * programmed prior to STATE_BASE_ADDRESS.
@@ -927,8 +754,83 @@ static void upload_state_base_address( struct brw_context *brw )
     * maybe this isn't required for us in particular.
     */
 
-   if (brw->gen >= 6) {
-      uint8_t mocs = brw->gen == 7 ? GEN7_MOCS_L3 : 0;
+   if (devinfo->gen >= 6) {
+      const unsigned dc_flush =
+         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;
+
+      /* Emit a render target cache flush.
+       *
+       * This isn't documented anywhere in the PRM.  However, it seems to be
+       * necessary prior to changing the surface state base address.  We've
+       * seen issues in Vulkan where we get GPU hangs when using multi-level
+       * command buffers which clear depth, reset state base address, and then
+       * go render stuff.
+       *
+       * Normally, in GL, we would trust the kernel to do sufficient stalls
+       * and flushes prior to executing our batch.  However, it doesn't seem
+       * as if the kernel's flushing is always sufficient and we don't want to
+       * rely on it.
+       *
+       * We make this an end-of-pipe sync instead of a normal flush because we
+       * do not know the current status of the GPU.  On Haswell at least,
+       * having a fast-clear operation in flight at the same time as a normal
+       * rendering operation can cause hangs.  Since the kernel's flushing is
+       * insufficient, we need to ensure that any rendering operations from
+       * other processes are definitely complete before we try to do our own
+       * rendering.  It's a bit of a big hammer but it appears to work.
+       */
+      brw_emit_end_of_pipe_sync(brw,
+                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
+                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                dc_flush);
+   }
+
+   if (devinfo->gen >= 8) {
+      /* STATE_BASE_ADDRESS has issues with 48-bit address spaces.  If the
+       * address + size as seen by STATE_BASE_ADDRESS overflows 48 bits,
+       * the GPU appears to treat all accesses to the buffer as being out
+       * of bounds and returns zero.  To work around this, we pin all SBAs
+       * to the bottom 4GB.
+       */
+      uint32_t mocs_wb = devinfo->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
+      int pkt_len = devinfo->gen >= 10 ? 22 : (devinfo->gen >= 9 ? 19 : 16);
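+      /* gen9 grows the packet by 3 dwords for the bindless surface state
+       * base and size, and gen10 by 3 more for the bindless sampler state
+       * base; both are emitted below with only the modify-enable bit set.
+       */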
+
+      BEGIN_BATCH(pkt_len);
+      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
+      /* General state base address: stateless DP read/write requests */
+      OUT_BATCH(mocs_wb << 4 | 1);
+      OUT_BATCH(0);
+      OUT_BATCH(mocs_wb << 16);
+      /* Surface state base address: */
+      OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
+      /* Dynamic state base address: */
+      OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
+      /* Indirect object base address: MEDIA_OBJECT data */
+      OUT_BATCH(mocs_wb << 4 | 1);
+      OUT_BATCH(0);
+      /* Instruction base address: shader kernels (incl. SIP) */
+      OUT_RELOC64(brw->cache.bo, RELOC_32BIT, mocs_wb << 4 | 1);
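+      /* The buffer-size dwords below are in units of 4 KB pages with bit 0
+       * as the "modify enable" bit; 0xfffff001 programs the maximum ~4 GB
+       * bound.
+       */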
+      /* General state buffer size */
+      OUT_BATCH(0xfffff001);
+      /* Dynamic state buffer size */
+      OUT_BATCH(ALIGN(MAX_STATE_SIZE, 4096) | 1);
+      /* Indirect object upper bound */
+      OUT_BATCH(0xfffff001);
+      /* Instruction access upper bound */
+      OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
+      if (devinfo->gen >= 9) {
+         OUT_BATCH(1);
+         OUT_BATCH(0);
+         OUT_BATCH(0);
+      }
+      if (devinfo->gen >= 10) {
+         OUT_BATCH(1);
+         OUT_BATCH(0);
+         OUT_BATCH(0);
+      }
+      ADVANCE_BATCH();
+   } else if (devinfo->gen >= 6) {
+      uint8_t mocs = devinfo->gen == 7 ? GEN7_MOCS_L3 : 0;
 
        BEGIN_BATCH(10);
        OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
@@ -939,7 +841,7 @@ static void upload_state_base_address( struct brw_context *brw )
        * BINDING_TABLE_STATE
        * SURFACE_STATE
        */
-       OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
+       OUT_RELOC(brw->batch.state.bo, 0, 1);
         /* Dynamic state base address:
         * SAMPLER_STATE
         * SAMPLER_BORDER_COLOR_STATE
@@ -950,12 +852,12 @@ static void upload_state_base_address( struct brw_context *brw )
         * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
         * Disable is clear, which we rely on)
         */
-       OUT_RELOC(brw->batch.bo, (I915_GEM_DOMAIN_RENDER |
-                                  I915_GEM_DOMAIN_INSTRUCTION), 0, 1);
+       OUT_RELOC(brw->batch.state.bo, 0, 1);
 
        OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
-       OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-                1); /* Instruction base address: shader kernels (incl. SIP) */
+
+       /* Instruction base address: shader kernels (incl. SIP) */
+       OUT_RELOC(brw->cache.bo, 0, 1);
 
        OUT_BATCH(1); /* General state upper bound */
        /* Dynamic state upper bound.  Although the documentation says that
@@ -967,15 +869,13 @@ static void upload_state_base_address( struct brw_context *brw )
        OUT_BATCH(1); /* Indirect object upper bound */
        OUT_BATCH(1); /* Instruction access upper bound */
        ADVANCE_BATCH();
-   } else if (brw->gen == 5) {
+   } else if (devinfo->gen == 5) {
        BEGIN_BATCH(8);
        OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
        OUT_BATCH(1); /* General state base address */
-       OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
-                1); /* Surface state base address */
+       OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
        OUT_BATCH(1); /* Indirect object base address */
-       OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-                1); /* Instruction base address */
+       OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
        OUT_BATCH(0xfffff001); /* General state upper bound */
        OUT_BATCH(1); /* Indirect object upper bound */
        OUT_BATCH(1); /* Instruction access upper bound */
@@ -984,14 +884,20 @@ static void upload_state_base_address( struct brw_context *brw )
        BEGIN_BATCH(6);
        OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
        OUT_BATCH(1); /* General state base address */
-       OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
-                1); /* Surface state base address */
+       OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
        OUT_BATCH(1); /* Indirect object base address */
        OUT_BATCH(1); /* General state upper bound */
        OUT_BATCH(1); /* Indirect object upper bound */
        ADVANCE_BATCH();
    }
 
+   if (devinfo->gen >= 6) {
+      brw_emit_pipe_control_flush(brw,
+                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
+                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+   }
+
    /* According to section 3.6.1 of VOL1 of the 965 PRM,
     * STATE_BASE_ADDRESS updates require a reissue of:
     *
@@ -1015,13 +921,5 @@ static void upload_state_base_address( struct brw_context *brw )
     */
 
    brw->ctx.NewDriverState |= BRW_NEW_STATE_BASE_ADDRESS;
+   brw->batch.state_base_address_emitted = true;
 }
-
-const struct brw_tracked_state brw_state_base_address = {
-   .dirty = {
-      .mesa = 0,
-      .brw = BRW_NEW_BATCH |
-             BRW_NEW_PROGRAM_CACHE,
-   },
-   .emit = upload_state_base_address
-};