i965/state: Don't use brw->state.dirty.brw
[mesa.git] / src / mesa / drivers / dri / i965 / intel_batchbuffer.c
index 646b1f7571793fcdd38c75daa0e55e8b0ccf118b..e522e4e9c1de942086f48d753c72d57c50f4616b 100644 (file)
@@ -1,8 +1,8 @@
 /**************************************************************************
- * 
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ *
+ * Copyright 2006 VMware, Inc.
  * All Rights Reserved.
- * 
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
- * 
+ *
  * The above copyright notice and this permission notice (including the
  * next paragraph) shall be included in all copies or substantial portions
  * of the Software.
- * 
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- * 
+ *
  **************************************************************************/
 
 #include "intel_batchbuffer.h"
 #include "intel_reg.h"
 #include "intel_bufmgr.h"
 #include "intel_buffers.h"
+#include "intel_fbo.h"
 #include "brw_context.h"
 
-static void
-intel_batchbuffer_reset(struct brw_context *brw);
-
-struct cached_batch_item {
-   struct cached_batch_item *next;
-   uint16_t header;
-   uint16_t size;
-};
+#include <xf86drm.h>
+#include <i915_drm.h>
 
 static void
-clear_cache(struct brw_context *brw)
-{
-   struct intel_context *intel = &brw->intel;
-   struct cached_batch_item *item = intel->batch.cached_items;
-
-   while (item) {
-      struct cached_batch_item *next = item->next;
-      free(item);
-      item = next;
-   }
-
-   intel->batch.cached_items = NULL;
-}
+intel_batchbuffer_reset(struct brw_context *brw);
 
 void
 intel_batchbuffer_init(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
    intel_batchbuffer_reset(brw);
 
-   if (intel->gen >= 6) {
+   if (brw->gen >= 6) {
       /* We can't just use brw_state_batch to get a chunk of space for
        * the gen6 workaround because it involves actually writing to
        * the buffer, and the kernel doesn't let us write to the batch.
        */
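+      /* (This BO simply provides a scratch page for the workaround
+       * PIPE_CONTROLs' post-sync writes to target.)
+       */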
-      intel->batch.workaround_bo = drm_intel_bo_alloc(intel->bufmgr,
+      brw->batch.workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
                                                      "pipe_control workaround",
                                                      4096, 4096);
    }
 
-   if (!intel->has_llc) {
-      intel->batch.cpu_map = malloc(BATCH_SZ);
-      intel->batch.map = intel->batch.cpu_map;
+   if (!brw->has_llc) {
+      brw->batch.cpu_map = malloc(BATCH_SZ);
+      brw->batch.map = brw->batch.cpu_map;
    }
 }
 
 static void
 intel_batchbuffer_reset(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-   if (intel->batch.last_bo != NULL) {
-      drm_intel_bo_unreference(intel->batch.last_bo);
-      intel->batch.last_bo = NULL;
+   if (brw->batch.last_bo != NULL) {
+      drm_intel_bo_unreference(brw->batch.last_bo);
+      brw->batch.last_bo = NULL;
    }
-   intel->batch.last_bo = intel->batch.bo;
+   brw->batch.last_bo = brw->batch.bo;
 
-   clear_cache(brw);
+   brw_render_cache_set_clear(brw);
 
-   intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
+   brw->batch.bo = drm_intel_bo_alloc(brw->bufmgr, "batchbuffer",
                                        BATCH_SZ, 4096);
-   if (intel->has_llc) {
-      drm_intel_bo_map(intel->batch.bo, true);
-      intel->batch.map = intel->batch.bo->virtual;
+   if (brw->has_llc) {
+      drm_intel_bo_map(brw->batch.bo, true);
+      brw->batch.map = brw->batch.bo->virtual;
    }
 
-   intel->batch.reserved_space = BATCH_RESERVED;
-   intel->batch.state_batch_offset = intel->batch.bo->size;
-   intel->batch.used = 0;
-   intel->batch.needs_sol_reset = false;
+   brw->batch.reserved_space = BATCH_RESERVED;
+   brw->batch.state_batch_offset = brw->batch.bo->size;
+   brw->batch.used = 0;
+   brw->batch.needs_sol_reset = false;
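+   /* Reset the counter for the WaCsStallAtEveryFourthPipecontrol workaround
+    * (see gen7_cs_stall_every_four_pipe_controls below).
+    */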
+   brw->batch.pipe_controls_since_last_cs_stall = 0;
+
+   /* We don't know what ring the new batch will be sent to until we see the
+    * first BEGIN_BATCH or BEGIN_BATCH_BLT.  Mark it as unknown.
+    */
+   brw->batch.ring = UNKNOWN_RING;
 }
 
 void
 intel_batchbuffer_save_state(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-   intel->batch.saved.used = intel->batch.used;
-   intel->batch.saved.reloc_count =
-      drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
+   brw->batch.saved.used = brw->batch.used;
+   brw->batch.saved.reloc_count =
+      drm_intel_gem_bo_get_reloc_count(brw->batch.bo);
 }
 
 void
 intel_batchbuffer_reset_to_saved(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-   drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
-
-   intel->batch.used = intel->batch.saved.used;
+   drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
 
-   /* Cached batch state is dead, since we just cleared some unknown part of the
-    * batchbuffer.  Assume that the caller resets any other state necessary.
-    */
-   clear_cache(brw);
+   brw->batch.used = brw->batch.saved.used;
+   if (brw->batch.used == 0)
+      brw->batch.ring = UNKNOWN_RING;
 }
 
 void
 intel_batchbuffer_free(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-   free(intel->batch.cpu_map);
-   drm_intel_bo_unreference(intel->batch.last_bo);
-   drm_intel_bo_unreference(intel->batch.bo);
-   drm_intel_bo_unreference(intel->batch.workaround_bo);
-   clear_cache(brw);
+   free(brw->batch.cpu_map);
+   drm_intel_bo_unreference(brw->batch.last_bo);
+   drm_intel_bo_unreference(brw->batch.bo);
+   drm_intel_bo_unreference(brw->batch.workaround_bo);
 }
 
 static void
 do_batch_dump(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
    struct drm_intel_decode *decode;
-   struct intel_batchbuffer *batch = &intel->batch;
+   struct intel_batchbuffer *batch = &brw->batch;
    int ret;
 
-   decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
+   decode = drm_intel_decode_context_alloc(brw->intelScreen->deviceID);
    if (!decode)
       return;
 
@@ -153,7 +132,7 @@ do_batch_dump(struct brw_context *brw)
    if (ret == 0) {
       drm_intel_decode_set_batch_pointer(decode,
                                         batch->bo->virtual,
-                                        batch->bo->offset,
+                                        batch->bo->offset64,
                                         batch->used);
    } else {
       fprintf(stderr,
@@ -162,10 +141,11 @@ do_batch_dump(struct brw_context *brw)
 
       drm_intel_decode_set_batch_pointer(decode,
                                         batch->map,
-                                        batch->bo->offset,
+                                        batch->bo->offset64,
                                         batch->used);
    }
 
+   drm_intel_decode_set_output_file(decode, stderr);
    drm_intel_decode(decode);
 
    drm_intel_decode_context_free(decode);
@@ -177,16 +157,125 @@ do_batch_dump(struct brw_context *brw)
    }
 }
 
+void
+intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw)
+{
+   /* We may need to enable and snapshot OA counters. */
+   brw_perf_monitor_new_batch(brw);
+}
+
+/**
+ * Called when starting a new batch buffer.
+ */
+static void
+brw_new_batch(struct brw_context *brw)
+{
+   /* Create a new batchbuffer and reset the associated state: */
+   drm_intel_gem_bo_clear_relocs(brw->batch.bo, 0);
+   intel_batchbuffer_reset(brw);
+
+   /* If the kernel supports hardware contexts, then most hardware state is
+    * preserved between batches; we only need to re-emit state that is required
+    * to be in every batch.  Otherwise we need to re-emit all the state that
+    * would otherwise be stored in the context (which for all intents and
+    * purposes means everything).
+    */
+   if (brw->hw_ctx == NULL)
+      brw->ctx.NewDriverState |= BRW_NEW_CONTEXT;
+
+   brw->ctx.NewDriverState |= BRW_NEW_BATCH;
+
+   brw->state_batch_count = 0;
+
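+   /* Invalidate the cached index buffer type so that the index buffer
+    * state is re-emitted in the new batch.
+    */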
+   brw->ib.type = -1;
+
+   /* We need to periodically reap the shader time results, because rollover
+    * happens every few seconds.  We also want to see results every once in a
+    * while, because many programs won't cleanly destroy our context, so the
+    * end-of-run printout may not happen.
+    */
+   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+      brw_collect_and_report_shader_time(brw);
+
+   if (INTEL_DEBUG & DEBUG_PERFMON)
+      brw_dump_perf_monitors(brw);
+}
+
+/**
+ * Called from intel_batchbuffer_flush before emitting MI_BATCHBUFFER_END and
+ * sending it off.
+ *
+ * This function can emit state (say, to preserve registers that aren't saved
+ * between batches).  All of this state MUST fit in the reserved space at the
+ * end of the batchbuffer.  If you add more GPU state, increase the reserved
+ * space by updating the BATCH_RESERVED macro.
+ */
+static void
+brw_finish_batch(struct brw_context *brw)
+{
+   /* Capture the closing pipeline statistics register values necessary to
+    * support query objects (in the non-hardware context world).
+    */
+   brw_emit_query_end(brw);
+
+   /* We may also need to snapshot and disable OA counters. */
+   if (brw->batch.ring == RENDER_RING)
+      brw_perf_monitor_finish_batch(brw);
+
+   /* Mark that the current program cache BO has been used by the GPU.
+    * It will be reallocated if we need to put new programs in for the
+    * next batch.
+    */
+   brw->cache.bo_used_by_gpu = true;
+}
+
+static void
+throttle(struct brw_context *brw)
+{
+   /* Wait for the swapbuffers before the one we just emitted, so we
+    * don't get too many swaps outstanding for apps that are GPU-heavy
+    * but not CPU-heavy.
+    *
+    * We're using intelDRI2Flush (called from the loader before
+    * swapbuffer) and glFlush (for front buffer rendering) as the
+    * indicator that a frame is done and then throttle when we get
+    * here as we prepare to render the next frame.  At this point, the
+    * round trips for swap/copy and getting new buffers are done and
+    * we'll spend less time waiting on the GPU.
+    *
+    * Unfortunately, we don't have a handle to the batch containing
+    * the swap, and getting our hands on that doesn't seem worth it,
+    * so we just use the first batch we emitted after the last swap.
+    */
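+   /* throttle_batch[0] is the first batch flushed after the most recent
+    * swap; throttle_batch[1] is its counterpart from the frame before.
+    * Waiting on [1] keeps at most two frames in flight.
+    */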
+   if (brw->need_swap_throttle && brw->throttle_batch[0]) {
+      if (brw->throttle_batch[1]) {
+         if (!brw->disable_throttling)
+            drm_intel_bo_wait_rendering(brw->throttle_batch[1]);
+         drm_intel_bo_unreference(brw->throttle_batch[1]);
+      }
+      brw->throttle_batch[1] = brw->throttle_batch[0];
+      brw->throttle_batch[0] = NULL;
+      brw->need_swap_throttle = false;
+      /* Throttling here is more precise than the throttle ioctl, so skip it */
+      brw->need_flush_throttle = false;
+   }
+
+   if (brw->need_flush_throttle) {
+      __DRIscreen *psp = brw->intelScreen->driScrnPriv;
+      drmCommandNone(psp->fd, DRM_I915_GEM_THROTTLE);
+      brw->need_flush_throttle = false;
+   }
+}
+
 /* TODO: Push this whole function into bufmgr.
  */
 static int
 do_flush_locked(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-   struct intel_batchbuffer *batch = &intel->batch;
+   struct intel_batchbuffer *batch = &brw->batch;
    int ret = 0;
 
-   if (intel->has_llc) {
+   if (brw->has_llc) {
       drm_intel_bo_unmap(batch->bo);
    } else {
       ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
@@ -198,29 +287,31 @@ do_flush_locked(struct brw_context *brw)
       }
    }
 
-   if (!intel->intelScreen->no_hw) {
+   if (!brw->intelScreen->no_hw) {
       int flags;
 
-      if (intel->gen < 6 || !batch->is_blit) {
-        flags = I915_EXEC_RENDER;
+      if (brw->gen >= 6 && batch->ring == BLT_RING) {
+         flags = I915_EXEC_BLT;
       } else {
-        flags = I915_EXEC_BLT;
+         flags = I915_EXEC_RENDER;
       }
-
       if (batch->needs_sol_reset)
         flags |= I915_EXEC_GEN7_SOL_RESET;
 
       if (ret == 0) {
          if (unlikely(INTEL_DEBUG & DEBUG_AUB))
             brw_annotate_aub(brw);
-        if (intel->hw_ctx == NULL || batch->is_blit) {
+
+        if (brw->hw_ctx == NULL || batch->ring != RENDER_RING) {
            ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
                                        flags);
         } else {
-           ret = drm_intel_gem_bo_context_exec(batch->bo, intel->hw_ctx,
+           ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
                                                4 * batch->used, flags);
         }
       }
+
+      throttle(brw);
    }
 
    if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
@@ -230,7 +321,6 @@ do_flush_locked(struct brw_context *brw)
       fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
       exit(1);
    }
-   intel->vtbl.new_batch(brw);
 
    return ret;
 }
@@ -239,29 +329,34 @@ int
 _intel_batchbuffer_flush(struct brw_context *brw,
                         const char *file, int line)
 {
-   struct intel_context *intel = &brw->intel;
    int ret;
 
-   if (intel->batch.used == 0)
+   if (brw->batch.used == 0)
       return 0;
 
-   if (intel->first_post_swapbuffers_batch == NULL) {
-      intel->first_post_swapbuffers_batch = intel->batch.bo;
-      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
+   if (brw->throttle_batch[0] == NULL) {
+      brw->throttle_batch[0] = brw->batch.bo;
+      drm_intel_bo_reference(brw->throttle_batch[0]);
    }
 
-   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
-      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
-             4*intel->batch.used);
+   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
+      int bytes_for_commands = 4 * brw->batch.used;
+      int bytes_for_state = brw->batch.bo->size - brw->batch.state_batch_offset;
+      int total_bytes = bytes_for_commands + bytes_for_state;
+      fprintf(stderr, "%s:%d: Batchbuffer flush with %4db (pkt) + "
+              "%4db (state) = %4db (%0.1f%%)\n", file, line,
+              bytes_for_commands, bytes_for_state,
+              total_bytes,
+              100.0f * total_bytes / BATCH_SZ);
+   }
 
-   intel->batch.reserved_space = 0;
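+   /* Release the space held back by BATCH_RESERVED so brw_finish_batch()
+    * and the MI_BATCH_BUFFER_END below can use it.
+    */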
+   brw->batch.reserved_space = 0;
 
-   if (intel->vtbl.finish_batch)
-      intel->vtbl.finish_batch(brw);
+   brw_finish_batch(brw);
 
    /* Mark the end of the buffer. */
    intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
-   if (intel->batch.used & 1) {
+   if (brw->batch.used & 1) {
       /* Round batchbuffer usage to 2 DWORDs. */
       intel_batchbuffer_emit_dword(brw, MI_NOOP);
    }
@@ -269,18 +364,17 @@ _intel_batchbuffer_flush(struct brw_context *brw,
    intel_upload_finish(brw);
 
    /* Check that we didn't just wrap our batchbuffer at a bad time. */
-   assert(!intel->no_batch_wrap);
+   assert(!brw->no_batch_wrap);
 
    ret = do_flush_locked(brw);
 
    if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
       fprintf(stderr, "waiting for idle\n");
-      drm_intel_bo_wait_rendering(intel->batch.bo);
+      drm_intel_bo_wait_rendering(brw->batch.bo);
    }
 
-   /* Reset the buffer:
-    */
-   intel_batchbuffer_reset(brw);
+   /* Start a new batch buffer. */
+   brw_new_batch(brw);
 
    return ret;
 }
@@ -294,102 +388,205 @@ intel_batchbuffer_emit_reloc(struct brw_context *brw,
                              uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
 {
-   struct intel_context *intel = &brw->intel;
    int ret;
 
-   ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
+   ret = drm_intel_bo_emit_reloc(brw->batch.bo, 4*brw->batch.used,
                                 buffer, delta,
                                 read_domains, write_domain);
    assert(ret == 0);
    (void)ret;
 
-   /*
-    * Using the old buffer offset, write in what the right data would be, in case
-    * the buffer doesn't move and we can short-circuit the relocation processing
-    * in the kernel
+   /* Using the old buffer offset, write in what the right data would be, in
+    * case the buffer doesn't move and we can short-circuit the relocation
+    * processing in the kernel.
     */
-   intel_batchbuffer_emit_dword(brw, buffer->offset + delta);
+   intel_batchbuffer_emit_dword(brw, buffer->offset64 + delta);
 
    return true;
 }
 
 bool
-intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
-                                   drm_intel_bo *buffer,
-                                   uint32_t read_domains,
-                                   uint32_t write_domain,
-                                   uint32_t delta)
+intel_batchbuffer_emit_reloc64(struct brw_context *brw,
+                               drm_intel_bo *buffer,
+                               uint32_t read_domains, uint32_t write_domain,
+                              uint32_t delta)
 {
-   struct intel_context *intel = &brw->intel;
-   int ret;
-
-   ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
-                                      buffer, delta,
-                                      read_domains, write_domain);
+   int ret = drm_intel_bo_emit_reloc(brw->batch.bo, 4*brw->batch.used,
+                                     buffer, delta,
+                                     read_domains, write_domain);
    assert(ret == 0);
-   (void)ret;
+   (void) ret;
 
-   /*
-    * Using the old buffer offset, write in what the right data would
-    * be, in case the buffer doesn't move and we can short-circuit the
-    * relocation processing in the kernel
+   /* Using the old buffer offset, write in what the right data would be, in
+    * case the buffer doesn't move and we can short-circuit the relocation
+    * processing in the kernel.
     */
-   intel_batchbuffer_emit_dword(brw, buffer->offset + delta);
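+   /* Emit the presumed 64-bit address as two DWORDs, low half first. */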
+   uint64_t offset = buffer->offset64 + delta;
+   intel_batchbuffer_emit_dword(brw, offset);
+   intel_batchbuffer_emit_dword(brw, offset >> 32);
 
    return true;
 }
 
+
 void
 intel_batchbuffer_data(struct brw_context *brw,
-                       const void *data, GLuint bytes, bool is_blit)
+                       const void *data, GLuint bytes, enum brw_gpu_ring ring)
 {
-   struct intel_context *intel = &brw->intel;
    assert((bytes & 3) == 0);
-   intel_batchbuffer_require_space(brw, bytes, is_blit);
-   __memcpy(intel->batch.map + intel->batch.used, data, bytes);
-   intel->batch.used += bytes >> 2;
+   intel_batchbuffer_require_space(brw, bytes, ring);
+   memcpy(brw->batch.map + brw->batch.used, data, bytes);
+   brw->batch.used += bytes >> 2;
 }
 
-void
-intel_batchbuffer_cached_advance(struct brw_context *brw)
+/**
+ * According to the latest documentation, any PIPE_CONTROL with the
+ * "Command Streamer Stall" bit set must also have another bit set,
+ * with five different options:
+ *
+ *  - Render Target Cache Flush
+ *  - Depth Cache Flush
+ *  - Stall at Pixel Scoreboard
+ *  - Post-Sync Operation
+ *  - Depth Stall
+ *
+ * I chose "Stall at Pixel Scoreboard" since we've used it effectively
+ * in the past, but the choice is fairly arbitrary.
+ */
+static void
+gen8_add_cs_stall_workaround_bits(uint32_t *flags)
 {
-   struct intel_context *intel = &brw->intel;
-   struct cached_batch_item **prev = &intel->batch.cached_items, *item;
-   uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
-   uint32_t *start = intel->batch.map + intel->batch.emit;
-   uint16_t op = *start >> 16;
-
-   while (*prev) {
-      uint32_t *old;
-
-      item = *prev;
-      old = intel->batch.map + item->header;
-      if (op == *old >> 16) {
-        if (item->size == sz && memcmp(old, start, sz) == 0) {
-           if (prev != &intel->batch.cached_items) {
-              *prev = item->next;
-              item->next = intel->batch.cached_items;
-              intel->batch.cached_items = item;
-           }
-           intel->batch.used = intel->batch.emit;
-           return;
-        }
+   uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
+                      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                      PIPE_CONTROL_WRITE_IMMEDIATE |
+                      PIPE_CONTROL_WRITE_DEPTH_COUNT |
+                      PIPE_CONTROL_WRITE_TIMESTAMP |
+                      PIPE_CONTROL_STALL_AT_SCOREBOARD |
+                      PIPE_CONTROL_DEPTH_STALL;
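+   /* (The three WRITE_* bits together stand in for the "Post-Sync
+    * Operation" option in the list above.)
+    */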
+
+   /* If we're doing a CS stall, and don't already have one of the
+    * workaround bits set, add "Stall at Pixel Scoreboard."
+    */
+   if ((*flags & PIPE_CONTROL_CS_STALL) != 0 && (*flags & wa_bits) == 0)
+      *flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+}
+
+/* Implement the WaCsStallAtEveryFourthPipecontrol workaround on IVB, BYT:
+ *
+ * "Every 4th PIPE_CONTROL command, not counting the PIPE_CONTROL with
+ *  only read-cache-invalidate bit(s) set, must have a CS_STALL bit set."
+ *
+ * Note that the kernel does CS stalls between batches, so we only need
+ * to count them within a batch.
+ */
+static uint32_t
+gen7_cs_stall_every_four_pipe_controls(struct brw_context *brw, uint32_t flags)
+{
+   if (brw->gen == 7 && !brw->is_haswell) {
+      if (flags & PIPE_CONTROL_CS_STALL) {
+         /* If we're doing a CS stall, reset the counter and carry on. */
+         brw->batch.pipe_controls_since_last_cs_stall = 0;
+         return 0;
+      }
 
-        goto emit;
+      /* If this is the fourth pipe control without a CS stall, do one now. */
+      if (++brw->batch.pipe_controls_since_last_cs_stall == 4) {
+         brw->batch.pipe_controls_since_last_cs_stall = 0;
+         return PIPE_CONTROL_CS_STALL;
       }
-      prev = &item->next;
    }
+   return 0;
+}
 
-   item = malloc(sizeof(struct cached_batch_item));
-   if (item == NULL)
-      return;
+/**
+ * Emit a PIPE_CONTROL with various flushing flags.
+ *
+ * The caller is responsible for deciding what flags are appropriate for the
+ * given generation.
+ */
+void
+brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags)
+{
+   if (brw->gen >= 8) {
+      gen8_add_cs_stall_workaround_bits(&flags);
+
+      BEGIN_BATCH(6);
+      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (6 - 2));
+      OUT_BATCH(flags);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      ADVANCE_BATCH();
+   } else if (brw->gen >= 6) {
+      flags |= gen7_cs_stall_every_four_pipe_controls(brw, flags);
+
+      BEGIN_BATCH(5);
+      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
+      OUT_BATCH(flags);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      ADVANCE_BATCH();
+   } else {
+      BEGIN_BATCH(4);
+      OUT_BATCH(_3DSTATE_PIPE_CONTROL | flags | (4 - 2));
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      ADVANCE_BATCH();
+   }
+}
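+
+/* Example (hypothetical caller): a depth-cache flush that also stalls the
+ * command streamer would be requested as
+ *
+ *    brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ *                                     PIPE_CONTROL_CS_STALL);
+ *
+ * with the generation-specific workaround bits folded in automatically.
+ */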
 
-   item->next = intel->batch.cached_items;
-   intel->batch.cached_items = item;
+/**
+ * Emit a PIPE_CONTROL that writes to a buffer object.
+ *
+ * \p flags should contain one of the following items:
+ *  - PIPE_CONTROL_WRITE_IMMEDIATE
+ *  - PIPE_CONTROL_WRITE_TIMESTAMP
+ *  - PIPE_CONTROL_WRITE_DEPTH_COUNT
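+ *
+ * For PIPE_CONTROL_WRITE_IMMEDIATE, imm_lower/imm_upper supply the 64-bit
+ * value to store; for the timestamp and depth-count operations the hardware
+ * generates the data and the immediate arguments are ignored.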
+ */
+void
+brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
+                            drm_intel_bo *bo, uint32_t offset,
+                            uint32_t imm_lower, uint32_t imm_upper)
+{
+   if (brw->gen >= 8) {
+      gen8_add_cs_stall_workaround_bits(&flags);
+
+      BEGIN_BATCH(6);
+      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (6 - 2));
+      OUT_BATCH(flags);
+      OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                  offset);
+      OUT_BATCH(imm_lower);
+      OUT_BATCH(imm_upper);
+      ADVANCE_BATCH();
+   } else if (brw->gen >= 6) {
+      flags |= gen7_cs_stall_every_four_pipe_controls(brw, flags);
 
-emit:
-   item->size = sz;
-   item->header = intel->batch.emit;
+      /* PPGTT/GGTT is selected by DW2 bit 2 on Sandybridge, but DW1 bit 24
+       * on later platforms.  We always use PPGTT on Gen7+.
+       */
+      unsigned gen6_gtt = brw->gen == 6 ? PIPE_CONTROL_GLOBAL_GTT_WRITE : 0;
+
+      BEGIN_BATCH(5);
+      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
+      OUT_BATCH(flags);
+      OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                gen6_gtt | offset);
+      OUT_BATCH(imm_lower);
+      OUT_BATCH(imm_upper);
+      ADVANCE_BATCH();
+   } else {
+      BEGIN_BATCH(4);
+      OUT_BATCH(_3DSTATE_PIPE_CONTROL | flags | (4 - 2));
+      OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+                PIPE_CONTROL_GLOBAL_GTT_WRITE | offset);
+      OUT_BATCH(imm_lower);
+      OUT_BATCH(imm_upper);
+      ADVANCE_BATCH();
+   }
 }
 
 /**
@@ -407,34 +604,16 @@ emit:
 void
 intel_emit_depth_stall_flushes(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-   assert(intel->gen >= 6 && intel->gen <= 7);
-
-   BEGIN_BATCH(4);
-   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
-   OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
-   OUT_BATCH(0); /* address */
-   OUT_BATCH(0); /* write data */
-   ADVANCE_BATCH()
-
-   BEGIN_BATCH(4);
-   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
-   OUT_BATCH(PIPE_CONTROL_DEPTH_CACHE_FLUSH);
-   OUT_BATCH(0); /* address */
-   OUT_BATCH(0); /* write data */
-   ADVANCE_BATCH();
-
-   BEGIN_BATCH(4);
-   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
-   OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
-   OUT_BATCH(0); /* address */
-   OUT_BATCH(0); /* write data */
-   ADVANCE_BATCH();
+   assert(brw->gen >= 6 && brw->gen <= 9);
+
+   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
+   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_CACHE_FLUSH);
+   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
 }
 
 /**
- * From the BSpec, volume 2a.03: VS Stage Input / State:
- * "[DevIVB] A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
+ * From the Ivybridge PRM, Volume 2 Part 1, Section 3.2 (VS Stage Input):
+ * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
  *  stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
  *  3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
  *  3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one PIPE_CONTROL needs
@@ -443,18 +622,29 @@ intel_emit_depth_stall_flushes(struct brw_context *brw)
 void
 gen7_emit_vs_workaround_flush(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-   assert(intel->gen == 7);
-
-   BEGIN_BATCH(4);
-   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
-   OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_WRITE_IMMEDIATE);
-   OUT_RELOC(intel->batch.workaround_bo,
-            I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
-   OUT_BATCH(0); /* write data */
-   ADVANCE_BATCH();
+   assert(brw->gen == 7);
+   brw_emit_pipe_control_write(brw,
+                               PIPE_CONTROL_WRITE_IMMEDIATE
+                               | PIPE_CONTROL_DEPTH_STALL,
+                               brw->batch.workaround_bo, 0,
+                               0, 0);
 }
 
+
+/**
+ * Emit a PIPE_CONTROL command for gen7 with the CS Stall bit set.
+ */
+void
+gen7_emit_cs_stall_flush(struct brw_context *brw)
+{
+   brw_emit_pipe_control_write(brw,
+                               PIPE_CONTROL_CS_STALL
+                               | PIPE_CONTROL_WRITE_IMMEDIATE,
+                               brw->batch.workaround_bo, 0,
+                               0, 0);
+}
+
+
 /**
  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
  * implementing two workarounds on gen6.  From section 1.4.7.1
@@ -495,27 +685,12 @@ gen7_emit_vs_workaround_flush(struct brw_context *brw)
 void
 intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-   if (!intel->batch.need_workaround_flush)
-      return;
+   brw_emit_pipe_control_flush(brw,
+                               PIPE_CONTROL_CS_STALL |
+                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
 
-   BEGIN_BATCH(4);
-   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
-   OUT_BATCH(PIPE_CONTROL_CS_STALL |
-            PIPE_CONTROL_STALL_AT_SCOREBOARD);
-   OUT_BATCH(0); /* address */
-   OUT_BATCH(0); /* write data */
-   ADVANCE_BATCH();
-
-   BEGIN_BATCH(4);
-   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
-   OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
-   OUT_RELOC(intel->batch.workaround_bo,
-            I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
-   OUT_BATCH(0); /* write data */
-   ADVANCE_BATCH();
-
-   intel->batch.need_workaround_flush = false;
+   brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_IMMEDIATE,
+                               brw->batch.workaround_bo, 0, 0, 0);
 }
 
 /* Emit a pipelined flush to either flush render and texture cache for
@@ -527,47 +702,68 @@ intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
 void
 intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-   if (intel->gen >= 6) {
-      if (intel->batch.is_blit) {
-        BEGIN_BATCH_BLT(4);
-        OUT_BATCH(MI_FLUSH_DW);
-        OUT_BATCH(0);
-        OUT_BATCH(0);
-        OUT_BATCH(0);
-        ADVANCE_BATCH();
-      } else {
-        if (intel->gen == 6) {
-           /* Hardware workaround: SNB B-Spec says:
-            *
-            * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache
-            * Flush Enable =1, a PIPE_CONTROL with any non-zero
-            * post-sync-op is required.
-            */
-           intel_emit_post_sync_nonzero_flush(brw);
-        }
-
-        BEGIN_BATCH(4);
-        OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
-        OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
-                  PIPE_CONTROL_WRITE_FLUSH |
-                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
-                   PIPE_CONTROL_VF_CACHE_INVALIDATE |
-                  PIPE_CONTROL_TC_FLUSH |
-                  PIPE_CONTROL_NO_WRITE |
-                   PIPE_CONTROL_CS_STALL);
-        OUT_BATCH(0); /* write address */
-        OUT_BATCH(0); /* write data */
-        ADVANCE_BATCH();
-      }
+   if (brw->batch.ring == BLT_RING && brw->gen >= 6) {
+      BEGIN_BATCH_BLT(4);
+      OUT_BATCH(MI_FLUSH_DW);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      OUT_BATCH(0);
+      ADVANCE_BATCH();
    } else {
+      int flags = PIPE_CONTROL_NO_WRITE | PIPE_CONTROL_RENDER_TARGET_FLUSH;
+      if (brw->gen >= 6) {
+         if (brw->gen == 9) {
+            /* Hardware workaround: SKL
+             *
+             * Emit Pipe Control with all bits set to zero before emitting
+             * a Pipe Control with VF Cache Invalidate set.
+             */
+            brw_emit_pipe_control_flush(brw, 0);
+         }
+
+         flags |= PIPE_CONTROL_INSTRUCTION_INVALIDATE |
+                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                  PIPE_CONTROL_VF_CACHE_INVALIDATE |
+                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+                  PIPE_CONTROL_CS_STALL;
+
+         if (brw->gen == 6) {
+            /* Hardware workaround: SNB B-Spec says:
+             *
+             * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache
+             * Flush Enable =1, a PIPE_CONTROL with any non-zero
+             * post-sync-op is required.
+             */
+            intel_emit_post_sync_nonzero_flush(brw);
+         }
+      }
+      brw_emit_pipe_control_flush(brw, flags);
+   }
+
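+   /* The flush above empties the render cache, so forget the set of
+    * buffers it was tracked as containing.
+    */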
+   brw_render_cache_set_clear(brw);
+}
+
+void
+brw_load_register_mem(struct brw_context *brw,
+                      uint32_t reg,
+                      drm_intel_bo *bo,
+                      uint32_t read_domains, uint32_t write_domain,
+                      uint32_t offset)
+{
+   /* MI_LOAD_REGISTER_MEM only exists on Gen7+. */
+   assert(brw->gen >= 7);
+
+   if (brw->gen >= 8) {
       BEGIN_BATCH(4);
-      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
-               PIPE_CONTROL_WRITE_FLUSH |
-               PIPE_CONTROL_NO_WRITE);
-      OUT_BATCH(0); /* write address */
-      OUT_BATCH(0); /* write data */
-      OUT_BATCH(0); /* write data */
+      OUT_BATCH(GEN7_MI_LOAD_REGISTER_MEM | (4 - 2));
+      OUT_BATCH(reg);
+      OUT_RELOC64(bo, read_domains, write_domain, offset);
+      ADVANCE_BATCH();
+   } else {
+      BEGIN_BATCH(3);
+      OUT_BATCH(GEN7_MI_LOAD_REGISTER_MEM | (3 - 2));
+      OUT_BATCH(reg);
+      OUT_RELOC(bo, read_domains, write_domain, offset);
       ADVANCE_BATCH();
    }
 }