-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+/*
+ * Copyright 2006 VMware, Inc.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
+ * distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
+ */
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
-#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"
+#include "intel_fbo.h"
#include "brw_context.h"
+#include "brw_defines.h"
+#include "brw_state.h"
-static void
-intel_batchbuffer_reset(struct brw_context *brw);
+#include <xf86drm.h>
+#include <i915_drm.h>
-struct cached_batch_item {
- struct cached_batch_item *next;
- uint16_t header;
- uint16_t size;
-};
+static void
+intel_batchbuffer_reset(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
+ bool has_llc);
void
-intel_batchbuffer_clear_cache(struct brw_context *brw)
+intel_batchbuffer_init(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
+ bool has_llc)
{
- struct cached_batch_item *item = brw->batch.cached_items;
+ intel_batchbuffer_reset(batch, bufmgr, has_llc);
- while (item) {
- struct cached_batch_item *next = item->next;
- free(item);
- item = next;
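+ /* On non-LLC platforms the batch is built in a malloc'ed shadow buffer
+ * and copied into the BO with drm_intel_bo_subdata() at flush time (see
+ * do_flush_locked); with LLC the BO is mapped and written directly.
+ */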
+ if (!has_llc) {
+ batch->cpu_map = malloc(BATCH_SZ);
+ batch->map = batch->cpu_map;
+ batch->map_next = batch->cpu_map;
}
-
- brw->batch.cached_items = NULL;
}
-void
-intel_batchbuffer_init(struct brw_context *brw)
+static void
+intel_batchbuffer_reset(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
+ bool has_llc)
{
- intel_batchbuffer_reset(brw);
+ if (batch->last_bo != NULL) {
+ drm_intel_bo_unreference(batch->last_bo);
+ batch->last_bo = NULL;
+ }
+ batch->last_bo = batch->bo;
- if (brw->gen >= 6) {
- /* We can't just use brw_state_batch to get a chunk of space for
- * the gen6 workaround because it involves actually writing to
- * the buffer, and the kernel doesn't let us write to the batch.
- */
- brw->batch.workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
- "pipe_control workaround",
- 4096, 4096);
+ batch->bo = drm_intel_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
+ if (has_llc) {
+ drm_intel_bo_map(batch->bo, true);
+ batch->map = batch->bo->virtual;
}
+ batch->map_next = batch->map;
- brw->batch.need_workaround_flush = true;
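+ /* Leave room at the end of the batch for the final flush commands and
+ * MI_BATCH_BUFFER_END; brw_state_batch() data grows down from the end of
+ * the BO, so state_batch_offset starts out at the full buffer size.
+ */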
+ batch->reserved_space = BATCH_RESERVED;
+ batch->state_batch_offset = batch->bo->size;
+ batch->needs_sol_reset = false;
+ batch->state_base_address_emitted = false;
- if (!brw->has_llc) {
- brw->batch.cpu_map = malloc(BATCH_SZ);
- brw->batch.map = brw->batch.cpu_map;
- }
+ /* We don't know what ring the new batch will be sent to until we see the
+ * first BEGIN_BATCH or BEGIN_BATCH_BLT. Mark it as unknown.
+ */
+ batch->ring = UNKNOWN_RING;
}
static void
-intel_batchbuffer_reset(struct brw_context *brw)
+intel_batchbuffer_reset_and_clear_render_cache(struct brw_context *brw)
{
- if (brw->batch.last_bo != NULL) {
- drm_intel_bo_unreference(brw->batch.last_bo);
- brw->batch.last_bo = NULL;
- }
- brw->batch.last_bo = brw->batch.bo;
-
- intel_batchbuffer_clear_cache(brw);
-
- brw->batch.bo = drm_intel_bo_alloc(brw->bufmgr, "batchbuffer",
- BATCH_SZ, 4096);
- if (brw->has_llc) {
- drm_intel_bo_map(brw->batch.bo, true);
- brw->batch.map = brw->batch.bo->virtual;
- }
-
- brw->batch.reserved_space = BATCH_RESERVED;
- brw->batch.state_batch_offset = brw->batch.bo->size;
- brw->batch.used = 0;
- brw->batch.needs_sol_reset = false;
+ intel_batchbuffer_reset(&brw->batch, brw->bufmgr, brw->has_llc);
+ brw_render_cache_set_clear(brw);
}
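+/**
+ * Snapshot the batch write pointer and relocation count so the batch can
+ * be rolled back to this point if a later operation has to bail out.
+ */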
void
intel_batchbuffer_save_state(struct brw_context *brw)
{
- brw->batch.saved.used = brw->batch.used;
+ brw->batch.saved.map_next = brw->batch.map_next;
brw->batch.saved.reloc_count =
drm_intel_gem_bo_get_reloc_count(brw->batch.bo);
}
{
drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
- brw->batch.used = brw->batch.saved.used;
+ brw->batch.map_next = brw->batch.saved.map_next;
+ if (USED_BATCH(brw->batch) == 0)
+ brw->batch.ring = UNKNOWN_RING;
+}
- /* Cached batch state is dead, since we just cleared some unknown part of the
- * batchbuffer. Assume that the caller resets any other state necessary.
- */
- intel_batchbuffer_clear_cache(brw);
+void
+intel_batchbuffer_free(struct intel_batchbuffer *batch)
+{
+ free(batch->cpu_map);
+ drm_intel_bo_unreference(batch->last_bo);
+ drm_intel_bo_unreference(batch->bo);
}
void
-intel_batchbuffer_free(struct brw_context *brw)
+intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
+ enum brw_gpu_ring ring)
{
- free(brw->batch.cpu_map);
- drm_intel_bo_unreference(brw->batch.last_bo);
- drm_intel_bo_unreference(brw->batch.bo);
- drm_intel_bo_unreference(brw->batch.workaround_bo);
- intel_batchbuffer_clear_cache(brw);
+ /* If we're switching rings, implicitly flush the batch. */
+ if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
+ brw->gen >= 6) {
+ intel_batchbuffer_flush(brw);
+ }
+
+#ifdef DEBUG
+ assert(sz < BATCH_SZ - BATCH_RESERVED);
+#endif
+ if (intel_batchbuffer_space(&brw->batch) < sz)
+ intel_batchbuffer_flush(brw);
+
+ enum brw_gpu_ring prev_ring = brw->batch.ring;
+ /* The intel_batchbuffer_flush() calls above might have changed
+ * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
+ */
+ brw->batch.ring = ring;
+
+ if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
+ intel_batchbuffer_emit_render_ring_prelude(brw);
}
static void
struct intel_batchbuffer *batch = &brw->batch;
int ret;
- decode = drm_intel_decode_context_alloc(brw->intelScreen->deviceID);
+ decode = drm_intel_decode_context_alloc(brw->screen->deviceID);
if (!decode)
return;
if (ret == 0) {
drm_intel_decode_set_batch_pointer(decode,
batch->bo->virtual,
- batch->bo->offset,
- batch->used);
+ batch->bo->offset64,
+ USED_BATCH(*batch));
} else {
fprintf(stderr,
"WARNING: failed to map batchbuffer (%s), "
drm_intel_decode_set_batch_pointer(decode,
batch->map,
- batch->bo->offset,
- batch->used);
+ batch->bo->offset64,
+ USED_BATCH(*batch));
}
+ drm_intel_decode_set_output_file(decode, stderr);
drm_intel_decode(decode);
drm_intel_decode_context_free(decode);
}
}
+void
+intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw)
+{
+ /* Currently unused. */
+}
+
+/**
+ * Called when starting a new batch buffer.
+ */
+static void
+brw_new_batch(struct brw_context *brw)
+{
+ /* Create a new batchbuffer and reset the associated state: */
+ drm_intel_gem_bo_clear_relocs(brw->batch.bo, 0);
+ intel_batchbuffer_reset_and_clear_render_cache(brw);
+
+ /* If the kernel supports hardware contexts, then most hardware state is
+ * preserved between batches; we only need to re-emit state that is required
+ * to be in every batch. Otherwise we need to re-emit all the state that
+ * would otherwise be stored in the context (which for all intents and
+ * purposes means everything).
+ */
+ if (brw->hw_ctx == NULL)
+ brw->ctx.NewDriverState |= BRW_NEW_CONTEXT;
+
+ brw->ctx.NewDriverState |= BRW_NEW_BATCH;
+
+ brw->state_batch_count = 0;
+
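+ /* Invalidate the cached index buffer state so it gets re-emitted. */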
+ brw->ib.type = -1;
+
+ /* We need to periodically reap the shader time results, because rollover
+ * happens every few seconds. We also want to see results every once in a
+ * while, because many programs won't cleanly destroy our context, so the
+ * end-of-run printout may not happen.
+ */
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ brw_collect_and_report_shader_time(brw);
+}
+
/**
* Called from intel_batchbuffer_flush before emitting MI_BATCHBUFFER_END and
* sending it off.
static void
brw_finish_batch(struct brw_context *brw)
{
+ /* Capture the closing pipeline statistics register values necessary to
+ * support query objects (in the non-hardware context world).
+ */
brw_emit_query_end(brw);
- if (brw->curbe.curbe_bo) {
- drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);
- drm_intel_bo_unreference(brw->curbe.curbe_bo);
- brw->curbe.curbe_bo = NULL;
+ if (brw->batch.ring == RENDER_RING) {
+ /* Work around L3 state leaking into contexts that set MI_RESTORE_INHIBIT
+ * and assume the L3 cache is configured according to the hardware
+ * defaults.
+ */
+ if (brw->gen >= 7)
+ gen7_restore_default_l3_config(brw);
+
+ if (brw->is_haswell) {
+ /* From the Haswell PRM, Volume 2b, Command Reference: Instructions,
+ * 3DSTATE_CC_STATE_POINTERS > "Note":
+ *
+ * "SW must program 3DSTATE_CC_STATE_POINTERS command at the end of every
+ * 3D batch buffer followed by a PIPE_CONTROL with RC flush and CS stall."
+ *
+ * From the example in the docs, it seems to expect a regular pipe control
+ * flush here as well. We may have done it already, but meh.
+ *
+ * See also WaAvoidRCZCounterRollover.
+ */
+ brw_emit_mi_flush(brw);
+ BEGIN_BATCH(2);
+ OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
+ OUT_BATCH(brw->cc.state_offset | 1);
+ ADVANCE_BATCH();
+ brw_emit_pipe_control_flush(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_CS_STALL);
+ }
+ }
+
+ /* Mark that the current program cache BO has been used by the GPU.
+ * It will be reallocated if we need to put new programs in for the
+ * next batch.
+ */
+ brw->cache.bo_used_by_gpu = true;
+}
+
+static void
+throttle(struct brw_context *brw)
+{
+ /* Wait for the swapbuffers before the one we just emitted, so we
+ * don't get too many swaps outstanding for apps that are GPU-heavy
+ * but not CPU-heavy.
+ *
+ * We're using intelDRI2Flush (called from the loader before
+ * swapbuffer) and glFlush (for front buffer rendering) as the
+ * indicator that a frame is done and then throttle when we get
+ * here as we prepare to render the next frame. At this point the
+ * round trips for swap/copy and getting new buffers are done, and
+ * we'll spend less time waiting on the GPU.
+ *
+ * Unfortunately, we don't have a handle to the batch containing
+ * the swap, and getting our hands on that doesn't seem worth it,
+ * so we just use the first batch we emitted after the last swap.
+ */
+ if (brw->need_swap_throttle && brw->throttle_batch[0]) {
+ if (brw->throttle_batch[1]) {
+ if (!brw->disable_throttling)
+ drm_intel_bo_wait_rendering(brw->throttle_batch[1]);
+ drm_intel_bo_unreference(brw->throttle_batch[1]);
+ }
+ brw->throttle_batch[1] = brw->throttle_batch[0];
+ brw->throttle_batch[0] = NULL;
+ brw->need_swap_throttle = false;
+ /* Throttling here is more precise than the throttle ioctl, so skip it */
+ brw->need_flush_throttle = false;
+ }
+
+ if (brw->need_flush_throttle) {
+ __DRIscreen *dri_screen = brw->screen->driScrnPriv;
+ drmCommandNone(dri_screen->fd, DRM_I915_GEM_THROTTLE);
+ brw->need_flush_throttle = false;
}
}
/* TODO: Push this whole function into bufmgr.
*/
static int
-do_flush_locked(struct brw_context *brw)
+do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
{
struct intel_batchbuffer *batch = &brw->batch;
int ret = 0;
if (brw->has_llc) {
drm_intel_bo_unmap(batch->bo);
} else {
- ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+ ret = drm_intel_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
ret = drm_intel_bo_subdata(batch->bo,
batch->state_batch_offset,
}
}
- if (!brw->intelScreen->no_hw) {
+ if (!brw->screen->no_hw) {
int flags;
- if (brw->gen < 6 || !batch->is_blit) {
- flags = I915_EXEC_RENDER;
+ if (brw->gen >= 6 && batch->ring == BLT_RING) {
+ flags = I915_EXEC_BLT;
} else {
- flags = I915_EXEC_BLT;
+ flags = I915_EXEC_RENDER;
}
-
if (batch->needs_sol_reset)
flags |= I915_EXEC_GEN7_SOL_RESET;
if (ret == 0) {
if (unlikely(INTEL_DEBUG & DEBUG_AUB))
brw_annotate_aub(brw);
- if (brw->hw_ctx == NULL || batch->is_blit) {
- ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
- flags);
+
+ if (brw->hw_ctx == NULL || batch->ring != RENDER_RING) {
+ assert(in_fence_fd == -1);
+ assert(out_fence_fd == NULL);
+ ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
+ NULL, 0, 0, flags);
} else {
- ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
- 4 * batch->used, flags);
+ ret = drm_intel_gem_bo_fence_exec(batch->bo, brw->hw_ctx,
+ 4 * USED_BATCH(*batch),
+ in_fence_fd, out_fence_fd,
+ flags);
}
}
+
+ throttle(brw);
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
do_batch_dump(brw);
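+ /* With GL_LOSE_CONTEXT_ON_RESET_ARB, check whether the GPU was reset
+ * so a lost context can be reported to the application.
+ */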
+ if (brw->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
+ brw_check_for_reset(brw);
+
if (ret != 0) {
fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
exit(1);
}
- brw->vtbl.new_batch(brw);
return ret;
}
+/**
+ * The in_fence_fd is ignored if -1. Otherwise this function takes ownership
+ * of the fd.
+ *
+ * The out_fence_fd is ignored if NULL. Otherwise, the caller takes ownership
+ * of the returned fd.
+ */
int
-_intel_batchbuffer_flush(struct brw_context *brw,
- const char *file, int line)
+_intel_batchbuffer_flush_fence(struct brw_context *brw,
+ int in_fence_fd, int *out_fence_fd,
+ const char *file, int line)
{
int ret;
- if (brw->batch.used == 0)
+ if (USED_BATCH(brw->batch) == 0)
return 0;
- if (brw->first_post_swapbuffers_batch == NULL) {
- brw->first_post_swapbuffers_batch = brw->batch.bo;
- drm_intel_bo_reference(brw->first_post_swapbuffers_batch);
+ if (brw->throttle_batch[0] == NULL) {
+ brw->throttle_batch[0] = brw->batch.bo;
+ drm_intel_bo_reference(brw->throttle_batch[0]);
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
- int bytes_for_commands = 4 * brw->batch.used;
+ int bytes_for_commands = 4 * USED_BATCH(brw->batch);
int bytes_for_state = brw->batch.bo->size - brw->batch.state_batch_offset;
int total_bytes = bytes_for_commands + bytes_for_state;
fprintf(stderr, "%s:%d: Batchbuffer flush with %4db (pkt) + "
brw_finish_batch(brw);
/* Mark the end of the buffer. */
- intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
- if (brw->batch.used & 1) {
+ intel_batchbuffer_emit_dword(&brw->batch, MI_BATCH_BUFFER_END);
+ if (USED_BATCH(brw->batch) & 1) {
/* Round batchbuffer usage to 2 DWORDs. */
- intel_batchbuffer_emit_dword(brw, MI_NOOP);
+ intel_batchbuffer_emit_dword(&brw->batch, MI_NOOP);
}
intel_upload_finish(brw);
/* Check that we didn't just wrap our batchbuffer at a bad time. */
assert(!brw->no_batch_wrap);
- ret = do_flush_locked(brw);
+ ret = do_flush_locked(brw, in_fence_fd, out_fence_fd);
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
drm_intel_bo_wait_rendering(brw->batch.bo);
}
- /* Reset the buffer:
- */
- intel_batchbuffer_reset(brw);
+ /* Start a new batch buffer. */
+ brw_new_batch(brw);
return ret;
}
/* This is the only way buffers get added to the validate list.
*/
-bool
-intel_batchbuffer_emit_reloc(struct brw_context *brw,
- drm_intel_bo *buffer,
- uint32_t read_domains, uint32_t write_domain,
- uint32_t delta)
+uint32_t
+intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
+ drm_intel_bo *buffer, uint32_t offset,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta)
{
int ret;
- ret = drm_intel_bo_emit_reloc(brw->batch.bo, 4*brw->batch.used,
+ ret = drm_intel_bo_emit_reloc(batch->bo, offset,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
(void)ret;
- /*
- * Using the old buffer offset, write in what the right data would be, in case
- * the buffer doesn't move and we can short-circuit the relocation processing
- * in the kernel
+ /* Using the old buffer offset, write in what the right data would be, in
+ * case the buffer doesn't move and we can short-circuit the relocation
+ * processing in the kernel
*/
- intel_batchbuffer_emit_dword(brw, buffer->offset + delta);
-
- return true;
+ return buffer->offset64 + delta;
}
-bool
-intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
- drm_intel_bo *buffer,
- uint32_t read_domains,
- uint32_t write_domain,
- uint32_t delta)
+uint64_t
+intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
+ drm_intel_bo *buffer, uint32_t offset,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta)
{
- int ret;
-
- ret = drm_intel_bo_emit_reloc_fence(brw->batch.bo, 4*brw->batch.used,
- buffer, delta,
- read_domains, write_domain);
+ int ret = drm_intel_bo_emit_reloc(batch->bo, offset,
+ buffer, delta,
+ read_domains, write_domain);
assert(ret == 0);
- (void)ret;
+ (void) ret;
- /*
- * Using the old buffer offset, write in what the right data would
- * be, in case the buffer doesn't move and we can short-circuit the
- * relocation processing in the kernel
+ /* Using the old buffer offset, write in what the right data would be, in
+ * case the buffer doesn't move and we can short-circuit the relocation
+ * processing in the kernel
*/
- intel_batchbuffer_emit_dword(brw, buffer->offset + delta);
-
- return true;
+ return buffer->offset64 + delta;
}
+
void
intel_batchbuffer_data(struct brw_context *brw,
- const void *data, GLuint bytes, bool is_blit)
+ const void *data, GLuint bytes, enum brw_gpu_ring ring)
{
assert((bytes & 3) == 0);
- intel_batchbuffer_require_space(brw, bytes, is_blit);
- __memcpy(brw->batch.map + brw->batch.used, data, bytes);
- brw->batch.used += bytes >> 2;
+ intel_batchbuffer_require_space(brw, bytes, ring);
+ memcpy(brw->batch.map_next, data, bytes);
+ brw->batch.map_next += bytes >> 2;
}
-void
-intel_batchbuffer_cached_advance(struct brw_context *brw)
+static void
+load_sized_register_mem(struct brw_context *brw,
+ uint32_t reg,
+ drm_intel_bo *bo,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t offset,
+ int size)
{
- struct cached_batch_item **prev = &brw->batch.cached_items, *item;
- uint32_t sz = (brw->batch.used - brw->batch.emit) * sizeof(uint32_t);
- uint32_t *start = brw->batch.map + brw->batch.emit;
- uint16_t op = *start >> 16;
-
- while (*prev) {
- uint32_t *old;
-
- item = *prev;
- old = brw->batch.map + item->header;
- if (op == *old >> 16) {
- if (item->size == sz && memcmp(old, start, sz) == 0) {
- if (prev != &brw->batch.cached_items) {
- *prev = item->next;
- item->next = brw->batch.cached_items;
- brw->batch.cached_items = item;
- }
- brw->batch.used = brw->batch.emit;
- return;
- }
+ int i;
+
+ /* MI_LOAD_REGISTER_MEM only exists on Gen7+. */
+ assert(brw->gen >= 7);
- goto emit;
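+ /* On Gen8+ the command takes a 64-bit address (OUT_RELOC64), so each
+ * MI_LOAD_REGISTER_MEM packet is 4 dwords long instead of 3.
+ */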
+ if (brw->gen >= 8) {
+ BEGIN_BATCH(4 * size);
+ for (i = 0; i < size; i++) {
+ OUT_BATCH(GEN7_MI_LOAD_REGISTER_MEM | (4 - 2));
+ OUT_BATCH(reg + i * 4);
+ OUT_RELOC64(bo, read_domains, write_domain, offset + i * 4);
}
- prev = &item->next;
+ ADVANCE_BATCH();
+ } else {
+ BEGIN_BATCH(3 * size);
+ for (i = 0; i < size; i++) {
+ OUT_BATCH(GEN7_MI_LOAD_REGISTER_MEM | (3 - 2));
+ OUT_BATCH(reg + i * 4);
+ OUT_RELOC(bo, read_domains, write_domain, offset + i * 4);
+ }
+ ADVANCE_BATCH();
}
+}
- item = malloc(sizeof(struct cached_batch_item));
- if (item == NULL)
- return;
+void
+brw_load_register_mem(struct brw_context *brw,
+ uint32_t reg,
+ drm_intel_bo *bo,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t offset)
+{
+ load_sized_register_mem(brw, reg, bo, read_domains, write_domain, offset, 1);
+}
- item->next = brw->batch.cached_items;
- brw->batch.cached_items = item;
+void
+brw_load_register_mem64(struct brw_context *brw,
+ uint32_t reg,
+ drm_intel_bo *bo,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t offset)
+{
+ load_sized_register_mem(brw, reg, bo, read_domains, write_domain, offset, 2);
+}
+
+/*
+ * Write an arbitrary 32-bit register to a buffer via MI_STORE_REGISTER_MEM.
+ */
+void
+brw_store_register_mem32(struct brw_context *brw,
+ drm_intel_bo *bo, uint32_t reg, uint32_t offset)
+{
+ assert(brw->gen >= 6);
-emit:
- item->size = sz;
- item->header = brw->batch.emit;
+ if (brw->gen >= 8) {
+ BEGIN_BATCH(4);
+ OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
+ OUT_BATCH(reg);
+ OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ offset);
+ ADVANCE_BATCH();
+ } else {
+ BEGIN_BATCH(3);
+ OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
+ OUT_BATCH(reg);
+ OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ offset);
+ ADVANCE_BATCH();
+ }
}
-/**
- * Restriction [DevSNB, DevIVB]:
- *
- * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
- * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
- * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
- * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
- * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
- * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
- * unless SW can otherwise guarantee that the pipeline from WM onwards is
- * already flushed (e.g., via a preceding MI_FLUSH).
+/*
+ * Write an arbitrary 64-bit register to a buffer via MI_STORE_REGISTER_MEM.
*/
void
-intel_emit_depth_stall_flushes(struct brw_context *brw)
+brw_store_register_mem64(struct brw_context *brw,
+ drm_intel_bo *bo, uint32_t reg, uint32_t offset)
{
- assert(brw->gen >= 6 && brw->gen <= 7);
+ assert(brw->gen >= 6);
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
- OUT_BATCH(0); /* address */
- OUT_BATCH(0); /* write data */
- ADVANCE_BATCH()
+ /* MI_STORE_REGISTER_MEM only stores a single 32-bit value, so to
+ * read a full 64-bit register, we need to do two of them.
+ */
+ if (brw->gen >= 8) {
+ BEGIN_BATCH(8);
+ OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
+ OUT_BATCH(reg);
+ OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ offset);
+ OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
+ OUT_BATCH(reg + sizeof(uint32_t));
+ OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ offset + sizeof(uint32_t));
+ ADVANCE_BATCH();
+ } else {
+ BEGIN_BATCH(6);
+ OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
+ OUT_BATCH(reg);
+ OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ offset);
+ OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
+ OUT_BATCH(reg + sizeof(uint32_t));
+ OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ offset + sizeof(uint32_t));
+ ADVANCE_BATCH();
+ }
+}
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_DEPTH_CACHE_FLUSH);
- OUT_BATCH(0); /* address */
- OUT_BATCH(0); /* write data */
+/*
+ * Write a 32-bit register using immediate data.
+ */
+void
+brw_load_register_imm32(struct brw_context *brw, uint32_t reg, uint32_t imm)
+{
+ assert(brw->gen >= 6);
+
+ BEGIN_BATCH(3);
+ OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
+ OUT_BATCH(reg);
+ OUT_BATCH(imm);
ADVANCE_BATCH();
+}
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
- OUT_BATCH(0); /* address */
- OUT_BATCH(0); /* write data */
+/*
+ * Write a 64-bit register using immediate data.
+ */
+void
+brw_load_register_imm64(struct brw_context *brw, uint32_t reg, uint64_t imm)
+{
+ assert(brw->gen >= 6);
+
+ BEGIN_BATCH(5);
+ OUT_BATCH(MI_LOAD_REGISTER_IMM | (5 - 2));
+ OUT_BATCH(reg);
+ OUT_BATCH(imm & 0xffffffff);
+ OUT_BATCH(reg + 4);
+ OUT_BATCH(imm >> 32);
ADVANCE_BATCH();
}
-/**
- * From the Ivybridge PRM, Volume 2 Part 1, Section 3.2 (VS Stage Input):
- * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
- * stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
- * 3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
- * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL needs
- * to be sent before any combination of VS associated 3DSTATE."
+/*
+ * Copies a 32-bit register.
*/
void
-gen7_emit_vs_workaround_flush(struct brw_context *brw)
+brw_load_register_reg(struct brw_context *brw, uint32_t src, uint32_t dest)
{
- assert(brw->gen == 7);
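+ /* MI_LOAD_REGISTER_REG only exists on Haswell and Gen8+. */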
+ assert(brw->gen >= 8 || brw->is_haswell);
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_WRITE_IMMEDIATE);
- OUT_RELOC(brw->batch.workaround_bo,
- I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
- OUT_BATCH(0); /* write data */
+ BEGIN_BATCH(3);
+ OUT_BATCH(MI_LOAD_REGISTER_REG | (3 - 2));
+ OUT_BATCH(src);
+ OUT_BATCH(dest);
ADVANCE_BATCH();
}
-/**
- * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
- * implementing two workarounds on gen6. From section 1.4.7.1
- * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
- *
- * [DevSNB-C+{W/A}] Before any depth stall flush (including those
- * produced by non-pipelined state commands), software needs to first
- * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
- * 0.
- *
- * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
- * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
- *
- * And the workaround for these two requires this workaround first:
- *
- * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
- * BEFORE the pipe-control with a post-sync op and no write-cache
- * flushes.
- *
- * And this last workaround is tricky because of the requirements on
- * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
- * volume 2 part 1:
- *
- * "1 of the following must also be set:
- * - Render Target Cache Flush Enable ([12] of DW1)
- * - Depth Cache Flush Enable ([0] of DW1)
- * - Stall at Pixel Scoreboard ([1] of DW1)
- * - Depth Stall ([13] of DW1)
- * - Post-Sync Operation ([13] of DW1)
- * - Notify Enable ([8] of DW1)"
- *
- * The cache flushes require the workaround flush that triggered this
- * one, so we can't use it. Depth stall would trigger the same.
- * Post-sync nonzero is what triggered this second workaround, so we
- * can't use that one either. Notify enable is IRQs, which aren't
- * really our business. That leaves only stall at scoreboard.
+/*
+ * Copies a 64-bit register.
*/
void
-intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
+brw_load_register_reg64(struct brw_context *brw, uint32_t src, uint32_t dest)
{
- if (!brw->batch.need_workaround_flush)
- return;
-
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
- OUT_BATCH(0); /* address */
- OUT_BATCH(0); /* write data */
+ assert(brw->gen >= 8 || brw->is_haswell);
+
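+ /* MI_LOAD_REGISTER_REG copies only 32 bits at a time, so a 64-bit
+ * register takes two of them.
+ */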
+ BEGIN_BATCH(6);
+ OUT_BATCH(MI_LOAD_REGISTER_REG | (3 - 2));
+ OUT_BATCH(src);
+ OUT_BATCH(dest);
+ OUT_BATCH(MI_LOAD_REGISTER_REG | (3 - 2));
+ OUT_BATCH(src + sizeof(uint32_t));
+ OUT_BATCH(dest + sizeof(uint32_t));
ADVANCE_BATCH();
+}
+
+/*
+ * Write 32 bits of immediate data to a GPU memory buffer.
+ */
+void
+brw_store_data_imm32(struct brw_context *brw, drm_intel_bo *bo,
+ uint32_t offset, uint32_t imm)
+{
+ assert(brw->gen >= 6);
BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
- OUT_RELOC(brw->batch.workaround_bo,
- I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
- OUT_BATCH(0); /* write data */
+ OUT_BATCH(MI_STORE_DATA_IMM | (4 - 2));
+ if (brw->gen >= 8)
+ OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ offset);
+ else {
+ OUT_BATCH(0); /* MBZ */
+ OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ offset);
+ }
+ OUT_BATCH(imm);
ADVANCE_BATCH();
-
- brw->batch.need_workaround_flush = false;
}
-/* Emit a pipelined flush to either flush render and texture cache for
- * reading from a FBO-drawn texture, or flush so that frontbuffer
- * render appears on the screen in DRI1.
- *
- * This is also used for the always_flush_cache driconf debug option.
+/*
+ * Write 64 bits of immediate data to a GPU memory buffer.
*/
void
-intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
+brw_store_data_imm64(struct brw_context *brw, drm_intel_bo *bo,
+ uint32_t offset, uint64_t imm)
{
- if (brw->gen >= 6) {
- if (brw->batch.is_blit) {
- BEGIN_BATCH_BLT(4);
- OUT_BATCH(MI_FLUSH_DW);
- OUT_BATCH(0);
- OUT_BATCH(0);
- OUT_BATCH(0);
- ADVANCE_BATCH();
- } else {
- if (brw->gen == 6) {
- /* Hardware workaround: SNB B-Spec says:
- *
- * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache
- * Flush Enable =1, a PIPE_CONTROL with any non-zero
- * post-sync-op is required.
- */
- intel_emit_post_sync_nonzero_flush(brw);
- }
-
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
- OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
- PIPE_CONTROL_WRITE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_VF_CACHE_INVALIDATE |
- PIPE_CONTROL_TC_FLUSH |
- PIPE_CONTROL_NO_WRITE |
- PIPE_CONTROL_CS_STALL);
- OUT_BATCH(0); /* write address */
- OUT_BATCH(0); /* write data */
- ADVANCE_BATCH();
- }
- } else {
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
- PIPE_CONTROL_WRITE_FLUSH |
- PIPE_CONTROL_NO_WRITE);
- OUT_BATCH(0); /* write address */
- OUT_BATCH(0); /* write data */
- OUT_BATCH(0); /* write data */
- ADVANCE_BATCH();
+ assert(brw->gen >= 6);
+
+ BEGIN_BATCH(5);
+ OUT_BATCH(MI_STORE_DATA_IMM | (5 - 2));
+ if (brw->gen >= 8)
+ OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ offset);
+ else {
+ OUT_BATCH(0); /* MBZ */
+ OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ offset);
}
+ OUT_BATCH(imm & 0xffffffffu);
+ OUT_BATCH(imm >> 32);
+ ADVANCE_BATCH();
}