#include "intel_context.h"
#include "intel_batchbuffer.h"
+#include "intel_buffer_objects.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"
-void
-intel_batchbuffer_reset(struct intel_batchbuffer *batch)
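+/* A cached_batch_item remembers a packet already written into the batch
+ * (its dword offset into batch.map and its size in bytes) so that a
+ * later, identical packet can be dropped; see
+ * intel_batchbuffer_cached_advance().
+ */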
+struct cached_batch_item {
+ struct cached_batch_item *next;
+ uint16_t header;
+ uint16_t size;
+};
+
+static void clear_cache(struct intel_context *intel)
{
- struct intel_context *intel = batch->intel;
+ struct cached_batch_item *item = intel->batch.cached_items;
- if (batch->buf != NULL) {
- dri_bo_unreference(batch->buf);
- batch->buf = NULL;
+ while (item) {
+ struct cached_batch_item *next = item->next;
+ free(item);
+ item = next;
}
- batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
- intel->maxBatchSize, 4096);
- batch->map = batch->buffer;
- batch->size = intel->maxBatchSize;
- batch->ptr = batch->map;
- batch->reserved_space = BATCH_RESERVED;
- batch->dirty_state = ~0;
+ intel->batch.cached_items = NULL;
+}
+
+void
+intel_batchbuffer_init(struct intel_context *intel)
+{
+ intel_batchbuffer_reset(intel);
+
+ if (intel->gen == 6) {
+ /* We can't just use brw_state_batch to get a chunk of space for
+ * the gen6 workaround because it involves actually writing to
+ * the buffer, and the kernel doesn't let us write to the batch.
+ */
+ intel->batch.workaround_bo = drm_intel_bo_alloc(intel->bufmgr,
+ "gen6 workaround",
+ 4096, 4096);
+ }
}
-struct intel_batchbuffer *
-intel_batchbuffer_alloc(struct intel_context *intel)
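+/* Stash the outgoing bo in last_bo (dropping the previous one), allocate
+ * a fresh batch bo, and reset the bookkeeping. Called from
+ * intel_batchbuffer_init() and again after every flush.
+ */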
+void
+intel_batchbuffer_reset(struct intel_context *intel)
{
- struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
+ if (intel->batch.last_bo != NULL) {
+ drm_intel_bo_unreference(intel->batch.last_bo);
+ intel->batch.last_bo = NULL;
+ }
+ intel->batch.last_bo = intel->batch.bo;
- batch->intel = intel;
- batch->buffer = malloc(intel->maxBatchSize);
- intel_batchbuffer_reset(batch);
+ clear_cache(intel);
- return batch;
+ intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
+ intel->maxBatchSize, 4096);
+
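+ /* State data is packed downward from the end of the bo, so an offset
+ * equal to bo->size means no state space is in use yet.
+ */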
+ intel->batch.reserved_space = BATCH_RESERVED;
+ intel->batch.state_batch_offset = intel->batch.bo->size;
+ intel->batch.used = 0;
}
void
-intel_batchbuffer_free(struct intel_batchbuffer *batch)
+intel_batchbuffer_free(struct intel_context *intel)
{
- free (batch->buffer);
- dri_bo_unreference(batch->buf);
- batch->buf = NULL;
- free(batch);
+ drm_intel_bo_unreference(intel->batch.last_bo);
+ drm_intel_bo_unreference(intel->batch.bo);
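+ /* workaround_bo is only allocated on gen6; unreference accepts NULL. */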
+ drm_intel_bo_unreference(intel->batch.workaround_bo);
+ clear_cache(intel);
}
-
/* TODO: Push this whole function into bufmgr.
*/
static void
-do_flush_locked(struct intel_batchbuffer *batch, GLuint used)
+do_flush_locked(struct intel_context *intel)
{
- struct intel_context *intel = batch->intel;
+ struct intel_batchbuffer *batch = &intel->batch;
int ret = 0;
- int x_off = 0, y_off = 0;
- dri_bo_subdata (batch->buf, 0, used, batch->buffer);
-
- batch->ptr = NULL;
-
- if (!intel->no_hw)
- dri_bo_exec(batch->buf, used, NULL, 0, (x_off & 0xffff) | (y_off << 16));
+ if (!intel->intelScreen->no_hw) {
+ int ring;
+
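+ /* Gen6 added the separate BLT ring; earlier parts (and non-blit
+ * batches) execute on the render ring.
+ */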
+ if (intel->gen < 6 || !batch->is_blit) {
+ ring = I915_EXEC_RENDER;
+ } else {
+ ring = I915_EXEC_BLT;
+ }
+
+ ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
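+ /* If any state data was packed in at the end of the bo, upload
+ * that region too.
+ */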
+ if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
+ ret = drm_intel_bo_subdata(batch->bo,
+ batch->state_batch_offset,
+ batch->bo->size - batch->state_batch_offset,
+ (char *)batch->map + batch->state_batch_offset);
+ }
+
+ if (ret == 0)
+ ret = drm_intel_bo_mrb_exec(batch->bo, 4*batch->used, NULL, 0, 0, ring);
+ }
- if (INTEL_DEBUG & DEBUG_BATCH) {
- dri_bo_map(batch->buf, GL_FALSE);
- intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
- intel->intelScreen->deviceID);
- dri_bo_unmap(batch->buf);
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
+ drm_intel_bo_map(batch->bo, false);
+ intel_decode(batch->bo->virtual, batch->used,
+ batch->bo->offset,
+ intel->intelScreen->deviceID, GL_TRUE);
+ drm_intel_bo_unmap(batch->bo);
if (intel->vtbl.debug_batch != NULL)
intel->vtbl.debug_batch(intel);
}
void
-_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
- int line)
+_intel_batchbuffer_flush(struct intel_context *intel,
+ const char *file, int line)
{
- struct intel_context *intel = batch->intel;
- GLuint used = batch->ptr - batch->map;
+ if (intel->batch.used == 0)
+ return;
if (intel->first_post_swapbuffers_batch == NULL) {
- intel->first_post_swapbuffers_batch = intel->batch->buf;
+ intel->first_post_swapbuffers_batch = intel->batch.bo;
drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
}
- if (used == 0)
- return;
-
- if (INTEL_DEBUG & DEBUG_BATCH)
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
- used);
+ 4*intel->batch.used);
- batch->reserved_space = 0;
- /* Emit a flush if the bufmgr doesn't do it for us. */
- if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(batch);
- used = batch->ptr - batch->map;
- }
-
- /* Round batchbuffer usage to 2 DWORDs. */
- if ((used & 4) == 0) {
- *(GLuint *) (batch->ptr) = 0; /* noop */
- batch->ptr += 4;
- used = batch->ptr - batch->map;
+ intel->batch.reserved_space = 0;
+
+ if (intel->always_flush_cache) {
+ intel_batchbuffer_emit_mi_flush(intel);
}
/* Mark the end of the buffer. */
- *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
- batch->ptr += 4;
- used = batch->ptr - batch->map;
- assert (used <= batch->buf->size);
-
- /* Workaround for recursive batchbuffer flushing: If the window is
- * moved, we can get into a case where we try to flush during a
- * flush. What happens is that when we try to grab the lock for
- * the first flush, we detect that the window moved which then
- * causes another flush (from the intel_draw_buffer() call in
- * intelUpdatePageFlipping()). To work around this we reset the
- * batchbuffer tail pointer before trying to get the lock. This
- * prevent the nested buffer flush, but a better fix would be to
- * avoid that in the first place. */
- batch->ptr = batch->map;
+ intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
+ if (intel->batch.used & 1) {
+ /* Round batchbuffer usage to 2 DWORDs. */
+ intel_batchbuffer_emit_dword(intel, MI_NOOP);
+ }
if (intel->vtbl.finish_batch)
intel->vtbl.finish_batch(intel);
+ intel_upload_finish(intel);
+
/* Check that we didn't just wrap our batchbuffer at a bad time. */
assert(!intel->no_batch_wrap);
- /* TODO: Just pass the relocation list and dma buffer up to the
- * kernel.
- */
- do_flush_locked(batch, used);
+ do_flush_locked(intel);
- if (INTEL_DEBUG & DEBUG_SYNC) {
+ if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
- dri_bo_map(batch->buf, GL_TRUE);
- dri_bo_unmap(batch->buf);
+ drm_intel_bo_wait_rendering(intel->batch.bo);
}
/* Reset the buffer:
*/
- intel_batchbuffer_reset(batch);
+ intel_batchbuffer_reset(intel);
}
/* This is the only way buffers get added to the validate list.
*/
GLboolean
-intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
- dri_bo *buffer,
+intel_batchbuffer_emit_reloc(struct intel_context *intel,
+ drm_intel_bo *buffer,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
int ret;
- assert(delta < buffer->size);
-
- if (batch->ptr - batch->map > batch->buf->size)
- printf ("bad relocation ptr %p map %p offset %d size %lu\n",
- batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);
- ret = dri_bo_emit_reloc(batch->buf, read_domains, write_domain,
- delta, batch->ptr - batch->map, buffer);
+ ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
+ buffer, delta,
+ read_domains, write_domain);
+ assert(ret == 0);
+ (void)ret;
/*
* Using the old buffer offset, write in what the right data would be, in case
* the buffer doesn't move and we can short-circuit the relocation processing
* in the kernel
*/
- intel_batchbuffer_emit_dword (batch, buffer->offset + delta);
+ intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
return GL_TRUE;
}
GLboolean
-intel_batchbuffer_emit_reloc_fenced(struct intel_batchbuffer *batch,
+intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
drm_intel_bo *buffer,
- uint32_t read_domains, uint32_t write_domain,
+ uint32_t read_domains,
+ uint32_t write_domain,
uint32_t delta)
{
int ret;
- assert(delta < buffer->size);
-
- if (batch->ptr - batch->map > batch->buf->size)
- printf ("bad relocation ptr %p map %p offset %d size %lu\n",
- batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);
- ret = drm_intel_bo_emit_reloc_fence(batch->buf, batch->ptr - batch->map,
+ ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
buffer, delta,
read_domains, write_domain);
+ assert(ret == 0);
+ (void)ret;
/*
* Using the old buffer offset, write in what the right data would
* be, in case the buffer doesn't move and we can short-circuit the
* relocation processing in the kernel
*/
- intel_batchbuffer_emit_dword (batch, buffer->offset + delta);
+ intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
return GL_TRUE;
}
void
-intel_batchbuffer_data(struct intel_batchbuffer *batch,
- const void *data, GLuint bytes)
+intel_batchbuffer_data(struct intel_context *intel,
+ const void *data, GLuint bytes, bool is_blit)
{
assert((bytes & 3) == 0);
- intel_batchbuffer_require_space(batch, bytes);
- __memcpy(batch->ptr, data, bytes);
- batch->ptr += bytes;
+ intel_batchbuffer_require_space(intel, bytes, is_blit);
+ __memcpy(intel->batch.map + intel->batch.used, data, bytes);
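+ /* batch.used counts dwords, so convert bytes to dwords. */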
+ intel->batch.used += bytes >> 2;
+}
+
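+/* Compare the packet just written (from batch.emit, recorded when the
+ * packet was begun, up to batch.used) against previously cached packets
+ * with the same opcode. On a match, rewind batch.used so the duplicate
+ * is dropped and move the item to the front of the cache; otherwise
+ * remember this packet for future comparisons.
+ */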
+void
+intel_batchbuffer_cached_advance(struct intel_context *intel)
+{
+ struct cached_batch_item **prev = &intel->batch.cached_items, *item;
+ uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
+ uint32_t *start = intel->batch.map + intel->batch.emit;
+ uint16_t op = *start >> 16;
+
+ while (*prev) {
+ uint32_t *old;
+
+ item = *prev;
+ old = intel->batch.map + item->header;
+ if (op == *old >> 16) {
+ if (item->size == sz && memcmp(old, start, sz) == 0) {
+ if (prev != &intel->batch.cached_items) {
+ *prev = item->next;
+ item->next = intel->batch.cached_items;
+ intel->batch.cached_items = item;
+ }
+ intel->batch.used = intel->batch.emit;
+ return;
+ }
+
+ goto emit;
+ }
+ prev = &item->next;
+ }
+
+ item = malloc(sizeof(struct cached_batch_item));
+ if (item == NULL)
+ return;
+
+ item->next = intel->batch.cached_items;
+ intel->batch.cached_items = item;
+
+emit:
+ item->size = sz;
+ item->header = intel->batch.emit;
+}
+
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+void
+intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
+{
+ if (!intel->batch.need_workaround_flush)
+ return;
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
+ OUT_RELOC(intel->batch.workaround_bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ intel->batch.need_workaround_flush = false;
}
/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
-intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
+intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
{
- struct intel_context *intel = batch->intel;
-
if (intel->gen >= 6) {
- BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL);
- OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
- PIPE_CONTROL_WRITE_FLUSH |
- PIPE_CONTROL_NO_WRITE);
- OUT_BATCH(0); /* write address */
- OUT_BATCH(0); /* write data */
- ADVANCE_BATCH();
+ if (intel->batch.is_blit) {
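+ /* PIPE_CONTROL is a 3D-pipeline command; the BLT ring's
+ * equivalent flush is MI_FLUSH_DW.
+ */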
+ BEGIN_BATCH_BLT(4);
+ OUT_BATCH(MI_FLUSH_DW);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+ } else {
+ if (intel->gen == 6) {
+ /* Hardware workaround: SNB B-Spec says:
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache
+ * Flush Enable =1, a PIPE_CONTROL with any non-zero
+ * post-sync-op is required.
+ */
+ intel_emit_post_sync_nonzero_flush(intel);
+ }
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_NO_WRITE);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+ }
} else if (intel->gen >= 4) {
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL |