#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
-#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"
+static void
+intel_batchbuffer_reset(struct intel_context *intel);
+
struct cached_batch_item {
struct cached_batch_item *next;
uint16_t header;
   uint16_t size;
};

void
intel_batchbuffer_init(struct intel_context *intel)
{
intel_batchbuffer_reset(intel);
- if (intel->gen == 6) {
+ if (intel->gen >= 6) {
/* We can't just use brw_state_batch to get a chunk of space for
-       * the gen6 workaround because it involves actually writing to
+       * the pipe_control workaround because it involves actually writing to
* the buffer, and the kernel doesn't let us write to the batch.
*/
intel->batch.workaround_bo = drm_intel_bo_alloc(intel->bufmgr,
- "gen6 workaround",
+ "pipe_control workaround",
4096, 4096);
}
+
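+   /* On non-LLC hardware, CPU writes to the batch BO would be uncached and
+    * slow, so build commands in a malloc'd shadow buffer and upload it with
+    * bo_subdata at flush time.
+    */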
+ if (!intel->has_llc) {
+ intel->batch.cpu_map = malloc(intel->maxBatchSize);
+ intel->batch.map = intel->batch.cpu_map;
+ }
}
-void
+static void
intel_batchbuffer_reset(struct intel_context *intel)
{
   if (intel->batch.last_bo != NULL) {
      drm_intel_bo_unreference(intel->batch.last_bo);
      intel->batch.last_bo = NULL;
   }
   intel->batch.last_bo = intel->batch.bo;

   clear_cache(intel);

intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
intel->maxBatchSize, 4096);
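+   /* With a shared last-level cache, CPU maps of the batch BO are coherent
+    * and fast, so commands can be written directly into the mapping.
+    */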
+ if (intel->has_llc) {
+ drm_intel_bo_map(intel->batch.bo, true);
+ intel->batch.map = intel->batch.bo->virtual;
+ }
intel->batch.reserved_space = BATCH_RESERVED;
intel->batch.state_batch_offset = intel->batch.bo->size;
intel->batch.used = 0;
+ intel->batch.needs_sol_reset = false;
+}
+
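+/* Save the current amount of batch emitted, along with the relocation count
+ * on the batch BO, so intel_batchbuffer_reset_to_saved() can roll the batch
+ * back to this point (for example, when a primitive turns out not to fit).
+ */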
+void
+intel_batchbuffer_save_state(struct intel_context *intel)
+{
+ intel->batch.saved.used = intel->batch.used;
+ intel->batch.saved.reloc_count =
+ drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
+}
+
+void
+intel_batchbuffer_reset_to_saved(struct intel_context *intel)
+{
+ drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
+
+ intel->batch.used = intel->batch.saved.used;
+
+ /* Cached batch state is dead, since we just cleared some unknown part of the
+ * batchbuffer. Assume that the caller resets any other state necessary.
+ */
+ clear_cache(intel);
}
void
intel_batchbuffer_free(struct intel_context *intel)
{
+ free(intel->batch.cpu_map);
drm_intel_bo_unreference(intel->batch.last_bo);
drm_intel_bo_unreference(intel->batch.bo);
drm_intel_bo_unreference(intel->batch.workaround_bo);
clear_cache(intel);
}
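+/* Decode and print the batch for INTEL_DEBUG=batch.  Prefer the BO contents
+ * the kernel actually received; fall back to the CPU copy if mapping fails.
+ */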
+static void
+do_batch_dump(struct intel_context *intel)
+{
+ struct drm_intel_decode *decode;
+ struct intel_batchbuffer *batch = &intel->batch;
+ int ret;
+
+ decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
+ if (!decode)
+ return;
+
+ ret = drm_intel_bo_map(batch->bo, false);
+ if (ret == 0) {
+ drm_intel_decode_set_batch_pointer(decode,
+ batch->bo->virtual,
+ batch->bo->offset,
+ batch->used);
+ } else {
+ fprintf(stderr,
+ "WARNING: failed to map batchbuffer (%s), "
+              "dumping uploaded data instead.\n", strerror(-ret));
+
+ drm_intel_decode_set_batch_pointer(decode,
+ batch->map,
+ batch->bo->offset,
+ batch->used);
+ }
+
+ drm_intel_decode(decode);
+
+ drm_intel_decode_context_free(decode);
+
+ if (ret == 0) {
+ drm_intel_bo_unmap(batch->bo);
+
+ if (intel->vtbl.debug_batch != NULL)
+ intel->vtbl.debug_batch(intel);
+ }
+}
/* TODO: Push this whole function into bufmgr.
*/
-static void
+static int
do_flush_locked(struct intel_context *intel)
{
struct intel_batchbuffer *batch = &intel->batch;
int ret = 0;
- ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
- if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
- ret = drm_intel_bo_subdata(batch->bo,
- batch->state_batch_offset,
- batch->bo->size - batch->state_batch_offset,
- (char *)batch->map + batch->state_batch_offset);
+ if (intel->has_llc) {
+ drm_intel_bo_unmap(batch->bo);
+ } else {
+ ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+ if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
+ ret = drm_intel_bo_subdata(batch->bo,
+ batch->state_batch_offset,
+ batch->bo->size - batch->state_batch_offset,
+ (char *)batch->map + batch->state_batch_offset);
+ }
}
if (!intel->intelScreen->no_hw) {
- int ring;
+ int flags;
if (intel->gen < 6 || !batch->is_blit) {
- ring = I915_EXEC_RENDER;
+ flags = I915_EXEC_RENDER;
} else {
- ring = I915_EXEC_BLT;
+ flags = I915_EXEC_BLT;
}
- if (ret == 0)
- ret = drm_intel_bo_mrb_exec(batch->bo, 4*batch->used, NULL, 0, 0, ring);
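+      /* I915_EXEC_GEN7_SOL_RESET asks the kernel to zero the transform
+       * feedback (SOL) write offsets before this batch executes.
+       */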
+ if (batch->needs_sol_reset)
+ flags |= I915_EXEC_GEN7_SOL_RESET;
+
+ if (ret == 0) {
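+         /* Give the driver a chance to annotate buffers before the batch is
+          * written to an AUB dump.
+          */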
+ if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
+ intel->vtbl.annotate_aub(intel);
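+         /* Hardware contexts only exist on the render ring, so use the plain
+          * exec path for blits or when no context was created.
+          */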
+ if (intel->hw_ctx == NULL || batch->is_blit) {
+ ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
+ flags);
+ } else {
+ ret = drm_intel_gem_bo_context_exec(batch->bo, intel->hw_ctx,
+ 4 * batch->used, flags);
+ }
+ }
}
- if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
- drm_intel_bo_map(batch->bo, false);
- intel_decode(batch->bo->virtual, batch->used,
- batch->bo->offset,
- intel->intelScreen->deviceID, GL_TRUE);
- drm_intel_bo_unmap(batch->bo);
-
- if (intel->vtbl.debug_batch != NULL)
- intel->vtbl.debug_batch(intel);
- }
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
+ do_batch_dump(intel);
if (ret != 0) {
-      fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(ret));
+ fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
exit(1);
}
intel->vtbl.new_batch(intel);
+
+ return ret;
}
-void
+int
_intel_batchbuffer_flush(struct intel_context *intel,
const char *file, int line)
{
+ int ret;
+
if (intel->batch.used == 0)
- return;
+ return 0;
if (intel->first_post_swapbuffers_batch == NULL) {
      intel->first_post_swapbuffers_batch = intel->batch.bo;
      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
   }

intel->batch.reserved_space = 0;
- if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
- }
+ if (intel->vtbl.finish_batch)
+ intel->vtbl.finish_batch(intel);
/* Mark the end of the buffer. */
intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
intel_batchbuffer_emit_dword(intel, MI_NOOP);
}
- if (intel->vtbl.finish_batch)
- intel->vtbl.finish_batch(intel);
-
intel_upload_finish(intel);
/* Check that we didn't just wrap our batchbuffer at a bad time. */
assert(!intel->no_batch_wrap);
- do_flush_locked(intel);
+ ret = do_flush_locked(intel);
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
/* Reset the buffer:
*/
intel_batchbuffer_reset(intel);
+
+ return ret;
}
/* This is the only way buffers get added to the validate list.
*/
-GLboolean
+bool
intel_batchbuffer_emit_reloc(struct intel_context *intel,
drm_intel_bo *buffer,
uint32_t read_domains, uint32_t write_domain,
*/
intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
- return GL_TRUE;
+ return true;
}
-GLboolean
+bool
intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
drm_intel_bo *buffer,
uint32_t read_domains,
*/
intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
- return GL_TRUE;
+ return true;
}
void
item->header = intel->batch.emit;
}
+/**
+ * Restriction [DevSNB, DevIVB]:
+ *
+ * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
+ * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
+ * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
+ * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
+ * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
+ * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
+ * unless SW can otherwise guarantee that the pipeline from WM onwards is
+ * already flushed (e.g., via a preceding MI_FLUSH).
+ */
+void
+intel_emit_depth_stall_flushes(struct intel_context *intel)
+{
+ assert(intel->gen >= 6 && intel->gen <= 7);
+
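+   /* The three pipelined PIPE_CONTROLs below implement the required
+    * sequence: depth stall, depth cache flush, depth stall.
+    */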
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+   ADVANCE_BATCH();
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_CACHE_FLUSH);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+}
+
+/**
+ * From the BSpec, volume 2a.03: VS Stage Input / State:
+ * "[DevIVB] A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
+ * stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
+ * 3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
+ * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL needs
+ * to be sent before any combination of VS associated 3DSTATE."
+ */
+void
+gen7_emit_vs_workaround_flush(struct intel_context *intel)
+{
+ assert(intel->gen == 7);
+
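+   /* The post-sync immediate write needs a destination address; point it at
+    * the scratch workaround BO allocated in intel_batchbuffer_init().
+    */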
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_WRITE_IMMEDIATE);
+ OUT_RELOC(intel->batch.workaround_bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+}
+
/**
* Emits a PIPE_CONTROL with a non-zero post-sync operation, for
* implementing two workarounds on gen6. From section 1.4.7.1
return;
BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
OUT_BATCH(PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
   OUT_BATCH(0); /* address */
   OUT_BATCH(0); /* write data */
ADVANCE_BATCH();
BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
OUT_RELOC(intel->batch.workaround_bo,
             I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
   OUT_BATCH(0); /* write data */
   ADVANCE_BATCH();
}
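+   /* Invalidating the vertex-fetch cache and stalling the command streamer
+    * ensure the flush fully completes before later commands are parsed.
+    */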
BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_VF_CACHE_INVALIDATE |
PIPE_CONTROL_TC_FLUSH |
- PIPE_CONTROL_NO_WRITE);
+ PIPE_CONTROL_NO_WRITE |
+ PIPE_CONTROL_CS_STALL);
OUT_BATCH(0); /* write address */
OUT_BATCH(0); /* write data */
ADVANCE_BATCH();
}
} else if (intel->gen >= 4) {
BEGIN_BATCH(4);
- OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_NO_WRITE);
OUT_BATCH(0); /* write address */