static void
clear_cache(struct brw_context *brw)
{
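/* Free every entry on the cached-items list and leave it empty.  The cache
 * is only meaningful for the current batch contents, so it is dropped
 * whenever the batch is reset or freed.
 */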
- struct intel_context *intel = &brw->intel;
- struct cached_batch_item *item = intel->batch.cached_items;
+ struct cached_batch_item *item = brw->batch.cached_items;
while (item) {
struct cached_batch_item *next = item->next;
free(item);
item = next;
}
- intel->batch.cached_items = NULL;
+ brw->batch.cached_items = NULL;
}
void
intel_batchbuffer_init(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
intel_batchbuffer_reset(brw);
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
/* We can't just use brw_state_batch to get a chunk of space for
* the gen6 workaround because it involves actually writing to
* the buffer, and the kernel doesn't let us write to the batch.
*/
- intel->batch.workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
+ brw->batch.workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
"pipe_control workaround",
4096, 4096);
}
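/* Without LLC the batch is assembled in a malloc'd shadow buffer and
 * uploaded into the BO with drm_intel_bo_subdata() at flush time (see
 * do_flush_locked); with LLC the BO itself stays mapped and is written
 * directly.
 */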
- if (!intel->has_llc) {
- intel->batch.cpu_map = malloc(BATCH_SZ);
- intel->batch.map = intel->batch.cpu_map;
+ if (!brw->has_llc) {
+ brw->batch.cpu_map = malloc(BATCH_SZ);
+ brw->batch.map = brw->batch.cpu_map;
}
}
static void
intel_batchbuffer_reset(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- if (intel->batch.last_bo != NULL) {
- drm_intel_bo_unreference(intel->batch.last_bo);
- intel->batch.last_bo = NULL;
+ if (brw->batch.last_bo != NULL) {
+ drm_intel_bo_unreference(brw->batch.last_bo);
+ brw->batch.last_bo = NULL;
}
- intel->batch.last_bo = intel->batch.bo;
+ brw->batch.last_bo = brw->batch.bo;
clear_cache(brw);
- intel->batch.bo = drm_intel_bo_alloc(brw->bufmgr, "batchbuffer",
+ brw->batch.bo = drm_intel_bo_alloc(brw->bufmgr, "batchbuffer",
BATCH_SZ, 4096);
- if (intel->has_llc) {
- drm_intel_bo_map(intel->batch.bo, true);
- intel->batch.map = intel->batch.bo->virtual;
+ if (brw->has_llc) {
+ drm_intel_bo_map(brw->batch.bo, true);
+ brw->batch.map = brw->batch.bo->virtual;
}
- intel->batch.reserved_space = BATCH_RESERVED;
- intel->batch.state_batch_offset = intel->batch.bo->size;
- intel->batch.used = 0;
- intel->batch.needs_sol_reset = false;
+ brw->batch.reserved_space = BATCH_RESERVED;
+ brw->batch.state_batch_offset = brw->batch.bo->size;
+ brw->batch.used = 0;
+ brw->batch.needs_sol_reset = false;
}
void
intel_batchbuffer_save_state(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- intel->batch.saved.used = intel->batch.used;
- intel->batch.saved.reloc_count =
- drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
+ brw->batch.saved.used = brw->batch.used;
+ brw->batch.saved.reloc_count =
+ drm_intel_gem_bo_get_reloc_count(brw->batch.bo);
}
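/* Roll the batch back to the point captured by
 * intel_batchbuffer_save_state(): drop any relocations emitted since the
 * snapshot and rewind the DWord count.
 */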
void
intel_batchbuffer_reset_to_saved(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
+ drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
- intel->batch.used = intel->batch.saved.used;
+ brw->batch.used = brw->batch.saved.used;
/* Cached batch state is dead, since we just cleared some unknown part of the
* batchbuffer. Assume that the caller resets any other state necessary.
void
intel_batchbuffer_free(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- free(intel->batch.cpu_map);
- drm_intel_bo_unreference(intel->batch.last_bo);
- drm_intel_bo_unreference(intel->batch.bo);
- drm_intel_bo_unreference(intel->batch.workaround_bo);
+ free(brw->batch.cpu_map);
+ drm_intel_bo_unreference(brw->batch.last_bo);
+ drm_intel_bo_unreference(brw->batch.bo);
+ drm_intel_bo_unreference(brw->batch.workaround_bo);
clear_cache(brw);
}
static void
do_batch_dump(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
struct drm_intel_decode *decode;
- struct intel_batchbuffer *batch = &intel->batch;
+ struct intel_batchbuffer *batch = &brw->batch;
int ret;
- decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
+ decode = drm_intel_decode_context_alloc(brw->intelScreen->deviceID);
if (!decode)
return;
static int
do_flush_locked(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- struct intel_batchbuffer *batch = &intel->batch;
+ struct intel_batchbuffer *batch = &brw->batch;
int ret = 0;
- if (intel->has_llc) {
+ if (brw->has_llc) {
drm_intel_bo_unmap(batch->bo);
} else {
ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
}
}
- if (!intel->intelScreen->no_hw) {
+ if (!brw->intelScreen->no_hw) {
int flags;
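/* Pre-gen6 hardware has only the render ring; on gen6+ blit batches are
 * submitted to the dedicated BLT ring instead.
 */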
- if (intel->gen < 6 || !batch->is_blit) {
+ if (brw->gen < 6 || !batch->is_blit) {
flags = I915_EXEC_RENDER;
} else {
flags = I915_EXEC_BLT;
if (ret == 0) {
if (unlikely(INTEL_DEBUG & DEBUG_AUB))
brw_annotate_aub(brw);
- if (intel->hw_ctx == NULL || batch->is_blit) {
+ if (brw->hw_ctx == NULL || batch->is_blit) {
ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
flags);
} else {
- ret = drm_intel_gem_bo_context_exec(batch->bo, intel->hw_ctx,
+ ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
4 * batch->used, flags);
}
}
_intel_batchbuffer_flush(struct brw_context *brw,
const char *file, int line)
{
- struct intel_context *intel = &brw->intel;
int ret;
- if (intel->batch.used == 0)
+ if (brw->batch.used == 0)
return 0;
- if (intel->first_post_swapbuffers_batch == NULL) {
- intel->first_post_swapbuffers_batch = intel->batch.bo;
- drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
+ if (brw->first_post_swapbuffers_batch == NULL) {
+ brw->first_post_swapbuffers_batch = brw->batch.bo;
+ drm_intel_bo_reference(brw->first_post_swapbuffers_batch);
}
- if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
- fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
- 4*intel->batch.used);
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
+ int bytes_for_commands = 4 * brw->batch.used;
+ int bytes_for_state = brw->batch.bo->size - brw->batch.state_batch_offset;
+ int total_bytes = bytes_for_commands + bytes_for_state;
+ fprintf(stderr, "%s:%d: Batchbuffer flush with %4db (pkt) + "
+ "%4db (state) = %4db (%0.1f%%)\n", file, line,
+ bytes_for_commands, bytes_for_state,
+ total_bytes,
+ 100.0f * total_bytes / BATCH_SZ);
+ }
- intel->batch.reserved_space = 0;
+ brw->batch.reserved_space = 0;
if (brw->vtbl.finish_batch)
brw->vtbl.finish_batch(brw);
/* Mark the end of the buffer. */
intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
- if (intel->batch.used & 1) {
+ if (brw->batch.used & 1) {
/* Round batchbuffer usage to 2 DWORDs. */
intel_batchbuffer_emit_dword(brw, MI_NOOP);
}
intel_upload_finish(brw);
/* Check that we didn't just wrap our batchbuffer at a bad time. */
- assert(!intel->no_batch_wrap);
+ assert(!brw->no_batch_wrap);
ret = do_flush_locked(brw);
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
- drm_intel_bo_wait_rendering(intel->batch.bo);
+ drm_intel_bo_wait_rendering(brw->batch.bo);
}
/* Reset the buffer:
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
- struct intel_context *intel = &brw->intel;
int ret;
- ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
+ ret = drm_intel_bo_emit_reloc(brw->batch.bo, 4*brw->batch.used,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
uint32_t write_domain,
uint32_t delta)
{
- struct intel_context *intel = &brw->intel;
int ret;
- ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
+ ret = drm_intel_bo_emit_reloc_fence(brw->batch.bo, 4*brw->batch.used,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
intel_batchbuffer_data(struct brw_context *brw,
const void *data, GLuint bytes, bool is_blit)
{
- struct intel_context *intel = &brw->intel;
assert((bytes & 3) == 0);
intel_batchbuffer_require_space(brw, bytes, is_blit);
- __memcpy(intel->batch.map + intel->batch.used, data, bytes);
- intel->batch.used += bytes >> 2;
+ __memcpy(brw->batch.map + brw->batch.used, data, bytes);
+ brw->batch.used += bytes >> 2;
}
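/* Check whether the packet just written between batch.emit and batch.used
 * duplicates a previously emitted one.  Entries are matched by their
 * 16-bit opcode and exact contents; on a hit the new copy is discarded by
 * rewinding batch.used, and the matching entry is moved to the head of the
 * cached-items list.
 */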
void
intel_batchbuffer_cached_advance(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- struct cached_batch_item **prev = &intel->batch.cached_items, *item;
- uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
- uint32_t *start = intel->batch.map + intel->batch.emit;
+ struct cached_batch_item **prev = &brw->batch.cached_items, *item;
+ uint32_t sz = (brw->batch.used - brw->batch.emit) * sizeof(uint32_t);
+ uint32_t *start = brw->batch.map + brw->batch.emit;
uint16_t op = *start >> 16;
while (*prev) {
uint32_t *old;
item = *prev;
- old = intel->batch.map + item->header;
+ old = brw->batch.map + item->header;
if (op == *old >> 16) {
if (item->size == sz && memcmp(old, start, sz) == 0) {
- if (prev != &intel->batch.cached_items) {
+ if (prev != &brw->batch.cached_items) {
*prev = item->next;
- item->next = intel->batch.cached_items;
- intel->batch.cached_items = item;
+ item->next = brw->batch.cached_items;
+ brw->batch.cached_items = item;
}
- intel->batch.used = intel->batch.emit;
+ brw->batch.used = brw->batch.emit;
return;
}
if (item == NULL)
return;
- item->next = intel->batch.cached_items;
- intel->batch.cached_items = item;
+ item->next = brw->batch.cached_items;
+ brw->batch.cached_items = item;
emit:
item->size = sz;
- item->header = intel->batch.emit;
+ item->header = brw->batch.emit;
}
/**
void
intel_emit_depth_stall_flushes(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- assert(intel->gen >= 6 && intel->gen <= 7);
+ assert(brw->gen >= 6 && brw->gen <= 7);
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
}
/**
- * From the BSpec, volume 2a.03: VS Stage Input / State:
- * "[DevIVB] A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
+ * From the Ivybridge PRM, Volume 2 Part 1, Section 3.2 (VS Stage Input):
+ * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
* stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
* 3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
* 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL needs
void
gen7_emit_vs_workaround_flush(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- assert(intel->gen == 7);
+ assert(brw->gen == 7);
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_WRITE_IMMEDIATE);
- OUT_RELOC(intel->batch.workaround_bo,
+ OUT_RELOC(brw->batch.workaround_bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
OUT_BATCH(0); /* write data */
ADVANCE_BATCH();
void
intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- if (!intel->batch.need_workaround_flush)
+ if (!brw->batch.need_workaround_flush)
return;
BEGIN_BATCH(4);
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
- OUT_RELOC(intel->batch.workaround_bo,
+ OUT_RELOC(brw->batch.workaround_bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
OUT_BATCH(0); /* write data */
ADVANCE_BATCH();
- intel->batch.need_workaround_flush = false;
+ brw->batch.need_workaround_flush = false;
}
/* Emit a pipelined flush to either flush render and texture cache for
void
intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- if (intel->gen >= 6) {
- if (intel->batch.is_blit) {
+ if (brw->gen >= 6) {
+ if (brw->batch.is_blit) {
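/* MI_FLUSH_DW is the flush command for the BLT ring; the PIPE_CONTROL
 * path below is only valid on the render ring.
 */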
BEGIN_BATCH_BLT(4);
OUT_BATCH(MI_FLUSH_DW);
OUT_BATCH(0);
OUT_BATCH(0);
ADVANCE_BATCH();
} else {
- if (intel->gen == 6) {
+ if (brw->gen == 6) {
/* Hardware workaround: SNB B-Spec says:
*
* [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache