   brw->batch.state_batch_offset = brw->batch.bo->size;
   brw->batch.used = 0;
   brw->batch.needs_sol_reset = false;
+
+   /* We don't know what ring the new batch will be sent to until we see the
+    * first BEGIN_BATCH or BEGIN_BATCH_BLT. Mark it as unknown.
+    */
+   brw->batch.ring = UNKNOWN_RING;
}
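For reference, UNKNOWN_RING joins the driver's ring enumeration; a minimal sketch of that enum, assuming the three values this patch distinguishes (the authoritative definition lives in the i965 headers):

enum brw_gpu_ring {
   UNKNOWN_RING,  /* no command emitted yet; batch not committed to a ring */
   RENDER_RING,   /* 3D/render command streamer */
   BLT_RING,      /* blitter command streamer */
};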
void
intel_batchbuffer_reset_to_saved(struct brw_context *brw)
{
   drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);

   brw->batch.used = brw->batch.saved.used;
+   if (brw->batch.used == 0)
+      brw->batch.ring = UNKNOWN_RING;
   /* Cached batch state is dead, since we just cleared some unknown part of the
    * batchbuffer. Assume that the caller resets any other state necessary.
    */
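intel_batchbuffer_reset_to_saved() rewinds to a point recorded by its save counterpart; a rough sketch of that counterpart, assuming the saved.used and saved.reloc_count fields referenced above:

void
intel_batchbuffer_save_state(struct brw_context *brw)
{
   /* Remember the current fill level and relocation count so
    * intel_batchbuffer_reset_to_saved() can rewind to this point.
    */
   brw->batch.saved.used = brw->batch.used;
   brw->batch.saved.reloc_count =
      drm_intel_gem_bo_get_reloc_count(brw->batch.bo);
}

The next hunk is in intel_batchbuffer_cached_advance(), which drops a just-built state packet when it matches a cached copy: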
            brw->batch.cached_items = item;
         }
         brw->batch.used = brw->batch.emit;
+         assert(brw->batch.used > 0);
         return;
      }
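The new assert documents an invariant rather than changing behavior: rewinding brw->batch.used to brw->batch.emit must never empty the batch, since an empty batch would leave brw->batch.ring stale (the case intel_batchbuffer_reset_to_saved() handles explicitly above). A sketch of the match path, under that assumption:

/* Hypothetical shape of the match path in intel_batchbuffer_cached_advance():
 * the packet just written is dropped because an identical cached copy
 * already sits in the batch.
 */
if (item->size == sz && memcmp(old, start, sz) == 0) {
   brw->batch.used = brw->batch.emit;  /* rewind past the duplicate packet */
   assert(brw->batch.used > 0);        /* the rewind must not empty the batch */
   return;
}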
static INLINE void
intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(brw) >= 4);
#endif
   brw->batch.map[brw->batch.used++] = dword;
+   assert(brw->batch.ring != UNKNOWN_RING);
}
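The new assert in intel_batchbuffer_emit_dword() holds because every packet starts with one of the BEGIN macros named in the comment above, and those pick the ring before the first dword is written; roughly (a sketch, the real macros live in intel_batchbuffer.h):

#define BEGIN_BATCH(n)     intel_batchbuffer_begin(brw, (n), RENDER_RING)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(brw, (n), BLT_RING)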
static INLINE void
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                enum brw_gpu_ring ring)
{
   /* If we're switching rings, implicitly flush the batch. */
-   if (unlikely(ring != brw->batch.ring) && brw->batch.used && brw->gen >= 6) {
+   if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
+       brw->gen >= 6) {
      intel_batchbuffer_flush(brw);
   }
-   brw->batch.ring = ring;
-
#ifdef DEBUG
   assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(brw) < sz)
      intel_batchbuffer_flush(brw);
+
+   /* The intel_batchbuffer_flush() calls above might have changed
+    * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
+    */
+   brw->batch.ring = ring;
}
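Both BEGIN macros funnel into intel_batchbuffer_begin(), which calls the function above; a minimal sketch (the real version also records per-packet bookkeeping under DEBUG for ADVANCE_BATCH checking):

static INLINE void
intel_batchbuffer_begin(struct brw_context *brw, int n,
                        enum brw_gpu_ring ring)
{
   /* Reserve n dwords (flushing if needed) and commit the batch to a
    * ring before any dword of the new packet is emitted.
    */
   intel_batchbuffer_require_space(brw, n * 4, ring);
}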
static INLINE void