struct intel_context *intel = batch->intel;
if (batch->buf != NULL) {
- dri_bo_unreference(batch->buf);
+ drm_intel_bo_unreference(batch->buf);
batch->buf = NULL;
}
- if (!batch->buffer)
- batch->buffer = malloc (intel->maxBatchSize);
+ batch->buf = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
+ intel->maxBatchSize, 4096);
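+ /* Map the fresh BO through the GTT (write-combined) so commands can be
+  * assembled directly in place; this assumes the kernel supports GTT
+  * mapping, which drm_intel_gem_bo_map_gtt() requires.
+  */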
+ drm_intel_gem_bo_map_gtt(batch->buf);
+ batch->map = batch->buf->virtual;
- batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
- intel->maxBatchSize, 4096);
- if (batch->buffer)
- batch->map = batch->buffer;
- else {
- dri_bo_map(batch->buf, GL_TRUE);
- batch->map = batch->buf->virtual;
- }
batch->size = intel->maxBatchSize;
batch->ptr = batch->map;
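+ /* Hold back BATCH_RESERVED bytes at the tail so the flush path always has
+  * room for its closing flush and MI_BATCH_BUFFER_END.
+  */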
+ batch->reserved_space = BATCH_RESERVED;
batch->dirty_state = ~0;
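+ /* Indirect state is packed downward from the top of the batch BO, so the
+  * state area starts out empty at offset == size.
+  */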
+ batch->state_batch_offset = batch->size;
}
struct intel_batchbuffer *
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
- if (batch->buffer)
- free (batch->buffer);
- else {
- if (batch->map) {
- dri_bo_unmap(batch->buf);
- batch->map = NULL;
- }
+ if (batch->map) {
+ drm_intel_gem_bo_unmap_gtt(batch->buf);
+ batch->map = NULL;
}
dri_bo_unreference(batch->buf);
batch->buf = NULL;
int ret = 0;
int x_off = 0, y_off = 0;
- if (batch->buffer)
- dri_bo_subdata (batch->buf, 0, used, batch->buffer);
- else
- dri_bo_unmap(batch->buf);
+ drm_intel_gem_bo_unmap_gtt(batch->buf);
- batch->map = NULL;
batch->ptr = NULL;
- if (!intel->no_hw)
- dri_bo_exec(batch->buf, used, NULL, 0, (x_off & 0xffff) | (y_off << 16));
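+ /* Hand the batch and its accumulated relocation list to the kernel for
+  * execution, unless no_hw suppresses actual submission.
+  */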
+ if (!intel->no_hw) {
+ drm_intel_bo_exec(batch->buf, used, NULL, 0,
+ (x_off & 0xffff) | (y_off << 16));
+ }
- if (INTEL_DEBUG & DEBUG_BATCH) {
- dri_bo_map(batch->buf, GL_FALSE);
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
+ drm_intel_bo_map(batch->buf, GL_FALSE);
intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
- intel->intelScreen->deviceID);
- dri_bo_unmap(batch->buf);
+ intel->intelScreen->deviceID, GL_TRUE);
+ drm_intel_bo_unmap(batch->buf);
if (intel->vtbl.debug_batch != NULL)
intel->vtbl.debug_batch(intel);
if (used == 0)
return;
- if (INTEL_DEBUG & DEBUG_BATCH)
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
used);
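+ /* Release the reserved tail space so the closing commands emitted below
+  * can use it.
+  */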
batch->reserved_space = 0;
- /* Emit a flush if the bufmgr doesn't do it for us. */
+
if (intel->always_flush_cache) {
intel_batchbuffer_emit_mi_flush(batch);
used = batch->ptr - batch->map;
/* Check that we didn't just wrap our batchbuffer at a bad time. */
assert(!intel->no_batch_wrap);
- batch->reserved_space = BATCH_RESERVED;
-
- /* TODO: Just pass the relocation list and dma buffer up to the
- * kernel.
- */
do_flush_locked(batch, used);
- if (INTEL_DEBUG & DEBUG_SYNC) {
+ if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
- dri_bo_map(batch->buf, GL_TRUE);
- dri_bo_unmap(batch->buf);
+ drm_intel_bo_map(batch->buf, GL_TRUE);
+ drm_intel_bo_unmap(batch->buf);
}
/* Reset the buffer:
*/
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
- dri_bo *buffer,
+ drm_intel_bo *buffer,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
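+ /* Record a relocation at the current write offset in the batch; at exec
+  * time the kernel rewrites that dword with buffer's final address plus
+  * delta.
+  */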
if (batch->ptr - batch->map > batch->buf->size)
printf ("bad relocation ptr %p map %p offset %d size %lu\n",
batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);
- ret = dri_bo_emit_reloc(batch->buf, read_domains, write_domain,
- delta, batch->ptr - batch->map, buffer);
+ ret = drm_intel_bo_emit_reloc(batch->buf, batch->ptr - batch->map,
+ buffer, delta,
+ read_domains, write_domain);
/*
* Using the old buffer offset, write in what the right data would be, in case
{
struct intel_context *intel = batch->intel;
- if (intel->gen >= 4) {
+ if (intel->gen >= 6) {
+ BEGIN_BATCH(8);
+
+ /* XXX workaround: issue any post sync != 0 before write cache flush = 1;
+  * i.e. a PIPE_CONTROL with the write-cache-flush bit set must be preceded
+  * by a PIPE_CONTROL with a non-zero post-sync op, hence the dummy
+  * WRITE_IMMEDIATE below.
+  */
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_NO_WRITE);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+ } else if (intel->gen >= 4) {
BEGIN_BATCH(4);
OUT_BATCH(_3DSTATE_PIPE_CONTROL |
- PIPE_CONTROL_INSTRUCTION_FLUSH |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_NO_WRITE);
OUT_BATCH(0); /* write address */