      batch->buf = NULL;
   }
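+   /* With the TTM/GEM memory manager, build the batch in a malloc'd
+    * system-memory shadow buffer and upload it at flush time, rather
+    * than keeping the batchbuffer BO mapped while commands are emitted.
+    */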
+   if (!batch->buffer && intel->ttm == GL_TRUE)
+      batch->buffer = malloc(intel->maxBatchSize);
+
   batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096,
                             DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED);
-   dri_bo_map(batch->buf, GL_TRUE);
-   batch->map = batch->buf->virtual;
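+   /* Writes land in the shadow buffer when one exists; otherwise the
+    * BO is mapped and written directly, as before.
+    */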
+   if (batch->buffer)
+      batch->map = batch->buffer;
+   else {
+      dri_bo_map(batch->buf, GL_TRUE);
+      batch->map = batch->buf->virtual;
+   }
   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
-   if (batch->map) {
-      dri_bo_unmap(batch->buf);
-      batch->map = NULL;
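+   /* A shadow buffer was never mapped into the BO, so it only needs
+    * to be freed; the mapped-BO path still requires an unmap first.
+    */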
+   if (batch->buffer)
+      free(batch->buffer);
+   else {
+      if (batch->map) {
+         dri_bo_unmap(batch->buf);
+         batch->map = NULL;
+      }
   }
   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
                GLuint used, GLboolean allow_unlock)
{
   struct intel_context *intel = batch->intel;
+   int ret = 0;
-   dri_bo_unmap(batch->buf);
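+   /* With a shadow buffer, flush time is when the accumulated commands
+    * finally reach the BO, via a single subdata upload.
+    */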
+   if (batch->buffer)
+      dri_bo_subdata(batch->buf, 0, used, batch->buffer);
+   else
+      dri_bo_unmap(batch->buf);
   batch->map = NULL;
   batch->ptr = NULL;
         struct drm_i915_gem_execbuffer *execbuf;
         execbuf = dri_process_relocs(batch->buf);
-         intel_exec_ioctl(batch->intel,
-                          used,
-                          batch->cliprect_mode != LOOP_CLIPRECTS,
-                          allow_unlock,
-                          execbuf);
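+         /* Keep the ioctl's return value so a failed submission can
+          * be handled after the batch has been dispatched.
+          */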
+         ret = intel_exec_ioctl(batch->intel,
+                                used,
+                                batch->cliprect_mode != LOOP_CLIPRECTS,
+                                allow_unlock,
+                                execbuf);
      } else {
         dri_process_relocs(batch->buf);
-         intel_batch_ioctl(batch->intel,
-                           batch->buf->offset,
-                           used,
-                           batch->cliprect_mode != LOOP_CLIPRECTS,
-                           allow_unlock);
+         ret = intel_batch_ioctl(batch->intel,
+                                 batch->buf->offset,
+                                 used,
+                                 batch->cliprect_mode != LOOP_CLIPRECTS,
+                                 allow_unlock);
      }
   }
      intel->vtbl.debug_batch(intel);
   }
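+   /* A failed submission leaves the GPU and GL state out of sync with
+    * no way to recover, so drop the hardware lock and abort.
+    */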
+   if (ret != 0) {
+      UNLOCK_HARDWARE(intel);
+      exit(1);
+   }
   intel->vtbl.new_batch(intel);
}
   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);
-   /* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
-    * performance drain that we would like to avoid.
-    */
-   if (used & 4) {
-      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
-      ((int *) batch->ptr)[1] = 0;
-      ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
-      used += 12;
+
+   /* Emit a flush if the bufmgr doesn't do it for us. */
+   if (!intel->ttm) {
+      *(GLuint *) (batch->ptr) = intel->vtbl.flush_cmd();
+      batch->ptr += 4;
+      used = batch->ptr - batch->map;
   }
-   else {
-      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
-      ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
-      used += 8;
+
+   /* Pad to an even number of DWORDs so the batch, together with the
+    * MI_BATCH_BUFFER_END added below, ends on an 8-byte boundary.
+    */
+   if ((used & 4) == 0) {
+      *(GLuint *) (batch->ptr) = 0; /* noop */
+      batch->ptr += 4;
+      used = batch->ptr - batch->map;
   }
+   /* Mark the end of the buffer. */
+   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
+   batch->ptr += 4;
+   used = batch->ptr - batch->map;
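+   /* 'used' is recomputed after every store above so the byte count
+    * passed down to the hardware submission matches batch->ptr.
+    */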
+
   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush. What happens is that when we try to grab the lock for
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
-                             GLuint flags, GLuint delta)
+                             uint32_t read_domains, uint32_t write_domain,
+                             uint32_t delta)
{
   int ret;
-   ret = dri_emit_reloc(batch->buf, flags, delta, batch->ptr - batch->map, buffer);
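+   /* Sanity check: the relocation must be emitted within the bounds
+    * of the current batchbuffer.
+    */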
+   if (batch->ptr - batch->map > batch->buf->size)
+      _mesa_printf("bad relocation ptr %p map %p offset %td size %lu\n",
+                   batch->ptr, batch->map,
+                   batch->ptr - batch->map, batch->buf->size);
+   ret = dri_emit_reloc(batch->buf, read_domains, write_domain,
+                        delta, batch->ptr - batch->map, buffer);
   /*
    * Using the old buffer offset, write in what the right data would be, in case