#define I915_BATCH_H
#include "i915_batchbuffer.h"
+#include "i915_context.h"
#define BEGIN_BATCH(dwords) \
#define FLUSH_BATCH(fence) \
i915_flush(i915, fence)
-
/************************************************************************
* i915_flush.c
*/
void i915_flush(struct i915_context *i915, struct pipe_fence_handle **fence);
+/*
+ * Flush if the current color buf is idle and we have more than 256 vertices
+ * queued, or if the current color buf is busy and we have more than 4096
+ * vertices queued.
+ */
+static INLINE void i915_flush_heuristically(struct i915_context* i915,
+ int num_vertex)
+{
+ struct i915_winsys *iws = i915->iws;
+ i915->vertices_since_last_flush += num_vertex;
+   if (i915->vertices_since_last_flush > 4096 ||
+       (i915->vertices_since_last_flush > 256 &&
+        !iws->buffer_is_busy(iws, i915->current.cbuf_bo)))
+ FLUSH_BATCH(NULL);
+}
+
#endif
OUT_BATCH_F(desty + height);
OUT_BATCH_F(destx);
OUT_BATCH_F(desty);
+
+   /* Flush after clear; it's expected to be a costly operation.
+    * This is not required, just a heuristic.
+ */
+ FLUSH_BATCH(NULL);
}
/**
struct util_slab_mempool transfer_pool;
struct util_slab_mempool texture_transfer_pool;
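+   /** Counter for the flush heuristic in i915_flush_heuristically() */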
+ int vertices_since_last_flush;
+
/** blitter/hw-clear */
struct blitter_context* blitter;
i915->static_dirty = ~0;
/* kernel emits flushes in between batchbuffers */
i915->flush_dirty = 0;
+ i915->vertices_since_last_flush = 0;
}
for (i = 0; i < nr; i++)
emit_hw_vertex(i915, prim->v[i]);
+
+ i915_flush_heuristically(i915, nr);
}
draw_arrays_generate_indices(render, start, nr, i915_render->fallback);
+ i915_flush_heuristically(i915, nr_indices);
out:
return;
}
nr);
OUT_BATCH(start); /* Beginning vertex index */
+ i915_flush_heuristically(i915, nr);
out:
return;
}
save_nr_indices,
i915_render->fallback);
+ i915_flush_heuristically(i915, nr_indices);
out:
return;
}
void (*buffer_destroy)(struct i915_winsys *iws,
struct i915_winsys_buffer *buffer);
+
+ /**
+    * Check if a buffer is busy, i.e. still in use by the GPU.
+ */
+ boolean (*buffer_is_busy)(struct i915_winsys *iws,
+ struct i915_winsys_buffer *buffer);
/*@}*/
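(Note: every backend that implements struct i915_winsys will need to provide
this new hook, since the heuristic calls it unconditionally. For a winsys that
has no way to query GPU activity, a stub that always reports buffers as idle
should be enough; a minimal sketch, with the function name
i915_sw_buffer_is_busy invented here for illustration:

static boolean
i915_sw_buffer_is_busy(struct i915_winsys *iws,
                       struct i915_winsys_buffer *buffer)
{
   /* No GPU behind this winsys, so nothing can hold a buffer busy. */
   return FALSE;
}

Returning FALSE only means the heuristic flushes at the lower 256-vertex
threshold, which is harmless when there is no hardware to stall on.)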
FREE(buffer);
}
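+/* Ask the kernel, through drm_intel_bo_busy(), whether the GPU still has
+ * outstanding work referencing this buffer. */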
+static boolean
+i915_drm_buffer_is_busy(struct i915_winsys *iws,
+ struct i915_winsys_buffer *buffer)
+{
+ struct i915_drm_buffer* i915_buffer = i915_drm_buffer(buffer);
+ return drm_intel_bo_busy(i915_buffer->bo);
+}
+
+
void
i915_drm_winsys_init_buffer_functions(struct i915_drm_winsys *idws)
{
idws->base.buffer_unmap = i915_drm_buffer_unmap;
idws->base.buffer_write = i915_drm_buffer_write;
idws->base.buffer_destroy = i915_drm_buffer_destroy;
+ idws->base.buffer_is_busy = i915_drm_buffer_is_busy;
}