#define BATCH_SZ 16384
#define BATCH_RESERVED 16
-enum cliprect_mode {
- /**
- * Batchbuffer contents may be looped over per cliprect, but do not
- * require it.
- */
- IGNORE_CLIPRECTS,
- /**
- * Batchbuffer contents require looping over per cliprect at batch submit
- * time.
- *
- * This will be upgraded to NO_LOOP_CLIPRECTS when there's a single
- * constant cliprect, as in DRI2 or FBO rendering.
- */
- LOOP_CLIPRECTS,
- /**
- * Batchbuffer contents contain drawing that should not be executed multiple
- * times.
- */
- NO_LOOP_CLIPRECTS,
- /**
- * Batchbuffer contents contain drawing that already handles cliprects, such
- * as 2D drawing to front/back/depth that doesn't respect DRAWING_RECTANGLE.
- *
- * Equivalent behavior to NO_LOOP_CLIPRECTS, but may not persist in batch
- * outside of LOCK/UNLOCK. This is upgraded to just NO_LOOP_CLIPRECTS when
- * there's a constant cliprect, as in DRI2 or FBO rendering.
- */
- REFERENCES_CLIPRECTS
-};
struct intel_batchbuffer
{
GLubyte *map;
GLubyte *ptr;
- enum cliprect_mode cliprect_mode;
-
GLuint size;
/** Tracking of BEGIN_BATCH()/OUT_BATCH()/ADVANCE_BATCH() debugging */
struct { GLuint total; GLubyte *start_ptr; } emit;
GLuint dirty_state;
+ GLuint reserved_space;
};
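/* A sketch (not part of this patch) of how the new reserved_space field
 * is presumably managed: a fresh batch re-arms the reservation so that
 * intel_batchbuffer_space() keeps a small tail free, and the flush path
 * drops it so the closing commands can use that tail.  The function
 * names and bodies below are illustrative assumptions, not driver code.
 */
static void example_arm_reservation(struct intel_batchbuffer *batch)
{
   batch->ptr = batch->map;                /* rewind to the start of the batch */
   batch->reserved_space = BATCH_RESERVED; /* hold back a 16-byte tail */
}

static void example_release_reservation(struct intel_batchbuffer *batch)
{
   batch->reserved_space = 0; /* closing commands may now use the tail */
}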
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context *intel);
 * intel_batchbuffer_emit_dword() calls.
*/
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
- const void *data, GLuint bytes,
- enum cliprect_mode cliprect_mode);
+ const void *data, GLuint bytes);
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
GLuint bytes);
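/* Example use of the simplified intel_batchbuffer_data(): raw dwords are
 * copied into the batch without the old cliprect_mode argument.  The
 * packet contents here are placeholder zeros, not real commands.
 */
static void example_copy_packet(struct intel_batchbuffer *batch)
{
   static const uint32_t packet[2] = { 0, 0 }; /* placeholder dwords */
   intel_batchbuffer_data(batch, packet, sizeof(packet));
}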
GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                                       dri_bo *buffer,
                                       uint32_t read_domains,
                                       uint32_t write_domain,
                                       uint32_t offset);
+void intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch);
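/* intel_batchbuffer_emit_mi_flush() is now declared here and defined out
 * of line.  Judging from the inline version this patch removes, the
 * out-of-line definition is presumably equivalent to:
 *
 *    void intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
 *    {
 *       intel_batchbuffer_require_space(batch, 4);
 *       intel_batchbuffer_emit_dword(batch, MI_FLUSH);
 *    }
 */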
/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little work.
 */
static INLINE GLint
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
- return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
+ return (batch->size - batch->reserved_space) - (batch->ptr - batch->map);
}
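/* Worked example: with size = BATCH_SZ (16384), reserved_space still at
 * BATCH_RESERVED (16), and 4096 bytes already written (ptr - map), this
 * returns (16384 - 16) - 4096 = 12272 bytes of usable space.
 */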
static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
- GLuint sz,
- enum cliprect_mode cliprect_mode)
+ GLuint sz)
{
assert(sz < batch->size - 8);
if (intel_batchbuffer_space(batch) < sz)
intel_batchbuffer_flush(batch);
-
- if ((cliprect_mode == LOOP_CLIPRECTS ||
- cliprect_mode == REFERENCES_CLIPRECTS) &&
- batch->intel->constant_cliprect)
- cliprect_mode = NO_LOOP_CLIPRECTS;
-
- if (cliprect_mode != IGNORE_CLIPRECTS) {
- if (batch->cliprect_mode == IGNORE_CLIPRECTS) {
- batch->cliprect_mode = cliprect_mode;
- } else {
- if (batch->cliprect_mode != cliprect_mode) {
- intel_batchbuffer_flush(batch);
- batch->cliprect_mode = cliprect_mode;
- }
- }
- }
}
/* Here are the crusty old macros, to be removed:
*/
#define BATCH_LOCALS
-#define BEGIN_BATCH(n, cliprect_mode) do { \
- intel_batchbuffer_require_space(intel->batch, (n)*4, cliprect_mode); \
+#define BEGIN_BATCH(n) do { \
+ intel_batchbuffer_require_space(intel->batch, (n)*4); \
assert(intel->batch->emit.start_ptr == NULL); \
intel->batch->emit.total = (n) * 4; \
intel->batch->emit.start_ptr = intel->batch->ptr; \
} while (0)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
- assert((unsigned) (delta) <= buf->size); \
+ assert((unsigned) (delta) < buf->size); \
intel_batchbuffer_emit_reloc(intel->batch, buf, \
read_domains, write_domain, delta); \
} while (0)
#define ADVANCE_BATCH() do { \
   assert(intel->batch->emit.start_ptr != NULL); \
   intel->batch->emit.start_ptr = NULL; \
} while (0)
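/* Example of the updated macro flow: BEGIN_BATCH() now takes only a
 * dword count.  The zero opcode, the buffer argument, and the GEM read
 * domain below are placeholders for illustration, not from this patch.
 */
static void example_emit_two_dwords(struct intel_context *intel, dri_bo *buf)
{
   BEGIN_BATCH(2);
   OUT_BATCH(0);                                 /* placeholder opcode dword */
   OUT_RELOC(buf, I915_GEM_DOMAIN_RENDER, 0, 0); /* relocation at delta 0 */
   ADVANCE_BATCH();
}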
-
-static INLINE void
-intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
-{
- intel_batchbuffer_require_space(batch, 4, IGNORE_CLIPRECTS);
- intel_batchbuffer_emit_dword(batch, MI_FLUSH);
-}
-
#endif