1 #ifndef INTEL_BATCHBUFFER_H
2 #define INTEL_BATCHBUFFER_H
4 #include "main/mtypes.h"
6 #include "intel_context.h"
7 #include "intel_bufmgr.h"
10 #define BATCH_SZ 16384
11 #define BATCH_RESERVED 16
/**
 * Relationship of a batchbuffer's contents to cliprect handling at batch
 * submit time.
 *
 * NOTE(review): reconstructed from a mangled copy; enumerator names are
 * grounded in uses later in this header (IGNORE_CLIPRECTS) and in the
 * comments below (NO_LOOP_CLIPRECTS) — verify against the matching .c files.
 */
enum cliprect_mode {
   /**
    * Batchbuffer contents may be looped over per cliprect, but do not
    * require it.
    */
   IGNORE_CLIPRECTS,
   /**
    * Batchbuffer contents require looping over per cliprect at batch submit
    * time.
    */
   LOOP_CLIPRECTS,
   /**
    * Batchbuffer contents contain drawing that should not be executed multiple
    * times.
    */
   NO_LOOP_CLIPRECTS,
   /**
    * Batchbuffer contents contain drawing that already handles cliprects, such
    * as 2D drawing to front/back/depth that doesn't respect DRAWING_RECTANGLE.
    * Equivalent behavior to NO_LOOP_CLIPRECTS, but may not persist in batch
    * outside of LOCK/UNLOCK.
    */
   REFERENCES_CLIPRECTS
};
38 struct intel_batchbuffer
40 struct intel_context
*intel
;
49 enum cliprect_mode cliprect_mode
;
/** Allocates and initializes a new batchbuffer for the given context. */
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
                                                  *intel);

/** Releases the batchbuffer and its backing storage. */
void intel_batchbuffer_free(struct intel_batchbuffer *batch);

/* Submits the accumulated batch.  Use the intel_batchbuffer_flush() macro
 * below so the call site's file/line are recorded for debugging.
 */
void _intel_batchbuffer_flush(struct intel_batchbuffer *batch,
                              const char *file, int line);

#define intel_batchbuffer_flush(batch) \
        _intel_batchbuffer_flush(batch, __FILE__, __LINE__)

/** Resets the batchbuffer to an empty state for reuse. */
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
71 /* Unlike bmBufferData, this currently requires the buffer be mapped.
72 * Consider it a convenience function wrapping multple
73 * intel_buffer_dword() calls.
75 void intel_batchbuffer_data(struct intel_batchbuffer
*batch
,
76 const void *data
, GLuint bytes
,
77 enum cliprect_mode cliprect_mode
);
79 void intel_batchbuffer_release_space(struct intel_batchbuffer
*batch
,
82 GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer
*batch
,
84 uint32_t read_domains
,
85 uint32_t write_domain
,
88 /* Inline functions - might actually be better off with these
89 * non-inlined. Certainly better off switching all command packets to
90 * be passed as structs rather than dwords, but that's a little bit of
94 intel_batchbuffer_space(struct intel_batchbuffer
*batch
)
96 return (batch
->size
- BATCH_RESERVED
) - (batch
->ptr
- batch
->map
);
101 intel_batchbuffer_emit_dword(struct intel_batchbuffer
*batch
, GLuint dword
)
104 assert(intel_batchbuffer_space(batch
) >= 4);
105 *(GLuint
*) (batch
->ptr
) = dword
;
110 intel_batchbuffer_require_space(struct intel_batchbuffer
*batch
,
112 enum cliprect_mode cliprect_mode
)
114 assert(sz
< batch
->size
- 8);
115 if (intel_batchbuffer_space(batch
) < sz
)
116 intel_batchbuffer_flush(batch
);
118 if (cliprect_mode
!= IGNORE_CLIPRECTS
) {
119 if (batch
->cliprect_mode
== IGNORE_CLIPRECTS
) {
120 batch
->cliprect_mode
= cliprect_mode
;
122 if (batch
->cliprect_mode
!= cliprect_mode
) {
123 intel_batchbuffer_flush(batch
);
124 batch
->cliprect_mode
= cliprect_mode
;
/* Here are the crusty old macros, to be removed:
 */
#define BEGIN_BATCH(n, cliprect_mode) do {				\
   intel_batchbuffer_require_space(intel->batch, (n)*4, cliprect_mode); \
} while (0)

#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)

#define OUT_RELOC(buf, read_domains, write_domain, delta) do {		\
   assert((delta) >= 0);						\
   intel_batchbuffer_emit_reloc(intel->batch, buf,			\
				read_domains, write_domain, delta);	\
} while (0)

#define ADVANCE_BATCH() do { } while(0)
150 intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer
*batch
)
152 intel_batchbuffer_require_space(batch
, 4, IGNORE_CLIPRECTS
);
153 intel_batchbuffer_emit_dword(batch
, MI_FLUSH
);