1 #ifndef INTEL_BATCHBUFFER_H
2 #define INTEL_BATCHBUFFER_H
4 #include "main/mtypes.h"
6 #include "intel_context.h"
7 #include "intel_bufmgr.h"
10 #define BATCH_SZ 16384
11 #define BATCH_RESERVED 16
/**
 * Modes for how batchbuffer contents interact with cliprects at submit time.
 *
 * NOTE(review): the enum declaration and enumerator lines were lost in the
 * extracted source; the set below is reconstructed from the uses in
 * intel_batchbuffer_require_space() and intel_batchbuffer_emit_mi_flush() —
 * confirm enumerator order against the original header before relying on
 * their numeric values.
 */
enum cliprect_mode {
   /**
    * Batchbuffer contents may be looped over per cliprect, but do not
    * require it.
    */
   LOOP_CLIPRECTS,
   /**
    * Batchbuffer contents require looping over per cliprect at batch submit
    * time.
    *
    * This will be upgraded to NO_LOOP_CLIPRECTS when there's a single
    * constant cliprect, as in DRI2 or FBO rendering.
    */
   REFERENCES_CLIPRECTS,
   /**
    * Batchbuffer contents contain drawing that should not be executed multiple
    * times.
    */
   NO_LOOP_CLIPRECTS,
   /**
    * Batchbuffer contents contain drawing that already handles cliprects, such
    * as 2D drawing to front/back/depth that doesn't respect DRAWING_RECTANGLE.
    *
    * Equivalent behavior to NO_LOOP_CLIPRECTS, but may not persist in batch
    * outside of LOCK/UNLOCK.  This is upgraded to just NO_LOOP_CLIPRECTS when
    * there's a constant cliprect, as in DRI2 or FBO rendering.
    */
   IGNORE_CLIPRECTS
};
43 struct intel_batchbuffer
45 struct intel_context
*intel
;
54 enum cliprect_mode cliprect_mode
;
/** Allocates a new batchbuffer for \p intel; release it with
 *  intel_batchbuffer_free(). */
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context *intel);
/** Frees \p batch and the resources it owns. */
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
/** Submits the accumulated batch contents to the hardware.  Call through the
 *  intel_batchbuffer_flush() macro so the call site's file/line are recorded
 *  for debugging. */
void _intel_batchbuffer_flush(struct intel_batchbuffer *batch,
                              const char *file, int line);

#define intel_batchbuffer_flush(batch) \
	_intel_batchbuffer_flush(batch, __FILE__, __LINE__)
/** Resets \p batch to an empty state, ready to accept new commands. */
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
/** Copies \p bytes bytes from \p data into the batch under the given
 *  cliprect mode. */
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data, GLuint bytes,
                            enum cliprect_mode cliprect_mode);
84 void intel_batchbuffer_release_space(struct intel_batchbuffer
*batch
,
87 GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer
*batch
,
89 uint32_t read_domains
,
90 uint32_t write_domain
,
/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
99 intel_batchbuffer_space(struct intel_batchbuffer
*batch
)
101 return (batch
->size
- BATCH_RESERVED
) - (batch
->ptr
- batch
->map
);
106 intel_batchbuffer_emit_dword(struct intel_batchbuffer
*batch
, GLuint dword
)
109 assert(intel_batchbuffer_space(batch
) >= 4);
110 *(GLuint
*) (batch
->ptr
) = dword
;
115 intel_batchbuffer_require_space(struct intel_batchbuffer
*batch
,
117 enum cliprect_mode cliprect_mode
)
119 assert(sz
< batch
->size
- 8);
120 if (intel_batchbuffer_space(batch
) < sz
)
121 intel_batchbuffer_flush(batch
);
123 if ((cliprect_mode
== LOOP_CLIPRECTS
||
124 cliprect_mode
== REFERENCES_CLIPRECTS
) &&
125 batch
->intel
->constant_cliprect
)
126 cliprect_mode
= NO_LOOP_CLIPRECTS
;
128 if (cliprect_mode
!= IGNORE_CLIPRECTS
) {
129 if (batch
->cliprect_mode
== IGNORE_CLIPRECTS
) {
130 batch
->cliprect_mode
= cliprect_mode
;
132 if (batch
->cliprect_mode
!= cliprect_mode
) {
133 intel_batchbuffer_flush(batch
);
134 batch
->cliprect_mode
= cliprect_mode
;
/* Here are the crusty old macros, to be removed:
 */
/* Reserves space for n dwords under the given cliprect mode; pair with
 * OUT_BATCH()/ADVANCE_BATCH().  Expects a local `intel` context pointer. */
#define BEGIN_BATCH(n, cliprect_mode) do {				\
   intel_batchbuffer_require_space(intel->batch, (n)*4, cliprect_mode);	\
} while (0)
148 #define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
/* Emits a relocation to buf at the current batch position.  Expects a local
 * `intel` pointer.  The do/while(0) tail was dropped in the extracted source
 * and is restored here. */
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {		\
   assert((delta) >= 0);						\
   intel_batchbuffer_emit_reloc(intel->batch, buf,			\
				read_domains, write_domain, delta);	\
} while (0)
156 #define ADVANCE_BATCH() do { } while(0)
160 intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer
*batch
)
162 intel_batchbuffer_require_space(batch
, 4, IGNORE_CLIPRECTS
);
163 intel_batchbuffer_emit_dword(batch
, MI_FLUSH
);