Merge commit 'origin/gallium-0.1'
[mesa.git] / src / mesa / drivers / dri / intel / intel_batchbuffer.h
1 #ifndef INTEL_BATCHBUFFER_H
2 #define INTEL_BATCHBUFFER_H
3
4 #include "main/mtypes.h"
5
6 #include "intel_context.h"
7 #include "intel_bufmgr.h"
8 #include "intel_reg.h"
9
10 #define BATCH_SZ 16384
11 #define BATCH_RESERVED 16
12
/**
 * How the contents of the current batchbuffer relate to the drawable's
 * cliprects, which determines whether the kernel/DRI submit path must
 * replay the batch once per cliprect.
 */
enum cliprect_mode {
   /**
    * Batchbuffer contents may be looped over per cliprect, but do not
    * require it.
    */
   IGNORE_CLIPRECTS,
   /**
    * Batchbuffer contents require looping over per cliprect at batch submit
    * time.
    *
    * This will be upgraded to NO_LOOP_CLIPRECTS when there's a single
    * constant cliprect, as in DRI2 or FBO rendering.
    */
   LOOP_CLIPRECTS,
   /**
    * Batchbuffer contents contain drawing that should not be executed multiple
    * times.
    */
   NO_LOOP_CLIPRECTS,
   /**
    * Batchbuffer contents contain drawing that already handles cliprects, such
    * as 2D drawing to front/back/depth that doesn't respect DRAWING_RECTANGLE.
    *
    * Equivalent behavior to NO_LOOP_CLIPRECTS, but may not persist in batch
    * outside of LOCK/UNLOCK.  This is upgraded to just NO_LOOP_CLIPRECTS when
    * there's a constant cliprect, as in DRI2 or FBO rendering.
    */
   REFERENCES_CLIPRECTS
};
42
/**
 * A stream of hardware commands being assembled for submission.
 *
 * Dwords are appended at \c ptr; \c intel_batchbuffer_space() reports how
 * many bytes remain before the BATCH_RESERVED tail is reached.
 */
struct intel_batchbuffer
{
   /** Context this batchbuffer belongs to. */
   struct intel_context *intel;

   /** Kernel buffer object the commands are submitted through. */
   dri_bo *buf;

   /* NOTE(review): presumably a malloc'ed CPU-side copy of the batch;
    * confirm against intel_batchbuffer.c.
    */
   GLubyte *buffer;

   /** Start of the writable command area (see intel_batchbuffer_space()). */
   GLubyte *map;
   /** Current write position within \c map. */
   GLubyte *ptr;

   /** Cliprect handling required by the commands accumulated so far. */
   enum cliprect_mode cliprect_mode;

   /** Total size of the batch area in bytes. */
   GLuint size;

   /** Tracking of BEGIN_BATCH()/OUT_BATCH()/ADVANCE_BATCH() debugging */
   struct {
      /** Byte count declared by BEGIN_BATCH(); checked in ADVANCE_BATCH(). */
      GLuint total;
      /** Value of \c ptr at BEGIN_BATCH() time; NULL when no packet is open. */
      GLubyte *start_ptr;
   } emit;

   GLuint dirty_state;
};
66
/** Allocate and initialize a new batchbuffer for \p intel. */
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
                                                  *intel);

/** Release \p batch and its backing buffer object. */
void intel_batchbuffer_free(struct intel_batchbuffer *batch);


/* Submit the accumulated commands; file/line identify the call site for
 * debugging (use the intel_batchbuffer_flush() macro, not this directly).
 */
void _intel_batchbuffer_flush(struct intel_batchbuffer *batch,
			      const char *file, int line);

#define intel_batchbuffer_flush(batch) \
	_intel_batchbuffer_flush(batch, __FILE__, __LINE__)

/** Reset \p batch to an empty state so new commands can be accumulated. */
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);


/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data, GLuint bytes,
			    enum cliprect_mode cliprect_mode);

void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
                                     GLuint bytes);

/* Emit a dword that the kernel will fix up to the final GPU address of
 * \p buffer plus \p offset, recording the read/write domains for the
 * relocation.  NOTE(review): return value presumably indicates success;
 * confirm in intel_batchbuffer.c.
 */
GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                                       dri_bo *buffer,
				       uint32_t read_domains,
				       uint32_t write_domain,
				       uint32_t offset);
98
99 /* Inline functions - might actually be better off with these
100 * non-inlined. Certainly better off switching all command packets to
101 * be passed as structs rather than dwords, but that's a little bit of
102 * work...
103 */
104 static INLINE GLint
105 intel_batchbuffer_space(struct intel_batchbuffer *batch)
106 {
107 return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
108 }
109
110
111 static INLINE void
112 intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
113 {
114 assert(batch->map);
115 assert(intel_batchbuffer_space(batch) >= 4);
116 *(GLuint *) (batch->ptr) = dword;
117 batch->ptr += 4;
118 }
119
120 static INLINE void
121 intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
122 GLuint sz,
123 enum cliprect_mode cliprect_mode)
124 {
125 assert(sz < batch->size - 8);
126 if (intel_batchbuffer_space(batch) < sz)
127 intel_batchbuffer_flush(batch);
128
129 if ((cliprect_mode == LOOP_CLIPRECTS ||
130 cliprect_mode == REFERENCES_CLIPRECTS) &&
131 batch->intel->constant_cliprect)
132 cliprect_mode = NO_LOOP_CLIPRECTS;
133
134 if (cliprect_mode != IGNORE_CLIPRECTS) {
135 if (batch->cliprect_mode == IGNORE_CLIPRECTS) {
136 batch->cliprect_mode = cliprect_mode;
137 } else {
138 if (batch->cliprect_mode != cliprect_mode) {
139 intel_batchbuffer_flush(batch);
140 batch->cliprect_mode = cliprect_mode;
141 }
142 }
143 }
144 }
145
/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

/* Reserve room for an n-dword packet and remember where it starts so
 * ADVANCE_BATCH() can verify that exactly n dwords were emitted.
 */
#define BEGIN_BATCH(n, cliprect_mode) do {				\
   intel_batchbuffer_require_space(intel->batch, (n)*4, cliprect_mode); \
   assert(intel->batch->emit.start_ptr == NULL);			\
   intel->batch->emit.total = (n) * 4;					\
   intel->batch->emit.start_ptr = intel->batch->ptr;			\
} while (0)

#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)

#define OUT_RELOC(buf, read_domains, write_domain, delta) do {		\
   assert((delta) >= 0);						\
   intel_batchbuffer_emit_reloc(intel->batch, buf,			\
				read_domains, write_domain, delta);	\
} while (0)

/* Close the packet opened by BEGIN_BATCH(), aborting if the emitted
 * size doesn't match what was reserved.  The start_ptr assert runs
 * before the pointer subtraction: subtracting from a NULL start_ptr
 * would be undefined behavior.  %u matches the unsigned operands
 * (the old %d was a format/type mismatch).
 */
#define ADVANCE_BATCH() do {						\
   unsigned int _n;							\
   assert(intel->batch->emit.start_ptr != NULL);			\
   _n = intel->batch->ptr - intel->batch->emit.start_ptr;		\
   if (_n != intel->batch->emit.total) {				\
      fprintf(stderr, "ADVANCE_BATCH: %u of %u dwords emitted\n",	\
	      _n, intel->batch->emit.total);				\
      abort();								\
   }									\
   intel->batch->emit.start_ptr = NULL;					\
} while(0)
175
176
177 static INLINE void
178 intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
179 {
180 intel_batchbuffer_require_space(batch, 4, IGNORE_CLIPRECTS);
181 intel_batchbuffer_emit_dword(batch, MI_FLUSH);
182 }
183
184 #endif