Merge branch 'mesa_7_6_branch' into mesa_7_7_branch
[mesa.git] / src / mesa / drivers / dri / intel / intel_batchbuffer.h
1 #ifndef INTEL_BATCHBUFFER_H
2 #define INTEL_BATCHBUFFER_H
3
4 #include "main/mtypes.h"
5
6 #include "intel_context.h"
7 #include "intel_bufmgr.h"
8 #include "intel_reg.h"
9
10 #define BATCH_SZ 16384
11 #define BATCH_RESERVED 16
12
/**
 * How the contents of the current batchbuffer relate to DRI1 cliprects.
 *
 * Consumed by intel_batchbuffer_require_space(), which flushes the batch
 * when two incompatible modes would otherwise be mixed in one submission.
 */
enum cliprect_mode {
   /**
    * Batchbuffer contents may be looped over per cliprect, but do not
    * require it.
    */
   IGNORE_CLIPRECTS,
   /**
    * Batchbuffer contents require looping over per cliprect at batch submit
    * time.
    *
    * This will be upgraded to NO_LOOP_CLIPRECTS when there's a single
    * constant cliprect, as in DRI2 or FBO rendering.
    */
   LOOP_CLIPRECTS,
   /**
    * Batchbuffer contents contain drawing that should not be executed multiple
    * times.
    */
   NO_LOOP_CLIPRECTS,
   /**
    * Batchbuffer contents contain drawing that already handles cliprects, such
    * as 2D drawing to front/back/depth that doesn't respect DRAWING_RECTANGLE.
    *
    * Equivalent behavior to NO_LOOP_CLIPRECTS, but may not persist in batch
    * outside of LOCK/UNLOCK.  This is upgraded to just NO_LOOP_CLIPRECTS when
    * there's a constant cliprect, as in DRI2 or FBO rendering.
    */
   REFERENCES_CLIPRECTS
};
42
/**
 * Accumulates hardware commands before submission to the kernel.
 *
 * Commands are written through @ptr into the mapped region starting at
 * @map; intel_batchbuffer_space() computes remaining room from @size,
 * @reserved_space, and the @ptr - @map offset.
 */
struct intel_batchbuffer
{
   /** Context this batchbuffer belongs to. */
   struct intel_context *intel;

   /** Buffer object the accumulated commands are submitted through. */
   dri_bo *buf;

   /* NOTE(review): presumably a CPU-side staging copy used when @buf
    * cannot be mapped directly — confirm against intel_batchbuffer.c. */
   GLubyte *buffer;

   /** Start of the writable command area. */
   GLubyte *map;
   /** Current write position within @map. */
   GLubyte *ptr;

   /** Cliprect handling required by the commands emitted so far. */
   enum cliprect_mode cliprect_mode;

   /** Total size of the batch in bytes (see BATCH_SZ). */
   GLuint size;

   /** Tracking of BEGIN_BATCH()/OUT_BATCH()/ADVANCE_BATCH() debugging */
   struct {
      GLuint total;        /**< bytes the current BEGIN_BATCH() promised */
      GLubyte *start_ptr;  /**< @ptr at BEGIN_BATCH(); NULL when no packet open */
   } emit;

   GLuint dirty_state;
   /** Bytes kept free at the tail; subtracted by intel_batchbuffer_space(). */
   GLuint reserved_space;
};
67
/** Allocate and initialize a new batchbuffer for @intel. */
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
                                                  *intel);

/** Destroy a batchbuffer and release its resources. */
void intel_batchbuffer_free(struct intel_batchbuffer *batch);


/** Submit the batch; callers normally use the macro below so the
 * originating file/line is recorded for debugging.
 */
void _intel_batchbuffer_flush(struct intel_batchbuffer *batch,
			      const char *file, int line);

#define intel_batchbuffer_flush(batch) \
	_intel_batchbuffer_flush(batch, __FILE__, __LINE__)

/** Reset the batch to an empty state so emission can start over. */
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);


/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data, GLuint bytes,
			    enum cliprect_mode cliprect_mode);

void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
                                     GLuint bytes);

/** Emit a relocation entry for @buffer at the current batch position. */
GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                                       dri_bo *buffer,
				       uint32_t read_domains,
				       uint32_t write_domain,
				       uint32_t offset);
/** Emit an MI_FLUSH command into the batch. */
void intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch);
100
101 /* Inline functions - might actually be better off with these
102 * non-inlined. Certainly better off switching all command packets to
103 * be passed as structs rather than dwords, but that's a little bit of
104 * work...
105 */
106 static INLINE GLint
107 intel_batchbuffer_space(struct intel_batchbuffer *batch)
108 {
109 return (batch->size - batch->reserved_space) - (batch->ptr - batch->map);
110 }
111
112
113 static INLINE void
114 intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
115 {
116 assert(batch->map);
117 assert(intel_batchbuffer_space(batch) >= 4);
118 *(GLuint *) (batch->ptr) = dword;
119 batch->ptr += 4;
120 }
121
122 static INLINE void
123 intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
124 GLuint sz,
125 enum cliprect_mode cliprect_mode)
126 {
127 assert(sz < batch->size - 8);
128 if (intel_batchbuffer_space(batch) < sz)
129 intel_batchbuffer_flush(batch);
130
131 if ((cliprect_mode == LOOP_CLIPRECTS ||
132 cliprect_mode == REFERENCES_CLIPRECTS) &&
133 batch->intel->constant_cliprect)
134 cliprect_mode = NO_LOOP_CLIPRECTS;
135
136 if (cliprect_mode != IGNORE_CLIPRECTS) {
137 if (batch->cliprect_mode == IGNORE_CLIPRECTS) {
138 batch->cliprect_mode = cliprect_mode;
139 } else {
140 if (batch->cliprect_mode != cliprect_mode) {
141 intel_batchbuffer_flush(batch);
142 batch->cliprect_mode = cliprect_mode;
143 }
144 }
145 }
146 }
147
/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

/* Open a packet of n dwords: reserve the space and record the expected
 * byte count so ADVANCE_BATCH() can verify the packet was fully emitted.
 * Requires an `intel` context pointer in scope; packets must not nest
 * (asserts that no packet is already open).
 */
#define BEGIN_BATCH(n, cliprect_mode) do {				\
   intel_batchbuffer_require_space(intel->batch, (n)*4, cliprect_mode); \
   assert(intel->batch->emit.start_ptr == NULL);			\
   intel->batch->emit.total = (n) * 4;					\
   intel->batch->emit.start_ptr = intel->batch->ptr;			\
} while (0)
158
159 #define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
160
/* Emit a relocated buffer address into the current packet.  All macro
 * arguments are parenthesized for expansion safety; delta must lie
 * within the target buffer.
 */
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {		\
   assert((unsigned) (delta) < (buf)->size);				\
   intel_batchbuffer_emit_reloc(intel->batch, (buf),			\
				(read_domains), (write_domain), (delta)); \
} while (0)
166
/* Close the current packet: verify that exactly the number of bytes
 * promised to BEGIN_BATCH() was emitted, then clear the open-packet marker.
 *
 * Fixes vs. the previous version: the NULL assert now runs *before* the
 * pointer subtraction (arithmetic on a NULL start_ptr is undefined
 * behavior), the diagnostic reports bytes (which is what _n and
 * emit.total actually count), and %u matches the unsigned operands.
 */
#define ADVANCE_BATCH() do {						\
   unsigned int _n;							\
   assert(intel->batch->emit.start_ptr != NULL);			\
   _n = intel->batch->ptr - intel->batch->emit.start_ptr;		\
   if (_n != intel->batch->emit.total) {				\
      fprintf(stderr, "ADVANCE_BATCH: %u of %u bytes emitted\n",	\
	      _n, intel->batch->emit.total);				\
      abort();								\
   }									\
   intel->batch->emit.start_ptr = NULL;					\
} while(0)
177
178 #endif