Merge commit 'origin/gallium-0.1' into gallium-0.2
[mesa.git] / src / mesa / drivers / dri / intel / intel_batchbuffer.h
1 #ifndef INTEL_BATCHBUFFER_H
2 #define INTEL_BATCHBUFFER_H
3
4 #include "main/mtypes.h"
5
6 #include "intel_context.h"
7 #include "intel_bufmgr.h"
8 #include "intel_reg.h"
9
/* Total size in bytes of one batchbuffer. */
#define BATCH_SZ 16384
/* Bytes kept free at the tail of the batch; intel_batchbuffer_space()
 * never reports them as available.  Presumably room for the flush-time
 * terminator packets -- confirm against intel_batchbuffer.c.
 */
#define BATCH_RESERVED 16
12
/**
 * How the contents of a batchbuffer relate to the drawable's cliprects.
 *
 * The enumerator order is significant: code elsewhere stores and compares
 * these values, so they must keep their positions (IGNORE_CLIPRECTS == 0).
 */
enum cliprect_mode {
   /** Contents may be looped over per cliprect, but do not require it. */
   IGNORE_CLIPRECTS,

   /**
    * Contents require looping over per cliprect at batch submit time.
    *
    * Upgraded to NO_LOOP_CLIPRECTS when there is a single constant
    * cliprect, as in DRI2 or FBO rendering.
    */
   LOOP_CLIPRECTS,

   /** Contents contain drawing that must not be executed multiple times. */
   NO_LOOP_CLIPRECTS,

   /**
    * Contents contain drawing that already handles cliprects, such as 2D
    * drawing to front/back/depth that doesn't respect DRAWING_RECTANGLE.
    *
    * Behaves like NO_LOOP_CLIPRECTS, but may not persist in the batch
    * outside of LOCK/UNLOCK.  Also upgraded to NO_LOOP_CLIPRECTS when
    * there's a constant cliprect, as in DRI2 or FBO rendering.
    */
   REFERENCES_CLIPRECTS
};
42
/**
 * State for accumulating GPU commands before submission.
 *
 * Field order is part of the struct layout; do not reorder.
 */
struct intel_batchbuffer
{
   /* Back-pointer to the owning context. */
   struct intel_context *intel;

   /* Underlying buffer object the commands are submitted from. */
   dri_bo *buf;

   /* NOTE(review): distinct from map below -- presumably a malloc'd
    * staging copy; confirm against intel_batchbuffer.c.
    */
   GLubyte *buffer;

   /* Base address commands are written to; start of the mapped range. */
   GLubyte *map;
   /* Write cursor: next dword is stored here and ptr advances by 4
    * (see intel_batchbuffer_emit_dword()).
    */
   GLubyte *ptr;

   /* Accumulated cliprect handling required by the commands emitted so
    * far; updated/merged by intel_batchbuffer_require_space().
    */
   enum cliprect_mode cliprect_mode;

   /* Total capacity in bytes (BATCH_RESERVED of which stay unused). */
   GLuint size;

   /* NOTE(review): semantics not visible in this header -- see users in
    * the .c files before relying on it.
    */
   GLuint dirty_state;
};
60
/** Allocates and initializes a new batchbuffer for \p intel.
 *  Caller owns the result and releases it with intel_batchbuffer_free().
 */
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
                                                  *intel);

/** Releases \p batch and the resources it owns. */
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
65
66
/** Submits the accumulated commands to the hardware.
 *  \p file / \p line identify the call site for debugging; use the
 *  intel_batchbuffer_flush() macro rather than calling this directly.
 */
void _intel_batchbuffer_flush(struct intel_batchbuffer *batch,
			      const char *file, int line);

/* Wrapper that stamps the caller's location into the flush.  Note that
 * when expanded inside this header's inline helpers, __FILE__/__LINE__
 * name this header, not the ultimate caller.
 */
#define intel_batchbuffer_flush(batch) \
	_intel_batchbuffer_flush(batch, __FILE__, __LINE__)

/** Resets \p batch to an empty state so new commands can be emitted. */
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
74
75
/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data, GLuint bytes,
                            enum cliprect_mode cliprect_mode);

/** Gives back \p bytes previously claimed at the tail of \p batch. */
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
                                     GLuint bytes);

/** Emits a relocation entry for \p buffer at the current batch position.
 *  \p read_domains / \p write_domain are GEM domain flags; \p offset is
 *  the delta added to the buffer's address.  Returns GL_TRUE on success.
 */
GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                                       dri_bo *buffer,
                                       uint32_t read_domains,
                                       uint32_t write_domain,
                                       uint32_t offset);
92
93 /* Inline functions - might actually be better off with these
94 * non-inlined. Certainly better off switching all command packets to
95 * be passed as structs rather than dwords, but that's a little bit of
96 * work...
97 */
98 static INLINE GLint
99 intel_batchbuffer_space(struct intel_batchbuffer *batch)
100 {
101 return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
102 }
103
104
105 static INLINE void
106 intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
107 {
108 assert(batch->map);
109 assert(intel_batchbuffer_space(batch) >= 4);
110 *(GLuint *) (batch->ptr) = dword;
111 batch->ptr += 4;
112 }
113
114 static INLINE void
115 intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
116 GLuint sz,
117 enum cliprect_mode cliprect_mode)
118 {
119 assert(sz < batch->size - 8);
120 if (intel_batchbuffer_space(batch) < sz)
121 intel_batchbuffer_flush(batch);
122
123 if ((cliprect_mode == LOOP_CLIPRECTS ||
124 cliprect_mode == REFERENCES_CLIPRECTS) &&
125 batch->intel->constant_cliprect)
126 cliprect_mode = NO_LOOP_CLIPRECTS;
127
128 if (cliprect_mode != IGNORE_CLIPRECTS) {
129 if (batch->cliprect_mode == IGNORE_CLIPRECTS) {
130 batch->cliprect_mode = cliprect_mode;
131 } else {
132 if (batch->cliprect_mode != cliprect_mode) {
133 intel_batchbuffer_flush(batch);
134 batch->cliprect_mode = cliprect_mode;
135 }
136 }
137 }
138 }
139
/* Here are the crusty old macros, to be removed:
 *
 * NOTE(review): BEGIN_BATCH/OUT_BATCH/OUT_RELOC all implicitly expand
 * against a variable named `intel` in the caller's scope.
 */
#define BATCH_LOCALS

/* Reserves room for n dwords with the given cliprect mode; must precede
 * the matching OUT_BATCH/OUT_RELOC calls.
 */
#define BEGIN_BATCH(n, cliprect_mode) do {				\
   intel_batchbuffer_require_space(intel->batch, (n)*4, cliprect_mode); \
} while (0)

/* Emits one dword into the current batch. */
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)

/* Emits a relocation for buf at the current batch position; delta must
 * be non-negative.
 */
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {		\
   assert((delta) >= 0);						\
   intel_batchbuffer_emit_reloc(intel->batch, buf,			\
				read_domains, write_domain, delta);	\
} while (0)

/* Historical bookend for BEGIN_BATCH; now a no-op. */
#define ADVANCE_BATCH() do { } while(0)
157
158
/** Emits a single MI_FLUSH command into \p batch.
 *  MI_FLUSH doesn't depend on cliprects, hence IGNORE_CLIPRECTS.
 */
static INLINE void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
   intel_batchbuffer_require_space(batch, 4, IGNORE_CLIPRECTS);
   intel_batchbuffer_emit_dword(batch, MI_FLUSH);
}
165
166 #endif