Avoid failing assertion in intel_miptree_set_image_offset() with cube maps.
[mesa.git] / src / mesa / drivers / dri / i915tex / intel_batchbuffer.h
1 #ifndef INTEL_BATCHBUFFER_H
2 #define INTEL_BATCHBUFFER_H
3
4 #include "mtypes.h"
5 #include "dri_bufmgr.h"
6
7 struct intel_context;
8
/* Size of each batchbuffer allocation, and the number of bytes held back
 * at the end of the buffer (intel_batchbuffer_space() subtracts
 * BATCH_RESERVED from the usable size).
 */
#define BATCH_SZ 16384
#define BATCH_RESERVED 16

/* Capacity of the fixed reloc[] array in struct intel_batchbuffer. */
#define MAX_RELOCS 100

/* Per-batch flag bits.  Batches with incompatible flags cannot be mixed:
 * intel_batchbuffer_require_space() flushes when the pending flags and the
 * requested flags are both nonzero and differ.
 */
#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS    0x2
16
/* One recorded relocation: a buffer whose final GPU address must be
 * patched into the batch once it is known at flush time.
 */
struct buffer_reloc
{
   struct _DriBufferObject *buf;  /* buffer the batch entry refers to */
   GLuint offset;                 /* presumably the byte offset within the
                                   * batch to patch — TODO confirm against
                                   * intel_batchbuffer_emit_reloc() */
   GLuint delta;                  /* not needed? */
};
23
/* A growing stream of hardware commands plus the bookkeeping needed to
 * submit it: backing buffer object, CPU mapping, write cursor, and the
 * relocations accumulated so far.
 */
struct intel_batchbuffer
{
   struct bufmgr *bm;                   /* buffer manager that owns `buffer` */
   struct intel_context *intel;         /* owning context */

   struct _DriBufferObject *buffer;     /* backing DRI buffer object */
   struct _DriFenceObject *last_fence;  /* fence from the most recent flush */
   GLuint flags;                        /* OR of INTEL_BATCH_* for pending commands */

   drmBOList list;                      /* NOTE(review): presumably the validate
                                         * list for submission — confirm in
                                         * intel_batchbuffer.c */
   GLuint list_count;
   GLubyte *map;                        /* CPU mapping of `buffer` (start) */
   GLubyte *ptr;                        /* current write position within `map` */

   struct buffer_reloc reloc[MAX_RELOCS];  /* relocations recorded so far */
   GLuint nr_relocs;                       /* valid entries in reloc[] */
   GLuint size;                            /* total size of the mapping, bytes */
};
42
/* Allocate a new batchbuffer for `intel`.  Caller frees with
 * intel_batchbuffer_free().
 */
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
                                                  *intel);

void intel_batchbuffer_free(struct intel_batchbuffer *batch);


/* Flush the batch and wait for completion — TODO confirm wait semantics
 * against the definition in intel_batchbuffer.c.
 */
void intel_batchbuffer_finish(struct intel_batchbuffer *batch);

/* Submit the accumulated commands; returns a fence the caller may wait on. */
struct _DriFenceObject *intel_batchbuffer_flush(struct intel_batchbuffer
                                                *batch);

/* Reset the batch to empty so new commands can be accumulated. */
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);


/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data, GLuint bytes, GLuint flags);

void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
                                     GLuint bytes);

/* Record a relocation for `buffer` at the current batch position.
 * Returns GL_TRUE on success — presumably GL_FALSE when MAX_RELOCS is
 * exceeded; confirm in the implementation.
 */
GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                                       struct _DriBufferObject *buffer,
                                       GLuint flags,
                                       GLuint mask, GLuint offset);
71
72 /* Inline functions - might actually be better off with these
73 * non-inlined. Certainly better off switching all command packets to
74 * be passed as structs rather than dwords, but that's a little bit of
75 * work...
76 */
77 static INLINE GLuint
78 intel_batchbuffer_space(struct intel_batchbuffer *batch)
79 {
80 return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
81 }
82
83
84 static INLINE void
85 intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
86 {
87 assert(batch->map);
88 assert(intel_batchbuffer_space(batch) >= 4);
89 *(GLuint *) (batch->ptr) = dword;
90 batch->ptr += 4;
91 }
92
93 static INLINE void
94 intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
95 GLuint sz, GLuint flags)
96 {
97 assert(sz < batch->size - 8);
98 if (intel_batchbuffer_space(batch) < sz ||
99 (batch->flags != 0 && flags != 0 && batch->flags != flags))
100 intel_batchbuffer_flush(batch);
101
102 batch->flags |= flags;
103 }
104
/* Here are the crusty old macros, to be removed:
 * All of them expect a local `intel` (struct intel_context *) in scope.
 */
#define BATCH_LOCALS

/* Reserve room for `n` dwords with the given INTEL_BATCH_* flags.
 * Asserts no primitive flush is pending before starting a packet.
 */
#define BEGIN_BATCH(n, flags) do {				\
   assert(!intel->prim.flush);					\
   intel_batchbuffer_require_space(intel->batch, (n)*4, flags);	\
} while (0)

/* Emit one command dword. */
#define OUT_BATCH(d)  intel_batchbuffer_emit_dword(intel->batch, d)

/* Emit a relocated buffer reference at the current batch position. */
#define OUT_RELOC(buf,flags,mask,delta) do { 				\
   assert((delta) >= 0);						\
   intel_batchbuffer_emit_reloc(intel->batch, buf, flags, mask, delta);	\
} while (0)

/* No-op: kept so BEGIN_BATCH/ADVANCE_BATCH pairs read symmetrically. */
#define ADVANCE_BATCH() do { } while(0)
122
123
124 #endif