WIP 965 conversion to dri_bufmgr.
src/mesa/drivers/dri/intel/intel_batchbuffer.h
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "mtypes.h"
#include "dri_bufmgr.h"

struct intel_context;

/* Size of the batchbuffer in bytes, and the space held back at its tail
 * (excluded from intel_batchbuffer_space()) so the flush code always has
 * room for its closing commands.
 */
#define BATCH_SZ 16384
#define BATCH_RESERVED 16

/* Maximum number of relocation records per batchbuffer. */
#define MAX_RELOCS 4096

/* Whether the commands in a batch must be replayed per cliprect when the
 * batch is flushed, or can be submitted without cliprect handling.
 */
#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS    0x2
struct buffer_reloc
{
   dri_bo *buf;                 /* target buffer object */
   GLuint offset;               /* byte offset in the batch of the dword to patch */
   GLuint delta;                /* not needed? */
   GLuint validate_flags;       /* access/placement flags for validation */
};

struct intel_batchbuffer
{
   struct intel_context *intel;

   dri_bo *buf;                 /* buffer object backing the batch */
   dri_fence *last_fence;       /* fence from the most recent flush */
   GLuint flags;                /* INTEL_BATCH_*CLIPRECTS accumulated so far */

   drmBOList list;              /* validation list of referenced buffers */
   GLuint list_count;
   GLubyte *map;                /* CPU mapping of buf */
   GLubyte *ptr;                /* current write position within map */

   struct buffer_reloc reloc[MAX_RELOCS];
   GLuint nr_relocs;
   GLuint size;                 /* usable size of the batch, in bytes */
};
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
                                                  *intel);

void intel_batchbuffer_free(struct intel_batchbuffer *batch);

/* Flush the batch and wait for the hardware to finish with it. */
void intel_batchbuffer_finish(struct intel_batchbuffer *batch);

/* Submit the accumulated commands for execution. */
void intel_batchbuffer_flush(struct intel_batchbuffer *batch);

/* Prepare a fresh, empty batchbuffer. */
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
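/* A rough sketch of the expected calling sequence, pieced together from the
 * declarations in this header (the "intel" context pointer and the MI_FLUSH
 * opcode are stand-ins, not defined here):
 *
 *    struct intel_batchbuffer *batch = intel_batchbuffer_alloc(intel);
 *
 *    -- fill the batch with the inline helpers defined further down:
 *    intel_batchbuffer_require_space(batch, 4, INTEL_BATCH_NO_CLIPRECTS);
 *    intel_batchbuffer_emit_dword(batch, MI_FLUSH);
 *
 *    intel_batchbuffer_flush(batch);     -- submit to the hardware
 *    intel_batchbuffer_finish(batch);    -- or: flush and wait for completion
 *
 *    intel_batchbuffer_free(batch);
 */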
/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data, GLuint bytes, GLuint flags);
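/* For example, copying a prebuilt block of dwords into the batch (a sketch
 * only; "state" and its contents are hypothetical):
 *
 *    static const GLuint state[4] = { 0, 0, 0, 0 };
 *    intel_batchbuffer_data(batch, state, sizeof(state),
 *                           INTEL_BATCH_NO_CLIPRECTS);
 */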
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
                                     GLuint bytes);

/* Emit a pointer to 'buffer' at the current position in the batch and record
 * a relocation so it can be patched when the buffers are validated.
 */
GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                                       dri_bo *buffer,
                                       GLuint flags, GLuint offset);
/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static INLINE GLuint
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
   return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
}


static INLINE void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
   assert(batch->map);
   assert(intel_batchbuffer_space(batch) >= 4);
   *(GLuint *) (batch->ptr) = dword;
   batch->ptr += 4;
}

static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
                                GLuint sz, GLuint flags)
{
   assert(sz < batch->size - 8);
   if (intel_batchbuffer_space(batch) < sz ||
       (batch->flags != 0 && flags != 0 && batch->flags != flags))
      intel_batchbuffer_flush(batch);

   batch->flags |= flags;
}
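/* The "structs rather than dwords" idea above might look something like the
 * following (purely a sketch; this packet layout is hypothetical and not part
 * of the driver):
 *
 *    struct example_packet {
 *       GLuint header;
 *       GLuint dw1;
 *       GLuint dw2;
 *    };
 *
 *    static INLINE void
 *    emit_example_packet(struct intel_batchbuffer *batch,
 *                        const struct example_packet *p, GLuint flags)
 *    {
 *       intel_batchbuffer_data(batch, p, sizeof(*p), flags);
 *    }
 */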
/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

#define BEGIN_BATCH(n, flags) do {                                      \
   assert(!intel->prim.flush);                                          \
   intel_batchbuffer_require_space(intel->batch, (n)*4, flags);         \
} while (0)

#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)

#define OUT_RELOC(buf, flags, delta) do {                               \
   assert((delta) >= 0);                                                \
   intel_batchbuffer_emit_reloc(intel->batch, buf, flags, delta);       \
} while (0)

#define ADVANCE_BATCH() do { } while(0)
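/* Typical use of these macros elsewhere in the driver (a sketch; the command
 * dword, the buffer, and the access flags shown are placeholders chosen for
 * illustration):
 *
 *    BEGIN_BATCH(3, INTEL_BATCH_NO_CLIPRECTS);
 *    OUT_BATCH(CMD_EXAMPLE);             -- hypothetical command dword
 *    OUT_BATCH(0);
 *    OUT_RELOC(dst_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE, 0);
 *    ADVANCE_BATCH();
 */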
#endif