#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "intel_context.h"
#include "intel_bufmgr.h"
#include "intel_reg.h"

#define BATCH_SZ 16384
#define BATCH_RESERVED 16


struct intel_batchbuffer
{
   struct intel_context *intel;

   dri_bo *buf;

   GLubyte *buffer;

   GLubyte *map;
   GLubyte *ptr;

   GLuint size;

   /** Tracking of BEGIN_BATCH()/OUT_BATCH()/ADVANCE_BATCH() debugging */
   struct {
      GLuint total;
      GLubyte *start_ptr;
   } emit;

   GLuint dirty_state;
   GLuint reserved_space;
};

struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
                                                  *intel);

void intel_batchbuffer_free(struct intel_batchbuffer *batch);


void _intel_batchbuffer_flush(struct intel_batchbuffer *batch,
                              const char *file, int line);

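/* Flush through a macro so that debug output can report the caller's
 * file and line rather than a location inside this header.
 */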
#define intel_batchbuffer_flush(batch) \
   _intel_batchbuffer_flush(batch, __FILE__, __LINE__)

void intel_batchbuffer_reset(struct intel_batchbuffer *batch);


/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data, GLuint bytes);
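
/* A minimal usage sketch, assuming a dword-aligned payload (the data
 * itself is illustrative, not a real command packet):
 *
 *    static const GLuint payload[2] = { 0, 0 };
 *    intel_batchbuffer_data(intel->batch, payload, sizeof(payload));
 */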

void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
                                     GLuint bytes);

GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                                       dri_bo *buffer,
                                       uint32_t read_domains,
                                       uint32_t write_domain,
                                       uint32_t offset);
void intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch);

/* Inline functions - might actually be better off with these
 * non-inlined. Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static INLINE GLint
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
   return (batch->size - batch->reserved_space) - (batch->ptr - batch->map);
}


static INLINE void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
   assert(batch->map);
   assert(intel_batchbuffer_space(batch) >= 4);
   *(GLuint *) (batch->ptr) = dword;
   batch->ptr += 4;
}

static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
                                GLuint sz)
{
   assert(sz < batch->size - 8);
   if (intel_batchbuffer_space(batch) < sz)
      intel_batchbuffer_flush(batch);
}
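
/* A minimal open-coded emission sketch; real callers normally go through
 * the BEGIN_BATCH()/OUT_BATCH() macros below (MI_FLUSH here is assumed to
 * come from intel_reg.h):
 *
 *    intel_batchbuffer_require_space(batch, 4);
 *    intel_batchbuffer_emit_dword(batch, MI_FLUSH);
 */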

/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

#define BEGIN_BATCH(n) do {                                  \
   intel_batchbuffer_require_space(intel->batch, (n)*4);     \
   assert(intel->batch->emit.start_ptr == NULL);             \
   intel->batch->emit.total = (n) * 4;                       \
   intel->batch->emit.start_ptr = intel->batch->ptr;         \
} while (0)

#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)

#define OUT_RELOC(buf, read_domains, write_domain, delta) do {          \
   assert((unsigned) (delta) < buf->size);                              \
   intel_batchbuffer_emit_reloc(intel->batch, buf,                      \
                                read_domains, write_domain, delta);     \
} while (0)

#define ADVANCE_BATCH() do {                                            \
   unsigned int _n = intel->batch->ptr - intel->batch->emit.start_ptr;  \
   assert(intel->batch->emit.start_ptr != NULL);                        \
   if (_n != intel->batch->emit.total) {                                \
      fprintf(stderr, "ADVANCE_BATCH: %u of %u bytes emitted\n",        \
              _n, intel->batch->emit.total);                            \
      abort();                                                          \
   }                                                                    \
   intel->batch->emit.start_ptr = NULL;                                 \
} while (0)
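
/* A minimal usage sketch of the macros above, assuming an intel_context
 * *intel in scope; the opcode and relocation target are illustrative,
 * not real hardware state:
 *
 *    BEGIN_BATCH(2);
 *    OUT_BATCH(some_opcode);
 *    OUT_RELOC(some_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 *    ADVANCE_BATCH();
 */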

#endif /* INTEL_BATCHBUFFER_H */