[mesa.git] src/mesa/drivers/dri/intel/intel_batchbuffer.h
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "intel_context.h"
#include "intel_bufmgr.h"
#include "intel_reg.h"

#ifdef __cplusplus
extern "C" {
#endif

#define BATCH_RESERVED 16

struct intel_batchbuffer;

void intel_batchbuffer_init(struct intel_context *intel);
void intel_batchbuffer_reset(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);
void intel_batchbuffer_save_state(struct intel_context *intel);
void intel_batchbuffer_reset_to_saved(struct intel_context *intel);

int _intel_batchbuffer_flush(struct intel_context *intel,
                             const char *file, int line);

#define intel_batchbuffer_flush(intel) \
   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)


/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes, bool is_blit);

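/* A minimal usage sketch (not part of this header; the values are
 * placeholders):
 *
 *    uint32_t payload[2] = { 0, 0 };
 *    intel_batchbuffer_data(intel, payload, sizeof(payload), false);
 *
 * Since the batch is a stream of dwords, "bytes" is expected to be a
 * multiple of 4.
 */
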
bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
                                  drm_intel_bo *buffer,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t offset);
bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                         drm_intel_bo *buffer,
                                         uint32_t read_domains,
                                         uint32_t write_domain,
                                         uint32_t offset);
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
void intel_emit_post_sync_nonzero_flush(struct intel_context *intel);
void intel_emit_depth_stall_flushes(struct intel_context *intel);
void gen7_emit_vs_workaround_flush(struct intel_context *intel);

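/* Bit-cast a float to its 32-bit representation.  The union type-pun keeps
 * this free of pointer-aliasing issues when float values are written into
 * the dword-based batch (see OUT_BATCH_F below).
 */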
static INLINE uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
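/* Bytes still available for commands: the buffer fills with command dwords
 * from the bottom (batch.used is a dword count) and with indirect state
 * from the top (batch.state_batch_offset), with reserved_space held back so
 * the batch can always be closed out.
 */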
static INLINE unsigned
intel_batchbuffer_space(struct intel_context *intel)
{
   return (intel->batch.state_batch_offset - intel->batch.reserved_space)
      - intel->batch.used*4;
}


static INLINE void
intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(intel) >= 4);
#endif
   intel->batch.map[intel->batch.used++] = dword;
}

static INLINE void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
{
   intel_batchbuffer_emit_dword(intel, float_as_int(f));
}

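/* Make sure "sz" more bytes fit in the current batch.  On Gen6+ the render
 * and blit engines use separate rings, so switching between the two kinds
 * of commands in a non-empty batch forces a flush; the batch is likewise
 * flushed when not enough space remains.
 */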
static INLINE void
intel_batchbuffer_require_space(struct intel_context *intel,
                                GLuint sz, int is_blit)
{

   if (intel->gen >= 6 &&
       intel->batch.is_blit != is_blit && intel->batch.used) {
      intel_batchbuffer_flush(intel);
   }

   intel->batch.is_blit = is_blit;

#ifdef DEBUG
   assert(sz < sizeof(intel->batch.map) - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(intel) < sz)
      intel_batchbuffer_flush(intel);
}

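/* BEGIN/ADVANCE bracket one command packet.  intel_batchbuffer_begin()
 * records where the packet starts and, in DEBUG builds, how many dwords
 * were promised; intel_batchbuffer_advance() then aborts if the emit code
 * wrote a different number of dwords.
 */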
static INLINE void
intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
{
   intel_batchbuffer_require_space(intel, n * 4, is_blit);

   intel->batch.emit = intel->batch.used;
#ifdef DEBUG
   intel->batch.total = n;
#endif
}

static INLINE void
intel_batchbuffer_advance(struct intel_context *intel)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &intel->batch;
   unsigned int _n = batch->used - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#endif
}

void intel_batchbuffer_cached_advance(struct intel_context *intel);

/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {         \
   intel_batchbuffer_emit_reloc(intel, buf,                            \
                                read_domains, write_domain, delta);    \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {  \
   intel_batchbuffer_emit_reloc_fenced(intel, buf,                     \
                                       read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
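
/* A minimal sketch of how these macros are typically used by emit code (the
 * packet below is a placeholder, not a real command; a local "intel" pointer
 * must be in scope):
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(CMD_FOO);   // hypothetical opcode dword
 *    OUT_BATCH(0);
 *    OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 *    ADVANCE_BATCH();
 *
 * BEGIN_BATCH() reserves room for three dwords, each OUT_* writes exactly
 * one dword, and ADVANCE_BATCH() checks the count in DEBUG builds.
 */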

#ifdef __cplusplus
}
#endif

#endif