#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "brw_context.h"
#include "intel_bufmgr.h"
#include "intel_reg.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Number of bytes to reserve for commands necessary to complete a batch.
 *
 * This includes:
 * - MI_BATCHBUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch():
 *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 *   - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
 *   - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
 *     - Two sets of PIPE_CONTROLs, which become 3 PIPE_CONTROLs each on SNB,
 *       which are 5 DWords each ==> 2 * 3 * 5 * 4 = 120 bytes
 *     - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+.  ==> 12 bytes.
 *       On Ironlake, it's 6 DWords, but we have some slack due to the lack of
 *       Sandybridge PIPE_CONTROL madness.
 *   - CC_STATE workaround on HSW (12 * 4 = 48 bytes)
 *     - 5 dwords for initial mi_flush
 *     - 2 dwords for CC state setup
 *     - 5 dwords for the required pipe control at the end
 */
#define BATCH_RESERVED 152

struct intel_batchbuffer;

void intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw);
void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct brw_context *brw);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);

int _intel_batchbuffer_flush(struct brw_context *brw,
                             const char *file, int line);

#define intel_batchbuffer_flush(intel) \
   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)


/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);
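
/* Usage sketch (illustrative, not taken from the driver): copy a small block
 * of pre-built dwords into the batch in one call.  The payload array below is
 * a hypothetical example.
 *
 *    uint32_t payload[2] = { 0, 0 };
 *    intel_batchbuffer_data(brw, payload, sizeof(payload), RENDER_RING);
 */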

uint32_t intel_batchbuffer_reloc(struct brw_context *brw,
                                 drm_intel_bo *buffer,
                                 uint32_t offset,
                                 uint32_t read_domains,
                                 uint32_t write_domain,
                                 uint32_t delta);
uint64_t intel_batchbuffer_reloc64(struct brw_context *brw,
                                   drm_intel_bo *buffer,
                                   uint32_t offset,
                                   uint32_t read_domains,
                                   uint32_t write_domain,
                                   uint32_t delta);

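/* Number of dwords emitted to the batch so far.  map and map_next are
 * uint32_t pointers, so the pointer difference is in dwords, not bytes.
 */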
#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))

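/* Reinterpret a float's bit pattern as a uint32_t so it can be emitted as a
 * batch dword.
 */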
static inline uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
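/* Bytes of batch space still available for new commands: everything between
 * the current write pointer and the start of the indirect state area at the
 * top of the buffer, minus the reserved space described above.
 */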
static inline unsigned
intel_batchbuffer_space(struct brw_context *brw)
{
   return (brw->batch.state_batch_offset - brw->batch.reserved_space)
      - USED_BATCH(brw->batch) * 4;
}


static inline void
intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(brw) >= 4);
#endif
   *brw->batch.map_next++ = dword;
   assert(brw->batch.ring != UNKNOWN_RING);
}

static inline void
intel_batchbuffer_emit_float(struct brw_context *brw, float f)
{
   intel_batchbuffer_emit_dword(brw, float_as_int(f));
}

static inline void
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                enum brw_gpu_ring ring)
{
   /* If we're switching rings, implicitly flush the batch. */
   if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
       brw->gen >= 6) {
      intel_batchbuffer_flush(brw);
   }

#ifdef DEBUG
   assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(brw) < sz)
      intel_batchbuffer_flush(brw);

   enum brw_gpu_ring prev_ring = brw->batch.ring;
   /* The intel_batchbuffer_flush() calls above might have changed
    * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
    */
   brw->batch.ring = ring;

   if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
      intel_batchbuffer_emit_render_ring_prelude(brw);
}

static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
   intel_batchbuffer_require_space(brw, n * 4, ring);

#ifdef DEBUG
   brw->batch.emit = USED_BATCH(brw->batch);
   brw->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = USED_BATCH(*batch) - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#else
   (void) brw;
#endif
}

#define BEGIN_BATCH(n) do {                                \
   intel_batchbuffer_begin(brw, (n), RENDER_RING);         \
   uint32_t *__map = brw->batch.map_next;                  \
   brw->batch.map_next += (n)

#define BEGIN_BATCH_BLT(n) do {                            \
   intel_batchbuffer_begin(brw, (n), BLT_RING);            \
   uint32_t *__map = brw->batch.map_next;                  \
   brw->batch.map_next += (n)

#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))

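/* Emit a relocation: write the buffer object's presumed graphics address plus
 * delta into the batch, and record the relocation so the kernel can patch the
 * dword if the buffer ends up elsewhere.
 */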
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {          \
   uint32_t __offset = (__map - brw->batch.map) * 4;                    \
   OUT_BATCH(intel_batchbuffer_reloc(brw, (buf), __offset,              \
                                     (read_domains),                    \
                                     (write_domain),                    \
                                     (delta)));                         \
} while (0)

/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, read_domains, write_domain, delta) do {        \
   uint32_t __offset = (__map - brw->batch.map) * 4;                    \
   uint64_t reloc64 = intel_batchbuffer_reloc64(brw, (buf), __offset,   \
                                                (read_domains),         \
                                                (write_domain),         \
                                                (delta));               \
   OUT_BATCH(reloc64);                                                  \
   OUT_BATCH(reloc64 >> 32);                                            \
} while (0)

#define ADVANCE_BATCH()                          \
   assert(__map == brw->batch.map_next);         \
   intel_batchbuffer_advance(brw);               \
} while (0)
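
/* Usage sketch (illustrative only; the opcode and buffer object below are
 * placeholders, not real driver state): a caller brackets a fixed-length
 * packet with BEGIN_BATCH()/ADVANCE_BATCH() and fills it with OUT_BATCH()
 * and OUT_RELOC().  The dword count passed to BEGIN_BATCH() must match the
 * number of dwords actually emitted, which ADVANCE_BATCH() checks in DEBUG
 * builds.
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(HYPOTHETICAL_CMD << 16 | (3 - 2));
 *    OUT_BATCH(0);
 *    OUT_RELOC(some_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 *    ADVANCE_BATCH();
 */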

#ifdef __cplusplus
}
#endif

#endif