i965/miptree: Replace is_lossless_compressed with mt->aux_usage checks
[mesa.git] src/mesa/drivers/dri/i965/intel_batchbuffer.h
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "brw_context.h"
#include "brw_bufmgr.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Number of bytes to reserve for commands necessary to complete a batch.
 *
 * This includes:
 * - MI_BATCH_BUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch():
 *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 *   - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
 *   - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
 *     - Two sets of PIPE_CONTROLs, which become 4 PIPE_CONTROLs each on SNB,
 *       which are 5 DWords each ==> 2 * 4 * 5 * 4 = 160 bytes
 *     - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+.  ==> 12 bytes.
 *       On Ironlake, it's 6 DWords, but we have some slack due to the lack
 *       of Sandybridge PIPE_CONTROL madness.
 *   - CC_STATE workaround on HSW (17 * 4 = 68 bytes)
 *     - 10 dwords for initial mi_flush
 *     - 2 dwords for CC state setup
 *     - 5 dwords for the required pipe control at the end
 *   - Restoring L3 configuration: (24 dwords = 96 bytes)
 *     - 2*6 dwords for two PIPE_CONTROL flushes.
 *     - 7 dwords for L3 configuration set-up.
 *     - 5 dwords for L3 atomic set-up (on HSW).
 */
#define BATCH_RESERVED 308
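
/* A rough sketch of how the reservation is honored (the logic lives in
 * intel_batchbuffer.c): intel_batchbuffer_space() subtracts
 * batch->reserved_space from the available bytes, and when the batch is
 * flushed the implementation drops reserved_space back to 0 before emitting
 * the closing state and MI_BATCH_BUFFER_END, so those final commands are
 * guaranteed to fit.
 */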

struct intel_batchbuffer;

void intel_batchbuffer_init(struct intel_batchbuffer *batch,
                            struct brw_bufmgr *bufmgr,
                            bool has_llc);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                     enum brw_gpu_ring ring);
int _intel_batchbuffer_flush_fence(struct brw_context *brw,
                                   int in_fence_fd, int *out_fence_fd,
                                   const char *file, int line);

#define intel_batchbuffer_flush(brw) \
   _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)

#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
   _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
                                  __FILE__, __LINE__)

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);
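
/* Usage sketch (illustrative data only; the helper name is hypothetical):
 * uploading a small block of pre-packed dwords in one call rather than
 * dword-at-a-time.
 */
static inline void
intel_batchbuffer_data_example(struct brw_context *brw)
{
   const uint32_t noops[4] = { 0, 0, 0, 0 }; /* four MI_NOOPs */
   intel_batchbuffer_data(brw, noops, sizeof(noops), RENDER_RING);
}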

bool brw_batch_has_aperture_space(struct brw_context *brw,
                                  unsigned extra_space_in_bytes);

bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);

uint64_t brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
                        struct brw_bo *target, uint32_t target_offset,
                        uint32_t read_domains, uint32_t write_domain);

static inline uint32_t
brw_program_reloc(struct brw_context *brw, uint32_t state_offset,
                  uint32_t prog_offset)
{
   if (brw->gen >= 5) {
      /* Using state base address. */
      return prog_offset;
   }

   brw_emit_reloc(&brw->batch, state_offset, brw->cache.bo, prog_offset,
                  I915_GEM_DOMAIN_INSTRUCTION, 0);

   return brw->cache.bo->offset64 + prog_offset;
}
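
/* Why the Gen split above: on Gen5+, program pointers are relative to the
 * Instruction Base Address programmed via STATE_BASE_ADDRESS, so the offset
 * into the program cache BO can be returned directly.  On Gen4, the hardware
 * wants a full graphics address, so a relocation is recorded and the
 * presumed address (offset64 + prog_offset) is returned; the kernel patches
 * the batch if the BO ends up elsewhere.
 */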

#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))

static inline uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}
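
/* Type-punning through a union (rather than casting a float * to
 * uint32_t *) keeps this well-defined under GCC and avoids strict-aliasing
 * trouble; OUT_BATCH_F below relies on it to pack floats into the command
 * stream.
 */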

/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static inline unsigned
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
   return (batch->state_batch_offset - batch->reserved_space)
      - USED_BATCH(*batch) * 4;
}
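
/* The batch is filled from both ends: commands grow forward from the start
 * of the buffer (tracked in dwords by USED_BATCH), while indirect state
 * allocated with brw_state_batch() grows backward from the end
 * (state_batch_offset).  Free space is whatever remains between the two,
 * minus the BATCH_RESERVED bytes held back for closing the batch.
 */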

static inline void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(batch) >= 4);
#endif
   *batch->map_next++ = dword;
   assert(batch->ring != UNKNOWN_RING);
}

static inline void
intel_batchbuffer_emit_float(struct intel_batchbuffer *batch, float f)
{
   intel_batchbuffer_emit_dword(batch, float_as_int(f));
}

static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
   intel_batchbuffer_require_space(brw, n * 4, ring);

#ifdef DEBUG
   brw->batch.emit = USED_BATCH(brw->batch);
   brw->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = USED_BATCH(*batch) - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#else
   (void) brw;
#endif
}

#define BEGIN_BATCH(n) do {                                \
   intel_batchbuffer_begin(brw, (n), RENDER_RING);         \
   uint32_t *__map = brw->batch.map_next;                  \
   brw->batch.map_next += (n)

#define BEGIN_BATCH_BLT(n) do {                            \
   intel_batchbuffer_begin(brw, (n), BLT_RING);            \
   uint32_t *__map = brw->batch.map_next;                  \
   brw->batch.map_next += (n)

#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))

#define OUT_RELOC(buf, read_domains, write_domain, delta) do {    \
   uint32_t __offset = (__map - brw->batch.map) * 4;              \
   uint32_t reloc =                                               \
      brw_emit_reloc(&brw->batch, __offset, (buf), (delta),       \
                     (read_domains), (write_domain));             \
   OUT_BATCH(reloc);                                              \
} while (0)

/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, read_domains, write_domain, delta) do {  \
   uint32_t __offset = (__map - brw->batch.map) * 4;              \
   uint64_t reloc64 =                                             \
      brw_emit_reloc(&brw->batch, __offset, (buf), (delta),       \
                     (read_domains), (write_domain));             \
   OUT_BATCH(reloc64);                                            \
   OUT_BATCH(reloc64 >> 32);                                      \
} while (0)
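
/* Both relocation macros record, via brw_emit_reloc(), where in the batch a
 * buffer address lives so the kernel can rewrite it if the BO moves; the
 * dwords actually emitted hold the presumed address.  OUT_RELOC64 writes the
 * low 32 bits first and the high 32 bits second, matching the little-endian
 * layout of 48-bit addresses in Gen8+ command packets.
 */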

#define ADVANCE_BATCH()                          \
   assert(__map == brw->batch.map_next);         \
   intel_batchbuffer_advance(brw);               \
} while (0)
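
/* Usage sketch: a fixed-length packet is emitted by bracketing OUT_BATCH
 * calls between BEGIN_BATCH and ADVANCE_BATCH, which verify (in DEBUG
 * builds) that exactly n dwords were written.  The opcode below is just
 * MI_NOOP (0) for illustration, and the helper name is hypothetical; real
 * callers emit real commands and use OUT_RELOC/OUT_RELOC64 for dwords that
 * hold buffer addresses.
 */
static inline void
intel_batchbuffer_emit_example_packet(struct brw_context *brw)
{
   BEGIN_BATCH(2);
   OUT_BATCH(0); /* placeholder opcode dword (MI_NOOP) */
   OUT_BATCH(0); /* placeholder payload dword */
   ADVANCE_BATCH();
}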

#ifdef __cplusplus
}
#endif

#endif