i965: Use unreachable() instead of unconditional assert().
[mesa.git] / src / mesa / drivers / dri / i965 / intel_batchbuffer.h
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "brw_context.h"
#include "intel_bufmgr.h"
#include "intel_reg.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Number of bytes to reserve for commands necessary to complete a batch.
 *
 * This includes:
 * - MI_BATCHBUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch():
 *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 *   - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
 *   - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
 *     - Two sets of PIPE_CONTROLs, which become 3 PIPE_CONTROLs each on SNB,
 *       which are 4 DWords each ==> 2 * 3 * 4 * 4 = 96 bytes
 *     - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+.  ==> 12 bytes.
 *       On Ironlake, it's 6 DWords, but we have some slack due to the lack of
 *       Sandybridge PIPE_CONTROL madness.
 */
#define BATCH_RESERVED 146

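/* For reference, the worst-case (Sandybridge) items above sum to
 * 4 + 4 + 16 + 12 + 96 + 12 = 144 bytes; BATCH_RESERVED is sized a couple
 * of bytes above that total.
 */
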
struct intel_batchbuffer;

void intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw);
void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct brw_context *brw);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);

int _intel_batchbuffer_flush(struct brw_context *brw,
                             const char *file, int line);

#define intel_batchbuffer_flush(intel) \
        _intel_batchbuffer_flush(intel, __FILE__, __LINE__)

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);

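/* Example (a sketch, not driver code): uploading a small block of
 * pre-built DWords in one call instead of repeated OUT_BATCH()s.  The
 * `payload` array here is hypothetical.
 *
 *    uint32_t payload[4] = { MI_NOOP, MI_NOOP, MI_NOOP, MI_NOOP };
 *    intel_batchbuffer_data(brw, payload, sizeof(payload), RENDER_RING);
 */
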
bool intel_batchbuffer_emit_reloc(struct brw_context *brw,
                                  drm_intel_bo *buffer,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t offset);
bool intel_batchbuffer_emit_reloc64(struct brw_context *brw,
                                    drm_intel_bo *buffer,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t offset);
void brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags);
void brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
                                 drm_intel_bo *bo, uint32_t offset,
                                 uint32_t imm_lower, uint32_t imm_upper);
void intel_batchbuffer_emit_mi_flush(struct brw_context *brw);
void intel_emit_post_sync_nonzero_flush(struct brw_context *brw);
void intel_emit_depth_stall_flushes(struct brw_context *brw);
void gen7_emit_vs_workaround_flush(struct brw_context *brw);
void gen7_emit_cs_stall_flush(struct brw_context *brw);

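/* Example (a sketch): writing the 64-bit immediate 0x1 to offset 0 of a
 * hypothetical `query_bo` from the command streamer, assuming the
 * PIPE_CONTROL_WRITE_IMMEDIATE flag from intel_reg.h:
 *
 *    brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_IMMEDIATE,
 *                                query_bo, 0, 1, 0);
 */
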
static inline uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

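/* The union type-puns without violating strict aliasing; e.g.
 * float_as_int(1.0f) == 0x3f800000, the IEEE 754 single-precision
 * bit pattern of 1.0.
 */
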
/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static inline unsigned
intel_batchbuffer_space(struct brw_context *brw)
{
   return (brw->batch.state_batch_offset - brw->batch.reserved_space)
      - brw->batch.used * 4;
}

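/* Worked example with made-up numbers: if state data starts at byte
 * 16384, reserved_space is 146, and 100 DWords have been emitted, then
 * (16384 - 146) - 100 * 4 = 15838 bytes remain for new commands.
 */
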
static inline void
intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(brw) >= 4);
#endif
   brw->batch.map[brw->batch.used++] = dword;
   assert(brw->batch.ring != UNKNOWN_RING);
}

static inline void
intel_batchbuffer_emit_float(struct brw_context *brw, float f)
{
   intel_batchbuffer_emit_dword(brw, float_as_int(f));
}

static inline void
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                enum brw_gpu_ring ring)
{
   /* If we're switching rings, implicitly flush the batch. */
   if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
       brw->gen >= 6) {
      intel_batchbuffer_flush(brw);
   }

#ifdef DEBUG
   assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(brw) < sz)
      intel_batchbuffer_flush(brw);

   enum brw_gpu_ring prev_ring = brw->batch.ring;
   /* The intel_batchbuffer_flush() calls above might have changed
    * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
    */
   brw->batch.ring = ring;

   if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
      intel_batchbuffer_emit_render_ring_prelude(brw);
}

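/* Example of the implicit ring switch above (a sketch): on Gen6+, asking
 * for render-ring space while the BLT ring is active flushes the blit
 * batch first, and moving off UNKNOWN_RING onto the render ring emits the
 * render-ring prelude.
 *
 *    intel_batchbuffer_require_space(brw, 4 * 4, BLT_RING);
 *    ...emit blitter commands...
 *    intel_batchbuffer_require_space(brw, 8 * 4, RENDER_RING);
 */
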
static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
   intel_batchbuffer_require_space(brw, n * 4, ring);

   brw->batch.emit = brw->batch.used;
#ifdef DEBUG
   brw->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = batch->used - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#endif
}

#define BEGIN_BATCH(n) intel_batchbuffer_begin(brw, n, RENDER_RING)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(brw, n, BLT_RING)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(brw, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(brw, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {         \
   intel_batchbuffer_emit_reloc(brw, buf,                              \
                                read_domains, write_domain, delta);    \
} while (0)

/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, read_domains, write_domain, delta) do {       \
   intel_batchbuffer_emit_reloc64(brw, buf, read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(brw);

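/* Typical usage of the macros above (a sketch; the packet contents are
 * made up).  BEGIN_BATCH() reserves n DWords and records the expected
 * packet length, each OUT_*() emits one DWord, and ADVANCE_BATCH()
 * verifies the count in DEBUG builds:
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(some_3_dword_command_header);
 *    OUT_BATCH_F(1.0f);
 *    OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 *    ADVANCE_BATCH();
 */
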
#ifdef __cplusplus
}
#endif

#endif