#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "brw_context.h"
#include "intel_bufmgr.h"

/**
 * Number of bytes to reserve for commands necessary to complete a batch.
 *
 * This includes:
 * - MI_BATCHBUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch():
 *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 *   - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
 *   - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
 *     - Two sets of PIPE_CONTROLs, which become 4 PIPE_CONTROLs each on SNB,
 *       which are 5 DWords each ==> 2 * 4 * 5 * 4 = 160 bytes
 *     - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+.  ==> 12 bytes.
 *       On Ironlake, it's 6 DWords, but we have some slack due to the lack of
 *       Sandybridge PIPE_CONTROL madness.
 *   - CC_STATE workaround on HSW (17 * 4 = 68 bytes)
 *     - 10 dwords for initial mi_flush
 *     - 2 dwords for CC state setup
 *     - 5 dwords for the required pipe control at the end
 *   - Restoring L3 configuration: (24 dwords = 96 bytes)
 *     - 2*6 dwords for two PIPE_CONTROL flushes.
 *     - 7 dwords for L3 configuration set-up.
 *     - 5 dwords for L3 atomic set-up (on HSW).
 */
#define BATCH_RESERVED 308
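
/* For context: intel_batchbuffer_space() below subtracts batch->reserved_space
 * (presumably initialized to BATCH_RESERVED when the batch is set up) from the
 * usable batch size, so ordinary command emission cannot eat into the room
 * needed for the epilogue commands listed above.
 */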

struct intel_batchbuffer;

void intel_batchbuffer_init(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
                            bool has_llc);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                     enum brw_gpu_ring ring);
int _intel_batchbuffer_flush_fence(struct brw_context *brw,
                                   int in_fence_fd, int *out_fence_fd,
                                   const char *file, int line);

#define intel_batchbuffer_flush(brw) \
   _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)

#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
   _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
                                  __FILE__, __LINE__)
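
/* Illustrative usage (a sketch; `in_fd` and `out_fd` are hypothetical, and the
 * negative-return-on-error convention is assumed rather than guaranteed by
 * this header):
 *
 *    intel_batchbuffer_flush(brw);                  // no fences involved
 *
 *    int out_fd = -1;
 *    int ret = intel_batchbuffer_flush_fence(brw, in_fd, &out_fd);
 *    if (ret < 0)
 *       ... handle the failed flush ...
 */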

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);
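
/* Illustrative usage (a sketch; `cmds` is a hypothetical local array of
 * already-built command dwords):
 *
 *    uint32_t cmds[4] = { ... };
 *    intel_batchbuffer_data(brw, cmds, sizeof(cmds), RENDER_RING);
 */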

uint32_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
                                 drm_intel_bo *buffer,
                                 uint32_t offset,
                                 uint32_t read_domains,
                                 uint32_t write_domain,
                                 uint32_t delta);
uint64_t intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
                                   drm_intel_bo *buffer,
                                   uint32_t offset,
                                   uint32_t read_domains,
                                   uint32_t write_domain,
                                   uint32_t delta);
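
/* Both relocation helpers return the value to write into the batch at
 * `offset`: the buffer's presumed graphics address plus `delta`, with a
 * relocation entry recorded so the kernel can patch the batch if the buffer
 * moves.  They are normally reached through the OUT_RELOC()/OUT_RELOC64()
 * macros below rather than called directly.
 */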

#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))

static inline uint32_t float_as_int(float f)
{
   union { float f; uint32_t d; } fi = { .f = f };
   return fi.d;
}

/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static inline unsigned
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
   return (batch->state_batch_offset - batch->reserved_space)
      - USED_BATCH(*batch) * 4;
}

static inline void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
   assert(intel_batchbuffer_space(batch) >= 4);
   *batch->map_next++ = dword;
   assert(batch->ring != UNKNOWN_RING);
}

static inline void
intel_batchbuffer_emit_float(struct intel_batchbuffer *batch, float f)
{
   intel_batchbuffer_emit_dword(batch, float_as_int(f));
}

static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
   intel_batchbuffer_require_space(brw, n * 4, ring);

#ifdef DEBUG
   brw->batch.emit = USED_BATCH(brw->batch);
   brw->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = USED_BATCH(*batch) - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#else
   (void) brw;
#endif
}

#define BEGIN_BATCH(n) do {                            \
   intel_batchbuffer_begin(brw, (n), RENDER_RING);     \
   uint32_t *__map = brw->batch.map_next;              \
   brw->batch.map_next += (n)

#define BEGIN_BATCH_BLT(n) do {                        \
   intel_batchbuffer_begin(brw, (n), BLT_RING);        \
   uint32_t *__map = brw->batch.map_next;              \
   brw->batch.map_next += (n)

#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))

#define OUT_RELOC(buf, read_domains, write_domain, delta) do {     \
   uint32_t __offset = (__map - brw->batch.map) * 4;               \
   OUT_BATCH(intel_batchbuffer_reloc(&brw->batch, (buf), __offset, \
                                     (read_domains),               \
                                     (write_domain),               \
                                     (delta)));                    \
} while (0)

/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, read_domains, write_domain, delta) do {              \
   uint32_t __offset = (__map - brw->batch.map) * 4;                          \
   uint64_t reloc64 = intel_batchbuffer_reloc64(&brw->batch, (buf), __offset, \
                                                (read_domains),               \
                                                (write_domain),               \
                                                (delta));                     \
   OUT_BATCH(reloc64);                                                        \
   OUT_BATCH(reloc64 >> 32);                                                  \
} while (0)

#define ADVANCE_BATCH()                  \
   assert(__map == brw->batch.map_next); \
   intel_batchbuffer_advance(brw);       \
} while (0)
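
/* Illustrative emit sequence (a sketch: the packet layout is made up, and
 * `some_opcode` and `bo` are hypothetical names, not part of this header):
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(some_opcode << 16 | (3 - 2));   // hypothetical 3-dword packet
 *    OUT_BATCH(0);
 *    OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 *    ADVANCE_BATCH();
 */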