#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "brw_context.h"
#include "brw_bufmgr.h"

struct intel_batchbuffer;

void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                     enum brw_gpu_ring ring);
int _intel_batchbuffer_flush_fence(struct brw_context *brw,
                                   int in_fence_fd, int *out_fence_fd,
                                   const char *file, int line);

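/* Convenience wrappers around _intel_batchbuffer_flush_fence() that pass the
 * caller's __FILE__/__LINE__, so a flush can be attributed to its callsite.
 */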
#define intel_batchbuffer_flush(brw) \
   _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)

#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
   _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
                                  __FILE__, __LINE__)

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);

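/* Example (a sketch; the payload contents are illustrative only):
 *
 *    const uint32_t payload[2] = { 0, 0 };
 *    intel_batchbuffer_data(brw, payload, sizeof(payload), RENDER_RING);
 */
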
bool brw_batch_has_aperture_space(struct brw_context *brw,
                                  unsigned extra_space_in_bytes);

bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);

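/* The relocation flags are aliases for the kernel's execbuf object flags.
 * Both reloc helpers record a relocation at batch_offset and return the
 * address value the caller writes into the buffer at that offset (see
 * OUT_RELOC/OUT_RELOC64 below); brw_batch_reloc() targets the command
 * buffer, brw_state_reloc() the statebuffer.
 */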
#define RELOC_WRITE EXEC_OBJECT_WRITE
#define RELOC_NEEDS_GGTT EXEC_OBJECT_NEEDS_GTT
uint64_t brw_batch_reloc(struct intel_batchbuffer *batch,
                         uint32_t batch_offset,
                         struct brw_bo *target,
                         uint32_t target_offset,
                         unsigned flags);
uint64_t brw_state_reloc(struct intel_batchbuffer *batch,
                         uint32_t batch_offset,
                         struct brw_bo *target,
                         uint32_t target_offset,
                         unsigned flags);

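/* Number of dwords emitted so far: map and map_next point at uint32_t, so
 * the pointer difference counts 4-byte dwords, not bytes.
 */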
#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))

static inline uint32_t float_as_int(float f)
{
   union { float f; uint32_t d; } fi;

   fi.f = f;
   return fi.d;
}

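/* Appends one dword at map_next; the caller must already have reserved
 * space (via intel_batchbuffer_require_space() or BEGIN_BATCH()), since no
 * bounds check is performed here.
 */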
static inline void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
   *batch->map_next++ = dword;
   assert(batch->ring != UNKNOWN_RING);
}

static inline void
intel_batchbuffer_emit_float(struct intel_batchbuffer *batch, float f)
{
   intel_batchbuffer_emit_dword(batch, float_as_int(f));
}

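/* Reserves space for n dwords and, in DEBUG builds, records how many were
 * requested so intel_batchbuffer_advance() can check that exactly n dwords
 * were emitted.
 */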
static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
   intel_batchbuffer_require_space(brw, n * 4, ring);

#ifdef DEBUG
   brw->batch.emit = USED_BATCH(brw->batch);
   brw->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = USED_BATCH(*batch) - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#else
   (void) brw;
#endif
}

static inline bool
brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
{
   return (char *) p >= (char *) batch->state_map &&
          (char *) p < (char *) batch->state_map + batch->state_bo->size;
}

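/* Typical emission pattern (a sketch; the opcode and payload dwords are
 * made up for illustration, not a real hardware packet):
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(0x7a000001);   // command header (illustrative)
 *    OUT_BATCH(0);            // payload dword 1
 *    OUT_BATCH(0);            // payload dword 2
 *    ADVANCE_BATCH();
 *
 * BEGIN_BATCH() opens a do { ... } block that ADVANCE_BATCH() closes, so
 * the two must be paired within the same scope.
 */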
#define BEGIN_BATCH(n) do {                            \
   intel_batchbuffer_begin(brw, (n), RENDER_RING);     \
   uint32_t *__map = brw->batch.map_next;              \
   brw->batch.map_next += (n)

#define BEGIN_BATCH_BLT(n) do {                        \
   intel_batchbuffer_begin(brw, (n), BLT_RING);        \
   uint32_t *__map = brw->batch.map_next;              \
   brw->batch.map_next += (n)

#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))

#define OUT_RELOC(buf, flags, delta) do {              \
   uint32_t __offset = (__map - brw->batch.map) * 4;   \
   uint32_t reloc =                                    \
      brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
   OUT_BATCH(reloc);                                   \
} while (0)

/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, flags, delta) do {            \
   uint32_t __offset = (__map - brw->batch.map) * 4;   \
   uint64_t reloc64 =                                  \
      brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
   OUT_BATCH(reloc64);                                 \
   OUT_BATCH(reloc64 >> 32);                           \
} while (0)

#define ADVANCE_BATCH()                                \
   assert(__map == brw->batch.map_next);               \
   intel_batchbuffer_advance(brw);                     \
} while (0)

#endif /* INTEL_BATCHBUFFER_H */