#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "brw_context.h"
#include "brw_bufmgr.h"

/* The kernel assumes batchbuffers are smaller than 256kB. */
#define MAX_BATCH_SIZE (256 * 1024)

/* 3DSTATE_BINDING_TABLE_POINTERS has a U16 offset from Surface State Base
 * Address, which means that we can't put binding tables beyond 64kB. This
 * effectively limits the maximum statebuffer size to 64kB.
 */
#define MAX_STATE_SIZE (64 * 1024)

struct intel_batchbuffer;

void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_save_state(struct brw_context *brw);
bool intel_batchbuffer_saved_state_is_empty(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz);
int _intel_batchbuffer_flush_fence(struct brw_context *brw,
                                   int in_fence_fd, int *out_fence_fd,
                                   const char *file, int line);

#define intel_batchbuffer_flush(brw) \
   _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)

#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
   _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
                                  __FILE__, __LINE__)

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes);

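/* Usage sketch (illustrative only; the payload values are hypothetical):
 * copy a block of dwords into the batch in one call instead of a series of
 * OUT_BATCH()s:
 *
 *    static const uint32_t payload[] = { 0x1, 0x2, 0x3, 0x4 };
 *    intel_batchbuffer_data(brw, payload, sizeof(payload));
 */
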
static inline bool
brw_batch_has_aperture_space(struct brw_context *brw, uint64_t extra_space)
{
   return brw->batch.aperture_space + extra_space <=
          brw->screen->aperture_threshold;
}

bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);

#define RELOC_WRITE EXEC_OBJECT_WRITE
#define RELOC_NEEDS_GGTT EXEC_OBJECT_NEEDS_GTT
/* Inverted meaning, but using the same bit...emit_reloc will flip it. */
#define RELOC_32BIT EXEC_OBJECT_SUPPORTS_48B_ADDRESS

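/* Illustrative flag usage (query_bo is a hypothetical buffer name): a write
 * target that must stay below 4GB would typically be emitted as
 *
 *    OUT_RELOC(query_bo, RELOC_WRITE | RELOC_32BIT, 0);
 *
 * where, per the comment above, the relocation code is expected to clear
 * EXEC_OBJECT_SUPPORTS_48B_ADDRESS for that buffer rather than set it.
 */
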
void brw_use_pinned_bo(struct intel_batchbuffer *batch, struct brw_bo *bo,
                       unsigned writeable_flag);

uint64_t brw_batch_reloc(struct intel_batchbuffer *batch,
                         uint32_t batch_offset,
                         struct brw_bo *target,
                         uint32_t target_offset,
                         unsigned flags);
uint64_t brw_state_reloc(struct intel_batchbuffer *batch,
                         uint32_t batch_offset,
                         struct brw_bo *target,
                         uint32_t target_offset,
                         unsigned flags);

#define USED_BATCH(_batch) \
   ((uintptr_t)((_batch).map_next - (_batch).batch.map))

static inline uint32_t float_as_int(float f)
{
   union { float f; uint32_t d; } fi = { .f = f };
   return fi.d;
}

static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n)
{
   intel_batchbuffer_require_space(brw, n * 4);

#ifdef DEBUG
   brw->batch.emit = USED_BATCH(brw->batch);
   brw->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = USED_BATCH(*batch) - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#else
   (void) brw;
#endif
}

static inline bool
brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
{
   return (char *) p >= (char *) batch->state.map &&
          (char *) p < (char *) batch->state.map + batch->state.bo->size;
}

#define BEGIN_BATCH(n) do {                            \
   intel_batchbuffer_begin(brw, (n));                  \
   uint32_t *__map = brw->batch.map_next;              \
   brw->batch.map_next += (n)

#define BEGIN_BATCH_BLT(n) do {                        \
   assert(brw->screen->devinfo.gen < 6);               \
   intel_batchbuffer_begin(brw, (n));                  \
   uint32_t *__map = brw->batch.map_next;              \
   brw->batch.map_next += (n)

#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))

#define OUT_RELOC(buf, flags, delta) do {                               \
   uint32_t __offset = (__map - brw->batch.batch.map) * 4;              \
   uint32_t reloc =                                                     \
      brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags));  \
   OUT_BATCH(reloc);                                                    \
} while (0)

/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, flags, delta) do {                             \
   uint32_t __offset = (__map - brw->batch.batch.map) * 4;              \
   uint64_t reloc64 =                                                   \
      brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags));  \
   OUT_BATCH(reloc64);                                                  \
   OUT_BATCH(reloc64 >> 32);                                            \
} while (0)

#define ADVANCE_BATCH()                                \
   assert(__map == brw->batch.map_next);               \
   intel_batchbuffer_advance(brw);                     \
} while (0)

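/* Usage sketch (illustrative only; SOME_3D_COMMAND and some_bo are
 * hypothetical names, not part of the driver).  BEGIN_BATCH() opens the
 * do-block and ADVANCE_BATCH() closes it, so the two must always bracket the
 * OUT_* calls, and a brw_context pointer named "brw" must be in scope:
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(SOME_3D_COMMAND);          // hypothetical opcode dword
 *    OUT_BATCH(0);                        // hypothetical flags dword
 *    OUT_RELOC(some_bo, RELOC_WRITE, 0);  // one 32-bit relocated address
 *    ADVANCE_BATCH();
 */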