#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "intel_context.h"
#include "intel_bufmgr.h"

/**
 * Number of bytes to reserve for commands necessary to complete a batch.
 *
 * This includes:
 * - MI_BATCHBUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch():
 *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 */
#define BATCH_RESERVED 24
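/* The reserved total works out from the items listed above:
 * 4 (MI_BATCHBUFFER_END) + 4 (optional MI_NOOP padding)
 * + 16 (Gen4-5 occlusion query values) = 24 bytes.
 */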

struct intel_batchbuffer;

void intel_batchbuffer_init(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);
void intel_batchbuffer_save_state(struct intel_context *intel);
void intel_batchbuffer_reset_to_saved(struct intel_context *intel);

int _intel_batchbuffer_flush(struct intel_context *intel,
                             const char *file, int line);

#define intel_batchbuffer_flush(intel) \
        _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
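/* Example (sketch): a caller simply writes
 *
 *    intel_batchbuffer_flush(intel);
 *
 * and the macro expands to _intel_batchbuffer_flush(intel, "caller.c", 123),
 * i.e. the call site's __FILE__ and __LINE__, so a failing flush can be
 * traced back to where it was requested.  "caller.c" and 123 are
 * illustrative values only.
 */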

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes, bool is_blit);
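/* Usage sketch (hypothetical dwords): copy a pre-built block of commands
 * into the batch instead of emitting them one dword at a time:
 *
 *    uint32_t cmds[2] = { 0x02000000, 0x00000000 };  // e.g. MI_FLUSH, MI_NOOP
 *    intel_batchbuffer_data(intel, cmds, sizeof(cmds), false);
 *
 * The final false marks this as render (non-blit) work.
 */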

bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
                                  drm_intel_bo *buffer,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t delta);
bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                         drm_intel_bo *buffer,
                                         uint32_t read_domains,
                                         uint32_t write_domain,
                                         uint32_t delta);
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
void intel_emit_post_sync_nonzero_flush(struct intel_context *intel);
void intel_emit_depth_stall_flushes(struct intel_context *intel);
void gen7_emit_vs_workaround_flush(struct intel_context *intel);

static INLINE uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static INLINE unsigned
intel_batchbuffer_space(struct intel_context *intel)
{
   return (intel->batch.state_batch_offset - intel->batch.reserved_space)
      - intel->batch.used * 4;
}
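/* Worked example (hypothetical numbers): with state_batch_offset at 16384,
 * reserved_space at BATCH_RESERVED (24) and 100 dwords already used, this
 * returns 16384 - 24 - 100 * 4 = 15960 bytes of remaining command space.
 */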

static INLINE void
intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(intel) >= 4);
#endif
   intel->batch.map[intel->batch.used++] = dword;
}

static INLINE void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
{
   intel_batchbuffer_emit_dword(intel, float_as_int(f));
}

static INLINE void
intel_batchbuffer_require_space(struct intel_context *intel,
                                GLuint sz, int is_blit)
{
   if (intel->gen >= 6 &&
       intel->batch.is_blit != is_blit && intel->batch.used) {
      intel_batchbuffer_flush(intel);
   }

   intel->batch.is_blit = is_blit;

#ifdef DEBUG
   assert(sz < intel->maxBatchSize - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(intel) < sz)
      intel_batchbuffer_flush(intel);
}
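/* Behaviour sketch: on Gen6+, a caller that has emitted render commands and
 * then asks for blit space, e.g.
 *
 *    intel_batchbuffer_require_space(intel, 8 * 4, true);
 *
 * gets the current batch flushed first, since blit and non-blit commands are
 * not mixed within one batch there; the 8 * 4 size is illustrative.
 */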

static INLINE void
intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
{
   intel_batchbuffer_require_space(intel, n * 4, is_blit);

   intel->batch.emit = intel->batch.used;
#ifdef DEBUG
   intel->batch.total = n;
#endif
}

static INLINE void
intel_batchbuffer_advance(struct intel_context *intel)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &intel->batch;
   unsigned int _n = batch->used - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#endif
}

void intel_batchbuffer_cached_advance(struct intel_context *intel);

/* Here are the crusty old macros, to be removed:
 */

#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {         \
   intel_batchbuffer_emit_reloc(intel, buf,                            \
                                read_domains, write_domain, delta);    \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {  \
   intel_batchbuffer_emit_reloc_fenced(intel, buf,                     \
                                       read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
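
/* Typical emit sequence using the macros above (sketch; the command dwords
 * and the I915_GEM_DOMAIN_RENDER read domain are illustrative, not taken
 * from this header):
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(some_cmd_dword);                     // hypothetical command
 *    OUT_BATCH(0);
 *    OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, 0, 0);   // emits one dword
 *    ADVANCE_BATCH();
 *
 * BEGIN_BATCH(3) makes room for three dwords; ADVANCE_BATCH() then checks
 * (in DEBUG builds) that exactly three were emitted.
 */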