#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "intel_context.h"
#include "intel_bufmgr.h"

#define BATCH_RESERVED 16
void intel_batchbuffer_init(struct intel_context *intel);
void intel_batchbuffer_reset(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);
void intel_batchbuffer_save_state(struct intel_context *intel);
void intel_batchbuffer_reset_to_saved(struct intel_context *intel);
int _intel_batchbuffer_flush(struct intel_context *intel,
                             const char *file, int line);
#define intel_batchbuffer_flush(intel) \
   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
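/* The wrapper forwards the caller's __FILE__/__LINE__ so the flush
 * implementation can attribute a flush to its call site, e.g. in debug
 * output (what the .c side does with them is an assumption here; this
 * header only forwards the location).
 */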
/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes, bool is_blit);
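/* A minimal usage sketch, assuming the batch is already mapped as the
 * comment above requires; the payload dwords are arbitrary examples:
 *
 *    uint32_t payload[2] = { 0, 0 };
 *    intel_batchbuffer_data(intel, payload, sizeof(payload), false);
 */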
bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
                                  drm_intel_bo *buffer,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t delta);
bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                         drm_intel_bo *buffer,
                                         uint32_t read_domains,
                                         uint32_t write_domain,
                                         uint32_t delta);
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
void intel_emit_post_sync_nonzero_flush(struct intel_context *intel);
void intel_emit_depth_stall_flushes(struct intel_context *intel);
static INLINE uint32_t float_as_int(float f)
{
   union { float f; uint32_t d; } fi;

   fi.f = f;
   return fi.d;
}
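/* Note: float_as_int() type-puns through a union rather than a pointer
 * cast, which sidesteps strict-aliasing problems. Worked example:
 * float_as_int(1.0f) == 0x3f800000, the IEEE-754 bit pattern of 1.0f.
 */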
/* Inline functions - might actually be better off with these
 * non-inlined. Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static INLINE unsigned
intel_batchbuffer_space(struct intel_context *intel)
{
   return (intel->batch.state_batch_offset - intel->batch.reserved_space)
      - intel->batch.used * 4;
}
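/* Worked example with made-up numbers: if state_batch_offset is 4096,
 * reserved_space is BATCH_RESERVED (16) and 100 dwords have been used,
 * the space left is (4096 - 16) - 100 * 4 = 3680 bytes.
 */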
static INLINE void
intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
{
   assert(intel_batchbuffer_space(intel) >= 4);

   intel->batch.map[intel->batch.used++] = dword;
}
static INLINE void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
{
   intel_batchbuffer_emit_dword(intel, float_as_int(f));
}
static INLINE void
intel_batchbuffer_require_space(struct intel_context *intel,
                                GLuint sz, int is_blit)
{
   if (intel->gen >= 6 &&
       intel->batch.is_blit != is_blit && intel->batch.used) {
      intel_batchbuffer_flush(intel);
   }

   intel->batch.is_blit = is_blit;

   assert(sz < sizeof(intel->batch.map) - BATCH_RESERVED);

   if (intel_batchbuffer_space(intel) < sz)
      intel_batchbuffer_flush(intel);
}
static INLINE void
intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
{
   intel_batchbuffer_require_space(intel, n * 4, is_blit);

   intel->batch.emit = intel->batch.used;
   intel->batch.total = n;
}
static INLINE void
intel_batchbuffer_advance(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = &intel->batch;
   unsigned int _n = batch->used - batch->emit;

   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
}
void intel_batchbuffer_cached_advance(struct intel_context *intel);
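/* A minimal sketch of the begin/emit/advance protocol checked above,
 * using the macros defined below; MI_NOOP (the all-zero no-op dword,
 * defined elsewhere in the driver) is just an illustrative payload:
 *
 *    BEGIN_BATCH(2);
 *    OUT_BATCH(MI_NOOP);
 *    OUT_BATCH(MI_NOOP);
 *    ADVANCE_BATCH();
 *
 * If the OUT_BATCH() count does not match the n given to BEGIN_BATCH(),
 * intel_batchbuffer_advance() reports the mismatch and aborts.
 */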
/* Here are the crusty old macros, to be removed:
 */
#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {		\
   intel_batchbuffer_emit_reloc(intel, buf,				\
				read_domains, write_domain, delta);	\
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {	\
   intel_batchbuffer_emit_reloc_fenced(intel, buf,			\
				       read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);

#endif /* INTEL_BATCHBUFFER_H */