#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "intel_context.h"
#include "intel_bufmgr.h"

#define BATCH_RESERVED 16

void intel_batchbuffer_reset(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);

void _intel_batchbuffer_flush(struct intel_context *intel,
                              const char *file, int line);

#define intel_batchbuffer_flush(intel) \
   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)

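/* Added note: the wrapper above passes the caller's __FILE__ and __LINE__,
 * so a flush can be attributed to the callsite that triggered it when
 * debugging batchbuffer problems.
 */
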
/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes, bool is_blit);

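/* A minimal usage sketch (added note; the payload and its size are
 * hypothetical): copy a small, dword-aligned block of command data into the
 * batch in one call instead of a series of individual dword emits.
 *
 *    static const uint32_t payload[2] = { 0, 0 };
 *    intel_batchbuffer_data(intel, payload, sizeof(payload), false);
 */
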
GLboolean intel_batchbuffer_emit_reloc(struct intel_context *intel,
                                       drm_intel_bo *buffer,
                                       uint32_t read_domains,
                                       uint32_t write_domain,
                                       uint32_t delta);
GLboolean intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                              drm_intel_bo *buffer,
                                              uint32_t read_domains,
                                              uint32_t write_domain,
                                              uint32_t delta);
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);

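/* Added note: an emit_reloc call writes a dword that refers to the buffer
 * object's GPU address plus `delta`, and records the read/write domains so
 * the kernel can patch the address and flush caches at execbuffer time.
 * A hedged sketch using the OUT_RELOC() convenience macro defined at the
 * bottom of this file (the surrounding command dwords are hypothetical,
 * the domain flag comes from libdrm's i915_drm.h):
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(cmd);      // hypothetical command dword
 *    OUT_BATCH(flags);    // hypothetical flags dword
 *    OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, 0, 0);   // read-only reference
 *    ADVANCE_BATCH();
 */
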
static INLINE uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
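
/* Added note: the space calculation below treats the batch as command
 * dwords growing up from the start of the buffer (batch.used, counted in
 * dwords) toward a state section that begins at batch.state_batch_offset,
 * with batch.reserved_space held back for ending the batch.
 */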
static INLINE unsigned
intel_batchbuffer_space(struct intel_context *intel)
{
   return (intel->batch.state_batch_offset - intel->batch.reserved_space)
      - intel->batch.used*4;
}

static INLINE void
intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
{
   assert(intel_batchbuffer_space(intel) >= 4);
   intel->batch.map[intel->batch.used++] = dword;
}

static INLINE void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
{
   intel_batchbuffer_emit_dword(intel, float_as_int(f));
}

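/* Added note: on Gen6+ blit and render commands are executed by different
 * rings, so require_space flushes any partially built batch whenever the
 * caller switches between blit and non-blit work before reserving space.
 */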
static INLINE void
intel_batchbuffer_require_space(struct intel_context *intel,
                                GLuint sz, int is_blit)
{
   if (intel->gen >= 6 &&
       intel->batch.is_blit != is_blit && intel->batch.used) {
      intel_batchbuffer_flush(intel);
   }

   intel->batch.is_blit = is_blit;

   assert(sz < sizeof(intel->batch.map) - BATCH_RESERVED);

   if (intel_batchbuffer_space(intel) < sz)
      intel_batchbuffer_flush(intel);
}

static INLINE void
intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
{
   intel_batchbuffer_require_space(intel, n * 4, is_blit);

   intel->batch.emit = intel->batch.used;
   intel->batch.total = n;
}

static INLINE void
intel_batchbuffer_advance(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = &intel->batch;
   unsigned int _n = batch->used - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
}

void intel_batchbuffer_cached_advance(struct intel_context *intel);

/* Here are the crusty old macros, to be removed:
 */
#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {         \
   intel_batchbuffer_emit_reloc(intel, buf,                            \
                                read_domains, write_domain, delta);    \
} while (0)

#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {  \
   intel_batchbuffer_emit_reloc_fenced(intel, buf,                     \
                                       read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);

#endif