#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "brw_context.h"
#include "intel_bufmgr.h"

/**
 * Number of bytes to reserve for commands necessary to complete a batch.
 *
 * This includes:
 * - MI_BATCHBUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch():
 *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 *   - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
 */
#define BATCH_RESERVED 36
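
/* Note: the items listed above sum to 4 + 4 + 16 + 12 = 36 bytes. */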

struct intel_batchbuffer;

void intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw);
void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct brw_context *brw);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
void intel_batchbuffer_clear_cache(struct brw_context *brw);

int _intel_batchbuffer_flush(struct brw_context *brw,
                             const char *file, int line);

#define intel_batchbuffer_flush(intel) \
   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
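
/* For instance, a call such as
 *
 *    intel_batchbuffer_flush(brw);
 *
 * expands to _intel_batchbuffer_flush(brw, __FILE__, __LINE__), so a flush
 * can be traced back to the file and line that requested it.
 */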

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);
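
/* A minimal usage sketch (the payload values are hypothetical):
 *
 *    uint32_t payload[4] = { 0, 0, 0, 0 };
 *    intel_batchbuffer_data(brw, payload, sizeof(payload), RENDER_RING);
 */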

bool intel_batchbuffer_emit_reloc(struct brw_context *brw,
                                  drm_intel_bo *buffer,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t offset);
bool intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
                                         drm_intel_bo *buffer,
                                         uint32_t read_domains,
                                         uint32_t write_domain,
                                         uint32_t offset);
void intel_batchbuffer_emit_mi_flush(struct brw_context *brw);
void intel_emit_post_sync_nonzero_flush(struct brw_context *brw);
void intel_emit_depth_stall_flushes(struct brw_context *brw);
void gen7_emit_vs_workaround_flush(struct brw_context *brw);
void gen7_emit_cs_stall_flush(struct brw_context *brw);

static INLINE uint32_t float_as_int(float f)
{
   union { float f; uint32_t d; } fi;

   fi.f = f;   /* type-pun: reinterpret the float's bits as a dword */
   return fi.d;
}

/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static INLINE unsigned
intel_batchbuffer_space(struct brw_context *brw)
{
   /* batch.used counts dwords, so convert to bytes; state is allocated
    * downward from state_batch_offset, and reserved_space is kept free
    * for the commands that end the batch.
    */
   return (brw->batch.state_batch_offset - brw->batch.reserved_space)
      - brw->batch.used * 4;
}

static INLINE void
intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(brw) >= 4);
#endif
   brw->batch.map[brw->batch.used++] = dword;
   assert(brw->batch.ring != UNKNOWN_RING);
}

static INLINE void
intel_batchbuffer_emit_float(struct brw_context *brw, float f)
{
   intel_batchbuffer_emit_dword(brw, float_as_int(f));
}

static INLINE void
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                enum brw_gpu_ring ring)
{
   /* If we're switching rings, implicitly flush the batch. */
   if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
       brw->gen >= 6) {
      intel_batchbuffer_flush(brw);
   }

#ifdef DEBUG
   assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(brw) < sz)
      intel_batchbuffer_flush(brw);

   enum brw_gpu_ring prev_ring = brw->batch.ring;
   /* The intel_batchbuffer_flush() calls above might have changed
    * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
    */
   brw->batch.ring = ring;

   if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
      intel_batchbuffer_emit_render_ring_prelude(brw);
}
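
/* Example (illustrative): if the batch currently targets RENDER_RING,
 * requesting space on the BLT ring implicitly flushes the render
 * commands first:
 *
 *    intel_batchbuffer_require_space(brw, 8 * 4, BLT_RING);
 */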

static INLINE void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
   intel_batchbuffer_require_space(brw, n * 4, ring);

   brw->batch.emit = brw->batch.used;
#ifdef DEBUG
   brw->batch.total = n;
#endif
}

static INLINE void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = batch->used - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#endif
}

void intel_batchbuffer_cached_advance(struct brw_context *brw);

#define BEGIN_BATCH(n) intel_batchbuffer_begin(brw, n, RENDER_RING)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(brw, n, BLT_RING)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(brw, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(brw, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {         \
   intel_batchbuffer_emit_reloc(brw, buf,                              \
                                read_domains, write_domain, delta);    \
} while (0)

#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {  \
   intel_batchbuffer_emit_reloc_fenced(brw, buf,                       \
                                       read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(brw);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(brw);
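
/* Typical (illustrative) emission pattern using the macros above;
 * SOME_CMD_OPCODE is a hypothetical placeholder, not a real command:
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(SOME_CMD_OPCODE);
 *    OUT_BATCH(0);
 *    OUT_BATCH(0);
 *    ADVANCE_BATCH();
 *
 * BEGIN_BATCH reserves space for 3 dwords and records the count; in DEBUG
 * builds, ADVANCE_BATCH verifies exactly that many dwords were emitted.
 */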