#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "brw_context.h"
#include "intel_bufmgr.h"

/**
 * Number of bytes to reserve for commands necessary to complete a batch.
 *
 * This includes:
 * - MI_BATCHBUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch():
 *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 *   - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
 *   - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
 *     - Two sets of PIPE_CONTROLs, which become 3 PIPE_CONTROLs each on SNB,
 *       which are 4 DWords each ==> 2 * 3 * 4 * 4 = 96 bytes
 *     - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+.  ==> 12 bytes.
 *       On Ironlake, it's 6 DWords, but we have some slack due to the lack
 *       of Sandybridge PIPE_CONTROL madness.
 */
#define BATCH_RESERVED 146
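
/* Rough tally of the items above (a conservative estimate, since the
 * Gen4-5 and Gen6+ items never all apply at once):
 *
 *    4 + 4 + 16 + 12 + 96 + 12 = 144 bytes
 *
 * so the 146 bytes reserved here cover the worst case with a little slack.
 */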

struct intel_batchbuffer;

void intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw);
void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct brw_context *brw);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);

int _intel_batchbuffer_flush(struct brw_context *brw,
                             const char *file, int line);

#define intel_batchbuffer_flush(intel) \
   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)

/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);
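
/* Usage sketch (illustrative): copy several DWords into the batch at once
 * instead of issuing one intel_batchbuffer_emit_dword() call per DWord.
 *
 *    uint32_t noops[2] = { 0, 0 };   // MI_NOOP is an all-zero DWord
 *    intel_batchbuffer_data(brw, noops, sizeof(noops), RENDER_RING);
 */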

bool intel_batchbuffer_emit_reloc(struct brw_context *brw,
                                  drm_intel_bo *buffer,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t delta);
bool intel_batchbuffer_emit_reloc64(struct brw_context *brw,
                                    drm_intel_bo *buffer,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t delta);

void brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags);
void brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
                                 drm_intel_bo *bo, uint32_t offset,
                                 uint32_t imm_lower, uint32_t imm_upper);
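
/* Usage sketch (illustrative; assumes the PIPE_CONTROL_WRITE_IMMEDIATE flag
 * from brw_defines.h and a previously allocated `query_bo`): ask the GPU to
 * write the 64-bit immediate 1 to offset 0 of the buffer when the
 * PIPE_CONTROL completes.
 *
 *    brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_IMMEDIATE,
 *                                query_bo, 0, 1, 0);
 */
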
void intel_batchbuffer_emit_mi_flush(struct brw_context *brw);
void intel_emit_post_sync_nonzero_flush(struct brw_context *brw);
void intel_emit_depth_stall_flushes(struct brw_context *brw);
void gen7_emit_vs_workaround_flush(struct brw_context *brw);
void gen7_emit_cs_stall_flush(struct brw_context *brw);

static inline uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}

/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static inline unsigned
intel_batchbuffer_space(struct brw_context *brw)
{
   return (brw->batch.state_batch_offset - brw->batch.reserved_space)
      - brw->batch.used * 4;
}
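
/* A note on the math (sketch of the layout this assumes): command DWords
 * accumulate upward from the start of the buffer (brw->batch.used so far),
 * while brw_state_batch() allocations grow downward from state_batch_offset;
 * free space is what remains between them, minus reserved_space.
 */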

static inline void
intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(brw) >= 4);
#endif
   brw->batch.map[brw->batch.used++] = dword;
   assert(brw->batch.ring != UNKNOWN_RING);
}

static inline void
intel_batchbuffer_emit_float(struct brw_context *brw, float f)
{
   intel_batchbuffer_emit_dword(brw, float_as_int(f));
}

static inline void
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                enum brw_gpu_ring ring)
{
   /* If we're switching rings, implicitly flush the batch. */
   if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
       brw->gen >= 6) {
      intel_batchbuffer_flush(brw);
   }

#ifdef DEBUG
   assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(brw) < sz)
      intel_batchbuffer_flush(brw);

   enum brw_gpu_ring prev_ring = brw->batch.ring;
   /* The intel_batchbuffer_flush() calls above might have changed
    * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
    */
   brw->batch.ring = ring;

   if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
      intel_batchbuffer_emit_render_ring_prelude(brw);
}

static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
   intel_batchbuffer_require_space(brw, n * 4, ring);

   brw->batch.emit = brw->batch.used;
#ifdef DEBUG
   brw->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &brw->batch;
   unsigned int _n = batch->used - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#endif
}

#define BEGIN_BATCH(n) intel_batchbuffer_begin(brw, n, RENDER_RING)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(brw, n, BLT_RING)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(brw, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(brw, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {      \
   intel_batchbuffer_emit_reloc(brw, buf,                           \
                                read_domains, write_domain, delta); \
} while (0)

/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, read_domains, write_domain, delta) do { \
   intel_batchbuffer_emit_reloc64(brw, buf, read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(brw);
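
/* Typical emission pattern (illustrative; SOME_PACKET_HEADER and `bo` are
 * hypothetical stand-ins, not real definitions): the count passed to
 * BEGIN_BATCH() must match the DWords actually emitted, and DEBUG builds
 * abort in ADVANCE_BATCH() when they disagree.
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(SOME_PACKET_HEADER);                 // command DWord 0
 *    OUT_BATCH(0);                                  // payload DWord
 *    OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, 0, 0);   // pointer into `bo`
 *    ADVANCE_BATCH();
 */

#endif /* INTEL_BATCHBUFFER_H */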