intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
intel_batchbuffer_save_state(brw);
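/* Remember where the batch currently stands so the assert below can verify
 * that estimated_max_batch_usage really was an upper bound.
 */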
drm_intel_bo *saved_bo = brw->batch.bo;
- uint32_t saved_used = brw->batch.used;
+ uint32_t saved_used = USED_BATCH(brw->batch);
uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;
switch (brw->gen) {
* reserved enough space that a wrap will never happen.
*/
assert(brw->batch.bo == saved_bo);
- assert((brw->batch.used - saved_used) * 4 +
+ assert((USED_BATCH(brw->batch) - saved_used) * 4 +
(saved_state_batch_offset - brw->batch.state_batch_offset) <
estimated_max_batch_usage);
/* Shut up compiler warnings on release build */
/* Make sure the commands to take a snapshot fit in a single batch. */
intel_batchbuffer_require_space(brw, MI_REPORT_PERF_COUNT_BATCH_DWORDS * 4,
RENDER_RING);
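/* Snapshot the DWORD count before emitting the report so the assert at the
 * end can check that MI_REPORT_PERF_COUNT_BATCH_DWORDS was a safe estimate.
 */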
- int batch_used = brw->batch.used;
+ int batch_used = USED_BATCH(brw->batch);
/* Reports apparently don't always get written unless we flush first. */
brw_emit_mi_flush(brw);
brw_emit_mi_flush(brw);
(void) batch_used;
- assert(brw->batch.used - batch_used <= MI_REPORT_PERF_COUNT_BATCH_DWORDS * 4);
+ assert(USED_BATCH(brw->batch) - batch_used <= MI_REPORT_PERF_COUNT_BATCH_DWORDS * 4);
}
/**
brw_perf_monitor_new_batch(struct brw_context *brw)
{
assert(brw->batch.ring == RENDER_RING);
- assert(brw->gen < 6 || brw->batch.used == 0);
+ assert(brw->gen < 6 || USED_BATCH(brw->batch) == 0);
if (brw->perfmon.oa_users == 0)
return;
drm_intel_aub_annotation annotations[annotation_count];
int a = 0;
make_annotation(&annotations[a++], AUB_TRACE_TYPE_BATCH, 0,
- 4*brw->batch.used);
+ 4 * USED_BATCH(brw->batch));
for (int i = brw->state_batch_count; i-- > 0; ) {
uint32_t type = brw->state_batch_list[i].type;
uint32_t start_offset = brw->state_batch_list[i].offset;
* space, then flush and try again.
*/
if (batch->state_batch_offset < size ||
- offset < 4*batch->used + batch->reserved_space) {
+ offset < 4 * USED_BATCH(*batch) + batch->reserved_space) {
intel_batchbuffer_flush(brw);
offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
}
uf.bits1.cs_fence = brw->urb.size;
/* erratum: URB_FENCE must not cross a 64-byte cacheline */
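/* (16 DWORDs is one 64-byte cacheline; the fence packet is only a few DWORDs
 * long, so padding up to the next 16-DWORD boundary guarantees it cannot
 * straddle one.)
 */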
- if ((brw->batch.used & 15) > 12) {
- int pad = 16 - (brw->batch.used & 15);
+ if ((USED_BATCH(brw->batch) & 15) > 12) {
+ int pad = 16 - (USED_BATCH(brw->batch) & 15);
do
brw->batch.map[brw->batch.used++] = MI_NOOP;
while (--pad);
drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
brw->batch.used = brw->batch.saved.used;
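/* If rolling back emptied the batch entirely, we no longer know which ring
 * the next packets will target.
 */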
- if (brw->batch.used == 0)
+ if (USED_BATCH(brw->batch) == 0)
brw->batch.ring = UNKNOWN_RING;
}
drm_intel_decode_set_batch_pointer(decode,
batch->bo->virtual,
batch->bo->offset64,
- batch->used);
+ USED_BATCH(*batch));
} else {
fprintf(stderr,
"WARNING: failed to map batchbuffer (%s), "
drm_intel_decode_set_batch_pointer(decode,
batch->map,
batch->bo->offset64,
- batch->used);
+ USED_BATCH(*batch));
}
drm_intel_decode_set_output_file(decode, stderr);
if (brw->has_llc) {
drm_intel_bo_unmap(batch->bo);
} else {
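/* Without LLC the batch is assembled in a CPU shadow buffer, so upload the
 * command DWORDs (and the separate state area below) into the BO now.
 */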
- ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+ ret = drm_intel_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
ret = drm_intel_bo_subdata(batch->bo,
batch->state_batch_offset,
brw_annotate_aub(brw);
if (brw->hw_ctx == NULL || batch->ring != RENDER_RING) {
- ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
- flags);
+ ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
+ NULL, 0, 0, flags);
} else {
ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
- 4 * batch->used, flags);
+ 4 * USED_BATCH(*batch), flags);
}
}
{
int ret;
- if (brw->batch.used == 0)
+ if (USED_BATCH(brw->batch) == 0)
return 0;
if (brw->throttle_batch[0] == NULL) {
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
- int bytes_for_commands = 4 * brw->batch.used;
+ int bytes_for_commands = 4 * USED_BATCH(brw->batch);
int bytes_for_state = brw->batch.bo->size - brw->batch.state_batch_offset;
int total_bytes = bytes_for_commands + bytes_for_state;
fprintf(stderr, "%s:%d: Batchbuffer flush with %4db (pkt) + "
/* Mark the end of the buffer. */
intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
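/* The batch must be QWord (8-byte) aligned in length, so pad with a NOOP
 * when an odd number of DWORDs has been emitted.
 */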
- if (brw->batch.used & 1) {
+ if (USED_BATCH(brw->batch) & 1) {
/* Round batchbuffer usage to 2 DWORDs. */
intel_batchbuffer_emit_dword(brw, MI_NOOP);
}
uint32_t read_domains,
uint32_t write_domain,
uint32_t offset);
+
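+/* Number of DWORDs of command space emitted to the batch so far.  Currently
+ * this is just the .used counter; going through the macro keeps callers
+ * independent of how the count is tracked.
+ */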
+#define USED_BATCH(batch) ((batch).used)
+
static inline uint32_t float_as_int(float f)
{
union {
intel_batchbuffer_space(struct brw_context *brw)
{
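/* Commands grow upward from the start of the BO while indirect state
 * allocated by brw_state_batch() grows downward from state_batch_offset;
 * the free space is whatever lies between them, minus the tail reserved for
 * ending the batch.
 */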
return (brw->batch.state_batch_offset - brw->batch.reserved_space)
- - brw->batch.used*4;
+ - USED_BATCH(brw->batch) * 4;
}
intel_batchbuffer_require_space(brw, n * 4, ring);
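/* In debug builds, record where this packet starts and how many DWORDs it
 * claims, so ADVANCE_BATCH() can verify that exactly that many were emitted.
 */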
#ifdef DEBUG
- brw->batch.emit = brw->batch.used;
+ brw->batch.emit = USED_BATCH(brw->batch);
brw->batch.total = n;
#endif
}
{
#ifdef DEBUG
struct intel_batchbuffer *batch = &brw->batch;
- unsigned int _n = batch->used - batch->emit;
+ unsigned int _n = USED_BATCH(*batch) - batch->emit;
assert(batch->total != 0);
if (_n != batch->total) {
fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",