i965: Add and use USED_BATCH macro.
author    Matt Turner <mattst88@gmail.com>
          Sat, 11 Jul 2015 21:36:25 +0000 (14:36 -0700)
committer Matt Turner <mattst88@gmail.com>
          Wed, 15 Jul 2015 20:09:22 +0000 (13:09 -0700)
The next patch will replace the .used field with an on-demand
calculation of batchbuffer usage.
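
For context, the point of routing every reader through the macro is that
only its definition has to change when the representation does. A minimal
sketch of the kind of on-demand calculation the follow-up enables, assuming
the batch keeps a map/map_next pointer pair (the field names here are
illustrative, not part of this patch):

   /* Hypothetical redefinition once the .used counter is gone: with
    * map and map_next as uint32_t pointers, their difference is the
    * number of dwords emitted so far.
    */
   #define USED_BATCH(batch) \
      ((uintptr_t)((batch).map_next - (batch).map))

Until then, USED_BATCH(batch) simply expands to (batch).used, so this
patch is behavior-neutral.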

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
src/mesa/drivers/dri/i965/brw_blorp.cpp
src/mesa/drivers/dri/i965/brw_performance_monitor.c
src/mesa/drivers/dri/i965/brw_state_batch.c
src/mesa/drivers/dri/i965/brw_urb.c
src/mesa/drivers/dri/i965/intel_batchbuffer.c
src/mesa/drivers/dri/i965/intel_batchbuffer.h

diff --git a/src/mesa/drivers/dri/i965/brw_blorp.cpp b/src/mesa/drivers/dri/i965/brw_blorp.cpp
index 2ccfae1d77ff7ea15fc2446732228435a67fc572..eac1f005496a3d83b3a67dd7b6b0ae8ddc334ddf 100644
--- a/src/mesa/drivers/dri/i965/brw_blorp.cpp
+++ b/src/mesa/drivers/dri/i965/brw_blorp.cpp
@@ -226,7 +226,7 @@ retry:
    intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
    intel_batchbuffer_save_state(brw);
    drm_intel_bo *saved_bo = brw->batch.bo;
-   uint32_t saved_used = brw->batch.used;
+   uint32_t saved_used = USED_BATCH(brw->batch);
    uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;
 
    switch (brw->gen) {
@@ -245,7 +245,7 @@ retry:
     * reserved enough space that a wrap will never happen.
     */
    assert(brw->batch.bo == saved_bo);
-   assert((brw->batch.used - saved_used) * 4 +
+   assert((USED_BATCH(brw->batch) - saved_used) * 4 +
           (saved_state_batch_offset - brw->batch.state_batch_offset) <
           estimated_max_batch_usage);
    /* Shut up compiler warnings on release build */
diff --git a/src/mesa/drivers/dri/i965/brw_performance_monitor.c b/src/mesa/drivers/dri/i965/brw_performance_monitor.c
index 0a123754257c9b0e7a6c3364cd33622691874d4d..7e90e8a8fa1ce49abf22c730ec7c6efb414eb95f 100644
--- a/src/mesa/drivers/dri/i965/brw_performance_monitor.c
+++ b/src/mesa/drivers/dri/i965/brw_performance_monitor.c
@@ -710,7 +710,7 @@ emit_mi_report_perf_count(struct brw_context *brw,
    /* Make sure the commands to take a snapshot fits in a single batch. */
    intel_batchbuffer_require_space(brw, MI_REPORT_PERF_COUNT_BATCH_DWORDS * 4,
                                    RENDER_RING);
-   int batch_used = brw->batch.used;
+   int batch_used = USED_BATCH(brw->batch);
 
    /* Reports apparently don't always get written unless we flush first. */
    brw_emit_mi_flush(brw);
@@ -754,7 +754,7 @@ emit_mi_report_perf_count(struct brw_context *brw,
    brw_emit_mi_flush(brw);
 
    (void) batch_used;
-   assert(brw->batch.used - batch_used <= MI_REPORT_PERF_COUNT_BATCH_DWORDS * 4);
+   assert(USED_BATCH(brw->batch) - batch_used <= MI_REPORT_PERF_COUNT_BATCH_DWORDS * 4);
 }
 
 /**
@@ -1386,7 +1386,7 @@ void
 brw_perf_monitor_new_batch(struct brw_context *brw)
 {
    assert(brw->batch.ring == RENDER_RING);
-   assert(brw->gen < 6 || brw->batch.used == 0);
+   assert(brw->gen < 6 || USED_BATCH(brw->batch) == 0);
 
    if (brw->perfmon.oa_users == 0)
       return;
diff --git a/src/mesa/drivers/dri/i965/brw_state_batch.c b/src/mesa/drivers/dri/i965/brw_state_batch.c
index a405a80ef6e3204284f3ea371ea7b03eb52d4308..d79e0ea00c763d2a2138a85ccc2f0c457ba8e312 100644
--- a/src/mesa/drivers/dri/i965/brw_state_batch.c
+++ b/src/mesa/drivers/dri/i965/brw_state_batch.c
@@ -87,7 +87,7 @@ brw_annotate_aub(struct brw_context *brw)
    drm_intel_aub_annotation annotations[annotation_count];
    int a = 0;
    make_annotation(&annotations[a++], AUB_TRACE_TYPE_BATCH, 0,
-                   4*brw->batch.used);
+                   4 * USED_BATCH(brw->batch));
    for (int i = brw->state_batch_count; i-- > 0; ) {
       uint32_t type = brw->state_batch_list[i].type;
       uint32_t start_offset = brw->state_batch_list[i].offset;
@@ -136,7 +136,7 @@ __brw_state_batch(struct brw_context *brw,
     * space, then flush and try again.
     */
    if (batch->state_batch_offset < size ||
-       offset < 4*batch->used + batch->reserved_space) {
+       offset < 4 * USED_BATCH(*batch) + batch->reserved_space) {
       intel_batchbuffer_flush(brw);
       offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
    }
diff --git a/src/mesa/drivers/dri/i965/brw_urb.c b/src/mesa/drivers/dri/i965/brw_urb.c
index 6fcf1b0cb1d38fd784a3f4c4b4f98b5ea65a2964..8fc06ba7cd9abcb1d40089314b647dfa3fe41e0e 100644
--- a/src/mesa/drivers/dri/i965/brw_urb.c
+++ b/src/mesa/drivers/dri/i965/brw_urb.c
@@ -249,8 +249,8 @@ void brw_upload_urb_fence(struct brw_context *brw)
    uf.bits1.cs_fence  = brw->urb.size;
 
    /* erratum: URB_FENCE must not cross a 64byte cacheline */
-   if ((brw->batch.used & 15) > 12) {
-      int pad = 16 - (brw->batch.used & 15);
+   if ((USED_BATCH(brw->batch) & 15) > 12) {
+      int pad = 16 - (USED_BATCH(brw->batch) & 15);
       do
         brw->batch.map[brw->batch.used++] = MI_NOOP;
       while (--pad);
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index f82958f0f6cc2a77d1e7ac8109902ef0fcd1fe03..628a7b774b29830fd4e00959628e78d53872bc5c 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -94,7 +94,7 @@ intel_batchbuffer_reset_to_saved(struct brw_context *brw)
    drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
 
    brw->batch.used = brw->batch.saved.used;
-   if (brw->batch.used == 0)
+   if (USED_BATCH(brw->batch) == 0)
       brw->batch.ring = UNKNOWN_RING;
 }
 
@@ -122,7 +122,7 @@ do_batch_dump(struct brw_context *brw)
       drm_intel_decode_set_batch_pointer(decode,
                                         batch->bo->virtual,
                                         batch->bo->offset64,
-                                        batch->used);
+                                         USED_BATCH(*batch));
    } else {
       fprintf(stderr,
              "WARNING: failed to map batchbuffer (%s), "
@@ -131,7 +131,7 @@ do_batch_dump(struct brw_context *brw)
       drm_intel_decode_set_batch_pointer(decode,
                                         batch->map,
                                         batch->bo->offset64,
-                                        batch->used);
+                                         USED_BATCH(*batch));
    }
 
    drm_intel_decode_set_output_file(decode, stderr);
@@ -289,7 +289,7 @@ do_flush_locked(struct brw_context *brw)
    if (brw->has_llc) {
       drm_intel_bo_unmap(batch->bo);
    } else {
-      ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+      ret = drm_intel_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
       if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
         ret = drm_intel_bo_subdata(batch->bo,
                                    batch->state_batch_offset,
@@ -314,11 +314,11 @@ do_flush_locked(struct brw_context *brw)
             brw_annotate_aub(brw);
 
         if (brw->hw_ctx == NULL || batch->ring != RENDER_RING) {
-           ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
-                                       flags);
+            ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
+                                        NULL, 0, 0, flags);
         } else {
            ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
-                                               4 * batch->used, flags);
+                                                4 * USED_BATCH(*batch), flags);
         }
       }
 
@@ -342,7 +342,7 @@ _intel_batchbuffer_flush(struct brw_context *brw,
 {
    int ret;
 
-   if (brw->batch.used == 0)
+   if (USED_BATCH(brw->batch) == 0)
       return 0;
 
    if (brw->throttle_batch[0] == NULL) {
@@ -351,7 +351,7 @@ _intel_batchbuffer_flush(struct brw_context *brw,
    }
 
    if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
-      int bytes_for_commands = 4 * brw->batch.used;
+      int bytes_for_commands = 4 * USED_BATCH(brw->batch);
       int bytes_for_state = brw->batch.bo->size - brw->batch.state_batch_offset;
       int total_bytes = bytes_for_commands + bytes_for_state;
       fprintf(stderr, "%s:%d: Batchbuffer flush with %4db (pkt) + "
@@ -367,7 +367,7 @@ _intel_batchbuffer_flush(struct brw_context *brw,
 
    /* Mark the end of the buffer. */
    intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
-   if (brw->batch.used & 1) {
+   if (USED_BATCH(brw->batch) & 1) {
       /* Round batchbuffer usage to 2 DWORDs. */
       intel_batchbuffer_emit_dword(brw, MI_NOOP);
    }
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index c0456f3c1f65c938d09a1719bc06b0c148a806c6..12d20d363c3240094b19066d37226b49102bffbc 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -67,6 +67,9 @@ uint64_t intel_batchbuffer_reloc64(struct brw_context *brw,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t offset);
+
+#define USED_BATCH(batch) ((batch).used)
+
 static inline uint32_t float_as_int(float f)
 {
    union {
@@ -87,7 +90,7 @@ static inline unsigned
 intel_batchbuffer_space(struct brw_context *brw)
 {
    return (brw->batch.state_batch_offset - brw->batch.reserved_space)
-      - brw->batch.used*4;
+      - USED_BATCH(brw->batch) * 4;
 }
 
 
@@ -139,7 +142,7 @@ intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
    intel_batchbuffer_require_space(brw, n * 4, ring);
 
 #ifdef DEBUG
-   brw->batch.emit = brw->batch.used;
+   brw->batch.emit = USED_BATCH(brw->batch);
    brw->batch.total = n;
 #endif
 }
@@ -149,7 +152,7 @@ intel_batchbuffer_advance(struct brw_context *brw)
 {
 #ifdef DEBUG
    struct intel_batchbuffer *batch = &brw->batch;
-   unsigned int _n = batch->used - batch->emit;
+   unsigned int _n = USED_BATCH(*batch) - batch->emit;
    assert(batch->total != 0);
    if (_n != batch->total) {
       fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",