i965: Make/use a brw_batch_references() wrapper.
Author:     Kenneth Graunke <kenneth@whitecape.org>
AuthorDate: Tue, 28 Mar 2017 23:49:35 +0000 (16:49 -0700)
Commit:     Kenneth Graunke <kenneth@whitecape.org>
CommitDate: Mon, 10 Apr 2017 21:31:54 +0000 (14:31 -0700)
We'll want to change the implementation of this shortly.
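
The wrapper is a one-line forward to libdrm for now:

   bool
   brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
   {
      return drm_bacon_bo_references(batch->bo, bo);
   }

Call sites that used to peek at brw->batch.bo directly, e.g.

   if (drm_bacon_bo_references(brw->batch.bo, bo))
      intel_batchbuffer_flush(brw);

now ask the batch itself:

   if (brw_batch_references(&brw->batch, bo))
      intel_batchbuffer_flush(brw);

so the underlying bookkeeping can be swapped out without touching
every caller again.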

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jason Ekstrand <jason@jlekstrand.net>
src/mesa/drivers/dri/i965/brw_performance_query.c
src/mesa/drivers/dri/i965/brw_queryobj.c
src/mesa/drivers/dri/i965/gen6_queryobj.c
src/mesa/drivers/dri/i965/gen6_sol.c
src/mesa/drivers/dri/i965/intel_batchbuffer.c
src/mesa/drivers/dri/i965/intel_batchbuffer.h
src/mesa/drivers/dri/i965/intel_buffer_objects.c
src/mesa/drivers/dri/i965/intel_mipmap_tree.c
src/mesa/drivers/dri/i965/intel_pixel_read.c
src/mesa/drivers/dri/i965/intel_tex_image.c
src/mesa/drivers/dri/i965/intel_tex_subimage.c

diff --git a/src/mesa/drivers/dri/i965/brw_performance_query.c b/src/mesa/drivers/dri/i965/brw_performance_query.c
index dfea2f4bd572035ef68b7c07f21ee5ceedbc1b14..c9ba4fe92567f9b62080abcd441dd257e3bbb7a3 100644
--- a/src/mesa/drivers/dri/i965/brw_performance_query.c
+++ b/src/mesa/drivers/dri/i965/brw_performance_query.c
@@ -1132,7 +1132,7 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
    /* If the current batch references our results bo then we need to
     * flush first...
     */
-   if (drm_bacon_bo_references(brw->batch.bo, bo))
+   if (brw_batch_references(&brw->batch, bo))
       intel_batchbuffer_flush(brw);
 
    if (unlikely(brw->perf_debug)) {
@@ -1157,12 +1157,12 @@ brw_is_perf_query_ready(struct gl_context *ctx,
    case OA_COUNTERS:
       return (obj->oa.results_accumulated ||
               (obj->oa.bo &&
-               !drm_bacon_bo_references(brw->batch.bo, obj->oa.bo) &&
+               !brw_batch_references(&brw->batch, obj->oa.bo) &&
                !drm_bacon_bo_busy(obj->oa.bo)));
 
    case PIPELINE_STATS:
       return (obj->pipeline_stats.bo &&
-              !drm_bacon_bo_references(brw->batch.bo, obj->pipeline_stats.bo) &&
+              !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
               !drm_bacon_bo_busy(obj->pipeline_stats.bo));
    }
 
diff --git a/src/mesa/drivers/dri/i965/brw_queryobj.c b/src/mesa/drivers/dri/i965/brw_queryobj.c
index 04dfc5d576798278a64a0afb9712cd4fb967651b..8e742af67dfb09696d0230aab9142c66100f91fc 100644
--- a/src/mesa/drivers/dri/i965/brw_queryobj.c
+++ b/src/mesa/drivers/dri/i965/brw_queryobj.c
@@ -137,7 +137,7 @@ brw_queryobj_get_results(struct gl_context *ctx,
     * still contributing to it, flush it now so the results will be present
     * when mapped.
     */
-   if (drm_bacon_bo_references(brw->batch.bo, query->bo))
+   if (brw_batch_references(&brw->batch, query->bo))
       intel_batchbuffer_flush(brw);
 
    if (unlikely(brw->perf_debug)) {
@@ -402,7 +402,7 @@ static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
     *      not ready yet on the first time it is queried.  This ensures that
     *      the async query will return true in finite time.
     */
-   if (query->bo && drm_bacon_bo_references(brw->batch.bo, query->bo))
+   if (query->bo && brw_batch_references(&brw->batch, query->bo))
       intel_batchbuffer_flush(brw);
 
    if (query->bo == NULL || !drm_bacon_bo_busy(query->bo)) {
diff --git a/src/mesa/drivers/dri/i965/gen6_queryobj.c b/src/mesa/drivers/dri/i965/gen6_queryobj.c
index b439b2d2019c57146054caafe4af5556e59e2017..b61967bb57fc7a79e37efb082d6b2a2058c0b303 100644
--- a/src/mesa/drivers/dri/i965/gen6_queryobj.c
+++ b/src/mesa/drivers/dri/i965/gen6_queryobj.c
@@ -467,7 +467,7 @@ flush_batch_if_needed(struct brw_context *brw, struct brw_query_object *query)
     * (for example, due to being full).  Record that it's been flushed.
     */
    query->flushed = query->flushed ||
-      !drm_bacon_bo_references(brw->batch.bo, query->bo);
+                    !brw_batch_references(&brw->batch, query->bo);
 
    if (!query->flushed)
       intel_batchbuffer_flush(brw);
diff --git a/src/mesa/drivers/dri/i965/gen6_sol.c b/src/mesa/drivers/dri/i965/gen6_sol.c
index 4c73119c953b2e62257a49f603aa3726fab040a6..f7b53b2050194e466a83881863e6b6bed438d0f0 100644
--- a/src/mesa/drivers/dri/i965/gen6_sol.c
+++ b/src/mesa/drivers/dri/i965/gen6_sol.c
@@ -241,7 +241,7 @@ tally_prims_generated(struct brw_context *brw,
    /* If the current batch is still contributing to the number of primitives
     * generated, flush it now so the results will be present when mapped.
     */
-   if (drm_bacon_bo_references(brw->batch.bo, obj->prim_count_bo))
+   if (brw_batch_references(&brw->batch, obj->prim_count_bo))
       intel_batchbuffer_flush(brw);
 
    if (unlikely(brw->perf_debug && drm_bacon_bo_busy(obj->prim_count_bo)))
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index ff154cce1c50b5bc8bdbcfc8130269d249484d80..ab2ae961b9c62c4ec4f0fac6527279d96145fcd2 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -576,6 +576,12 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
    return ret;
 }
 
+/* Returns true if the current batch still references the given BO. */
+bool
+brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
+{
+   return drm_bacon_bo_references(batch->bo, bo);
+}
 
 /*  This is the only way buffers get added to the validate list.
  */
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index 332334f130cd00a1707095b606c7efe2455d3ed5..e67b18200f40645e9f24ca4ae6a209c380a74f1c 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -65,6 +65,8 @@ void intel_batchbuffer_data(struct brw_context *brw,
                             const void *data, GLuint bytes,
                             enum brw_gpu_ring ring);
 
+bool brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo);
+
 uint64_t brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
                         drm_bacon_bo *target, uint32_t target_offset,
                         uint32_t read_domains, uint32_t write_domain);
diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.c b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
index 9ce13c2ee6208dd6163744848b766560e9cf246c..54a58df11d8e3b64954eefd87f4a253226dfe4d7 100644
--- a/src/mesa/drivers/dri/i965/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.c
@@ -271,7 +271,7 @@ brw_buffer_subdata(struct gl_context *ctx,
 
    busy =
       drm_bacon_bo_busy(intel_obj->buffer) ||
-      drm_bacon_bo_references(brw->batch.bo, intel_obj->buffer);
+      brw_batch_references(&brw->batch, intel_obj->buffer);
 
    if (busy) {
       if (size == intel_obj->Base.Size) {
@@ -330,7 +330,7 @@ brw_get_buffer_subdata(struct gl_context *ctx,
    struct brw_context *brw = brw_context(ctx);
 
    assert(intel_obj);
-   if (drm_bacon_bo_references(brw->batch.bo, intel_obj->buffer)) {
+   if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
       intel_batchbuffer_flush(brw);
    }
    drm_bacon_bo_get_subdata(intel_obj->buffer, offset, size, data);
@@ -389,7 +389,7 @@ brw_map_buffer_range(struct gl_context *ctx,
     * achieve the required synchronization.
     */
    if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
-      if (drm_bacon_bo_references(brw->batch.bo, intel_obj->buffer)) {
+      if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_bacon_bo_unreference(intel_obj->buffer);
            alloc_buffer_object(brw, intel_obj);
diff --git a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
index 59af60c39c32c1a4bfbb1df8df00622bd8dc30a8..6aaf4d20b7a43b052f364a054940c1c13fe3a65a 100644
--- a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
+++ b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
@@ -2457,7 +2457,7 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
 
    drm_bacon_bo *bo = mt->bo;
 
-   if (drm_bacon_bo_references(brw->batch.bo, bo))
+   if (brw_batch_references(&brw->batch, bo))
       intel_batchbuffer_flush(brw);
 
    /* brw_bo_map() uses a WB mmaping of the buffer's backing storage. It
diff --git a/src/mesa/drivers/dri/i965/intel_pixel_read.c b/src/mesa/drivers/dri/i965/intel_pixel_read.c
index 0d26d768560a8a5ef8880232f8d6e1f293de37a7..e81a17ffafa25a0ffa204034cea31b18164d9b46 100644
--- a/src/mesa/drivers/dri/i965/intel_pixel_read.c
+++ b/src/mesa/drivers/dri/i965/intel_pixel_read.c
@@ -142,7 +142,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
 
    bo = irb->mt->bo;
 
-   if (drm_bacon_bo_references(brw->batch.bo, bo)) {
+   if (brw_batch_references(&brw->batch, bo)) {
       perf_debug("Flushing before mapping a referenced bo.\n");
       intel_batchbuffer_flush(brw);
    }
diff --git a/src/mesa/drivers/dri/i965/intel_tex_image.c b/src/mesa/drivers/dri/i965/intel_tex_image.c
index 9c35dfbe6418daf493f60849387273531c3b95c8..e9b334deec0feccf976d95f8552852433402c842 100644
--- a/src/mesa/drivers/dri/i965/intel_tex_image.c
+++ b/src/mesa/drivers/dri/i965/intel_tex_image.c
@@ -527,7 +527,7 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
 
    bo = image->mt->bo;
 
-   if (drm_bacon_bo_references(brw->batch.bo, bo)) {
+   if (brw_batch_references(&brw->batch, bo)) {
       perf_debug("Flushing before mapping a referenced bo.\n");
       intel_batchbuffer_flush(brw);
    }
diff --git a/src/mesa/drivers/dri/i965/intel_tex_subimage.c b/src/mesa/drivers/dri/i965/intel_tex_subimage.c
index f5899f11473184e5dc106006200ec466f2326b3d..2a2298a8dee41c4fc6ae878b1259d18525539f12 100644
--- a/src/mesa/drivers/dri/i965/intel_tex_subimage.c
+++ b/src/mesa/drivers/dri/i965/intel_tex_subimage.c
@@ -143,7 +143,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
 
    bo = image->mt->bo;
 
-   if (drm_bacon_bo_references(brw->batch.bo, bo)) {
+   if (brw_batch_references(&brw->batch, bo)) {
       perf_debug("Flushing before mapping a referenced bo.\n");
       intel_batchbuffer_flush(brw);
    }