freedreno: remove flush-queue
author Rob Clark <robdclark@chromium.org>
Wed, 22 Jan 2020 00:15:28 +0000 (16:15 -0800)
committer Marge Bot <eric+marge@anholt.net>
Wed, 29 Jan 2020 21:19:41 +0000 (21:19 +0000)
Signed-off-by: Rob Clark <robdclark@chromium.org>
Tested-by: Marge Bot <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3503>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3503>

14 files changed:
src/gallium/drivers/freedreno/a5xx/fd5_blitter.c
src/gallium/drivers/freedreno/a6xx/fd6_blitter.c
src/gallium/drivers/freedreno/a6xx/fd6_gmem.c
src/gallium/drivers/freedreno/freedreno_batch.c
src/gallium/drivers/freedreno/freedreno_batch.h
src/gallium/drivers/freedreno/freedreno_batch_cache.c
src/gallium/drivers/freedreno/freedreno_context.c
src/gallium/drivers/freedreno/freedreno_context.h
src/gallium/drivers/freedreno/freedreno_draw.c
src/gallium/drivers/freedreno/freedreno_fence.c
src/gallium/drivers/freedreno/freedreno_query_acc.c
src/gallium/drivers/freedreno/freedreno_query_hw.c
src/gallium/drivers/freedreno/freedreno_resource.c
src/gallium/drivers/freedreno/freedreno_state.c

index b5765263ce54b29d334fe362eb7c61ba187a6eb1..cffcae86a8b7d87dd9cbd10875a0721cc68c1a0c 100644 (file)
@@ -479,7 +479,7 @@ fd5_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
        fd_resource(info->dst.resource)->valid = true;
        batch->needs_flush = true;
 
-       fd_batch_flush(batch, false);
+       fd_batch_flush(batch);
        fd_batch_reference(&batch, NULL);
 
        return true;
index c19672842580dfb9206ada2d92aa79cc58d6ee12..f812aab37a6cd0c08e763743536e06ff4cc99c25 100644 (file)
@@ -668,7 +668,7 @@ handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
        fd_resource(info->dst.resource)->valid = true;
        batch->needs_flush = true;
 
-       fd_batch_flush(batch, false);
+       fd_batch_flush(batch);
        fd_batch_reference(&batch, NULL);
 
        return true;
index 28388bfb8a2115292c84e8097c8568f544bf9133..b6f1d5f2281acff4b7aea1f12fb3ddb7abd0fff7 100644 (file)
@@ -1432,7 +1432,7 @@ emit_sysmem_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
                }
        }
 
-       fd6_event_write(batch, ring, 0x1d, true);
+       fd6_event_write(batch, ring, UNK_1D, true);
 }
 
 static void
index f0dd8148229fa8bd370ee70849edb762dad83646..dc8e17915133e1709abbd19ef87c63cd0c28e2e6 100644 (file)
@@ -41,9 +41,6 @@ batch_init(struct fd_batch *batch)
        struct fd_context *ctx = batch->ctx;
        unsigned size = 0;
 
-       if (ctx->screen->reorder)
-               util_queue_fence_init(&batch->flush_fence);
-
        /* if kernel is too old to support unlimited # of cmd buffers, we
         * have no option but to allocate large worst-case sizes so that
         * we don't need to grow the ringbuffer.  Performance is likely to
@@ -191,9 +188,6 @@ batch_fini(struct fd_batch *batch)
                fd_hw_sample_reference(batch->ctx, &samp, NULL);
        }
        util_dynarray_fini(&batch->samples);
-
-       if (batch->ctx->screen->reorder)
-               util_queue_fence_destroy(&batch->flush_fence);
 }
 
 static void
@@ -204,7 +198,7 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
 
        foreach_batch(dep, cache, batch->dependents_mask) {
                if (flush)
-                       fd_batch_flush(dep, false);
+                       fd_batch_flush(dep);
                fd_batch_reference(&dep, NULL);
        }
 
@@ -239,8 +233,6 @@ batch_reset(struct fd_batch *batch)
 {
        DBG("%p", batch);
 
-       fd_batch_sync(batch);
-
        batch_flush_reset_dependencies(batch, false);
        batch_reset_resources(batch);
 
@@ -286,32 +278,6 @@ __fd_batch_describe(char* buf, const struct fd_batch *batch)
        sprintf(buf, "fd_batch<%u>", batch->seqno);
 }
 
-void
-fd_batch_sync(struct fd_batch *batch)
-{
-       if (!batch->ctx->screen->reorder)
-               return;
-       util_queue_fence_wait(&batch->flush_fence);
-}
-
-static void
-batch_flush_func(void *job, int id)
-{
-       struct fd_batch *batch = job;
-
-       DBG("%p", batch);
-
-       fd_gmem_render_tiles(batch);
-       batch_reset_resources(batch);
-}
-
-static void
-batch_cleanup_func(void *job, int id)
-{
-       struct fd_batch *batch = job;
-       fd_batch_reference(&batch, NULL);
-}
-
 static void
 batch_flush(struct fd_batch *batch)
 {
@@ -333,20 +299,8 @@ batch_flush(struct fd_batch *batch)
 
        fd_fence_ref(&batch->ctx->last_fence, batch->fence);
 
-       if (batch->ctx->screen->reorder) {
-               struct fd_batch *tmp = NULL;
-               fd_batch_reference(&tmp, batch);
-
-               if (!util_queue_is_initialized(&batch->ctx->flush_queue))
-                       util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);
-
-               util_queue_add_job(&batch->ctx->flush_queue,
-                               batch, &batch->flush_fence,
-                               batch_flush_func, batch_cleanup_func, 0);
-       } else {
-               fd_gmem_render_tiles(batch);
-               batch_reset_resources(batch);
-       }
+       fd_gmem_render_tiles(batch);
+       batch_reset_resources(batch);
 
        debug_assert(batch->reference.count > 0);
 
@@ -364,7 +318,7 @@ batch_flush(struct fd_batch *batch)
  *   a fence to sync on
  */
 void
-fd_batch_flush(struct fd_batch *batch, bool sync)
+fd_batch_flush(struct fd_batch *batch)
 {
        struct fd_batch *tmp = NULL;
        bool newbatch = false;
@@ -401,9 +355,6 @@ fd_batch_flush(struct fd_batch *batch, bool sync)
                fd_context_all_dirty(ctx);
        }
 
-       if (sync)
-               fd_batch_sync(tmp);
-
        fd_batch_reference(&tmp, NULL);
 }
 
@@ -445,7 +396,7 @@ flush_write_batch(struct fd_resource *rsc)
        fd_batch_reference_locked(&b, rsc->write_batch);
 
        mtx_unlock(&b->ctx->screen->lock);
-       fd_batch_flush(b, true);
+       fd_batch_flush(b);
        mtx_lock(&b->ctx->screen->lock);
 
        fd_bc_invalidate_batch(b, false);
@@ -519,7 +470,7 @@ fd_batch_check_size(struct fd_batch *batch)
        debug_assert(!batch->flushed);
 
        if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
-               fd_batch_flush(batch, true);
+               fd_batch_flush(batch);
                return;
        }
 
@@ -528,7 +479,7 @@ fd_batch_check_size(struct fd_batch *batch)
 
        struct fd_ringbuffer *ring = batch->draw;
        if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
-               fd_batch_flush(batch, true);
+               fd_batch_flush(batch);
 }
 
 /* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
index 9d6defc7564c5790ee86ee1ec661f79e891780ef..a3bdfda10773c3db514244b858ce053db1c15f4f 100644 (file)
@@ -74,8 +74,6 @@ struct fd_batch {
 
        struct fd_context *ctx;
 
-       struct util_queue_fence flush_fence;
-
        /* do we need to mem2gmem before rendering.  We don't, if for example,
         * there was a glClear() that invalidated the entire previous buffer
         * contents.  Keep track of which buffer(s) are cleared, or needs
@@ -247,8 +245,7 @@ struct fd_batch {
 struct fd_batch * fd_batch_create(struct fd_context *ctx, bool nondraw);
 
 void fd_batch_reset(struct fd_batch *batch);
-void fd_batch_sync(struct fd_batch *batch);
-void fd_batch_flush(struct fd_batch *batch, bool sync);
+void fd_batch_flush(struct fd_batch *batch);
 void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep);
 void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
 void fd_batch_check_size(struct fd_batch *batch);
index 82b285c9c6ba87d4d5b826b9dc9dcb5f0471d0d7..ec1d1acf9b324c56b53b45c7397820688c25ce36 100644 (file)
@@ -159,7 +159,7 @@ bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
                fd_context_unlock(ctx);
 
                for (unsigned i = 0; i < n; i++) {
-                       fd_batch_flush(batches[i], false);
+                       fd_batch_flush(batches[i]);
                }
        }
 
@@ -307,7 +307,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool non
                 */
                mtx_unlock(&ctx->screen->lock);
                DBG("%p: too many batches!  flush forced!", flush_batch);
-               fd_batch_flush(flush_batch, true);
+               fd_batch_flush(flush_batch);
                mtx_lock(&ctx->screen->lock);
 
                /* While the resources get cleaned up automatically, the flush_batch
index 47b7a27ad1f6e847f332dbeb5bb87b68a490b3b2..e396d61c6cbca814f09c0a384e8353f3e96f47f0 100644 (file)
@@ -75,7 +75,7 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
                batch->needs_out_fence_fd = true;
 
        if (!ctx->screen->reorder) {
-               fd_batch_flush(batch, true);
+               fd_batch_flush(batch);
        } else if (flags & PIPE_FLUSH_DEFERRED) {
                fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
        } else {
@@ -170,9 +170,6 @@ fd_context_destroy(struct pipe_context *pctx)
 
        fd_fence_ref(&ctx->last_fence, NULL);
 
-       if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
-               util_queue_destroy(&ctx->flush_queue);
-
        util_copy_framebuffer_state(&ctx->framebuffer, NULL);
        fd_batch_reference(&ctx->batch, NULL);  /* unref current batch */
        fd_bc_invalidate_context(ctx);
index 80f2462fdd97952baf1cc53e4356b153fc3b297e..f92790c0ecf4353f526c51454fee7935184f29a7 100644 (file)
@@ -173,8 +173,6 @@ struct fd_context {
        struct fd_screen *screen;
        struct fd_pipe *pipe;
 
-       struct util_queue flush_queue;
-
        struct blitter_context *blitter;
        void *clear_rs_state;
        struct primconvert_context *primconvert;
index a4724221b12522e458c8ef8f8aeb526c48c4e2a5..94e2aae9e68b929fc440766420bf2e3ad073233f 100644 (file)
@@ -464,7 +464,7 @@ fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
        batch->needs_flush = true;
        ctx->launch_grid(ctx, info);
 
-       fd_batch_flush(batch, false);
+       fd_batch_flush(batch);
 
        fd_batch_reference(&ctx->batch, save_batch);
        fd_context_all_dirty(ctx);
index 9d49f10ed94e444d4a27e2bbe2277396a140e0ab..71472da704a701b887294641cdabe3204bb919fb 100644 (file)
@@ -48,7 +48,7 @@ struct pipe_fence_handle {
 static void fence_flush(struct pipe_fence_handle *fence)
 {
        if (fence->batch)
-               fd_batch_flush(fence->batch, true);
+               fd_batch_flush(fence->batch);
        debug_assert(!fence->batch);
 }
 
index 49c71c4b6446dae3417dbb0cd6b6bbf3837268b3..89282f289665a6eb965bc284eef0319736c2f2c6 100644 (file)
@@ -139,7 +139,7 @@ fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
                         * spin forever:
                         */
                        if (aq->no_wait_cnt++ > 5)
-                               fd_batch_flush(rsc->write_batch, false);
+                               fd_batch_flush(rsc->write_batch);
                        return false;
                }
 
@@ -152,7 +152,7 @@ fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
        }
 
        if (rsc->write_batch)
-               fd_batch_flush(rsc->write_batch, true);
+               fd_batch_flush(rsc->write_batch);
 
        /* get the result: */
        fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
index f1d22328df8e9f4aed42539779960dabf708c5a8..bed59a53ab27f0540ddfd76e7cefede1f4db65e5 100644 (file)
@@ -209,7 +209,7 @@ fd_hw_get_query_result(struct fd_context *ctx, struct fd_query *q,
                         * spin forever:
                         */
                        if (hq->no_wait_cnt++ > 5)
-                               fd_batch_flush(rsc->write_batch, false);
+                               fd_batch_flush(rsc->write_batch);
                        return false;
                }
 
@@ -237,7 +237,7 @@ fd_hw_get_query_result(struct fd_context *ctx, struct fd_query *q,
                struct fd_resource *rsc = fd_resource(start->prsc);
 
                if (rsc->write_batch)
-                       fd_batch_flush(rsc->write_batch, true);
+                       fd_batch_flush(rsc->write_batch);
 
                /* some piglit tests at least do query with no draws, I guess: */
                if (!rsc->bo)
index d1df32cab9adef06ecfe716c3bf4abfa2db8da45..2e15bbadea11d0f020f9ab94b9fc4403c2b22477 100644 (file)
@@ -450,15 +450,14 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
                mtx_unlock(&ctx->screen->lock);
 
                foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
-                       fd_batch_flush(batch, false);
+                       fd_batch_flush(batch);
 
                foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
-                       fd_batch_sync(batch);
                        fd_batch_reference(&batches[batch->idx], NULL);
                }
                assert(rsc->batch_mask == 0);
        } else if (write_batch) {
-               fd_batch_flush(write_batch, true);
+               fd_batch_flush(write_batch);
        }
 
        fd_batch_reference(&write_batch, NULL);
@@ -560,21 +559,6 @@ fd_resource_transfer_map(struct pipe_context *pctx,
                        if (usage & PIPE_TRANSFER_READ) {
                                fd_blit_to_staging(ctx, trans);
 
-                               struct fd_batch *batch = NULL;
-
-                               fd_context_lock(ctx);
-                               fd_batch_reference_locked(&batch, staging_rsc->write_batch);
-                               fd_context_unlock(ctx);
-
-                               /* we can't fd_bo_cpu_prep() until the blit to staging
-                                * is submitted to kernel.. in that case write_batch
-                                * wouldn't be NULL yet:
-                                */
-                               if (batch) {
-                                       fd_batch_sync(batch);
-                                       fd_batch_reference(&batch, NULL);
-                               }
-
                                fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
                                                DRM_FREEDRENO_PREP_READ);
                        }
index 4f315488d1b39eafcbe8cc95ee34b00a17a984ae..a231441d314a09970f5a5a0016b2640d0440bfb7 100644 (file)
@@ -248,14 +248,14 @@ fd_set_framebuffer_state(struct pipe_context *pctx,
                         * multiple times to the same surface), so we might as
                         * well go ahead and flush this one:
                         */
-                       fd_batch_flush(old_batch, false);
+                       fd_batch_flush(old_batch);
                }
 
                fd_batch_reference(&old_batch, NULL);
        } else {
                DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
                                framebuffer->cbufs[0], framebuffer->zsbuf);
-               fd_batch_flush(ctx->batch, false);
+               fd_batch_flush(ctx->batch);
                util_copy_framebuffer_state(&ctx->batch->framebuffer, cso);
        }