freedreno/batch: split out helper for rb alloc
diff --git a/src/gallium/drivers/freedreno/freedreno_batch.c b/src/gallium/drivers/freedreno/freedreno_batch.c
index 3ee150fce75d1ab2d0bdf0588df62efede6f9765..86e8b5b7834e038a6ceead8193a670fd8a03e051 100644
 #include "freedreno_resource.h"
 #include "freedreno_query_hw.h"
 
-static void
-batch_init(struct fd_batch *batch)
+static struct fd_ringbuffer *
+alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags)
 {
        struct fd_context *ctx = batch->ctx;
-       enum fd_ringbuffer_flags flags = 0;
-       unsigned size = 0;
 
        /* if kernel is too old to support unlimited # of cmd buffers, we
         * have no option but to allocate large worst-case sizes so that
         * we don't need to grow the ringbuffer.  Performance is likely to
         * suffer, but there is no good alternative.
         *
-        * XXX I think we can just require new enough kernel for this?
+        * Otherwise, if supported, allocate a growable ring with an
+        * initial size of zero.
         */
-       if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
-                       (fd_mesa_debug & FD_DBG_NOGROW)){
-               size = 0x100000;
-       } else {
-               flags = FD_RINGBUFFER_GROWABLE;
+       if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) &&
+                       !(fd_mesa_debug & FD_DBG_NOGROW)){
+               flags |= FD_RINGBUFFER_GROWABLE;
+               sz = 0;
        }
 
+       return fd_submit_new_ringbuffer(batch->submit, sz, flags);
+}
+
+static void
+batch_init(struct fd_batch *batch)
+{
+       struct fd_context *ctx = batch->ctx;
+
        batch->submit = fd_submit_new(ctx->pipe);
        if (batch->nondraw) {
-               batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
-                               FD_RINGBUFFER_PRIMARY | flags);
+               batch->draw = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);
        } else {
-               batch->gmem = fd_submit_new_ringbuffer(batch->submit, size,
-                               FD_RINGBUFFER_PRIMARY | flags);
-               batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
-                               flags);
+               batch->gmem = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);
+               batch->draw = alloc_ring(batch, 0x100000, 0);
 
+               /* a6xx+ re-uses draw rb for both draw and binning pass: */
                if (ctx->screen->gpu_id < 600) {
-                       batch->binning = fd_submit_new_ringbuffer(batch->submit,
-                                       size, flags);
+                       batch->binning = alloc_ring(batch, 0x100000, 0);
                }
        }
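
For reference (not part of the patch), a sketch of what the new helper expands to at these call sites, following the two paths described in the comment above:

        /* new enough kernel and FD_DBG_NOGROW not set: the worst-case size is
         * dropped in favor of a growable ring:
         */
        fd_submit_new_ringbuffer(batch->submit, 0,
                        FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);

        /* old kernel (or nogrow debug option): keep the worst-case size: */
        fd_submit_new_ringbuffer(batch->submit, 0x100000, FD_RINGBUFFER_PRIMARY);
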
 
@@ -128,6 +131,11 @@ fd_batch_create(struct fd_context *ctx, bool nondraw)
 
        batch_init(batch);
 
+       fd_screen_assert_locked(ctx->screen);
+       if (BATCH_DEBUG) {
+               _mesa_set_add(ctx->screen->live_batches, batch);
+       }
+
        return batch;
 }
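
A note on the BATCH_DEBUG bookkeeping above: every live batch is tracked in a screen-wide set so that leaked or stuck batches can be inspected. A minimal sketch of walking that set from a debug hook (hypothetical helper; assumes util/set.h's set_foreach and that the caller holds the screen lock):

        static void
        dump_live_batches(struct fd_screen *screen)
        {
                set_foreach (screen->live_batches, entry) {
                        const struct fd_batch *batch = entry->key;
                        printf("live batch %p (idx %u)\n", (void *)batch, batch->idx);
                }
        }
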
 
@@ -161,6 +169,11 @@ batch_fini(struct fd_batch *batch)
                batch->lrz_clear = NULL;
        }
 
+       if (batch->epilogue) {
+               fd_ringbuffer_del(batch->epilogue);
+               batch->epilogue = NULL;
+       }
+
        if (batch->tile_setup) {
                fd_ringbuffer_del(batch->tile_setup);
                batch->tile_setup = NULL;
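
The epilogue ring freed here is presumably allocated lazily on first use rather than up front in batch_init(). A sketch of what such a getter could look like on top of the new alloc_ring() helper (hypothetical name and initial size):

        struct fd_ringbuffer *
        fd_batch_get_epilogue(struct fd_batch *batch)
        {
                if (!batch->epilogue)
                        batch->epilogue = alloc_ring(batch, 0x1000, 0);

                return batch->epilogue;
        }
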
@@ -266,6 +279,10 @@ __fd_batch_destroy(struct fd_batch *batch)
 
        fd_context_assert_locked(batch->ctx);
 
+       if (BATCH_DEBUG) {
+               _mesa_set_remove_key(ctx->screen->live_batches, batch);
+       }
+
        fd_bc_invalidate_batch(batch, true);
 
        batch_reset_resources_locked(batch);
@@ -392,12 +409,6 @@ flush_write_batch(struct fd_resource *rsc)
        fd_batch_reference_locked(&b, NULL);
 }
 
-static bool
-fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
-{
-       return rsc->batch_mask & (1 << batch->idx);
-}
-
 static void
 fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
 {
@@ -456,17 +467,10 @@ fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
 }
 
 void
-fd_batch_resource_read(struct fd_batch *batch, struct fd_resource *rsc)
+fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc)
 {
        fd_screen_assert_locked(batch->ctx->screen);
 
-       /* Early out, if we hit this then we know we don't have anyone else
-        * writing to it (since both _write and _read flush other writers), and
-        * that we've already recursed for stencil.
-        */
-       if (likely(fd_batch_references_resource(batch, rsc)))
-               return;
-
        if (rsc->stencil)
                fd_batch_resource_read(batch, rsc->stencil);
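
With the early-out removed from the slowpath and fd_batch_references_resource() dropped from this file, the fast path presumably moves into a header as a static inline wrapper. A sketch of what that could look like (assuming a header where struct fd_resource is visible):

        static inline bool
        fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
        {
                return rsc->batch_mask & (1 << batch->idx);
        }

        static inline void
        fd_batch_resource_read(struct fd_batch *batch, struct fd_resource *rsc)
        {
                /* Early out: if the batch already references the resource, we
                 * know there is no other writer (both _write and _read flush
                 * other writers) and that stencil has already been handled.
                 */
                if (unlikely(!fd_batch_references_resource(batch, rsc)))
                        fd_batch_resource_read_slowpath(batch, rsc);
        }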