freedreno/a5xx: bordercolor fixes
mesa.git: src/gallium/drivers/freedreno/freedreno_batch.c
index 2dd7eda72adfd1f9a507e9a9bc35a5e665249372..9918c259e48e9dac69ae8b36d012d1f9be679241 100644
@@ -31,7 +31,9 @@
 
 #include "freedreno_batch.h"
 #include "freedreno_context.h"
+#include "freedreno_fence.h"
 #include "freedreno_resource.h"
+#include "freedreno_query_hw.h"
 
 static void
 batch_init(struct fd_batch *batch)
@@ -39,43 +41,59 @@ batch_init(struct fd_batch *batch)
        struct fd_context *ctx = batch->ctx;
        unsigned size = 0;
 
+       if (ctx->screen->reorder)
+               util_queue_fence_init(&batch->flush_fence);
+
        /* if kernel is too old to support unlimited # of cmd buffers, we
         * have no option but to allocate large worst-case sizes so that
         * we don't need to grow the ringbuffer.  Performance is likely to
         * suffer, but there is no good alternative.
         */
-       if (fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) {
+       if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
+                       (fd_mesa_debug & FD_DBG_NOGROW)) {
                size = 0x100000;
        }
 
-       batch->draw    = fd_ringbuffer_new(ctx->screen->pipe, size);
-       batch->binning = fd_ringbuffer_new(ctx->screen->pipe, size);
-       batch->gmem    = fd_ringbuffer_new(ctx->screen->pipe, size);
+       batch->draw    = fd_ringbuffer_new(ctx->pipe, size);
+       if (!batch->nondraw) {
+               batch->binning = fd_ringbuffer_new(ctx->pipe, size);
+               batch->gmem    = fd_ringbuffer_new(ctx->pipe, size);
 
-       fd_ringbuffer_set_parent(batch->gmem, NULL);
-       fd_ringbuffer_set_parent(batch->draw, batch->gmem);
-       fd_ringbuffer_set_parent(batch->binning, batch->gmem);
+               fd_ringbuffer_set_parent(batch->gmem, NULL);
+               fd_ringbuffer_set_parent(batch->draw, batch->gmem);
+               fd_ringbuffer_set_parent(batch->binning, batch->gmem);
+       } else {
+               fd_ringbuffer_set_parent(batch->draw, NULL);
+       }
+
+       batch->in_fence_fd = -1;
+       batch->fence = fd_fence_create(batch);
 
        batch->cleared = batch->partial_cleared = 0;
        batch->restore = batch->resolve = 0;
        batch->needs_flush = false;
        batch->gmem_reason = 0;
        batch->num_draws = 0;
+       batch->stage = FD_STAGE_NULL;
+
+       fd_reset_wfi(batch);
 
        /* reset maximal bounds: */
        batch->max_scissor.minx = batch->max_scissor.miny = ~0;
        batch->max_scissor.maxx = batch->max_scissor.maxy = 0;
 
-       util_dynarray_init(&batch->draw_patches);
+       util_dynarray_init(&batch->draw_patches, NULL);
 
        if (is_a3xx(ctx->screen))
-               util_dynarray_init(&batch->rbrc_patches);
+               util_dynarray_init(&batch->rbrc_patches, NULL);
 
        assert(batch->resources->entries == 0);
+
+       util_dynarray_init(&batch->samples, NULL);
 }
 
 struct fd_batch *
-fd_batch_create(struct fd_context *ctx)
+fd_batch_create(struct fd_context *ctx, bool nondraw)
 {
        struct fd_batch *batch = CALLOC_STRUCT(fd_batch);
 
@@ -86,6 +104,7 @@ fd_batch_create(struct fd_context *ctx)
 
        pipe_reference_init(&batch->reference, 1);
        batch->ctx = ctx;
+       batch->nondraw = nondraw;
 
        batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                        _mesa_key_pointer_equal);
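
Since fd_batch_create() now takes a nondraw flag, a caller that only needs a compute/blit-style batch (no binning pass, no gmem ring) can request a draw ring only. A minimal usage sketch, assuming a valid struct fd_context *ctx and omitting error handling (the call sites are illustrative, not part of this diff):

    struct fd_batch *draw    = fd_batch_create(ctx, false); /* draw + binning + gmem rings */
    struct fd_batch *nondraw = fd_batch_create(ctx, true);  /* draw ring only */

    /* ... record commands ... */

    /* dropping the last reference destroys the batch: */
    fd_batch_reference(&nondraw, NULL);
    fd_batch_reference(&draw, NULL);
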
@@ -98,14 +117,43 @@ fd_batch_create(struct fd_context *ctx)
 static void
 batch_fini(struct fd_batch *batch)
 {
+       pipe_resource_reference(&batch->query_buf, NULL);
+
+       if (batch->in_fence_fd != -1)
+               close(batch->in_fence_fd);
+
+       /* in case batch wasn't flushed but fence was created: */
+       fd_fence_populate(batch->fence, 0, -1);
+
+       fd_fence_ref(NULL, &batch->fence, NULL);
+
        fd_ringbuffer_del(batch->draw);
-       fd_ringbuffer_del(batch->binning);
-       fd_ringbuffer_del(batch->gmem);
+       if (!batch->nondraw) {
+               fd_ringbuffer_del(batch->binning);
+               fd_ringbuffer_del(batch->gmem);
+       } else {
+               debug_assert(!batch->binning);
+               debug_assert(!batch->gmem);
+       }
+       if (batch->lrz_clear) {
+               fd_ringbuffer_del(batch->lrz_clear);
+               batch->lrz_clear = NULL;
+       }
 
        util_dynarray_fini(&batch->draw_patches);
 
        if (is_a3xx(batch->ctx->screen))
                util_dynarray_fini(&batch->rbrc_patches);
+
+       while (batch->samples.size > 0) {
+               struct fd_hw_sample *samp =
+                       util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
+               fd_hw_sample_reference(batch->ctx, &samp, NULL);
+       }
+       util_dynarray_fini(&batch->samples);
+
+       if (batch->ctx->screen->reorder)
+               util_queue_fence_destroy(&batch->flush_fence);
 }
 
 static void
@@ -116,7 +164,7 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
 
        foreach_batch(dep, cache, batch->dependents_mask) {
                if (flush)
-                       fd_batch_flush(dep);
+                       fd_batch_flush(dep, false, false);
                fd_batch_reference(&dep, NULL);
        }
 
@@ -124,25 +172,37 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
 }
 
 static void
-batch_reset_resources(struct fd_batch *batch)
+batch_reset_resources_locked(struct fd_batch *batch)
 {
        struct set_entry *entry;
 
+       pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
        set_foreach(batch->resources, entry) {
                struct fd_resource *rsc = (struct fd_resource *)entry->key;
                _mesa_set_remove(batch->resources, entry);
                debug_assert(rsc->batch_mask & (1 << batch->idx));
                rsc->batch_mask &= ~(1 << batch->idx);
                if (rsc->write_batch == batch)
-                       fd_batch_reference(&rsc->write_batch, NULL);
+                       fd_batch_reference_locked(&rsc->write_batch, NULL);
        }
 }
 
+static void
+batch_reset_resources(struct fd_batch *batch)
+{
+       mtx_lock(&batch->ctx->screen->lock);
+       batch_reset_resources_locked(batch);
+       mtx_unlock(&batch->ctx->screen->lock);
+}
+
 static void
 batch_reset(struct fd_batch *batch)
 {
        DBG("%p", batch);
 
+       fd_batch_sync(batch);
+
        batch_flush_reset_dependencies(batch, false);
        batch_reset_resources(batch);
 
@@ -160,12 +220,14 @@ fd_batch_reset(struct fd_batch *batch)
 void
 __fd_batch_destroy(struct fd_batch *batch)
 {
-       fd_bc_invalidate_batch(batch, true);
-
        DBG("%p", batch);
 
        util_copy_framebuffer_state(&batch->framebuffer, NULL);
 
+       mtx_lock(&batch->ctx->screen->lock);
+       fd_bc_invalidate_batch(batch, true);
+       mtx_unlock(&batch->ctx->screen->lock);
+
        batch_fini(batch);
 
        batch_reset_resources(batch);
@@ -184,41 +246,101 @@ __fd_batch_describe(char* buf, const struct fd_batch *batch)
        util_sprintf(buf, "fd_batch<%u>", batch->seqno);
 }
 
+void
+fd_batch_sync(struct fd_batch *batch)
+{
+       if (!batch->ctx->screen->reorder)
+               return;
+       util_queue_fence_wait(&batch->flush_fence);
+}
+
+static void
+batch_flush_func(void *job, int id)
+{
+       struct fd_batch *batch = job;
+
+       fd_gmem_render_tiles(batch);
+       batch_reset_resources(batch);
+}
+
+static void
+batch_cleanup_func(void *job, int id)
+{
+       struct fd_batch *batch = job;
+       fd_batch_reference(&batch, NULL);
+}
+
 static void
-batch_flush(struct fd_batch *batch)
+batch_flush(struct fd_batch *batch, bool force)
 {
        DBG("%p: needs_flush=%d", batch, batch->needs_flush);
 
-       if (!batch->needs_flush)
+       if (!batch->needs_flush) {
+               if (force) {
+                       fd_gmem_render_noop(batch);
+                       goto out;
+               }
                return;
+       }
 
        batch->needs_flush = false;
 
+       /* close out the draw cmds by making sure any active queries are
+        * paused:
+        */
+       fd_batch_set_stage(batch, FD_STAGE_NULL);
+
+       fd_context_all_dirty(batch->ctx);
        batch_flush_reset_dependencies(batch, true);
 
-       fd_gmem_render_tiles(batch);
+       if (batch->ctx->screen->reorder) {
+               struct fd_batch *tmp = NULL;
+               fd_batch_reference(&tmp, batch);
 
-       batch_reset_resources(batch);
+               if (!util_queue_is_initialized(&batch->ctx->flush_queue))
+                       util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);
+
+               util_queue_add_job(&batch->ctx->flush_queue,
+                               batch, &batch->flush_fence,
+                               batch_flush_func, batch_cleanup_func);
+       } else {
+               fd_gmem_render_tiles(batch);
+               batch_reset_resources(batch);
+       }
 
        debug_assert(batch->reference.count > 0);
 
+out:
        if (batch == batch->ctx->batch) {
                batch_reset(batch);
        } else {
+               mtx_lock(&batch->ctx->screen->lock);
                fd_bc_invalidate_batch(batch, false);
+               mtx_unlock(&batch->ctx->screen->lock);
        }
 }
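
The reorder path above works as a reference hand-off: fd_batch_reference(&tmp, batch) takes a reference on behalf of the queued job, batch_flush_func() does the actual rendering on the queue thread, and batch_cleanup_func() drops that reference when the job completes. A condensed sketch of the lifecycle, using only names from this file (control flow simplified, not a drop-in replacement):

    struct fd_batch *tmp = NULL;
    fd_batch_reference(&tmp, batch);            /* ref owned by the queued job */
    util_queue_add_job(&ctx->flush_queue, batch,
                       &batch->flush_fence,
                       batch_flush_func,        /* renders, resets resources */
                       batch_cleanup_func);     /* drops the job's reference */
    /* anyone who needs the flush completed waits on the fence: */
    util_queue_fence_wait(&batch->flush_fence);
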
 
+/* NOTE: could drop the last ref to batch
+ *
+ * @sync: synchronize with flush_queue, ensures batch is *actually* flushed
+ *   to kernel before this returns, as opposed to just being queued to be
+ *   flushed
+ * @force: force a flush even if no rendering, mostly useful if you need
+ *   a fence to sync on
+ */
 void
-fd_batch_flush(struct fd_batch *batch)
+fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
 {
        /* NOTE: we need to hold an extra ref across the body of flush,
         * since the last ref to this batch could be dropped when cleaning
         * up used_resources
         */
        struct fd_batch *tmp = NULL;
+
        fd_batch_reference(&tmp, batch);
-       batch_flush(tmp);
+       batch_flush(tmp, force);
+       if (sync)
+               fd_batch_sync(tmp);
        fd_batch_reference(&tmp, NULL);
 }
 
@@ -239,8 +361,8 @@ batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
        return false;
 }
 
-static void
-batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
+void
+fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
 {
        if (batch->dependents_mask & (1 << dep->idx))
                return;
@@ -250,10 +372,12 @@ batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
         */
        if (batch_depends_on(dep, batch)) {
                DBG("%p: flush forced on %p!", batch, dep);
-               fd_batch_flush(dep);
+               mtx_unlock(&batch->ctx->screen->lock);
+               fd_batch_flush(dep, false, false);
+               mtx_lock(&batch->ctx->screen->lock);
        } else {
                struct fd_batch *other = NULL;
-               fd_batch_reference(&other, dep);
+               fd_batch_reference_locked(&other, dep);
                batch->dependents_mask |= (1 << dep->idx);
                DBG("%p: added dependency on %p", batch, dep);
        }
@@ -262,11 +386,16 @@ batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
 void
 fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
 {
+       pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
        if (rsc->stencil)
                fd_batch_resource_used(batch, rsc->stencil, write);
 
        DBG("%p: %s %p", batch, write ? "write" : "read", rsc);
 
+       if (write)
+               rsc->valid = true;
+
        /* note, invalidate write batch, to avoid further writes to rsc
         * resulting in a write-after-read hazard.
         */
@@ -276,22 +405,37 @@ fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool wri
                if (rsc->batch_mask != (1 << batch->idx)) {
                        struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
                        struct fd_batch *dep;
+
+                       if (rsc->write_batch && rsc->write_batch != batch) {
+                               struct fd_batch *b = NULL;
+                               fd_batch_reference(&b, rsc->write_batch);
+
+                               mtx_unlock(&batch->ctx->screen->lock);
+                               fd_batch_flush(b, true, false);
+                               mtx_lock(&batch->ctx->screen->lock);
+
+                               fd_bc_invalidate_batch(b, false);
+                               fd_batch_reference_locked(&b, NULL);
+                       }
+
                        foreach_batch(dep, cache, rsc->batch_mask) {
                                struct fd_batch *b = NULL;
+                               if (dep == batch)
+                                       continue;
-                               /* note that batch_add_dep could flush and unref dep, so
+                               /* note that fd_batch_add_dep could flush and unref dep, so
                                 * we need to hold a reference to keep it live for the
                                 * fd_bc_invalidate_batch()
                                 */
                                fd_batch_reference(&b, dep);
-                               batch_add_dep(batch, b);
+                               fd_batch_add_dep(batch, b);
                                fd_bc_invalidate_batch(b, false);
                                fd_batch_reference_locked(&b, NULL);
                        }
                }
-               fd_batch_reference(&rsc->write_batch, batch);
+               fd_batch_reference_locked(&rsc->write_batch, batch);
        } else {
                if (rsc->write_batch) {
-                       batch_add_dep(batch, rsc->write_batch);
+                       fd_batch_add_dep(batch, rsc->write_batch);
                        fd_bc_invalidate_batch(rsc->write_batch, false);
                }
        }
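
fd_batch_resource_used() is the core of the implicit-sync tracking: a write steals the resource from any other writer (flushing that writer first), while a read adds a dependency on the current writer. Per the assert added above, callers must now hold the screen lock. A hedged sketch of a typical call site (the surrounding draw-time loop and the prsc_* names are assumed, not shown in this hunk):

    mtx_lock(&ctx->screen->lock);
    fd_batch_resource_used(batch, fd_resource(prsc_read),  false); /* read */
    fd_batch_resource_used(batch, fd_resource(prsc_write), true);  /* write */
    mtx_unlock(&ctx->screen->lock);
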
@@ -314,5 +458,20 @@ fd_batch_check_size(struct fd_batch *batch)
        struct fd_ringbuffer *ring = batch->draw;
        if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
                        (fd_mesa_debug & FD_DBG_FLUSH))
-               fd_batch_flush(batch);
+               fd_batch_flush(batch, true, false);
+}
+
+/* emit a WAIT_FOR_IDLE only if needed, i.e. if there has not already
+ * been one since the last draw:
+ */
+void
+fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
+{
+       if (batch->needs_wfi) {
+               if (batch->ctx->screen->gpu_id >= 500)
+                       OUT_WFI5(ring);
+               else
+                       OUT_WFI(ring);
+               batch->needs_wfi = false;
+       }
 }
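
needs_wfi is set again by fd_reset_wfi() (called from batch_init() above; the per-draw call sites are outside this diff), so at most one WAIT_FOR_IDLE is emitted between draws. A sketch of the intended pattern around a register write that requires idle hardware (REG_EXAMPLE and value are hypothetical):

    fd_wfi(batch, ring);             /* emits WFI/WFI5 only if one is pending */
    OUT_PKT0(ring, REG_EXAMPLE, 1);  /* hypothetical register write */
    OUT_RING(ring, value);

    /* after the hw is made busy again (e.g. a draw): */
    fd_reset_wfi(batch);             /* next fd_wfi() will emit again */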