freedreno/log: avoid duplicate ts's
src/gallium/drivers/freedreno/freedreno_batch.c
index de4c8198c201478dfaba4fc362a994714ff77479..106959b4fed9d1bfb952c94cd1cce982ebb2996e 100644
@@ -39,11 +39,9 @@ static void
 batch_init(struct fd_batch *batch)
 {
        struct fd_context *ctx = batch->ctx;
+       enum fd_ringbuffer_flags flags = 0;
        unsigned size = 0;
 
-       if (ctx->screen->reorder)
-               util_queue_fence_init(&batch->flush_fence);
-
        /* if kernel is too old to support unlimited # of cmd buffers, we
         * have no option but to allocate large worst-case sizes so that
         * we don't need to grow the ringbuffer.  Performance is likely to
@@ -54,21 +52,23 @@ batch_init(struct fd_batch *batch)
        if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
                        (fd_mesa_debug & FD_DBG_NOGROW)){
                size = 0x100000;
+       } else {
+               flags = FD_RINGBUFFER_GROWABLE;
        }
 
        batch->submit = fd_submit_new(ctx->pipe);
        if (batch->nondraw) {
                batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
-                               FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
+                               FD_RINGBUFFER_PRIMARY | flags);
        } else {
                batch->gmem = fd_submit_new_ringbuffer(batch->submit, size,
-                               FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
+                               FD_RINGBUFFER_PRIMARY | flags);
                batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
-                               FD_RINGBUFFER_GROWABLE);
+                               flags);
 
                if (ctx->screen->gpu_id < 600) {
                        batch->binning = fd_submit_new_ringbuffer(batch->submit,
-                                       size, FD_RINGBUFFER_GROWABLE);
+                                       size, flags);
                }
        }
 
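The hunk above folds the grow-vs-worst-case choice into a single flags local:
kernels without FD_VERSION_UNLIMITED_CMDS (or runs with FD_DBG_NOGROW) get a
fixed 1 MiB ring, everything else gets an initially-empty growable one.  A
minimal standalone sketch of that decision follows; the enum, helper name and
values are illustrative stand-ins, not the driver's actual API:

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-ins for the FD_RINGBUFFER_* flags in the diff */
enum ring_flags {
	RING_PRIMARY  = 1 << 0,
	RING_GROWABLE = 1 << 1,
};

/* Old kernels (or FD_DBG_NOGROW) cannot grow a ring, so allocate a
 * worst-case 1 MiB up front; otherwise start empty and mark it growable. */
static void
pick_ring_params(bool unlimited_cmds, bool nogrow,
		 unsigned *size, enum ring_flags *flags)
{
	*size = 0;
	*flags = 0;
	if (!unlimited_cmds || nogrow)
		*size = 0x100000;
	else
		*flags = RING_GROWABLE;
}

int main(void)
{
	unsigned size;
	enum ring_flags flags;

	pick_ring_params(true, false, &size, &flags);
	printf("size=0x%x, growable=%d\n", size, !!(flags & RING_GROWABLE));
	return 0;
}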
@@ -76,26 +76,34 @@ batch_init(struct fd_batch *batch)
        batch->fence = fd_fence_create(batch);
 
        batch->cleared = 0;
+       batch->fast_cleared = 0;
        batch->invalidated = 0;
        batch->restore = batch->resolve = 0;
        batch->needs_flush = false;
        batch->flushed = false;
        batch->gmem_reason = 0;
        batch->num_draws = 0;
+       batch->num_vertices = 0;
        batch->stage = FD_STAGE_NULL;
 
        fd_reset_wfi(batch);
 
        util_dynarray_init(&batch->draw_patches, NULL);
+       util_dynarray_init(&batch->fb_read_patches, NULL);
+
+       if (is_a2xx(ctx->screen)) {
+               util_dynarray_init(&batch->shader_patches, NULL);
+               util_dynarray_init(&batch->gmem_patches, NULL);
+       }
 
        if (is_a3xx(ctx->screen))
                util_dynarray_init(&batch->rbrc_patches, NULL);
 
-       util_dynarray_init(&batch->gmem_patches, NULL);
-
        assert(batch->resources->entries == 0);
 
        util_dynarray_init(&batch->samples, NULL);
+
+       list_inithead(&batch->log_chunks);
 }
 
 struct fd_batch *
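batch_init() now also initializes batch->log_chunks, presumably the per-batch
list of chunks used by freedreno's timestamped logging that the commit subject
refers to; batch_fini() below asserts the list is empty again, i.e. every
chunk must have been consumed by the time the batch is destroyed.
list_inithead()/list_is_empty() are mesa's intrusive util/list.h helpers.  A
minimal standalone model of that init/consume/assert-empty discipline, with a
toy list standing in for util/list.h:

#include <assert.h>
#include <stdbool.h>

/* toy intrusive doubly-linked list, standing in for mesa's util/list.h */
struct list_head {
	struct list_head *prev, *next;
};

static void list_inithead(struct list_head *h)
{
	h->prev = h->next = h;
}

static bool list_is_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_addtail(struct list_head *item, struct list_head *h)
{
	item->prev = h->prev;
	item->next = h;
	h->prev->next = item;
	h->prev = item;
}

static void list_del(struct list_head *item)
{
	item->prev->next = item->next;
	item->next->prev = item->prev;
	item->prev = item->next = item;
}

struct toy_batch {
	struct list_head log_chunks;
};

int main(void)
{
	struct toy_batch batch;
	struct list_head chunk;

	list_inithead(&batch.log_chunks);          /* batch_init() */
	list_addtail(&chunk, &batch.log_chunks);   /* logging adds a chunk */
	list_del(&chunk);                          /* chunk consumed at flush */
	assert(list_is_empty(&batch.log_chunks));  /* batch_fini() */
	return 0;
}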
@@ -133,7 +141,7 @@ batch_fini(struct fd_batch *batch)
        /* in case batch wasn't flushed but fence was created: */
        fd_fence_populate(batch->fence, 0, -1);
 
-       fd_fence_ref(NULL, &batch->fence, NULL);
+       fd_fence_ref(&batch->fence, NULL);
 
        fd_ringbuffer_del(batch->draw);
        if (!batch->nondraw) {
@@ -160,15 +168,25 @@ batch_fini(struct fd_batch *batch)
                batch->tile_fini = NULL;
        }
 
+       if (batch->tessellation) {
+               fd_bo_del(batch->tessfactor_bo);
+               fd_bo_del(batch->tessparam_bo);
+               fd_ringbuffer_del(batch->tess_addrs_constobj);
+       }
+
        fd_submit_del(batch->submit);
 
        util_dynarray_fini(&batch->draw_patches);
+       util_dynarray_fini(&batch->fb_read_patches);
+
+       if (is_a2xx(batch->ctx->screen)) {
+               util_dynarray_fini(&batch->shader_patches);
+               util_dynarray_fini(&batch->gmem_patches);
+       }
 
        if (is_a3xx(batch->ctx->screen))
                util_dynarray_fini(&batch->rbrc_patches);
 
-       util_dynarray_fini(&batch->gmem_patches);
-
        while (batch->samples.size > 0) {
                struct fd_hw_sample *samp =
                        util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
@@ -176,8 +194,7 @@ batch_fini(struct fd_batch *batch)
        }
        util_dynarray_fini(&batch->samples);
 
-       if (batch->ctx->screen->reorder)
-               util_queue_fence_destroy(&batch->flush_fence);
+       assert(list_is_empty(&batch->log_chunks));
 }
 
 static void
@@ -188,7 +205,7 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
 
        foreach_batch(dep, cache, batch->dependents_mask) {
                if (flush)
-                       fd_batch_flush(dep, false, false);
+                       fd_batch_flush(dep);
                fd_batch_reference(&dep, NULL);
        }
 
@@ -223,8 +240,6 @@ batch_reset(struct fd_batch *batch)
 {
        DBG("%p", batch);
 
-       fd_batch_sync(batch);
-
        batch_flush_reset_dependencies(batch, false);
        batch_reset_resources(batch);
 
@@ -267,37 +282,11 @@ __fd_batch_destroy(struct fd_batch *batch)
 void
 __fd_batch_describe(char* buf, const struct fd_batch *batch)
 {
-       util_sprintf(buf, "fd_batch<%u>", batch->seqno);
-}
-
-void
-fd_batch_sync(struct fd_batch *batch)
-{
-       if (!batch->ctx->screen->reorder)
-               return;
-       util_queue_fence_wait(&batch->flush_fence);
-}
-
-static void
-batch_flush_func(void *job, int id)
-{
-       struct fd_batch *batch = job;
-
-       DBG("%p", batch);
-
-       fd_gmem_render_tiles(batch);
-       batch_reset_resources(batch);
-}
-
-static void
-batch_cleanup_func(void *job, int id)
-{
-       struct fd_batch *batch = job;
-       fd_batch_reference(&batch, NULL);
+       sprintf(buf, "fd_batch<%u>", batch->seqno);
 }
 
 static void
-batch_flush(struct fd_batch *batch, bool force)
+batch_flush(struct fd_batch *batch)
 {
        DBG("%p: needs_flush=%d", batch, batch->needs_flush);
 
@@ -315,20 +304,10 @@ batch_flush(struct fd_batch *batch, bool force)
 
        batch->flushed = true;
 
-       if (batch->ctx->screen->reorder) {
-               struct fd_batch *tmp = NULL;
-               fd_batch_reference(&tmp, batch);
-
-               if (!util_queue_is_initialized(&batch->ctx->flush_queue))
-                       util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);
+       fd_fence_ref(&batch->ctx->last_fence, batch->fence);
 
-               util_queue_add_job(&batch->ctx->flush_queue,
-                               batch, &batch->flush_fence,
-                               batch_flush_func, batch_cleanup_func);
-       } else {
-               fd_gmem_render_tiles(batch);
-               batch_reset_resources(batch);
-       }
+       fd_gmem_render_tiles(batch);
+       batch_reset_resources(batch);
 
        debug_assert(batch->reference.count > 0);
 
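With the reorder flush_queue gone, batch_flush() renders tiles synchronously,
and it now parks a reference to the batch's fence in ctx->last_fence first,
presumably so a later flush with no pending work can still hand back a fence
to wait on.  The two-argument fd_fence_ref(&dst, src) style seen here and in
batch_fini() is a ref-new/unref-old pointer update; a toy model of that
pattern, with an illustrative refcounted fence type rather than the driver's:

#include <assert.h>
#include <stdlib.h>

struct fence {
	int refcount;
};

static struct fence *fence_create(void)
{
	struct fence *f = calloc(1, sizeof(*f));
	f->refcount = 1;
	return f;
}

/* fence_ref(&dst, src): point *dst at src, taking a reference on src (if
 * any) and dropping the reference previously held through *dst. */
static void fence_ref(struct fence **dst, struct fence *src)
{
	if (src)
		src->refcount++;
	if (*dst && --(*dst)->refcount == 0)
		free(*dst);
	*dst = src;
}

int main(void)
{
	struct fence *batch_fence = fence_create();
	struct fence *last_fence = NULL;

	fence_ref(&last_fence, batch_fence);   /* flush: remember last fence */
	fence_ref(&batch_fence, NULL);         /* batch_fini: drop batch's ref */
	assert(last_fence->refcount == 1);
	fence_ref(&last_fence, NULL);          /* context teardown */
	return 0;
}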
@@ -346,10 +325,9 @@ batch_flush(struct fd_batch *batch, bool force)
  *   a fence to sync on
  */
 void
-fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
+fd_batch_flush(struct fd_batch *batch)
 {
        struct fd_batch *tmp = NULL;
-       bool newbatch = false;
 
        /* NOTE: we need to hold an extra ref across the body of flush,
         * since the last ref to this batch could be dropped when cleaning
@@ -357,63 +335,39 @@ fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
         */
        fd_batch_reference(&tmp, batch);
 
-       if (batch == batch->ctx->batch) {
-               batch->ctx->batch = NULL;
-               newbatch = true;
-       }
-
-       batch_flush(tmp, force);
-
-       if (newbatch) {
-               struct fd_context *ctx = batch->ctx;
-               struct fd_batch *new_batch;
-
-               if (ctx->screen->reorder) {
-                       /* defer allocating new batch until one is needed for rendering
-                        * to avoid unused batches for apps that create many contexts
-                        */
-                       new_batch = NULL;
-               } else {
-                       new_batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, false);
-                       util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
-               }
+       batch_flush(tmp);
 
-               fd_batch_reference(&batch, NULL);
-               ctx->batch = new_batch;
-               fd_context_all_dirty(ctx);
+       if (batch == batch->ctx->batch) {
+               fd_batch_reference(&batch->ctx->batch, NULL);
        }
 
-       if (sync)
-               fd_batch_sync(tmp);
-
        fd_batch_reference(&tmp, NULL);
 }
 
-/* does 'batch' depend directly or indirectly on 'other' ? */
-static bool
-batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
+/* find a batch's dependents mask, including recursive dependencies: */
+static uint32_t
+recursive_dependents_mask(struct fd_batch *batch)
 {
        struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
        struct fd_batch *dep;
-
-       if (batch->dependents_mask & (1 << other->idx))
-               return true;
+       uint32_t dependents_mask = batch->dependents_mask;
 
        foreach_batch(dep, cache, batch->dependents_mask)
-               if (batch_depends_on(batch, dep))
-                       return true;
+               dependents_mask |= recursive_dependents_mask(dep);
 
-       return false;
+       return dependents_mask;
 }
 
 void
 fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
 {
+       pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
        if (batch->dependents_mask & (1 << dep->idx))
                return;
 
        /* a loop should not be possible */
-       debug_assert(!batch_depends_on(dep, batch));
+       debug_assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));
 
        struct fd_batch *other = NULL;
        fd_batch_reference_locked(&other, dep);
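batch_depends_on() is replaced by recursive_dependents_mask(), which ORs a
batch's dependents_mask with the masks of every batch reachable through it;
fd_batch_add_dep() then asserts that the batch being depended on does not
transitively reach back to the batch itself, which would be a dependency loop.
A standalone sketch of that bitmask walk; the fixed-size array, add_dep() and
the toy struct are illustrative, not the driver's batch cache:

#include <assert.h>
#include <stdint.h>

#define MAX_BATCHES 32

struct batch {
	unsigned idx;              /* bit position in the cache */
	uint32_t dependents_mask;  /* one bit per batch this one depends on */
};

static struct batch batches[MAX_BATCHES];

/* OR together the masks of every batch 'b' depends on, transitively. */
static uint32_t recursive_dependents_mask(const struct batch *b)
{
	uint32_t mask = b->dependents_mask;

	for (unsigned i = 0; i < MAX_BATCHES; i++)
		if (b->dependents_mask & (1u << i))
			mask |= recursive_dependents_mask(&batches[i]);
	return mask;
}

static void add_dep(struct batch *b, struct batch *dep)
{
	if (b->dependents_mask & (1u << dep->idx))
		return;
	/* adding 'dep' must not create a loop back to 'b' */
	assert(!((1u << b->idx) & recursive_dependents_mask(dep)));
	b->dependents_mask |= 1u << dep->idx;
}

int main(void)
{
	for (unsigned i = 0; i < MAX_BATCHES; i++)
		batches[i].idx = i;

	add_dep(&batches[0], &batches[1]);  /* batch 0 depends on batch 1 */
	add_dep(&batches[1], &batches[2]);  /* batch 1 depends on batch 2 */
	assert(recursive_dependents_mask(&batches[0]) == 0x6);
	return 0;
}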
@@ -425,10 +379,10 @@ static void
 flush_write_batch(struct fd_resource *rsc)
 {
        struct fd_batch *b = NULL;
-       fd_batch_reference(&b, rsc->write_batch);
+       fd_batch_reference_locked(&b, rsc->write_batch);
 
        mtx_unlock(&b->ctx->screen->lock);
-       fd_batch_flush(b, true, false);
+       fd_batch_flush(b);
        mtx_lock(&b->ctx->screen->lock);
 
        fd_bc_invalidate_batch(b, false);
@@ -502,7 +456,7 @@ fd_batch_check_size(struct fd_batch *batch)
        debug_assert(!batch->flushed);
 
        if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
-               fd_batch_flush(batch, true, false);
+               fd_batch_flush(batch);
                return;
        }
 
@@ -511,7 +465,7 @@ fd_batch_check_size(struct fd_batch *batch)
 
        struct fd_ringbuffer *ring = batch->draw;
        if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
-               fd_batch_flush(batch, true, false);
+               fd_batch_flush(batch);
 }
 
 /* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already