etnaviv: Rework resource status tracking
[mesa.git] src/gallium/drivers/freedreno/freedreno_batch_cache.c
index c947a559df9f64e773181e3199b9815ffcd19738..82b285c9c6ba87d4d5b826b9dc9dcb5f0471d0d7 100644
@@ -81,7 +81,8 @@ struct key {
        struct {
                struct pipe_resource *texture;
                union pipe_surface_desc u;
-               uint16_t pos, format;
+               uint8_t pos, samples;
+               uint16_t format;
        } surf[0];
 };
 
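A sizing note on this hunk: the per-surface entry stays at four bytes of scalar fields (previously two uint16_t, now two uint8_t plus one uint16_t), so recording the per-surface sample count does not grow the key — pos is a small attachment index and sample counts are tiny, so a byte each is plenty, while enum pipe_format values still fit in 16 bits. Since surf[0] is a zero-length trailing array, keys are allocated by surface count; the allocation helper is not shown in this diff, but presumably amounts to something like:

/* Assumed shape of the key allocator (not part of this hunk): the fixed
 * header plus num_surfs per-surface entries, zero-initialized so unused
 * bytes hash consistently.
 */
static struct key *
key_alloc(unsigned num_surfs)
{
	struct key *key = calloc(1, sizeof(struct key) +
			num_surfs * sizeof(key->surf[0]));
	return key;
}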
@@ -124,23 +125,64 @@ fd_bc_fini(struct fd_batch_cache *cache)
        _mesa_hash_table_destroy(cache->ht, NULL);
 }
 
-uint32_t
-fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+static void
+bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
 {
-       struct hash_entry *entry;
-       uint32_t timestamp = 0;
+       /* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
+        * can cause batches to be unref'd and freed under our feet, so grab
+        * a reference to all the batches we need up-front.
+        */
+       struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};
+       struct fd_batch *batch;
+       unsigned n = 0;
 
-       hash_table_foreach(cache->ht, entry) {
-               struct fd_batch *batch = NULL;
-               fd_batch_reference(&batch, (struct fd_batch *)entry->data);
+       fd_context_lock(ctx);
+
+       foreach_batch(batch, cache, cache->batch_mask) {
                if (batch->ctx == ctx) {
-                       fd_batch_flush(batch);
-                       timestamp = MAX2(timestamp, fd_ringbuffer_timestamp(batch->gmem));
+                       fd_batch_reference_locked(&batches[n++], batch);
                }
-               fd_batch_reference(&batch, NULL);
        }
 
-       return timestamp;
+       if (deferred) {
+               struct fd_batch *current_batch = fd_context_batch(ctx);
+
+               for (unsigned i = 0; i < n; i++) {
+                       if (batches[i] && (batches[i]->ctx == ctx) &&
+                                       (batches[i] != current_batch)) {
+                               fd_batch_add_dep(current_batch, batches[i]);
+                       }
+               }
+
+               fd_context_unlock(ctx);
+       } else {
+               fd_context_unlock(ctx);
+
+               for (unsigned i = 0; i < n; i++) {
+                       fd_batch_flush(batches[i], false);
+               }
+       }
+
+       for (unsigned i = 0; i < n; i++) {
+               fd_batch_reference(&batches[i], NULL);
+       }
+}
+
+void
+fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+       bc_flush(cache, ctx, false);
+}
+
+/* deferred flush doesn't actually flush, but it makes the current
+ * batch depend on every other batch associated with the context.
+ * So when the current batch gets flushed, all the other batches
+ * that came before it get flushed as well.
+ */
+void
+fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+       bc_flush(cache, ctx, true);
 }
 
 void
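Both entry points funnel into bc_flush(), which snapshots the batches with references held, under the context lock, precisely because fd_batch_flush() and fd_batch_add_dep() can drop batches from the cache mid-iteration. The gallium-facing caller is outside this diff; presumably fd_context_flush() picks the deferred path based on the PIPE_FLUSH_DEFERRED flag, roughly like this sketch (function body assumed, not taken from the patch):

/* Assumed caller, for illustration only: */
static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
		unsigned flags)
{
	struct fd_context *ctx = fd_context(pctx);

	if (flags & PIPE_FLUSH_DEFERRED)
		fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
	else
		fd_bc_flush(&ctx->screen->batch_cache, ctx);
}

Note the asymmetry inside bc_flush(): the deferred branch keeps the lock held across fd_batch_add_dep(), which expects it, while the immediate branch unlocks first, since fd_batch_flush() evidently takes the same lock internally.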
@@ -149,21 +191,38 @@ fd_bc_invalidate_context(struct fd_context *ctx)
        struct fd_batch_cache *cache = &ctx->screen->batch_cache;
        struct fd_batch *batch;
 
+       mtx_lock(&ctx->screen->lock);
+
        foreach_batch(batch, cache, cache->batch_mask) {
-               if (batch->ctx == ctx) {
-                       fd_batch_reset(batch);
-                       fd_batch_reference(&batch, NULL);
-               }
+               if (batch->ctx == ctx)
+                       fd_bc_invalidate_batch(batch, true);
        }
+
+       mtx_unlock(&ctx->screen->lock);
 }
 
+/**
+ * Note that when a batch is flushed, it needs to remain in the cache so
+ * that fd_bc_invalidate_resource() can work; otherwise we can have
+ * the case where a rsc is destroyed while a batch still has a dangling
+ * reference to it.
+ *
+ * Note that the cmdstream (or, after the SUBMIT ioctl, the kernel)
+ * would have a reference to the underlying bo, so it is ok for the
+ * rsc to be destroyed before the batch.
+ */
 void
-fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
+fd_bc_invalidate_batch(struct fd_batch *batch, bool remove)
 {
+       if (!batch)
+               return;
+
        struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
        struct key *key = (struct key *)batch->key;
 
-       if (destroy) {
+       fd_context_assert_locked(batch->ctx);
+
+       if (remove) {
                cache->batches[batch->idx] = NULL;
                cache->batch_mask &= ~(1 << batch->idx);
        }
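The fd_context_lock()/fd_context_unlock()/fd_context_assert_locked() helpers are not defined in this file; given the direct mtx_lock(&screen->lock) calls elsewhere in the patch, they are presumably thin wrappers around the screen mutex, along these lines (assumed definitions, mirroring the usual trylock-based assert for a non-recursive c11/threads.h mutex):

static inline void
fd_context_lock(struct fd_context *ctx)
{
	mtx_lock(&ctx->screen->lock);
}

static inline void
fd_context_unlock(struct fd_context *ctx)
{
	mtx_unlock(&ctx->screen->lock);
}

static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
#ifdef DEBUG
	/* only proves the lock is held by *somebody*; a non-recursive
	 * mutex can't tell us whether it is this thread:
	 */
	int ret = mtx_trylock(&ctx->screen->lock);
	assert(ret == thrd_busy);
	if (ret == thrd_success)
		mtx_unlock(&ctx->screen->lock);   /* undo an accidental lock */
#endif
}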
@@ -188,8 +247,10 @@ fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
 void
 fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
 {
-       struct fd_screen *screen = fd_screen(rsc->base.b.screen);
-               struct fd_batch *batch;
+       struct fd_screen *screen = fd_screen(rsc->base.screen);
+       struct fd_batch *batch;
+
+       mtx_lock(&screen->lock);
 
        if (destroy) {
                foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
@@ -198,27 +259,30 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
                }
                rsc->batch_mask = 0;
 
-               fd_batch_reference(&rsc->write_batch, NULL);
+               fd_batch_reference_locked(&rsc->write_batch, NULL);
        }
 
        foreach_batch(batch, &screen->batch_cache, rsc->bc_batch_mask)
                fd_bc_invalidate_batch(batch, false);
 
        rsc->bc_batch_mask = 0;
+
+       mtx_unlock(&screen->lock);
 }
 
 struct fd_batch *
-fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
+fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw)
 {
        struct fd_batch *batch;
        uint32_t idx;
 
+       mtx_lock(&ctx->screen->lock);
+
        while ((idx = ffs(~cache->batch_mask)) == 0) {
 #if 0
                for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
                        batch = cache->batches[i];
                        debug_printf("%d: needs_flush=%d, depends:", batch->idx, batch->needs_flush);
-                       struct set_entry *entry;
                        set_foreach(batch->dependencies, entry) {
                                struct fd_batch *dep = (struct fd_batch *)entry->key;
                                debug_printf(" %d", dep->idx);
@@ -235,10 +299,16 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
                                        !cache->batches[i]->needs_flush)
                                continue;
                        if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
-                               fd_batch_reference(&flush_batch, cache->batches[i]);
+                               fd_batch_reference_locked(&flush_batch, cache->batches[i]);
                }
+
+               /* we can drop the lock temporarily here; since we hold a ref,
+                * flush_batch won't disappear under us.
+                */
+               mtx_unlock(&ctx->screen->lock);
                DBG("%p: too many batches!  flush forced!", flush_batch);
-               fd_batch_flush(flush_batch);
+               fd_batch_flush(flush_batch, true);
+               mtx_lock(&ctx->screen->lock);
 
                /* While the resources get cleaned up automatically, the flush_batch
                 * doesn't get removed from the dependencies of other batches, so
@@ -254,18 +324,18 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
                        if (other->dependents_mask & (1 << flush_batch->idx)) {
                                other->dependents_mask &= ~(1 << flush_batch->idx);
                                struct fd_batch *ref = flush_batch;
-                               fd_batch_reference(&ref, NULL);
+                               fd_batch_reference_locked(&ref, NULL);
                        }
                }
 
-               fd_batch_reference(&flush_batch, NULL);
+               fd_batch_reference_locked(&flush_batch, NULL);
        }
 
        idx--;              /* bit zero returns 1 for ffs() */
 
-       batch = fd_batch_create(ctx);
+       batch = fd_batch_create(ctx, nondraw);
        if (!batch)
-               return NULL;
+               goto out;
 
        batch->seqno = cache->cnt++;
        batch->idx = idx;
@@ -274,6 +344,9 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
        debug_assert(cache->batches[idx] == NULL);
        cache->batches[idx] = batch;
 
+out:
+       mtx_unlock(&ctx->screen->lock);
+
        return batch;
 }
 
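The cleanup loop above leans on an invariant that is easy to miss: every bit set in a batch's dependents_mask pins one reference on the batch it points at. Under that assumption, fd_batch_add_dep() — used by the deferred-flush path, but not shown in this diff — would look roughly like:

/* Rough sketch of the presumed invariant, not the verbatim implementation: */
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;   /* already tracked, already holding a ref */

	struct fd_batch *ref = NULL;
	fd_batch_reference_locked(&ref, dep);     /* one ref per mask bit */
	batch->dependents_mask |= (1 << dep->idx);
}

That invariant is also why the loop drops the reference through a throwaway pointer: fd_batch_reference_locked(&ref, NULL) releases exactly one reference without NULLing the local flush_batch, which is still needed for the final unref.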
@@ -292,7 +365,7 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
                return batch;
        }
 
-       batch = fd_bc_alloc_batch(cache, ctx);
+       batch = fd_bc_alloc_batch(cache, ctx, false);
 #ifdef DEBUG
        DBG("%p: hash=0x%08x, %ux%u, %u layers, %u samples", batch, hash,
                        key->width, key->height, key->layers, key->samples);
@@ -307,6 +380,8 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
        if (!batch)
                return NULL;
 
+       mtx_lock(&ctx->screen->lock);
+
        _mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
        batch->key = key;
        batch->hash = hash;
@@ -316,6 +391,8 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
                rsc->bc_batch_mask = (1 << batch->idx);
        }
 
+       mtx_unlock(&ctx->screen->lock);
+
        return batch;
 }
 
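For context, the cache-hit half of batch_from_key() sits just above the first hunk of this function and presumably reuses the caller-computed hash, so the key is hashed only once; a sketch against the _mesa_hash_table pre-hashed API:

/* Sketch of the lookup half (assumed, not part of this diff): */
struct hash_entry *entry =
	_mesa_hash_table_search_pre_hashed(cache->ht, hash, key);
if (entry) {
	free(key);   /* the existing batch already owns an identical key */
	fd_batch_reference(&batch, (struct fd_batch *)entry->data);
	return batch;
}

The insertion side, shown above, now happens under the screen lock so it cannot race with fd_bc_invalidate_batch() removing entries on another thread.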
@@ -325,6 +402,7 @@ key_surf(struct key *key, unsigned idx, unsigned pos, struct pipe_surface *psurf
        key->surf[idx].texture = psurf->texture;
        key->surf[idx].u = psurf->u;
        key->surf[idx].pos = pos;
+       key->surf[idx].samples = MAX2(1, psurf->nr_samples);
        key->surf[idx].format = psurf->format;
 }
 
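The MAX2(1, ...) clamp matters because, by gallium convention, nr_samples == 0 on a pipe_surface means "inherit the sample count from the resource". Clamping makes an unset count and an explicit single-sample count produce identical key bits, avoiding spurious cache misses; a hypothetical illustration, not code from the patch:

/* Two views of the same single-sampled surface: nr_samples left at 0
 * ("use the resource's count") vs. filled in explicitly as 1.
 */
struct pipe_surface a = { .format = PIPE_FORMAT_R8G8B8A8_UNORM, .nr_samples = 0 };
struct pipe_surface b = { .format = PIPE_FORMAT_R8G8B8A8_UNORM, .nr_samples = 1 };
assert(MAX2(1, a.nr_samples) == MAX2(1, b.nr_samples));   /* same key */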
@@ -338,7 +416,7 @@ fd_batch_from_fb(struct fd_batch_cache *cache, struct fd_context *ctx,
        key->width = pfb->width;
        key->height = pfb->height;
        key->layers = pfb->layers;
-       key->samples = pfb->samples;
+       key->samples = util_framebuffer_get_num_samples(pfb);
        key->ctx = ctx;
 
        if (pfb->zsbuf)
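Finally, the last hunk stops trusting pfb->samples directly: that field is only authoritative for attachment-less rendering (ARB_framebuffer_no_attachments). util_framebuffer_get_num_samples() from util/u_framebuffer.c derives the effective count from the first bound attachment instead; its rough shape, paraphrased from memory rather than copied:

unsigned
util_framebuffer_get_num_samples(const struct pipe_framebuffer_state *fb)
{
	unsigned i;

	for (i = 0; i < fb->nr_cbufs; i++)
		if (fb->cbufs[i])
			return MAX2(1, fb->cbufs[i]->texture->nr_samples);

	if (fb->zsbuf)
		return MAX2(1, fb->zsbuf->texture->nr_samples);

	/* no attachments: the framebuffer state's own count applies */
	return MAX2(1, fb->samples);
}

Together with the key_surf() change above, this keeps the batch key's samples fields consistent whether the sample count comes from the surface, the resource, or the framebuffer state.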