radeonsi/nir: call nir_serialize only once per shader
[mesa.git] / src / gallium / drivers / freedreno / freedreno_batch_cache.c
index c4640a7cfeee83d85729f54e12a6fb4ebc8e3616..82b285c9c6ba87d4d5b826b9dc9dcb5f0471d0d7 100644 (file)
@@ -81,7 +81,8 @@ struct key {
        struct {
                struct pipe_resource *texture;
                union pipe_surface_desc u;
-               uint16_t pos, format;
+               uint8_t pos, samples;
+               uint16_t format;
        } surf[0];
 };
 
@@ -144,10 +145,11 @@ bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
        }
 
        if (deferred) {
-               struct fd_batch *current_batch = ctx->batch;
+               struct fd_batch *current_batch = fd_context_batch(ctx);
 
                for (unsigned i = 0; i < n; i++) {
-                       if (batches[i] != current_batch) {
+                       if (batches[i] && (batches[i]->ctx == ctx) &&
+                                       (batches[i] != current_batch)) {
                                fd_batch_add_dep(current_batch, batches[i]);
                        }
                }
@@ -157,7 +159,7 @@ bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
                fd_context_unlock(ctx);
 
                for (unsigned i = 0; i < n; i++) {
-                       fd_batch_flush(batches[i], false, false);
+                       fd_batch_flush(batches[i], false);
                }
        }
 
@@ -199,8 +201,18 @@ fd_bc_invalidate_context(struct fd_context *ctx)
        mtx_unlock(&ctx->screen->lock);
 }
 
+/**
+ * Note that when batch is flushed, it needs to remain in the cache so
+ * that fd_bc_invalidate_resource() can work; otherwise we can have
+ * the case where a rsc is destroyed while a batch still has a dangling
+ * reference to it.
+ *
+ * Note that the cmdstream (or, after the SUBMIT ioctl, the kernel)
+ * would have a reference to the underlying bo, so it is ok for the
+ * rsc to be destroyed before the batch.
+ */
 void
-fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
+fd_bc_invalidate_batch(struct fd_batch *batch, bool remove)
 {
        if (!batch)
                return;
@@ -208,9 +220,9 @@ fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
        struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
        struct key *key = (struct key *)batch->key;
 
-       pipe_mutex_assert_locked(batch->ctx->screen->lock);
+       fd_context_assert_locked(batch->ctx);
 
-       if (destroy) {
+       if (remove) {
                cache->batches[batch->idx] = NULL;
                cache->batch_mask &= ~(1 << batch->idx);
        }
@@ -259,7 +271,7 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
 }
 
 struct fd_batch *
-fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
+fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw)
 {
        struct fd_batch *batch;
        uint32_t idx;
@@ -271,7 +283,6 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
                for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
                        batch = cache->batches[i];
                        debug_printf("%d: needs_flush=%d, depends:", batch->idx, batch->needs_flush);
-                       struct set_entry *entry;
                        set_foreach(batch->dependencies, entry) {
                                struct fd_batch *dep = (struct fd_batch *)entry->key;
                                debug_printf(" %d", dep->idx);
@@ -296,7 +307,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
                 */
                mtx_unlock(&ctx->screen->lock);
                DBG("%p: too many batches!  flush forced!", flush_batch);
-               fd_batch_flush(flush_batch, true, false);
+               fd_batch_flush(flush_batch, true);
                mtx_lock(&ctx->screen->lock);
 
                /* While the resources get cleaned up automatically, the flush_batch
@@ -322,7 +333,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
 
        idx--;              /* bit zero returns 1 for ffs() */
 
-       batch = fd_batch_create(ctx, false);
+       batch = fd_batch_create(ctx, nondraw);
        if (!batch)
                goto out;
 
@@ -354,7 +365,7 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
                return batch;
        }
 
-       batch = fd_bc_alloc_batch(cache, ctx);
+       batch = fd_bc_alloc_batch(cache, ctx, false);
 #ifdef DEBUG
        DBG("%p: hash=0x%08x, %ux%u, %u layers, %u samples", batch, hash,
                        key->width, key->height, key->layers, key->samples);
@@ -391,6 +402,7 @@ key_surf(struct key *key, unsigned idx, unsigned pos, struct pipe_surface *psurf
        key->surf[idx].texture = psurf->texture;
        key->surf[idx].u = psurf->u;
        key->surf[idx].pos = pos;
+       key->surf[idx].samples = MAX2(1, psurf->nr_samples);
        key->surf[idx].format = psurf->format;
 }
 
@@ -404,7 +416,7 @@ fd_batch_from_fb(struct fd_batch_cache *cache, struct fd_context *ctx,
        key->width = pfb->width;
        key->height = pfb->height;
        key->layers = pfb->layers;
-       key->samples = pfb->samples;
+       key->samples = util_framebuffer_get_num_samples(pfb);
        key->ctx = ctx;
 
        if (pfb->zsbuf)