freedreno: replace fnv1a hash function with xxhash
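
The rewritten key_hash() builds the hash incrementally: it runs XXH32 over
the fixed-size head of struct key, then feeds that digest back in as the
seed when hashing the variable-length surf[] tail. A minimal standalone
sketch of the same chaining pattern follows; demo_key and demo_key_hash()
are made-up stand-ins for the real struct key, and it assumes an xxHash
install that provides <xxhash.h>.

    /* Chained XXH32 over a fixed head plus a variable-length tail. */
    #define XXH_INLINE_ALL
    #include <xxhash.h>

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_key {
        uint32_t width, height;   /* fixed-size head of the key */
        unsigned num_surfs;       /* how many surf[] entries are in use */
        struct {
            uint16_t pos, format;
        } surf[4];
    };

    static uint32_t
    demo_key_hash(const struct demo_key *key)
    {
        uint32_t hash = 0;
        /* hash the head up to (but not including) the surf[] array */
        hash = XXH32(key, offsetof(struct demo_key, surf[0]), hash);
        /* then only the entries actually in use, seeded with the digest
         * of the head so the two regions stay coupled
         */
        hash = XXH32(key->surf, sizeof(key->surf[0]) * key->num_surfs, hash);
        return hash;
    }

    int
    main(void)
    {
        struct demo_key key = {
            .width = 1920, .height = 1080,
            .num_surfs = 1,
            .surf = { { .pos = 0, .format = 1 } },
        };
        printf("key hash: 0x%08x\n", demo_key_hash(&key));
        return 0;
    }

Seeding the second XXH32() call with the result of the first keeps the two
regions order-sensitive without copying them into one contiguous buffer.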
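
bc_flush() pins every batch it will touch with a reference while the lock
is held, drops the lock, and only then calls fd_batch_flush(), since
flushing can free batches out from under the iteration and may need the
lock itself. A rough sketch of that pin-then-work pattern; cache_lock,
struct obj, obj_unref() and do_flush() are invented names, not the
freedreno API.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcount;
        int id;
    };

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct obj *cache[4];

    static void
    obj_unref(struct obj *o)
    {
        if (--o->refcount == 0)
            free(o);
    }

    static void
    do_flush(struct obj *o)
    {
        /* in the real code this can remove cache entries and take the
         * lock, so it must be called with cache_lock released
         */
        printf("flushing obj %d\n", o->id);
    }

    static void
    flush_all(void)
    {
        struct obj *pinned[4] = {0};
        unsigned n = 0;

        pthread_mutex_lock(&cache_lock);
        for (unsigned i = 0; i < 4; i++) {
            if (cache[i]) {
                cache[i]->refcount++;   /* pin: can't vanish under us */
                pinned[n++] = cache[i];
            }
        }
        pthread_mutex_unlock(&cache_lock);

        for (unsigned i = 0; i < n; i++) {
            do_flush(pinned[i]);
            obj_unref(pinned[i]);
        }
    }

    int
    main(void)
    {
        for (int i = 0; i < 2; i++) {
            cache[i] = calloc(1, sizeof(*cache[i]));
            cache[i]->refcount = 1;     /* the cache's own reference */
            cache[i]->id = i;
        }

        flush_all();

        /* drop the cache's own references */
        for (int i = 0; i < 2; i++)
            obj_unref(cache[i]);
        return 0;
    }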
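
The deferred variant of the flush doesn't submit anything immediately; it
records every other batch belonging to the context as a dependency of the
current batch, so they all get flushed when the current batch eventually
does. A toy model of that bookkeeping; toy_batch, add_dep() and flush()
are invented for the example and leave out the refcounting and locking.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BATCHES 32

    struct toy_batch {
        unsigned idx;
        uint32_t deps_mask;   /* bitmask of batches this one depends on */
        bool flushed;
    };

    static struct toy_batch batches[MAX_BATCHES];

    static void
    add_dep(struct toy_batch *batch, struct toy_batch *dep)
    {
        batch->deps_mask |= (1u << dep->idx);
    }

    static void
    flush(struct toy_batch *batch)
    {
        if (batch->flushed)
            return;
        batch->flushed = true;

        /* dependencies flush first, then the batch itself */
        for (unsigned i = 0; i < MAX_BATCHES; i++) {
            if (batch->deps_mask & (1u << i))
                flush(&batches[i]);
        }
        printf("flushed batch %u\n", batch->idx);
    }

    int
    main(void)
    {
        for (unsigned i = 0; i < MAX_BATCHES; i++)
            batches[i].idx = i;

        struct toy_batch *current = &batches[2];

        /* "deferred flush": the older batches are only recorded as
         * dependencies of the current one, nothing is submitted yet
         */
        add_dep(current, &batches[0]);
        add_dep(current, &batches[1]);

        /* when the current batch finally flushes, 0 and 1 go first */
        flush(current);
        return 0;
    }
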
diff --git a/src/gallium/drivers/freedreno/freedreno_batch_cache.c b/src/gallium/drivers/freedreno/freedreno_batch_cache.c
index 635f2a7c99429137c8758168b68e474eb7e5b97f..329ed41fba363e34b9e9cdb7393cd234c10ebad7 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch_cache.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch_cache.c
@@ -28,6 +28,8 @@
 #include "util/set.h"
 #include "util/list.h"
 #include "util/u_string.h"
+#define XXH_INLINE_ALL
+#include "util/xxhash.h"
 
 #include "freedreno_batch.h"
 #include "freedreno_batch_cache.h"
@@ -81,7 +83,8 @@ struct key {
        struct {
                struct pipe_resource *texture;
                union pipe_surface_desc u;
-               uint16_t pos, format;
+               uint8_t pos, samples;
+               uint16_t format;
        } surf[0];
 };
 
@@ -97,9 +100,9 @@ static uint32_t
 key_hash(const void *_key)
 {
        const struct key *key = _key;
-       uint32_t hash = _mesa_fnv32_1a_offset_bias;
-       hash = _mesa_fnv32_1a_accumulate_block(hash, key, offsetof(struct key, surf[0]));
-       hash = _mesa_fnv32_1a_accumulate_block(hash, key->surf, sizeof(key->surf[0]) * key->num_surfs);
+       uint32_t hash = 0;
+       hash = XXH32(key, offsetof(struct key, surf[0]), hash);
+       hash = XXH32(key->surf, sizeof(key->surf[0]) * key->num_surfs, hash);
        return hash;
 }
 
@@ -124,28 +127,64 @@ fd_bc_fini(struct fd_batch_cache *cache)
        _mesa_hash_table_destroy(cache->ht, NULL);
 }
 
-uint32_t
-fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+static void
+bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
 {
-       struct hash_entry *entry;
-       struct fd_batch *last_batch = NULL;
+       /* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
+        * can cause batches to be unref'd and freed under our feet, so grab
+        * a reference to all the batches we need up-front.
+        */
+       struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};
+       struct fd_batch *batch;
+       unsigned n = 0;
 
-       hash_table_foreach(cache->ht, entry) {
-               struct fd_batch *batch = NULL;
-               fd_batch_reference(&batch, (struct fd_batch *)entry->data);
+       fd_context_lock(ctx);
+
+       foreach_batch(batch, cache, cache->batch_mask) {
                if (batch->ctx == ctx) {
-                       fd_batch_reference(&last_batch, batch);
-                       fd_batch_flush(batch, false);
+                       fd_batch_reference_locked(&batches[n++], batch);
                }
-               fd_batch_reference(&batch, NULL);
        }
 
-       if (last_batch) {
-               fd_batch_sync(last_batch);
-               fd_batch_reference(&last_batch, NULL);
+       if (deferred) {
+               struct fd_batch *current_batch = fd_context_batch(ctx);
+
+               for (unsigned i = 0; i < n; i++) {
+                       if (batches[i] && (batches[i]->ctx == ctx) &&
+                                       (batches[i] != current_batch)) {
+                               fd_batch_add_dep(current_batch, batches[i]);
+                       }
+               }
+
+               fd_context_unlock(ctx);
+       } else {
+               fd_context_unlock(ctx);
+
+               for (unsigned i = 0; i < n; i++) {
+                       fd_batch_flush(batches[i]);
+               }
        }
 
-       return ctx->last_fence;
+       for (unsigned i = 0; i < n; i++) {
+               fd_batch_reference(&batches[i], NULL);
+       }
+}
+
+void
+fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+       bc_flush(cache, ctx, false);
+}
+
+/* deferred flush doesn't actually flush, but it marks every other
+ * batch associated with the context as dependent on the current
+ * batch.  So when the current batch gets flushed, all other batches
+ * that came before also get flushed.
+ */
+void
+fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+       bc_flush(cache, ctx, true);
 }
 
 void
@@ -154,21 +193,38 @@ fd_bc_invalidate_context(struct fd_context *ctx)
        struct fd_batch_cache *cache = &ctx->screen->batch_cache;
        struct fd_batch *batch;
 
+       fd_screen_lock(ctx->screen);
+
        foreach_batch(batch, cache, cache->batch_mask) {
-               if (batch->ctx == ctx) {
-                       fd_batch_reset(batch);
-                       fd_batch_reference(&batch, NULL);
-               }
+               if (batch->ctx == ctx)
+                       fd_bc_invalidate_batch(batch, true);
        }
+
+       fd_screen_unlock(ctx->screen);
 }
 
+/**
+ * Note that when batch is flushed, it needs to remain in the cache so
+ * that fd_bc_invalidate_resource() can work; otherwise we can have
+ * the case where a rsc is destroyed while a batch still has a dangling
+ * reference to it.
+ *
+ * Note that the cmdstream (or, after the SUBMIT ioctl, the kernel)
+ * would have a reference to the underlying bo, so it is ok for the
+ * rsc to be destroyed before the batch.
+ */
 void
-fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
+fd_bc_invalidate_batch(struct fd_batch *batch, bool remove)
 {
+       if (!batch)
+               return;
+
        struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
        struct key *key = (struct key *)batch->key;
 
-       if (destroy) {
+       fd_context_assert_locked(batch->ctx);
+
+       if (remove) {
                cache->batches[batch->idx] = NULL;
                cache->batch_mask &= ~(1 << batch->idx);
        }
@@ -193,8 +249,10 @@ fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
 void
 fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
 {
-       struct fd_screen *screen = fd_screen(rsc->base.b.screen);
-               struct fd_batch *batch;
+       struct fd_screen *screen = fd_screen(rsc->base.screen);
+       struct fd_batch *batch;
+
+       fd_screen_lock(screen);
 
        if (destroy) {
                foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
@@ -203,27 +261,30 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
                }
                rsc->batch_mask = 0;
 
-               fd_batch_reference(&rsc->write_batch, NULL);
+               fd_batch_reference_locked(&rsc->write_batch, NULL);
        }
 
        foreach_batch(batch, &screen->batch_cache, rsc->bc_batch_mask)
                fd_bc_invalidate_batch(batch, false);
 
        rsc->bc_batch_mask = 0;
+
+       fd_screen_unlock(screen);
 }
 
 struct fd_batch *
-fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
+fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw)
 {
        struct fd_batch *batch;
        uint32_t idx;
 
+       fd_screen_lock(ctx->screen);
+
        while ((idx = ffs(~cache->batch_mask)) == 0) {
 #if 0
                for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
                        batch = cache->batches[i];
                        debug_printf("%d: needs_flush=%d, depends:", batch->idx, batch->needs_flush);
-                       struct set_entry *entry;
                        set_foreach(batch->dependencies, entry) {
                                struct fd_batch *dep = (struct fd_batch *)entry->key;
                                debug_printf(" %d", dep->idx);
@@ -236,14 +297,17 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
                 */
                struct fd_batch *flush_batch = NULL;
                for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
-                       if ((cache->batches[i] == ctx->batch) ||
-                                       !cache->batches[i]->needs_flush)
-                               continue;
                        if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
-                               fd_batch_reference(&flush_batch, cache->batches[i]);
+                               fd_batch_reference_locked(&flush_batch, cache->batches[i]);
                }
+
+               /* we can drop lock temporarily here, since we hold a ref,
+                * flush_batch won't disappear under us.
+                */
+               fd_screen_unlock(ctx->screen);
                DBG("%p: too many batches!  flush forced!", flush_batch);
-               fd_batch_flush(flush_batch, true);
+               fd_batch_flush(flush_batch);
+               fd_screen_lock(ctx->screen);
 
                /* While the resources get cleaned up automatically, the flush_batch
                 * doesn't get removed from the dependencies of other batches, so
@@ -259,18 +323,18 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
                        if (other->dependents_mask & (1 << flush_batch->idx)) {
                                other->dependents_mask &= ~(1 << flush_batch->idx);
                                struct fd_batch *ref = flush_batch;
-                               fd_batch_reference(&ref, NULL);
+                               fd_batch_reference_locked(&ref, NULL);
                        }
                }
 
-               fd_batch_reference(&flush_batch, NULL);
+               fd_batch_reference_locked(&flush_batch, NULL);
        }
 
        idx--;              /* bit zero returns 1 for ffs() */
 
-       batch = fd_batch_create(ctx);
+       batch = fd_batch_create(ctx, nondraw);
        if (!batch)
-               return NULL;
+               goto out;
 
        batch->seqno = cache->cnt++;
        batch->idx = idx;
@@ -279,6 +343,9 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
        debug_assert(cache->batches[idx] == NULL);
        cache->batches[idx] = batch;
 
+out:
+       fd_screen_unlock(ctx->screen);
+
        return batch;
 }
 
@@ -297,7 +364,7 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
                return batch;
        }
 
-       batch = fd_bc_alloc_batch(cache, ctx);
+       batch = fd_bc_alloc_batch(cache, ctx, false);
 #ifdef DEBUG
        DBG("%p: hash=0x%08x, %ux%u, %u layers, %u samples", batch, hash,
                        key->width, key->height, key->layers, key->samples);
@@ -312,6 +379,16 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
        if (!batch)
                return NULL;
 
+       /* reset max_scissor, which will be adjusted on draws
+        * according to the actual scissor.
+        */
+       batch->max_scissor.minx = ~0;
+       batch->max_scissor.miny = ~0;
+       batch->max_scissor.maxx = 0;
+       batch->max_scissor.maxy = 0;
+
+       fd_screen_lock(ctx->screen);
+
        _mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
        batch->key = key;
        batch->hash = hash;
@@ -321,6 +398,8 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
                rsc->bc_batch_mask = (1 << batch->idx);
        }
 
+       fd_screen_unlock(ctx->screen);
+
        return batch;
 }
 
@@ -330,6 +409,7 @@ key_surf(struct key *key, unsigned idx, unsigned pos, struct pipe_surface *psurf
        key->surf[idx].texture = psurf->texture;
        key->surf[idx].u = psurf->u;
        key->surf[idx].pos = pos;
+       key->surf[idx].samples = MAX2(1, psurf->nr_samples);
        key->surf[idx].format = psurf->format;
 }
 
@@ -343,7 +423,7 @@ fd_batch_from_fb(struct fd_batch_cache *cache, struct fd_context *ctx,
        key->width = pfb->width;
        key->height = pfb->height;
        key->layers = pfb->layers;
-       key->samples = pfb->samples;
+       key->samples = util_framebuffer_get_num_samples(pfb);
        key->ctx = ctx;
 
        if (pfb->zsbuf)