freedreno: replace fnv1a hash function with xxhash
[mesa.git] src/gallium/drivers/freedreno/freedreno_batch_cache.c
index f3d5078d1c3b6f883a48212b4f60ea2d09de4a3e..329ed41fba363e34b9e9cdb7393cd234c10ebad7 100644
@@ -28,6 +28,8 @@
 #include "util/set.h"
 #include "util/list.h"
 #include "util/u_string.h"
+#define XXH_INLINE_ALL
+#include "util/xxhash.h"
 
 #include "freedreno_batch.h"
 #include "freedreno_batch_cache.h"
@@ -81,7 +83,8 @@ struct key {
        struct {
                struct pipe_resource *texture;
                union pipe_surface_desc u;
-               uint16_t pos, format;
+               uint8_t pos, samples;
+               uint16_t format;
        } surf[0];
 };
 
@@ -97,9 +100,9 @@ static uint32_t
 key_hash(const void *_key)
 {
        const struct key *key = _key;
-       uint32_t hash = _mesa_fnv32_1a_offset_bias;
-       hash = _mesa_fnv32_1a_accumulate_block(hash, key, offsetof(struct key, surf[0]));
-       hash = _mesa_fnv32_1a_accumulate_block(hash, key->surf, sizeof(key->surf[0]) * key->num_surfs);
+       uint32_t hash = 0;
+       hash = XXH32(key, offsetof(struct key, surf[0]), hash);
+       hash = XXH32(key->surf, sizeof(key->surf[0]) * key->num_surfs, hash);
        return hash;
 }
 
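For reference, the new hash simply chains two XXH32() calls: the fixed-size head of the key is hashed first, and that result is passed back as the seed when hashing the variable-length surf[] tail, so one 32-bit value covers both parts. Below is a minimal standalone sketch of the same pattern; the struct and field names are made up for illustration, and it assumes the single-header xxhash library is on the include path.

/* build e.g.: cc -O2 chained_hash_demo.c -o chained_hash_demo */
#define XXH_INLINE_ALL
#include "xxhash.h"

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical key: fixed head followed by a zero-length array tail,
 * mirroring surf[0] in the patch (GNU extension)
 */
struct demo_key {
	uint32_t width, height;
	uint16_t num_elems;
	uint32_t elems[0];
};

static uint32_t
demo_key_hash(const struct demo_key *key)
{
	uint32_t hash = 0;
	/* hash the fixed-size head, up to (but not including) the array */
	hash = XXH32(key, offsetof(struct demo_key, elems[0]), hash);
	/* then mix in the variable-length tail, seeded with the head's hash */
	hash = XXH32(key->elems, sizeof(key->elems[0]) * key->num_elems, hash);
	return hash;
}

int
main(void)
{
	const unsigned n = 2;
	/* calloc zeroes the struct, so padding bytes inside the head hash
	 * deterministically
	 */
	struct demo_key *key = calloc(1, sizeof(*key) + n * sizeof(key->elems[0]));
	key->width = 640;
	key->height = 480;
	key->num_elems = n;
	key->elems[0] = 7;
	key->elems[1] = 9;

	printf("hash = 0x%08x\n", demo_key_hash(key));
	free(key);
	return 0;
}

As in the patch, any padding inside the fixed head ends up among the hashed bytes, so equal keys only hash equally if they come from zeroed storage.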
@@ -124,52 +127,94 @@ fd_bc_fini(struct fd_batch_cache *cache)
        _mesa_hash_table_destroy(cache->ht, NULL);
 }
 
-void
-fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+static void
+bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
 {
-       struct hash_entry *entry;
-       struct fd_batch *last_batch = NULL;
+       /* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
+        * can cause batches to be unref'd and freed under our feet, so grab
+        * a reference to all the batches we need up-front.
+        */
+       struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};
+       struct fd_batch *batch;
+       unsigned n = 0;
 
-       pipe_mutex_lock(ctx->screen->lock);
+       fd_context_lock(ctx);
 
-       hash_table_foreach(cache->ht, entry) {
-               struct fd_batch *batch = NULL;
-               fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
+       foreach_batch(batch, cache, cache->batch_mask) {
                if (batch->ctx == ctx) {
-                       pipe_mutex_unlock(ctx->screen->lock);
-                       fd_batch_reference(&last_batch, batch);
-                       fd_batch_flush(batch, false);
-                       pipe_mutex_lock(ctx->screen->lock);
+                       fd_batch_reference_locked(&batches[n++], batch);
                }
-               fd_batch_reference_locked(&batch, NULL);
        }
 
-       pipe_mutex_unlock(ctx->screen->lock);
+       if (deferred) {
+               struct fd_batch *current_batch = fd_context_batch(ctx);
+
+               for (unsigned i = 0; i < n; i++) {
+                       if (batches[i] && (batches[i]->ctx == ctx) &&
+                                       (batches[i] != current_batch)) {
+                               fd_batch_add_dep(current_batch, batches[i]);
+                       }
+               }
+
+               fd_context_unlock(ctx);
+       } else {
+               fd_context_unlock(ctx);
 
-       if (last_batch) {
-               fd_batch_sync(last_batch);
-               fd_batch_reference(&last_batch, NULL);
+               for (unsigned i = 0; i < n; i++) {
+                       fd_batch_flush(batches[i]);
+               }
+       }
+
+       for (unsigned i = 0; i < n; i++) {
+               fd_batch_reference(&batches[i], NULL);
        }
 }
 
+void
+fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+       bc_flush(cache, ctx, false);
+}
+
+/* deferred flush doesn't actually flush, but it marks every other
+ * batch associated with the context as dependent on the current
+ * batch.  So when the current batch gets flushed, all other batches
+ * that came before also get flushed.
+ */
+void
+fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+       bc_flush(cache, ctx, true);
+}
+
 void
 fd_bc_invalidate_context(struct fd_context *ctx)
 {
        struct fd_batch_cache *cache = &ctx->screen->batch_cache;
        struct fd_batch *batch;
 
-       pipe_mutex_lock(ctx->screen->lock);
+       fd_screen_lock(ctx->screen);
 
        foreach_batch(batch, cache, cache->batch_mask) {
                if (batch->ctx == ctx)
-                       fd_batch_reference_locked(&batch, NULL);
+                       fd_bc_invalidate_batch(batch, true);
        }
 
-       pipe_mutex_unlock(ctx->screen->lock);
+       fd_screen_unlock(ctx->screen);
 }
 
+/**
+ * Note that when batch is flushed, it needs to remain in the cache so
+ * that fd_bc_invalidate_resource() can work; otherwise we can have
+ * the case where a rsc is destroyed while a batch still has a dangling
+ * reference to it.
+ *
+ * Note that the cmdstream (or, after the SUBMIT ioctl, the kernel)
+ * would have a reference to the underlying bo, so it is ok for the
+ * rsc to be destroyed before the batch.
+ */
 void
-fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
+fd_bc_invalidate_batch(struct fd_batch *batch, bool remove)
 {
        if (!batch)
                return;
@@ -177,9 +222,9 @@ fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
        struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
        struct key *key = (struct key *)batch->key;
 
-       pipe_mutex_assert_locked(batch->ctx->screen->lock);
+       fd_context_assert_locked(batch->ctx);
 
-       if (destroy) {
+       if (remove) {
                cache->batches[batch->idx] = NULL;
                cache->batch_mask &= ~(1 << batch->idx);
        }
@@ -204,10 +249,10 @@ fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
 void
 fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
 {
-       struct fd_screen *screen = fd_screen(rsc->base.b.screen);
+       struct fd_screen *screen = fd_screen(rsc->base.screen);
        struct fd_batch *batch;
 
-       pipe_mutex_lock(screen->lock);
+       fd_screen_lock(screen);
 
        if (destroy) {
                foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
@@ -224,23 +269,22 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
 
        rsc->bc_batch_mask = 0;
 
-       pipe_mutex_unlock(screen->lock);
+       fd_screen_unlock(screen);
 }
 
 struct fd_batch *
-fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
+fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw)
 {
        struct fd_batch *batch;
        uint32_t idx;
 
-       pipe_mutex_lock(ctx->screen->lock);
+       fd_screen_lock(ctx->screen);
 
        while ((idx = ffs(~cache->batch_mask)) == 0) {
 #if 0
                for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
                        batch = cache->batches[i];
                        debug_printf("%d: needs_flush=%d, depends:", batch->idx, batch->needs_flush);
-                       struct set_entry *entry;
                        set_foreach(batch->dependencies, entry) {
                                struct fd_batch *dep = (struct fd_batch *)entry->key;
                                debug_printf(" %d", dep->idx);
@@ -253,9 +297,6 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
                 */
                struct fd_batch *flush_batch = NULL;
                for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
-                       if ((cache->batches[i] == ctx->batch) ||
-                                       !cache->batches[i]->needs_flush)
-                               continue;
                        if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
                                fd_batch_reference_locked(&flush_batch, cache->batches[i]);
                }
@@ -263,10 +304,10 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
                /* we can drop lock temporarily here, since we hold a ref,
                 * flush_batch won't disappear under us.
                 */
-               pipe_mutex_unlock(ctx->screen->lock);
+               fd_screen_unlock(ctx->screen);
                DBG("%p: too many batches!  flush forced!", flush_batch);
-               fd_batch_flush(flush_batch, true);
-               pipe_mutex_lock(ctx->screen->lock);
+               fd_batch_flush(flush_batch);
+               fd_screen_lock(ctx->screen);
 
                /* While the resources get cleaned up automatically, the flush_batch
                 * doesn't get removed from the dependencies of other batches, so
@@ -291,7 +332,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
 
        idx--;              /* bit zero returns 1 for ffs() */
 
-       batch = fd_batch_create(ctx);
+       batch = fd_batch_create(ctx, nondraw);
        if (!batch)
                goto out;
 
@@ -303,7 +344,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
        cache->batches[idx] = batch;
 
 out:
-       pipe_mutex_unlock(ctx->screen->lock);
+       fd_screen_unlock(ctx->screen);
 
        return batch;
 }
@@ -323,7 +364,7 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
                return batch;
        }
 
-       batch = fd_bc_alloc_batch(cache, ctx);
+       batch = fd_bc_alloc_batch(cache, ctx, false);
 #ifdef DEBUG
        DBG("%p: hash=0x%08x, %ux%u, %u layers, %u samples", batch, hash,
                        key->width, key->height, key->layers, key->samples);
@@ -338,7 +379,15 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
        if (!batch)
                return NULL;
 
-       pipe_mutex_lock(ctx->screen->lock);
+       /* reset max_scissor, which will be adjusted on draws
+        * according to the actual scissor.
+        */
+       batch->max_scissor.minx = ~0;
+       batch->max_scissor.miny = ~0;
+       batch->max_scissor.maxx = 0;
+       batch->max_scissor.maxy = 0;
+
+       fd_screen_lock(ctx->screen);
 
        _mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
        batch->key = key;
@@ -349,7 +398,7 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
                rsc->bc_batch_mask = (1 << batch->idx);
        }
 
-       pipe_mutex_unlock(ctx->screen->lock);
+       fd_screen_unlock(ctx->screen);
 
        return batch;
 }
@@ -360,6 +409,7 @@ key_surf(struct key *key, unsigned idx, unsigned pos, struct pipe_surface *psurf
        key->surf[idx].texture = psurf->texture;
        key->surf[idx].u = psurf->u;
        key->surf[idx].pos = pos;
+       key->surf[idx].samples = MAX2(1, psurf->nr_samples);
        key->surf[idx].format = psurf->format;
 }
 
@@ -373,7 +423,7 @@ fd_batch_from_fb(struct fd_batch_cache *cache, struct fd_context *ctx,
        key->width = pfb->width;
        key->height = pfb->height;
        key->layers = pfb->layers;
-       key->samples = pfb->samples;
+       key->samples = util_framebuffer_get_num_samples(pfb);
        key->ctx = ctx;
 
        if (pfb->zsbuf)
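These last two hunks make the batch key derive its MSAA state from the attachments themselves: each surface records MAX2(1, psurf->nr_samples), and the framebuffer-level count now comes from util_framebuffer_get_num_samples() instead of trusting pfb->samples. A rough sketch of that idea with stand-in types follows; it is an illustrative approximation only, not the actual u_framebuffer helper.

#include <stdio.h>

/* stand-in types, just enough to show the idea */
struct demo_surface { unsigned nr_samples; };
struct demo_framebuffer {
	unsigned nr_cbufs;
	struct demo_surface *cbufs[8];
	struct demo_surface *zsbuf;
};

#define MAX2(a, b) ((a) > (b) ? (a) : (b))

/* derive the effective sample count from whatever is actually bound,
 * never reporting less than 1 (single-sampled)
 */
static unsigned
demo_get_num_samples(const struct demo_framebuffer *fb)
{
	unsigned samples = 1;
	for (unsigned i = 0; i < fb->nr_cbufs; i++)
		if (fb->cbufs[i])
			samples = MAX2(samples, fb->cbufs[i]->nr_samples);
	if (fb->zsbuf)
		samples = MAX2(samples, fb->zsbuf->nr_samples);
	return samples;
}

int
main(void)
{
	struct demo_surface color = { .nr_samples = 4 };
	struct demo_surface depth = { .nr_samples = 0 };  /* 0 also means single-sampled */
	struct demo_framebuffer fb = { .nr_cbufs = 1, .cbufs = { &color }, .zsbuf = &depth };

	printf("samples = %u\n", demo_get_num_samples(&fb));  /* prints 4 */
	return 0;
}

The floor of 1 keeps attachments that report nr_samples == 0 (single-sampled) from producing a different batch key than ones that report 1.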