#include "util/set.h"
#include "util/list.h"
#include "util/u_string.h"
+#define XXH_INLINE_ALL
+#include "util/xxhash.h"
#include "freedreno_batch.h"
#include "freedreno_batch_cache.h"
key_hash(const void *_key)
{
const struct key *key = _key;
- uint32_t hash = _mesa_fnv32_1a_offset_bias;
- hash = _mesa_fnv32_1a_accumulate_block(hash, key, offsetof(struct key, surf[0]));
- hash = _mesa_fnv32_1a_accumulate_block(hash, key->surf, sizeof(key->surf[0]) * key->num_surfs);
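+ /* Hash the fixed-size head of the key, then mix in the variable-
+  * length surf[] array by seeding the second XXH32() call with the
+  * result of the first.
+  */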
+ uint32_t hash = 0;
+ hash = XXH32(key, offsetof(struct key, surf[0]), hash);
+ hash = XXH32(key->surf, sizeof(key->surf[0]) * key->num_surfs, hash);
return hash;
}
bc_flush(cache, ctx, true);
}
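+/* Check whether a batch is still tracked in the batch cache.  Called
+ * with the screen lock held (see fd_bc_dump()).
+ */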
+static bool
+batch_in_cache(struct fd_batch_cache *cache, struct fd_batch *batch)
+{
+ struct fd_batch *b;
+
+ foreach_batch (b, cache, cache->batch_mask)
+ if (b == batch)
+ return true;
+
+ return false;
+}
+
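+/* Debug helper: print a formatted header followed by one line per live
+ * batch, flagging batches that still need a flush and batches that are
+ * no longer tracked by the batch cache ("orphans").  No-op unless
+ * BATCH_DEBUG is enabled.
+ */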
+void
+fd_bc_dump(struct fd_screen *screen, const char *fmt, ...)
+{
+ struct fd_batch_cache *cache = &screen->batch_cache;
+
+ if (!BATCH_DEBUG)
+ return;
+
+ fd_screen_lock(screen);
+
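+ /* caller-provided header line for the dump: */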
+ va_list ap;
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+
+ set_foreach (screen->live_batches, entry) {
+ struct fd_batch *batch = (struct fd_batch *)entry->key;
+ printf(" %p<%u>%s%s\n", batch, batch->seqno,
+ batch->needs_flush ? ", NEEDS FLUSH" : "",
+ batch_in_cache(cache, batch) ? "" : ", ORPHAN");
+ }
+
+ printf("----\n");
+
+ fd_screen_unlock(screen);
+}
+
void
fd_bc_invalidate_context(struct fd_context *ctx)
{
struct fd_batch_cache *cache = &ctx->screen->batch_cache;
struct fd_batch *batch;
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
foreach_batch(batch, cache, cache->batch_mask) {
if (batch->ctx == ctx)
fd_bc_invalidate_batch(batch, true);
}
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
}
/**
struct fd_screen *screen = fd_screen(rsc->base.screen);
struct fd_batch *batch;
- mtx_lock(&screen->lock);
+ fd_screen_lock(screen);
if (destroy) {
foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
rsc->bc_batch_mask = 0;
- mtx_unlock(&screen->lock);
+ fd_screen_unlock(screen);
}
struct fd_batch *
struct fd_batch *batch;
uint32_t idx;
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
while ((idx = ffs(~cache->batch_mask)) == 0) {
#if 0
/* we can drop lock temporarily here, since we hold a ref,
* flush_batch won't disappear under us.
*/
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
DBG("%p: too many batches! flush forced!", flush_batch);
fd_batch_flush(flush_batch);
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
/* While the resources get cleaned up automatically, the flush_batch
* doesn't get removed from the dependencies of other batches, so
cache->batches[idx] = batch;
out:
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
return batch;
}
if (!batch)
return NULL;
- mtx_lock(&ctx->screen->lock);
+ /* reset max_scissor, which will be adjusted on draws
+  * according to the actual scissor.  Start from an inverted
+  * (empty) bounding box so the first draw's scissor replaces it.
+  */
+ batch->max_scissor.minx = ~0;
+ batch->max_scissor.miny = ~0;
+ batch->max_scissor.maxx = 0;
+ batch->max_scissor.maxy = 0;
+
+ fd_screen_lock(ctx->screen);
_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
batch->key = key;
rsc->bc_batch_mask = (1 << batch->idx);
}
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
return batch;
}