#include "util/set.h"
#include "util/list.h"
#include "util/u_string.h"
+#define XXH_INLINE_ALL
+#include "util/xxhash.h"
#include "freedreno_batch.h"
#include "freedreno_batch_cache.h"
struct {
struct pipe_resource *texture;
union pipe_surface_desc u;
- uint16_t pos, format;
+ uint8_t pos, samples;
+ uint16_t format;
} surf[0];
};
key_hash(const void *_key)
{
const struct key *key = _key;
- uint32_t hash = _mesa_fnv32_1a_offset_bias;
- hash = _mesa_fnv32_1a_accumulate_block(hash, key, offsetof(struct key, surf[0]));
- hash = _mesa_fnv32_1a_accumulate_block(hash, key->surf, sizeof(key->surf[0]) * key->num_surfs);
+ uint32_t hash = 0;
+ hash = XXH32(key, offsetof(struct key, surf[0]), hash);
+ hash = XXH32(key->surf, sizeof(key->surf[0]) * key->num_surfs, hash);
return hash;
}
_mesa_hash_table_destroy(cache->ht, NULL);
}
-void
-fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+static void
+bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
{
- struct hash_entry *entry;
- struct fd_batch *last_batch = NULL;
+ /* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
+ * can cause batches to be unref'd and freed under our feet, so grab
+ * a reference to all the batches we need up-front.
+ */
+ struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};
+ struct fd_batch *batch;
+ unsigned n = 0;
- mtx_lock(&ctx->screen->lock);
+ fd_context_lock(ctx);
- hash_table_foreach(cache->ht, entry) {
- struct fd_batch *batch = NULL;
- /* hold a reference since we can drop screen->lock: */
- fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
+ foreach_batch(batch, cache, cache->batch_mask) {
if (batch->ctx == ctx) {
- mtx_unlock(&ctx->screen->lock);
- fd_batch_reference(&last_batch, batch);
- fd_batch_flush(batch, false, false);
- mtx_lock(&ctx->screen->lock);
+ fd_batch_reference_locked(&batches[n++], batch);
}
- fd_batch_reference_locked(&batch, NULL);
}
- mtx_unlock(&ctx->screen->lock);
+ if (deferred) {
+ struct fd_batch *current_batch = fd_context_batch(ctx);
- if (last_batch) {
- fd_batch_sync(last_batch);
- fd_batch_reference(&last_batch, NULL);
+ for (unsigned i = 0; i < n; i++) {
+ if (batches[i] && (batches[i]->ctx == ctx) &&
+ (batches[i] != current_batch)) {
+ fd_batch_add_dep(current_batch, batches[i]);
+ }
+ }
+
+ fd_context_unlock(ctx);
+ } else {
+ fd_context_unlock(ctx);
+
+ for (unsigned i = 0; i < n; i++) {
+ fd_batch_flush(batches[i]);
+ }
+ }
+
+ for (unsigned i = 0; i < n; i++) {
+ fd_batch_reference(&batches[i], NULL);
}
}
+void
+fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+ bc_flush(cache, ctx, false);
+}
+
/* deferred flush doesn't actually flush, but it marks every other
* batch associated with the context as dependent on the current
* batch. So when the current batch gets flushed, all other batches
void
fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
{
- struct fd_batch *current_batch = ctx->batch;
- struct hash_entry *entry;
+ bc_flush(cache, ctx, true);
+}
- mtx_lock(&ctx->screen->lock);
+static bool
+batch_in_cache(struct fd_batch_cache *cache, struct fd_batch *batch)
+{
+ struct fd_batch *b;
- hash_table_foreach(cache->ht, entry) {
- struct fd_batch *batch = entry->data;
- if (batch == current_batch)
- continue;
- if (batch->ctx == ctx)
- fd_batch_add_dep(current_batch, batch);
+ foreach_batch (b, cache, cache->batch_mask)
+ if (b == batch)
+ return true;
+
+ return false;
+}
+
+void
+fd_bc_dump(struct fd_screen *screen, const char *fmt, ...)
+{
+ struct fd_batch_cache *cache = &screen->batch_cache;
+
+ if (!BATCH_DEBUG)
+ return;
+
+ fd_screen_lock(screen);
+
+ va_list ap;
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+
+ set_foreach (screen->live_batches, entry) {
+ struct fd_batch *batch = (struct fd_batch *)entry->key;
+ printf(" %p<%u>%s%s\n", batch, batch->seqno,
+ batch->needs_flush ? ", NEEDS FLUSH" : "",
+ batch_in_cache(cache, batch) ? "" : ", ORPHAN");
}
- mtx_unlock(&ctx->screen->lock);
+ printf("----\n");
+
+ fd_screen_unlock(screen);
}
void
struct fd_batch_cache *cache = &ctx->screen->batch_cache;
struct fd_batch *batch;
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
foreach_batch(batch, cache, cache->batch_mask) {
if (batch->ctx == ctx)
- fd_batch_reference_locked(&batch, NULL);
+ fd_bc_invalidate_batch(batch, true);
}
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
}
+/**
+ * Note that when batch is flushed, it needs to remain in the cache so
+ * that fd_bc_invalidate_resource() can work.. otherwise we can have
+ * the case where a rsc is destroyed while a batch still has a dangling
+ * reference to it.
+ *
+ * Note that the cmdstream (or, after the SUBMIT ioctl, the kernel)
+ * would have a reference to the underlying bo, so it is ok for the
+ * rsc to be destroyed before the batch.
+ */
void
-fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
+fd_bc_invalidate_batch(struct fd_batch *batch, bool remove)
{
if (!batch)
return;
struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
struct key *key = (struct key *)batch->key;
- pipe_mutex_assert_locked(batch->ctx->screen->lock);
+ fd_context_assert_locked(batch->ctx);
- if (destroy) {
+ if (remove) {
cache->batches[batch->idx] = NULL;
cache->batch_mask &= ~(1 << batch->idx);
}
struct fd_screen *screen = fd_screen(rsc->base.screen);
struct fd_batch *batch;
- mtx_lock(&screen->lock);
+ fd_screen_lock(screen);
if (destroy) {
foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
rsc->bc_batch_mask = 0;
- mtx_unlock(&screen->lock);
+ fd_screen_unlock(screen);
}
struct fd_batch *
-fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
+fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw)
{
struct fd_batch *batch;
uint32_t idx;
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
while ((idx = ffs(~cache->batch_mask)) == 0) {
#if 0
for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
batch = cache->batches[i];
debug_printf("%d: needs_flush=%d, depends:", batch->idx, batch->needs_flush);
- struct set_entry *entry;
set_foreach(batch->dependencies, entry) {
struct fd_batch *dep = (struct fd_batch *)entry->key;
debug_printf(" %d", dep->idx);
*/
struct fd_batch *flush_batch = NULL;
for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
- if ((cache->batches[i] == ctx->batch) ||
- !cache->batches[i]->needs_flush)
- continue;
if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
fd_batch_reference_locked(&flush_batch, cache->batches[i]);
}
/* we can drop lock temporarily here, since we hold a ref,
* flush_batch won't disappear under us.
*/
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
DBG("%p: too many batches! flush forced!", flush_batch);
- fd_batch_flush(flush_batch, true, false);
- mtx_lock(&ctx->screen->lock);
+ fd_batch_flush(flush_batch);
+ fd_screen_lock(ctx->screen);
/* While the resources get cleaned up automatically, the flush_batch
* doesn't get removed from the dependencies of other batches, so
idx--; /* bit zero returns 1 for ffs() */
- batch = fd_batch_create(ctx, false);
+ batch = fd_batch_create(ctx, nondraw);
if (!batch)
goto out;
cache->batches[idx] = batch;
out:
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
return batch;
}
return batch;
}
- batch = fd_bc_alloc_batch(cache, ctx);
+ batch = fd_bc_alloc_batch(cache, ctx, false);
#ifdef DEBUG
DBG("%p: hash=0x%08x, %ux%u, %u layers, %u samples", batch, hash,
key->width, key->height, key->layers, key->samples);
if (!batch)
return NULL;
- mtx_lock(&ctx->screen->lock);
+ /* reset max_scissor, which will be adjusted on draws
+ * according to the actual scissor.
+ */
+ batch->max_scissor.minx = ~0;
+ batch->max_scissor.miny = ~0;
+ batch->max_scissor.maxx = 0;
+ batch->max_scissor.maxy = 0;
+
+ fd_screen_lock(ctx->screen);
_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
batch->key = key;
rsc->bc_batch_mask = (1 << batch->idx);
}
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
return batch;
}
key->surf[idx].texture = psurf->texture;
key->surf[idx].u = psurf->u;
key->surf[idx].pos = pos;
+ key->surf[idx].samples = MAX2(1, psurf->nr_samples);
key->surf[idx].format = psurf->format;
}
key->width = pfb->width;
key->height = pfb->height;
key->layers = pfb->layers;
- key->samples = pfb->samples;
+ key->samples = util_framebuffer_get_num_samples(pfb);
key->ctx = ctx;
if (pfb->zsbuf)