_mesa_hash_table_destroy(cache->ht, NULL);
}
-uint32_t
+void
fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
{
struct hash_entry *entry;
struct fd_batch *last_batch = NULL;
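+ /* the batch cache is shared by all contexts on the screen, so it
+ * is protected by screen->lock:
+ */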
+ mtx_lock(&ctx->screen->lock);
+
hash_table_foreach(cache->ht, entry) {
struct fd_batch *batch = NULL;
- fd_batch_reference(&batch, (struct fd_batch *)entry->data);
+ /* hold a reference since we can drop screen->lock: */
+ fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
if (batch->ctx == ctx) {
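+ /* drop the lock across the flush, since fd_batch_flush() may
+ * itself need to take screen->lock:
+ */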
+ mtx_unlock(&ctx->screen->lock);
fd_batch_reference(&last_batch, batch);
- fd_batch_flush(batch, false);
+ fd_batch_flush(batch, false, false);
+ mtx_lock(&ctx->screen->lock);
}
- fd_batch_reference(&batch, NULL);
+ fd_batch_reference_locked(&batch, NULL);
}
+ mtx_unlock(&ctx->screen->lock);
+
if (last_batch) {
fd_batch_sync(last_batch);
fd_batch_reference(&last_batch, NULL);
}
+}
+
+/* A deferred flush doesn't actually flush anything.  Instead it marks
+ * every other batch associated with the context as a dependency of
+ * the current batch.  So when the current batch eventually gets
+ * flushed, all the batches that came before it get flushed as well.
+ */
+void
+fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+ struct fd_batch *current_batch = ctx->batch;
+ struct hash_entry *entry;
+
+ mtx_lock(&ctx->screen->lock);
+
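+ /* dependency tracking (dependents_mask) is protected by
+ * screen->lock, which we hold across the walk:
+ */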
+ hash_table_foreach(cache->ht, entry) {
+ struct fd_batch *batch = entry->data;
+ if (batch == current_batch)
+ continue;
+ if (batch->ctx == ctx)
+ fd_batch_add_dep(current_batch, batch);
+ }
- return ctx->last_fence;
+ mtx_unlock(&ctx->screen->lock);
}
void
fd_bc_invalidate_context(struct fd_context *ctx)
{
struct fd_batch_cache *cache = &ctx->screen->batch_cache;
struct fd_batch *batch;
+ mtx_lock(&ctx->screen->lock);
+
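+ /* unref any batches still owned by this context: */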
foreach_batch(batch, cache, cache->batch_mask) {
- if (batch->ctx == ctx) {
- fd_batch_reset(batch);
- fd_batch_reference(&batch, NULL);
- }
+ if (batch->ctx == ctx)
+ fd_batch_reference_locked(&batch, NULL);
}
+
+ mtx_unlock(&ctx->screen->lock);
}
void
fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
{
+ if (!batch)
+ return;
+
struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
struct key *key = (struct key *)batch->key;
+ pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
if (destroy) {
cache->batches[batch->idx] = NULL;
cache->batch_mask &= ~(1 << batch->idx);
void
fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
{
- struct fd_screen *screen = fd_screen(rsc->base.b.screen);
- struct fd_batch *batch;
+ struct fd_screen *screen = fd_screen(rsc->base.screen);
+ struct fd_batch *batch;
+
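+ /* screen->lock protects rsc->batch_mask/bc_batch_mask as well as
+ * each batch's resource tracking:
+ */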
+ mtx_lock(&screen->lock);
if (destroy) {
foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
_mesa_set_remove(batch->resources, entry);
}
rsc->batch_mask = 0;
- fd_batch_reference(&rsc->write_batch, NULL);
+ fd_batch_reference_locked(&rsc->write_batch, NULL);
}
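+ /* also invalidate any batches which have this resource in their
+ * key, i.e. batches rendering to it:
+ */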
foreach_batch(batch, &screen->batch_cache, rsc->bc_batch_mask)
fd_bc_invalidate_batch(batch, false);
rsc->bc_batch_mask = 0;
+
+ mtx_unlock(&screen->lock);
}
struct fd_batch *
fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
{
struct fd_batch *batch;
uint32_t idx;
+ mtx_lock(&ctx->screen->lock);
+
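+ /* if all batch slots are in use, flush the oldest batch to free
+ * one up:
+ */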
while ((idx = ffs(~cache->batch_mask)) == 0) {
#if 0
for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
batch = cache->batches[i];
debug_printf("%d: needs_flush=%d\n", batch->idx, batch->needs_flush);
}
#endif
/* TODO: is LRU the better policy?  For now, new batches are
 * pushed out ahead of the oldest batch:
 */
struct fd_batch *flush_batch = NULL;
for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
if ((cache->batches[i] == ctx->batch) ||
!cache->batches[i]->needs_flush)
continue;
if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
- fd_batch_reference(&flush_batch, cache->batches[i]);
+ fd_batch_reference_locked(&flush_batch, cache->batches[i]);
}
+
+ /* we can drop the lock temporarily here, since we hold a ref:
+ * flush_batch won't disappear out from under us.
+ */
+ mtx_unlock(&ctx->screen->lock);
DBG("%p: too many batches! flush forced!", flush_batch);
- fd_batch_flush(flush_batch, true);
+ fd_batch_flush(flush_batch, true, false);
+ mtx_lock(&ctx->screen->lock);
/* While the resources get cleaned up automatically, the flush_batch
 * doesn't get removed from the dependencies of other batches, so
 * it won't be unref'd and will remain in the table.  Clear the
 * dependency bits and drop those references here:
 */
for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
struct fd_batch *other = cache->batches[i];
if (!other)
continue;
if (other->dependents_mask & (1 << flush_batch->idx)) {
other->dependents_mask &= ~(1 << flush_batch->idx);
struct fd_batch *ref = flush_batch;
- fd_batch_reference(&ref, NULL);
+ fd_batch_reference_locked(&ref, NULL);
}
}
- fd_batch_reference(&flush_batch, NULL);
+ fd_batch_reference_locked(&flush_batch, NULL);
}
idx--; /* bit zero returns 1 for ffs() */
- batch = fd_batch_create(ctx);
+ batch = fd_batch_create(ctx, false);
if (!batch)
- return NULL;
+ goto out;
batch->seqno = cache->cnt++;
batch->idx = idx;
debug_assert(cache->batches[idx] == NULL);
cache->batches[idx] = batch;
+out:
+ mtx_unlock(&ctx->screen->lock);
+
return batch;
}
if (!batch)
return NULL;
+ mtx_lock(&ctx->screen->lock);
+
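+ /* insert the new batch into the cache, and track it on each of
+ * the key's resources via bc_batch_mask:
+ */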
_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
batch->key = key;
batch->hash = hash;
rsc->bc_batch_mask = (1 << batch->idx);
}
+ mtx_unlock(&ctx->screen->lock);
+
return batch;
}