return false;
}
-static void
-batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
+void
+fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
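+	/* no-op if 'dep' is already in this batch's dependency bitmask */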
if (batch->dependents_mask & (1 << dep->idx))
return;
* fd_bc_invalidate_batch()
*/
fd_batch_reference(&b, dep);
-				batch_add_dep(batch, b);
+				fd_batch_add_dep(batch, b);
fd_bc_invalidate_batch(b, false);
fd_batch_reference_locked(&b, NULL);
}
fd_batch_reference_locked(&rsc->write_batch, batch);
} else {
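+		/* a read of a resource with a pending writer: make this batch
+		 * depend on the writer so the writer flushes first
+		 */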
if (rsc->write_batch) {
-			batch_add_dep(batch, rsc->write_batch);
+			fd_batch_add_dep(batch, rsc->write_batch);
fd_bc_invalidate_batch(rsc->write_batch, false);
}
}
void fd_batch_reset(struct fd_batch *batch);
void fd_batch_sync(struct fd_batch *batch);
void fd_batch_flush(struct fd_batch *batch, bool sync, bool force);
+void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep);
void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
void fd_batch_check_size(struct fd_batch *batch);
}
}
+/* A deferred flush doesn't actually flush; instead it marks the current
+ * batch as depending on every other batch associated with the context.
+ * So when the current batch eventually gets flushed, all the batches
+ * that came before it get flushed as well.
+ */
+void
+fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+	struct fd_batch *current_batch = ctx->batch;
+	struct hash_entry *entry;
+
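+	/* the batch cache is shared by all contexts on the screen, so
+	 * walk it under the screen lock:
+	 */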
+	mtx_lock(&ctx->screen->lock);
+
+	hash_table_foreach(cache->ht, entry) {
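+		/* skip the current batch itself, and any batches owned by
+		 * other contexts:
+		 */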
+		struct fd_batch *batch = entry->data;
+		if (batch == current_batch)
+			continue;
+		if (batch->ctx == ctx)
+			fd_batch_add_dep(current_batch, batch);
+	}
+
+	mtx_unlock(&ctx->screen->lock);
+}
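+
+/* Usage sketch (illustrative, not part of this patch): a state tracker
+ * opts in to this path through the gallium flush hook, e.g.
+ *
+ *	pipe->flush(pipe, &fence, PIPE_FLUSH_DEFERRED);
+ *
+ * (fence may be NULL); the context's flush callback below routes the
+ * flag here.
+ */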
+
void
fd_bc_invalidate_context(struct fd_context *ctx)
{
void fd_bc_fini(struct fd_batch_cache *cache);
void fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx);
+void fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx);
void fd_bc_invalidate_context(struct fd_context *ctx);
void fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy);
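+	/* three cases: with reordering disabled there is only the current
+	 * batch to flush; a deferred flush just records dependencies; a
+	 * real flush kicks off every batch owned by this context.
+	 */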
if (!ctx->screen->reorder) {
fd_batch_flush(ctx->batch, true, false);
+	} else if (flags & PIPE_FLUSH_DEFERRED) {
+		fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
} else {
fd_bc_flush(&ctx->screen->batch_cache, ctx);
}