#include "freedreno_batch.h"
#include "freedreno_context.h"
+#include "freedreno_fence.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"
size = 0x100000;
}
- batch->draw = fd_ringbuffer_new(ctx->screen->pipe, size);
- batch->binning = fd_ringbuffer_new(ctx->screen->pipe, size);
- batch->gmem = fd_ringbuffer_new(ctx->screen->pipe, size);
+ batch->draw = fd_ringbuffer_new(ctx->pipe, size);
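+ /* only draw batches need the binning and gmem rings; a nondraw batch
+ * gets just the draw ring (see fd_batch_create() below).
+ */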
+ if (!batch->nondraw) {
+ batch->binning = fd_ringbuffer_new(ctx->pipe, size);
+ batch->gmem = fd_ringbuffer_new(ctx->pipe, size);
- fd_ringbuffer_set_parent(batch->gmem, NULL);
- fd_ringbuffer_set_parent(batch->draw, batch->gmem);
- fd_ringbuffer_set_parent(batch->binning, batch->gmem);
+ fd_ringbuffer_set_parent(batch->gmem, NULL);
+ fd_ringbuffer_set_parent(batch->draw, batch->gmem);
+ fd_ringbuffer_set_parent(batch->binning, batch->gmem);
+ } else {
+ fd_ringbuffer_set_parent(batch->draw, NULL);
+ }
batch->in_fence_fd = -1;
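+ /* the fence is created up front, presumably so a fence for this batch
+ * can be handed out before the batch is actually flushed (batch_fini()
+ * populates it with a dummy value if the batch never flushes).
+ */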
+ batch->fence = fd_fence_create(batch);
batch->cleared = batch->partial_cleared = 0;
batch->restore = batch->resolve = 0;
}
struct fd_batch *
-fd_batch_create(struct fd_context *ctx)
+fd_batch_create(struct fd_context *ctx, bool nondraw)
{
struct fd_batch *batch = CALLOC_STRUCT(fd_batch);
pipe_reference_init(&batch->reference, 1);
batch->ctx = ctx;
+ batch->nondraw = nondraw;
batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
if (batch->in_fence_fd != -1)
close(batch->in_fence_fd);
+ /* in case batch wasn't flushed but fence was created: */
+ fd_fence_populate(batch->fence, 0, -1);
+
+ fd_fence_ref(NULL, &batch->fence, NULL);
+
fd_ringbuffer_del(batch->draw);
- fd_ringbuffer_del(batch->binning);
- fd_ringbuffer_del(batch->gmem);
+ if (!batch->nondraw) {
+ fd_ringbuffer_del(batch->binning);
+ fd_ringbuffer_del(batch->gmem);
+ } else {
+ debug_assert(!batch->binning);
+ debug_assert(!batch->gmem);
+ }
if (batch->lrz_clear) {
fd_ringbuffer_del(batch->lrz_clear);
batch->lrz_clear = NULL;
foreach_batch(dep, cache, batch->dependents_mask) {
if (flush)
- fd_batch_flush(dep, false);
+ fd_batch_flush(dep, false, false);
fd_batch_reference(&dep, NULL);
}
}
static void
-batch_flush(struct fd_batch *batch)
+batch_flush(struct fd_batch *batch, bool force)
{
DBG("%p: needs_flush=%d", batch, batch->needs_flush);
- if (!batch->needs_flush)
+ if (!batch->needs_flush) {
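+ /* a forced flush with nothing to render still submits a no-op to the
+ * kernel, presumably so that a fence can be generated (see the @force
+ * note on fd_batch_flush()).
+ */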
+ if (force) {
+ fd_gmem_render_noop(batch);
+ goto out;
+ }
return;
+ }
batch->needs_flush = false;
debug_assert(batch->reference.count > 0);
+out:
if (batch == batch->ctx->batch) {
batch_reset(batch);
} else {
}
}
-/* NOTE: could drop the last ref to batch */
+/* NOTE: could drop the last ref to batch
+ *
+ * @sync: synchronize with flush_queue, ensuring the batch is *actually*
+ * flushed to the kernel before this returns, as opposed to just being
+ * queued to be flushed
+ * @force: force a flush even if there is no rendering, mostly useful if
+ * you need a fence to sync on
+ */
void
-fd_batch_flush(struct fd_batch *batch, bool sync)
+fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
{
/* NOTE: we need to hold an extra ref across the body of flush,
* since the last ref to this batch could be dropped when cleaning
* up used_resources
*/
struct fd_batch *tmp = NULL;
+
fd_batch_reference(&tmp, batch);
- batch_flush(tmp);
+ batch_flush(tmp, force);
if (sync)
fd_batch_sync(tmp);
fd_batch_reference(&tmp, NULL);
return false;
}
-static void
-batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
+void
+fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
if (batch->dependents_mask & (1 << dep->idx))
return;
if (batch_depends_on(dep, batch)) {
DBG("%p: flush forced on %p!", batch, dep);
mtx_unlock(&batch->ctx->screen->lock);
- fd_batch_flush(dep, false);
+ fd_batch_flush(dep, false, false);
mtx_lock(&batch->ctx->screen->lock);
} else {
struct fd_batch *other = NULL;
DBG("%p: %s %p", batch, write ? "write" : "read", rsc);
+ if (write)
+ rsc->valid = true;
+
/* note, invalidate write batch, to avoid further writes to rsc
* resulting in a write-after-read hazard.
*/
if (rsc->batch_mask != (1 << batch->idx)) {
struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
struct fd_batch *dep;
+
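+ /* if another batch is already writing this resource, flush it
+ * synchronously and invalidate it before this batch becomes the
+ * new writer:
+ */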
+ if (rsc->write_batch && rsc->write_batch != batch) {
+ struct fd_batch *b = NULL;
+ fd_batch_reference(&b, rsc->write_batch);
+
+ mtx_unlock(&batch->ctx->screen->lock);
+ fd_batch_flush(b, true, false);
+ mtx_lock(&batch->ctx->screen->lock);
+
+ fd_bc_invalidate_batch(b, false);
+ fd_batch_reference_locked(&b, NULL);
+ }
+
foreach_batch(dep, cache, rsc->batch_mask) {
struct fd_batch *b = NULL;
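+ /* no need to add a dependency on ourself: */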
+ if (dep == batch)
+ continue;
- /* note that batch_add_dep could flush and unref dep, so
+ /* note that fd_batch_add_dep() could flush and unref dep, so
* we need to hold a reference to keep it live for the
* fd_bc_invalidate_batch()
*/
fd_batch_reference(&b, dep);
- batch_add_dep(batch, b);
+ fd_batch_add_dep(batch, b);
fd_bc_invalidate_batch(b, false);
fd_batch_reference_locked(&b, NULL);
}
fd_batch_reference_locked(&rsc->write_batch, batch);
} else {
if (rsc->write_batch) {
- batch_add_dep(batch, rsc->write_batch);
+ fd_batch_add_dep(batch, rsc->write_batch);
fd_bc_invalidate_batch(rsc->write_batch, false);
}
}
struct fd_ringbuffer *ring = batch->draw;
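+ /* flush when the draw ring is nearly full, or on every draw when
+ * FD_DBG_FLUSH is set:
+ */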
if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
(fd_mesa_debug & FD_DBG_FLUSH))
- fd_batch_flush(batch, true);
+ fd_batch_flush(batch, true, false);
}
/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already