#include "freedreno_batch.h"
#include "freedreno_context.h"
+#include "freedreno_fence.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"
fd_ringbuffer_set_parent(batch->binning, batch->gmem);
batch->in_fence_fd = -1;
+ batch->fence = fd_fence_create(batch);
batch->cleared = batch->partial_cleared = 0;
batch->restore = batch->resolve = 0;
if (batch->in_fence_fd != -1)
close(batch->in_fence_fd);
+ /* in case batch wasn't flushed but fence was created: */
+ fd_fence_populate(batch->fence, 0, -1);
+
+ fd_fence_ref(NULL, &batch->fence, NULL);
+
fd_ringbuffer_del(batch->draw);
fd_ringbuffer_del(batch->binning);
fd_ringbuffer_del(batch->gmem);
foreach_batch(dep, cache, batch->dependents_mask) {
if (flush)
- fd_batch_flush(dep, false);
+ fd_batch_flush(dep, false, false);
fd_batch_reference(&dep, NULL);
}
}
static void
-batch_flush(struct fd_batch *batch)
+batch_flush(struct fd_batch *batch, bool force)
{
DBG("%p: needs_flush=%d", batch, batch->needs_flush);
- if (!batch->needs_flush)
+ if (!batch->needs_flush) {
+ if (force) {
+ fd_gmem_render_noop(batch);
+ goto out;
+ }
return;
+ }
batch->needs_flush = false;
debug_assert(batch->reference.count > 0);
+out:
if (batch == batch->ctx->batch) {
batch_reset(batch);
} else {
}
}
-/* NOTE: could drop the last ref to batch */
+/* NOTE: could drop the last ref to batch
+ *
+ * @sync: synchronize with flush_queue; ensures the batch is *actually*
+ * flushed to the kernel before this returns, as opposed to just being
+ * queued to be flushed
+ * @force: force a flush even if no rendering has been done, mostly useful
+ * if you need a fence to sync on
+ */
void
-fd_batch_flush(struct fd_batch *batch, bool sync)
+fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
{
/* NOTE: we need to hold an extra ref across the body of flush,
* since the last ref to this batch could be dropped during cleanup
*/
struct fd_batch *tmp = NULL;
fd_batch_reference(&tmp, batch);
- batch_flush(tmp);
+ batch_flush(tmp, force);
if (sync)
fd_batch_sync(tmp);
fd_batch_reference(&tmp, NULL);
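
/* Usage sketch (illustration only, not part of the patch): a caller that
 * needs a fence to wait on, even when the batch recorded no rendering,
 * can now pass force=true so that a no-op submit still populates the
 * batch's fence:
 *
 *   fd_batch_flush(batch, true, true);    // sync + force: fence usable
 *
 * whereas ordinary internal flushes keep the old behavior and skip the
 * kernel submit entirely when there is nothing to flush:
 *
 *   fd_batch_flush(batch, false, false);
 */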
if (batch_depends_on(dep, batch)) {
DBG("%p: flush forced on %p!", batch, dep);
mtx_unlock(&batch->ctx->screen->lock);
- fd_batch_flush(dep, false);
+ fd_batch_flush(dep, false, false);
mtx_lock(&batch->ctx->screen->lock);
} else {
struct fd_batch *other = NULL;
struct fd_ringbuffer *ring = batch->draw;
if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
(fd_mesa_debug & FD_DBG_FLUSH))
- fd_batch_flush(batch, true);
+ fd_batch_flush(batch, true, false);
}
/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
int in_fence_fd;
bool needs_out_fence_fd;
+ struct pipe_fence_handle *fence;
struct fd_context *ctx;
void fd_batch_reset(struct fd_batch *batch);
void fd_batch_sync(struct fd_batch *batch);
-void fd_batch_flush(struct fd_batch *batch, bool sync);
+void fd_batch_flush(struct fd_batch *batch, bool sync, bool force);
void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
void fd_batch_check_size(struct fd_batch *batch);
hash_table_foreach(cache->ht, entry) {
struct fd_batch *batch = NULL;
+ /* hold a reference since we can drop screen->lock: */
fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
if (batch->ctx == ctx) {
mtx_unlock(&ctx->screen->lock);
fd_batch_reference(&last_batch, batch);
- fd_batch_flush(batch, false);
+ fd_batch_flush(batch, false, false);
mtx_lock(&ctx->screen->lock);
}
fd_batch_reference_locked(&batch, NULL);
*/
mtx_unlock(&ctx->screen->lock);
DBG("%p: too many batches! flush forced!", flush_batch);
- fd_batch_flush(flush_batch, true);
+ fd_batch_flush(flush_batch, true, false);
mtx_lock(&ctx->screen->lock);
/* While the resources get cleaned up automatically, the flush_batch
#include "util/u_upload_mgr.h"
static void
-fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
+fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
unsigned flags)
{
struct fd_context *ctx = fd_context(pctx);
+ struct pipe_fence_handle *fence = NULL;
+
+ /* Take a ref to the batch's fence (batch can be unref'd when flushed): */
+ fd_fence_ref(pctx->screen, &fence, ctx->batch->fence);
if (flags & PIPE_FLUSH_FENCE_FD)
ctx->batch->needs_out_fence_fd = true;
if (!ctx->screen->reorder) {
- fd_batch_flush(ctx->batch, true);
+ fd_batch_flush(ctx->batch, true, false);
} else {
fd_bc_flush(&ctx->screen->batch_cache, ctx);
}
- if (fence) {
- /* if there hasn't been any rendering submitted yet, we might not
- * have actually created a fence
- */
- if (!ctx->last_fence || ctx->batch->needs_out_fence_fd) {
- ctx->batch->needs_flush = true;
- fd_gmem_render_noop(ctx->batch);
- fd_batch_reset(ctx->batch);
- }
- fd_fence_ref(pctx->screen, fence, ctx->last_fence);
- }
+ if (fencep)
+ fd_fence_ref(pctx->screen, fencep, fence);
+
+ fd_fence_ref(pctx->screen, &fence, NULL);
}
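
/* Illustration (hypothetical caller, not part of this patch): a state
 * tracker reaching this through pipe_context::flush now receives the
 * batch's own fence instead of a context-wide last_fence, and the extra
 * local ref above keeps that fence alive even if flushing drops the last
 * ref to the batch:
 *
 *   struct pipe_fence_handle *fence = NULL;
 *   pctx->flush(pctx, &fence, 0);
 *   pscreen->fence_finish(pscreen, pctx, fence, PIPE_TIMEOUT_INFINITE);
 *   pscreen->fence_reference(pscreen, &fence, NULL);
 */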
static void
fd_batch_reference(&ctx->batch, NULL); /* unref current batch */
fd_bc_invalidate_context(ctx);
- fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);
-
fd_prog_fini(pctx);
if (ctx->blitter)
*/
struct fd_batch *batch;
- struct pipe_fence_handle *last_fence;
-
/* Are we in process of shadowing a resource? Used to detect recursion
* in transfer_map, and skip unneeded synchronization.
*/
struct pipe_fence_handle {
struct pipe_reference reference;
+ /* fence holds a weak reference to the batch until the batch is flushed,
+ * at which point fd_fence_populate() is called and timestamp and possibly
+ * fence_fd become valid and the weak reference is dropped.
+ */
+ struct fd_batch *batch;
struct fd_context *ctx;
struct fd_screen *screen;
int fence_fd;
uint32_t timestamp;
};
+static void fence_flush(struct pipe_fence_handle *fence)
+{
+ if (fence->batch)
+ fd_batch_flush(fence->batch, true, true);
+ debug_assert(!fence->batch);
+}
+
+void fd_fence_populate(struct pipe_fence_handle *fence,
+ uint32_t timestamp, int fence_fd)
+{
+ if (!fence->batch)
+ return;
+ fence->timestamp = timestamp;
+ fence->fence_fd = fence_fd;
+ fence->batch = NULL;
+}
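
/* Lifecycle sketch (illustration only): the weak batch reference is set at
 * creation and cleared exactly once when the batch gets flushed:
 *
 *   fence = fd_fence_create(batch);      // fence->batch = batch (weak ref)
 *   ...                                  // later, flush_ring() at flush time:
 *   fd_fence_populate(fence, ts, fd);    // timestamp/fence_fd valid, batch = NULL
 *
 * fence_flush() above forces this on demand, so waiting on a fence whose
 * batch was never flushed cannot block forever.
 */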
+
static void fd_fence_destroy(struct pipe_fence_handle *fence)
{
if (fence->fence_fd != -1)
struct pipe_fence_handle *fence,
uint64_t timeout)
{
+ fence_flush(fence);
+
if (fence->fence_fd != -1) {
int ret = sync_wait(fence->fence_fd, timeout / 1000000);
return ret == 0;
return true;
}
+static struct pipe_fence_handle * fence_create(struct fd_context *ctx,
+ struct fd_batch *batch, uint32_t timestamp, int fence_fd)
+{
+ struct pipe_fence_handle *fence;
+
+ fence = CALLOC_STRUCT(pipe_fence_handle);
+ if (!fence)
+ return NULL;
+
+ pipe_reference_init(&fence->reference, 1);
+
+ fence->batch = batch;
+ fence->ctx = ctx;
+ fence->screen = ctx->screen;
+ fence->timestamp = timestamp;
+ fence->fence_fd = fence_fd;
+
+ return fence;
+}
+
void fd_create_fence_fd(struct pipe_context *pctx,
struct pipe_fence_handle **pfence, int fd)
{
- *pfence = fd_fence_create(fd_context(pctx), 0, dup(fd));
+ *pfence = fence_create(fd_context(pctx), NULL, 0, dup(fd));
}
void fd_fence_server_sync(struct pipe_context *pctx,
struct fd_context *ctx = fd_context(pctx);
struct fd_batch *batch = ctx->batch;
+ fence_flush(fence);
+
if (sync_accumulate("freedreno", &batch->in_fence_fd, fence->fence_fd)) {
/* error */
}
int fd_fence_get_fd(struct pipe_screen *pscreen,
struct pipe_fence_handle *fence)
{
+ fence_flush(fence);
return dup(fence->fence_fd);
}
-struct pipe_fence_handle * fd_fence_create(struct fd_context *ctx,
- uint32_t timestamp, int fence_fd)
+struct pipe_fence_handle * fd_fence_create(struct fd_batch *batch)
{
- struct pipe_fence_handle *fence;
-
- fence = CALLOC_STRUCT(pipe_fence_handle);
- if (!fence)
- return NULL;
-
- pipe_reference_init(&fence->reference, 1);
-
- fence->ctx = ctx;
- fence->screen = ctx->screen;
- fence->timestamp = timestamp;
- fence->fence_fd = fence_fd;
-
- return fence;
+ return fence_create(batch->ctx, batch, 0, -1);
}
#include "pipe/p_context.h"
+void fd_fence_populate(struct pipe_fence_handle *fence,
+ uint32_t timestamp, int fence_fd);
void fd_fence_ref(struct pipe_screen *pscreen,
struct pipe_fence_handle **ptr,
struct pipe_fence_handle *pfence);
int fd_fence_get_fd(struct pipe_screen *pscreen,
struct pipe_fence_handle *pfence);
-struct fd_context;
-struct pipe_fence_handle * fd_fence_create(struct fd_context *ctx,
- uint32_t timestamp, int fence_fd);
+struct fd_batch;
+struct pipe_fence_handle * fd_fence_create(struct fd_batch *batch);
#endif /* FREEDRENO_FENCE_H_ */
static void
flush_ring(struct fd_batch *batch)
{
- struct fd_context *ctx = batch->ctx;
+ uint32_t timestamp;
int out_fence_fd = -1;
fd_ringbuffer_flush2(batch->gmem, batch->in_fence_fd,
batch->needs_out_fence_fd ? &out_fence_fd : NULL);
- fd_fence_ref(&ctx->screen->base, &ctx->last_fence, NULL);
- ctx->last_fence = fd_fence_create(ctx,
- fd_ringbuffer_timestamp(batch->gmem), out_fence_fd);
+ timestamp = fd_ringbuffer_timestamp(batch->gmem);
+ fd_fence_populate(batch->fence, timestamp, out_fence_fd);
}
void
* spin forever:
*/
if (aq->no_wait_cnt++ > 5)
- fd_batch_flush(rsc->write_batch, false);
+ fd_batch_flush(rsc->write_batch, false, false);
return false;
}
}
if (rsc->write_batch)
- fd_batch_flush(rsc->write_batch, true);
+ fd_batch_flush(rsc->write_batch, true, false);
/* get the result: */
fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
* spin forever:
*/
if (hq->no_wait_cnt++ > 5)
- fd_batch_flush(rsc->write_batch, false);
+ fd_batch_flush(rsc->write_batch, false, false);
return false;
}
struct fd_resource *rsc = fd_resource(start->prsc);
if (rsc->write_batch)
- fd_batch_flush(rsc->write_batch, true);
+ fd_batch_flush(rsc->write_batch, true, false);
/* some piglit tests at least do query with no draws, I guess: */
if (!rsc->bo)
mtx_unlock(&ctx->screen->lock);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
- fd_batch_flush(batch, false);
+ fd_batch_flush(batch, false, false);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
fd_batch_sync(batch);
}
assert(rsc->batch_mask == 0);
} else {
- fd_batch_flush(write_batch, true);
+ fd_batch_flush(write_batch, true, false);
}
assert(!rsc->write_batch);
}
struct fd_resource *rsc = fd_resource(prsc);
if (rsc->write_batch)
- fd_batch_flush(rsc->write_batch, true);
+ fd_batch_flush(rsc->write_batch, true, false);
assert(!rsc->write_batch);
}
* multiple times to the same surface), so we might as
* well go ahead and flush this one:
*/
- fd_batch_flush(old_batch, false);
+ fd_batch_flush(old_batch, false, false);
}
fd_batch_reference(&old_batch, NULL);
} else {
DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
framebuffer->cbufs[0], framebuffer->zsbuf);
- fd_batch_flush(ctx->batch, false);
+ fd_batch_flush(ctx->batch, false, false);
}
cso = &ctx->batch->framebuffer;