struct fd_batch *batch;
uint32_t idx;
+ /* For normal draw batches, pctx->set_framebuffer_state() handles
+ * this, but for nondraw batches, this is a nice central location
+ * to handle them all.
+ */
+ if (nondraw)
+ fd_context_switch_from(ctx);
+
fd_screen_lock(ctx->screen);
while ((idx = ffs(~cache->batch_mask)) == 0) {
/* ... no free slot: flush the oldest batch to free one up ... */
}
idx--; /* ffs() returns a 1-based bit position */
/* ... create the new batch ... */
debug_assert(cache->batches[idx] == NULL);
cache->batches[idx] = batch;
+ if (nondraw)
+ fd_context_switch_to(ctx, batch);
+
out:
fd_screen_unlock(ctx->screen);
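
The comment at the top of the hunk above notes that pctx->set_framebuffer_state() already covers the draw-batch case. For context, here is a minimal sketch of that path, assuming the fd_context_switch_from()/fd_context_switch_to() helpers introduced later in this patch; it is simplified from the real fd_set_framebuffer_state() and omits dirty-state bookkeeping:

   /* Sketch only: simplified draw-batch path, not the verbatim driver code. */
   static void
   fd_set_framebuffer_state(struct pipe_context *pctx,
                            const struct pipe_framebuffer_state *framebuffer)
   {
      struct fd_context *ctx = fd_context(pctx);

      /* Flush the current batch first if it already holds an in-fence,
       * so the fence doesn't leak across the batch switch:
       */
      fd_context_switch_from(ctx);

      util_copy_framebuffer_state(&ctx->framebuffer, framebuffer);

      /* Drop the current batch; fd_context_batch() creates the new one
       * lazily at the next draw and calls fd_context_switch_to() there,
       * picking up any fd stashed in ctx->in_fence_fd:
       */
      fd_batch_reference(&ctx->batch, NULL);
   }
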
fd_fence_ref(&ctx->last_fence, NULL);
+ if (ctx->in_fence_fd != -1)
+ close(ctx->in_fence_fd);
+
util_copy_framebuffer_state(&ctx->framebuffer, NULL);
fd_batch_reference(&ctx->batch, NULL); /* unref current batch */
fd_bc_invalidate_context(ctx);
ctx->screen = screen;
ctx->pipe = fd_pipe_new2(screen->dev, FD_PIPE_3D, prio);
+ ctx->in_fence_fd = -1;
+
if (fd_device_version(screen->dev) >= FD_VERSION_ROBUSTNESS) {
ctx->context_reset_count = fd_get_reset_count(ctx, true);
ctx->global_reset_count = fd_get_reset_count(ctx, false);
#ifndef FREEDRENO_CONTEXT_H_
#define FREEDRENO_CONTEXT_H_
+#include "util/libsync.h" /* for sync_accumulate() */
+
#include "pipe/p_context.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
*/
struct pipe_fence_handle *last_fence;
+ /* Fence fd we are told to wait on via ->fence_server_sync() (or -1
+ * if none). The in-fence is transferred over to the batch on the
+ * next draw/blit/grid.
+ *
+ * The reason for this extra complexity is that apps will typically
+ * do eglWaitSyncKHR()/etc at the beginning of the frame, before the
+ * first draw. But mesa/st doesn't flush down framebuffer state
+ * change until we hit a draw, so at ->fence_server_sync() time, we
+ * don't yet have the correct batch. If we created a batch at that
+ * point, it would be the wrong one, and we'd have to flush it
+ * prematurely, causing us to stall early in the frame where we could
+ * be building up cmdstream.
+ */
+ int in_fence_fd;
+
/* track last known reset status globally and per-context to
* determine if more resets occurred since then. If global reset
* count increases, it means some other context crashed. If
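
For reference, the producer side of this handshake is ->fence_server_sync(). A minimal sketch, assuming the fence object exposes its native fd as fence->fence_fd (the field name here is illustrative; see freedreno_fence.c for the real implementation):

   /* Sketch only: stash the imported fence fd instead of creating a batch. */
   static void
   fd_fence_server_sync(struct pipe_context *pctx,
                        struct pipe_fence_handle *fence)
   {
      struct fd_context *ctx = fd_context(pctx);

      /* Fences without a native fd have nothing for the GPU to wait on: */
      if (fence->fence_fd == -1)
         return;

      /* Merge into ctx->in_fence_fd rather than flushing; the fd is
       * handed to the next batch in fd_context_switch_to():
       */
      sync_accumulate("freedreno", &ctx->in_fence_fd, fence->fence_fd);
   }
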
return (1 << prim) & ctx->primtype_mask;
}
+/**
+ * If we have a pending fence_server_sync() (GPU-side sync), flush now.
+ * The alternative, trying to track this with batch dependencies, gets
+ * hairy quickly.
+ *
+ * Call this before switching to a different batch, to handle this case.
+ */
+static inline void
+fd_context_switch_from(struct fd_context *ctx)
+{
+ if (ctx->batch && (ctx->batch->in_fence_fd != -1))
+ fd_batch_flush(ctx->batch);
+}
+
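
These two helpers are meant to bracket any batch switch: switch-from on the outgoing batch, switch-to (defined just below) on the incoming one. An illustrative pairing, where new_batch is a hypothetical batch obtained from the batch cache:

   fd_context_switch_from(ctx);           /* flush old batch if it holds an in-fence */
   fd_batch_reference(&ctx->batch, new_batch);
   fd_context_switch_to(ctx, ctx->batch); /* hand any stashed fd to the new batch */
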
+/**
+ * If there is a pending fence-fd that we need to sync on, this will
+ * transfer the reference to the next batch we are going to render
+ * to.
+ */
+static inline void
+fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch)
+{
+ if (ctx->in_fence_fd != -1) {
+ sync_accumulate("freedreno", &batch->in_fence_fd, ctx->in_fence_fd);
+ /* sync_accumulate() doesn't consume the source fd, so close it: */
+ close(ctx->in_fence_fd);
+ ctx->in_fence_fd = -1;
+ }
+}
+
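
sync_accumulate() comes from Mesa's bundled util/libsync.h; roughly, it merges fd2 into *fd1 via the kernel's SYNC_IOC_MERGE ioctl and does not take ownership of fd2, which is why the caller above still closes ctx->in_fence_fd. Approximately (error handling trimmed):

   /* Approximate shape of sync_accumulate() from util/libsync.h;
    * fd2 remains owned by the caller.
    */
   static inline int
   sync_accumulate(const char *name, int *fd1, int fd2)
   {
      int newfd;

      if (*fd1 < 0) {
         *fd1 = dup(fd2);                  /* first fence: just keep a copy */
         return 0;
      }

      newfd = sync_merge(name, *fd1, fd2); /* SYNC_IOC_MERGE */
      if (newfd < 0)
         return -errno;

      close(*fd1);                         /* replace accumulator with merged fd */
      *fd1 = newfd;
      return 0;
   }
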
static inline struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
ctx->batch = batch;
fd_context_all_dirty(ctx);
}
+ fd_context_switch_to(ctx, ctx->batch);
return ctx->batch;
}