batch_init(batch);
+ fd_screen_assert_locked(ctx->screen);
+ if (BATCH_DEBUG) {
+ _mesa_set_add(ctx->screen->live_batches, batch);
+ }
+
return batch;
}
fd_context_assert_locked(batch->ctx);
+ if (BATCH_DEBUG) {
+ _mesa_set_remove_key(ctx->screen->live_batches, batch);
+ }
+
fd_bc_invalidate_batch(batch, true);
batch_reset_resources_locked(batch);
#include "freedreno_util.h"
+#ifdef DEBUG
+# define BATCH_DEBUG (fd_mesa_debug & FD_DBG_MSGS)
+#else
+# define BATCH_DEBUG 0
+#endif
+
struct fd_context;
struct fd_resource;
enum fd_resource_status;
bc_flush(cache, ctx, true);
}
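+/* Return true if the batch is still tracked in one of the batch-cache's
+ * slots, ie. it has not yet been evicted/flushed out of the cache:
+ */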
+static bool
+batch_in_cache(struct fd_batch_cache *cache, struct fd_batch *batch)
+{
+ struct fd_batch *b;
+
+ foreach_batch (b, cache, cache->batch_mask)
+ if (b == batch)
+ return true;
+
+ return false;
+}
+
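+/* Debug helper: print a printf-style header followed by one line per live
+ * (not yet freed) batch, flagging batches that still need a flush and
+ * batches that are no longer tracked in the batch-cache ("ORPHAN"):
+ */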
+void
+fd_bc_dump(struct fd_screen *screen, const char *fmt, ...)
+{
+ struct fd_batch_cache *cache = &screen->batch_cache;
+
+ if (!BATCH_DEBUG)
+ return;
+
+ fd_screen_lock(screen);
+
+ va_list ap;
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+
+ set_foreach (screen->live_batches, entry) {
+ struct fd_batch *batch = (struct fd_batch *)entry->key;
+ printf(" %p<%u>%s%s\n", batch, batch->seqno,
+ batch->needs_flush ? ", NEEDS FLUSH" : "",
+ batch_in_cache(cache, batch) ? "" : ", ORPHAN");
+ }
+
+ printf("----\n");
+
+ fd_screen_unlock(screen);
+}
+
void
fd_bc_invalidate_context(struct fd_context *ctx)
{
struct fd_resource;
struct fd_batch;
struct fd_context;
+struct fd_screen;
struct hash_table;
void fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx);
void fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx);
+void fd_bc_dump(struct fd_screen *screen, const char *fmt, ...) _util_printf_format(2, 3);
void fd_bc_invalidate_context(struct fd_context *ctx);
void fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy);
/* In some sequence of events, we can end up with a last_fence that is
* not an "fd" fence, which results in eglDupNativeFenceFDANDROID()
* errors.
- *
*/
if (flags & PIPE_FLUSH_FENCE_FD)
fd_fence_ref(&ctx->last_fence, NULL);
*/
if (ctx->last_fence) {
fd_fence_ref(&fence, ctx->last_fence);
+ fd_bc_dump(ctx->screen, "%p: reuse last_fence, remaining:\n", ctx);
goto out;
}
- if (!batch)
+ if (!batch) {
+ fd_bc_dump(ctx->screen, "%p: NULL batch, remaining:\n", ctx);
return;
+ }
/* Take a ref to the batch's fence (batch can be unref'd when flushed): */
fd_fence_ref(&fence, batch->fence);
if (flags & PIPE_FLUSH_FENCE_FD)
batch->needs_out_fence_fd = true;
+ fd_bc_dump(ctx->screen, "%p: flushing %p<%u>, flags=0x%x, pending:\n",
+ ctx, batch, batch->seqno, flags);
+
if (!ctx->screen->reorder) {
fd_batch_flush(batch);
} else if (flags & PIPE_FLUSH_DEFERRED) {
fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
} else {
fd_bc_flush(&ctx->screen->batch_cache, ctx);
}
+ fd_bc_dump(ctx->screen, "%p: remaining:\n", ctx);
+
out:
if (fencep)
fd_fence_ref(fencep, fence);
simple_mtx_destroy(&screen->lock);
ralloc_free(screen->compiler);
+ ralloc_free(screen->live_batches);
free(screen->perfcntr_queries);
free(screen);
if (fd_device_version(dev) >= FD_VERSION_UNLIMITED_CMDS)
screen->reorder = !(fd_mesa_debug & FD_DBG_INORDER);
+ if (BATCH_DEBUG)
+ screen->live_batches = _mesa_pointer_set_create(NULL);
+
fd_bc_init(&screen->batch_cache);
list_inithead(&screen->context_list);
const uint64_t *supported_modifiers;
struct renderonly *ro;
+
+ /* when BATCH_DEBUG is enabled, tracks fd_batch's that have not yet been
+ * freed:
+ */
+ struct set *live_batches;
};
static inline struct fd_screen *