uint64_t prims_generated;
uint64_t draw_calls;
uint64_t batch_total, batch_sysmem, batch_gmem, batch_restore;
+ uint64_t staging_uploads, shadow_uploads;
} stats;
/* Current batch.. the rule here is that you can deref ctx->batch
{"batches-gmem", FD_QUERY_BATCH_GMEM, {0}},
{"restores", FD_QUERY_BATCH_RESTORE, {0}},
{"prims-emitted", PIPE_QUERY_PRIMITIVES_EMITTED, {0}},
+ {"staging", FD_QUERY_STAGING_UPLOADS, {0}},
+ {"shadow", FD_QUERY_SHADOW_UPLOADS, {0}},
};
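The table above is what the driver hands back through pipe_screen::get_driver_query_info, so the new counters become selectable by name in anything that enumerates driver queries. Assuming GALLIUM_HUD matches on these names the same way it does for the existing batch counters, graphing the new upload paths would look something like:

    GALLIUM_HUD=batches-gmem,staging,shadow ./app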
if (!info)
#define FD_QUERY_BATCH_SYSMEM (PIPE_QUERY_DRIVER_SPECIFIC + 2) /* batches using system memory (GMEM bypass) */
#define FD_QUERY_BATCH_GMEM (PIPE_QUERY_DRIVER_SPECIFIC + 3) /* batches using GMEM */
#define FD_QUERY_BATCH_RESTORE (PIPE_QUERY_DRIVER_SPECIFIC + 4) /* batches requiring GMEM restore */
+#define FD_QUERY_STAGING_UPLOADS (PIPE_QUERY_DRIVER_SPECIFIC + 5) /* texture/buffer uploads using staging blit */
+#define FD_QUERY_SHADOW_UPLOADS (PIPE_QUERY_DRIVER_SPECIFIC + 6) /* texture/buffer uploads that shadowed rsc */
void fd_query_screen_init(struct pipe_screen *pscreen);
void fd_query_context_init(struct pipe_context *pctx);
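For context, nothing freedreno-specific is needed to read these counters back: they go through the stock Gallium query entrypoints, and the sw-query machinery reports the difference between the counter value snapshotted at begin_query and at end_query. A minimal sketch (illustrative only; assumes an already-created pipe_context *pctx, and that the FD_QUERY_STAGING_UPLOADS define above is visible from its header):

#include <stdbool.h>
#include <stdio.h>
#include <inttypes.h>

#include "pipe/p_context.h"
/* plus the header carrying the FD_QUERY_* defines above */

/* Sketch: count staging uploads across a window of work.  The result is
 * the delta between the begin_query and end_query snapshots of
 * ctx->stats.staging_uploads.
 */
static void
print_staging_uploads(struct pipe_context *pctx)
{
	struct pipe_query *q =
		pctx->create_query(pctx, FD_QUERY_STAGING_UPLOADS, 0);
	union pipe_query_result res;

	pctx->begin_query(pctx, q);
	/* ... the upload-heavy workload under test ... */
	pctx->end_query(pctx, q);

	pctx->get_query_result(pctx, q, true /* wait */, &res);
	printf("staging uploads: %" PRIu64 "\n", res.u64);

	pctx->destroy_query(pctx, q);
}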
return ctx->stats.batch_gmem;
case FD_QUERY_BATCH_RESTORE:
return ctx->stats.batch_restore;
+ case FD_QUERY_STAGING_UPLOADS:
+ return ctx->stats.staging_uploads;
+ case FD_QUERY_SHADOW_UPLOADS:
+ return ctx->stats.shadow_uploads;
}
return 0;
}
case FD_QUERY_BATCH_SYSMEM:
case FD_QUERY_BATCH_GMEM:
case FD_QUERY_BATCH_RESTORE:
+ case FD_QUERY_STAGING_UPLOADS:
+ case FD_QUERY_SHADOW_UPLOADS:
return true;
default:
return false;
case FD_QUERY_BATCH_SYSMEM:
case FD_QUERY_BATCH_GMEM:
case FD_QUERY_BATCH_RESTORE:
+ case FD_QUERY_STAGING_UPLOADS:
+ case FD_QUERY_SHADOW_UPLOADS:
break;
default:
return NULL;
if (needs_flush && fd_try_shadow_resource(ctx, rsc, level, box)) {
needs_flush = busy = false;
rebind_resource(ctx, prsc);
+ ctx->stats.shadow_uploads++;
} else {
struct fd_resource *staging_rsc;
fd_batch_reference(&write_batch, NULL);
+ ctx->stats.staging_uploads++;
+
return buf;
}
}
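Worth spelling out the distinction the two new counters draw. On the shadow path, fd_try_shadow_resource swaps the resource over to fresh backing storage so the write can proceed without stalling on work still referencing the old storage (hence the rebind_resource() call afterwards); on the staging path, the upload is bounced through a temporary staging resource and blitted into place when the transfer is unmapped. Watching which counter climbs for a given workload shows which upload path it is actually hitting.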