static void
dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
{
- struct gl_context *ctx = brw_void;
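+ /* The perf context is now a pointer on brw_context rather than an embedded struct. */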
+ struct brw_context *brw = brw_void;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
struct gl_perf_query_object *o = query_void;
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- switch (obj->queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
- id,
- o->Used ? "Dirty," : "New,",
- o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
- obj->oa.bo ? "yes," : "no,",
- brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
- obj->oa.results_accumulated ? "accumulated" : "not accumulated");
- break;
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- DBG("%4d: %-6s %-8s BO: %-4s\n",
- id,
- o->Used ? "Dirty," : "New,",
- o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
- obj->pipeline_stats.bo ? "yes" : "no");
- break;
- default:
- unreachable("Unknown query type");
- break;
- }
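+ /* Print the common prefix here; the kind-specific state now comes from gen_perf_dump_query(). */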
+ DBG("%4d: %-6s %-8s ",
+ id,
+ o->Used ? "Dirty," : "New,",
+ o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"));
+ gen_perf_dump_query(perf_ctx, obj, &brw->batch);
}

static void
dump_perf_queries(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
- DBG("Queries: (Open queries = %d, OA users = %d)\n",
- brw->perf_ctx.n_active_oa_queries, brw->perf_ctx.n_oa_users);
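+ /* The active-query and OA-user counts are private to gen_perf now, so let it print them. */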
+ gen_perf_dump_query_count(brw->perf_ctx);
_mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}
GLuint *n_active)
{
struct brw_context *brw = brw_context(ctx);
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- const struct gen_perf_query_info *query =
- &perf_ctx->perf->queries[query_index];
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
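+ /* The config is no longer embedded in brw_context; fetch it through the accessor. */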
+ struct gen_perf_config *perf_cfg = gen_perf_config(perf_ctx);
+ const struct gen_perf_query_info *query = &perf_cfg->queries[query_index];
*name = query->name;
*data_size = query->data_size;
*n_counters = query->n_counters;
-
- switch (query->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- *n_active = perf_ctx->n_active_oa_queries;
- break;
-
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- *n_active = perf_ctx->n_active_pipeline_stats_queries;
- break;
-
- default:
- unreachable("Unknown query type");
- break;
- }
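+ /* gen_perf_active_queries() picks the count matching the query's kind (OA/raw vs. pipeline). */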
+ *n_active = gen_perf_active_queries(perf_ctx, query);
}
static GLuint
GLuint64 *raw_max)
{
struct brw_context *brw = brw_context(ctx);
+ struct gen_perf_config *perf_cfg = gen_perf_config(brw->perf_ctx);
const struct gen_perf_query_info *query =
- &brw->perf_ctx.perf->queries[query_index];
+ &perf_cfg->queries[query_index];
const struct gen_perf_query_counter *counter =
&query->counters[counter_index];
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
/* We can assume the frontend hides mistaken attempts to Begin a
* query object multiple times before its End. Similarly if an
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
DBG("End(%d)\n", o->Id);
gen_perf_end_query(perf_ctx, obj);
assert(!o->Ready);
- gen_perf_wait_query(&brw->perf_ctx, obj, &brw->batch);
+ gen_perf_wait_query(brw->perf_ctx, obj, &brw->batch);
}
static bool
if (o->Ready)
return true;
- return gen_perf_is_query_ready(&brw->perf_ctx, obj, &brw->batch);
+ return gen_perf_is_query_ready(brw->perf_ctx, obj, &brw->batch);
}
/**
*/
assert(o->Ready);
- gen_perf_get_query_data(&brw->perf_ctx, obj,
+ gen_perf_get_query_data(brw->perf_ctx, obj,
data_size, data, bytes_written);
}
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
struct brw_context *brw = brw_context(ctx);
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
struct gen_perf_query_object *obj = gen_perf_new_query(perf_ctx, query_index);
if (unlikely(!obj))
return NULL;
struct brw_perf_query_object *brw_query = calloc(1, sizeof(struct brw_perf_query_object));
- if (unlikely(!brw_query))
+ if (unlikely(!brw_query)) {
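+ /* Unwind the gen_perf allocation so a failed calloc doesn't leak it. */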
+ gen_perf_delete_query(perf_ctx, obj);
return NULL;
+ }
brw_query->query = obj;
return &brw_query->base;
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
/* We can assume that the frontend waits for a query to complete
* before ever calling into here, so we don't have to worry about
struct brw_context *brw = brw_context(ctx);
const struct gen_device_info *devinfo = &brw->screen->devinfo;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- if (perf_ctx->perf)
- return perf_ctx->perf->n_queries;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
+ struct gen_perf_config *perf_cfg = gen_perf_config(perf_ctx);
+
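+ /* A non-NULL config means the queries were already initialized for this context. */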
+ if (perf_cfg)
+ return perf_cfg->n_queries;
+
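+ /* Bail out before allocating anything if the kernel lacks OA metrics support. */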
+ if (!oa_metrics_kernel_support(brw->screen->driScrnPriv->fd, devinfo))
+ return 0;
- perf_ctx->perf = gen_perf_new(brw);
- struct gen_perf_config *perf_cfg = perf_ctx->perf;
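+ /* No config yet: create one and hook up the i965 buffer-object vtbl below. */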
+ perf_cfg = gen_perf_new(ctx);
perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc;
perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference;
gen_perf_init_context(perf_ctx, perf_cfg, brw, brw->bufmgr, devinfo,
brw->hw_ctx, brw->screen->driScrnPriv->fd);
-
- if (!oa_metrics_kernel_support(perf_ctx->drm_fd, devinfo))
- return 0;
-
- gen_perf_init_metrics(perf_cfg, devinfo, perf_ctx->drm_fd);
+ gen_perf_init_metrics(perf_cfg, devinfo, brw->screen->driScrnPriv->fd);
return perf_cfg->n_queries;
}