static void
dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
{
- struct gl_context *ctx = brw_void;
+ struct brw_context *ctx = brw_void;
+ struct gen_perf_context *perf_ctx = ctx->perf_ctx;
struct gl_perf_query_object *o = query_void;
struct brw_perf_query_object * brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- switch (obj->queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
- id,
- o->Used ? "Dirty," : "New,",
- o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
- obj->oa.bo ? "yes," : "no,",
- brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
- obj->oa.results_accumulated ? "accumulated" : "not accumulated");
- break;
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- DBG("%4d: %-6s %-8s BO: %-4s\n",
- id,
- o->Used ? "Dirty," : "New,",
- o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
- obj->pipeline_stats.bo ? "yes" : "no");
- break;
- default:
- unreachable("Unknown query type");
- break;
- }
+ DBG("%4d: %-6s %-8s ",
+ id,
+ o->Used ? "Dirty," : "New,",
+ o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"));
+ gen_perf_dump_query(perf_ctx, obj, &ctx->batch);
}
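For reference, a minimal sketch of the gen_perf_dump_query() helper the callback now delegates to, reconstructed from the switch deleted above (the exact body on the gen_perf side is an assumption):

void
gen_perf_dump_query(struct gen_perf_context *ctx,
                    struct gen_perf_query_object *obj,
                    void *current_batch)
{
   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("BO: %-4s OA data: %-10s %-15s\n",
          obj->oa.bo ? "yes," : "no,",
          gen_perf_is_query_ready(ctx, obj, current_batch) ?
             "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("BO: %-4s\n", obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
   }
}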
static void
dump_perf_queries(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
- DBG("Queries: (Open queries = %d, OA users = %d)\n",
- brw->perf_ctx.n_active_oa_queries, brw->perf_ctx.n_oa_users);
+ gen_perf_dump_query_count(brw->perf_ctx);
_mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}
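The active-query/OA-user counters move out of brw_context with this patch, so the deleted DBG line presumably reappears behind the new helper, along the lines of:

void
gen_perf_dump_query_count(struct gen_perf_context *perf_ctx)
{
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
}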
GLuint *n_active)
{
struct brw_context *brw = brw_context(ctx);
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- const struct gen_perf_query_info *query =
- &perf_ctx->perf->queries[query_index];
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
+ struct gen_perf_config *perf_cfg = gen_perf_config(perf_ctx);
+ const struct gen_perf_query_info *query = &perf_cfg->queries[query_index];
*name = query->name;
*data_size = query->data_size;
*n_counters = query->n_counters;
-
- switch (query->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- *n_active = perf_ctx->n_active_oa_queries;
- break;
-
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- *n_active = perf_ctx->n_active_pipeline_stats_queries;
- break;
-
- default:
- unreachable("Unknown query type");
- break;
- }
+ *n_active = gen_perf_active_queries(perf_ctx, query);
}
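A sketch of gen_perf_active_queries(), assuming the switch deleted above moved over essentially unchanged:

int
gen_perf_active_queries(struct gen_perf_context *perf_ctx,
                        const struct gen_perf_query_info *query)
{
   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return perf_ctx->n_active_oa_queries;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return perf_ctx->n_active_pipeline_stats_queries;

   default:
      unreachable("Unknown query type");
      return 0;
   }
}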
static GLuint
GLuint64 *raw_max)
{
struct brw_context *brw = brw_context(ctx);
+ struct gen_perf_config *perf_cfg = gen_perf_config(brw->perf_ctx);
const struct gen_perf_query_info *query =
- &brw->perf_ctx.perf->queries[query_index];
+ &perf_cfg->queries[query_index];
const struct gen_perf_query_counter *counter =
&query->counters[counter_index];
*raw_max = counter->raw_max;
}
-/**
- * Remove a query from the global list of unaccumulated queries once
- * the OA reports associated with the query have been accumulated (in
- * accumulate_oa_reports()) or when discarding unwanted query results.
- */
-static void
-drop_from_unaccumulated_query_list(struct brw_context *brw,
- struct gen_perf_query_object *obj)
-{
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
- if (perf_ctx->unaccumulated[i] == obj) {
- int last_elt = --perf_ctx->unaccumulated_elements;
-
- if (i == last_elt)
- perf_ctx->unaccumulated[i] = NULL;
- else {
- perf_ctx->unaccumulated[i] =
- perf_ctx->unaccumulated[last_elt];
- }
-
- break;
- }
- }
-
- /* Drop our samples_head reference so that associated periodic
- * sample data buffers can potentially be reaped if they aren't
- * referenced by any other queries...
- */
-
- struct oa_sample_buf *buf =
- exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link);
-
- assert(buf->refcount > 0);
- buf->refcount--;
-
- obj->oa.samples_head = NULL;
-
- gen_perf_reap_old_sample_buffers(&brw->perf_ctx);
-}
-
-/* In general, if we see anything spurious while accumulating results,
- * we don't try to continue accumulating the current query hoping for
- * the best; we scrap anything outstanding and then hope for the best
- * with new queries.
- */
-static void
-discard_all_queries(struct brw_context *brw)
-{
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- while (perf_ctx->unaccumulated_elements) {
- struct gen_perf_query_object *obj = perf_ctx->unaccumulated[0];
-
- obj->oa.results_accumulated = true;
- drop_from_unaccumulated_query_list(brw, perf_ctx->unaccumulated[0]);
-
- gen_perf_dec_n_users(perf_ctx);
- }
-}
-
enum OaReadStatus {
OA_READ_STATUS_ERROR,
OA_READ_STATUS_UNFINISHED,
OA_READ_STATUS_FINISHED,
};
-static enum OaReadStatus
-read_oa_samples_until(struct brw_context *brw,
- uint32_t start_timestamp,
- uint32_t end_timestamp)
-{
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- struct exec_node *tail_node =
- exec_list_get_tail(&perf_ctx->sample_buffers);
- struct oa_sample_buf *tail_buf =
- exec_node_data(struct oa_sample_buf, tail_node, link);
- uint32_t last_timestamp = tail_buf->last_timestamp;
-
- while (1) {
- struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(perf_ctx);
- uint32_t offset;
- int len;
-
- while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
- sizeof(buf->buf))) < 0 && errno == EINTR)
- ;
-
- if (len <= 0) {
- exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
-
- if (len < 0) {
- if (errno == EAGAIN)
- return ((last_timestamp - start_timestamp) >=
- (end_timestamp - start_timestamp)) ?
- OA_READ_STATUS_FINISHED :
- OA_READ_STATUS_UNFINISHED;
- else {
- DBG("Error reading i915 perf samples: %m\n");
- }
- } else
- DBG("Spurious EOF reading i915 perf samples\n");
-
- return OA_READ_STATUS_ERROR;
- }
-
- buf->len = len;
- exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);
-
- /* Go through the reports and update the last timestamp. */
- offset = 0;
- while (offset < buf->len) {
- const struct drm_i915_perf_record_header *header =
- (const struct drm_i915_perf_record_header *) &buf->buf[offset];
- uint32_t *report = (uint32_t *) (header + 1);
-
- if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
- last_timestamp = report[1];
-
- offset += header->size;
- }
-
- buf->last_timestamp = last_timestamp;
- }
-
- unreachable("not reached");
- return OA_READ_STATUS_ERROR;
-}
-
-/**
- * Try to read all the reports until either the delimiting timestamp
- * is reached or an error arises.
- */
-static bool
-read_oa_samples_for_query(struct brw_context *brw,
- struct gen_perf_query_object *obj)
-{
- uint32_t *start;
- uint32_t *last;
- uint32_t *end;
-
- struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
-
- /* We need the MI_REPORT_PERF_COUNT to land before we can start
- * accumulating. */
- assert(!perf_cfg->vtbl.batch_references(&brw->batch, obj->oa.bo) &&
- !brw_bo_busy(obj->oa.bo));
-
- /* Map the BO once here and let accumulate_oa_reports() unmap
- * it. */
- if (obj->oa.map == NULL)
- obj->oa.map = perf_cfg->vtbl.bo_map(brw, obj->oa.bo, MAP_READ);
-
- start = last = obj->oa.map;
- end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
-
- if (start[0] != obj->oa.begin_report_id) {
- DBG("Spurious start report id=%"PRIu32"\n", start[0]);
- return true;
- }
- if (end[0] != (obj->oa.begin_report_id + 1)) {
- DBG("Spurious end report id=%"PRIu32"\n", end[0]);
- return true;
- }
-
- /* Read the reports until the end timestamp. */
- switch (read_oa_samples_until(brw, start[1], end[1])) {
- case OA_READ_STATUS_ERROR:
- /* Fallthrough and let accumulate_oa_reports() deal with the
- * error. */
- case OA_READ_STATUS_FINISHED:
- return true;
- case OA_READ_STATUS_UNFINISHED:
- return false;
- }
-
- unreachable("invalid read status");
- return false;
-}
-
-/**
- * Accumulate raw OA counter values based on deltas between pairs of
- * OA reports.
- *
- * Accumulation starts with the first report captured via
- * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() and ends
- * with the last MI_RPC report requested by brw_end_perf_query().
- * Between these two reports there may also be some number of
- * periodically sampled OA reports collected via the i915 perf
- * interface, depending on the duration of the query.
- *
- * These periodic snapshots help to ensure we handle counter overflow
- * correctly by being frequent enough that we don't miss multiple
- * overflows of a counter between snapshots. For Gen8+ the i915 perf
- * snapshots provide the extra context-switch reports that let us
- * subtract out the progress of counters associated with other
- * contexts running on the system.
- */
-static void
-accumulate_oa_reports(struct brw_context *brw,
- struct brw_perf_query_object *brw_query)
-{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
- struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- uint32_t *start;
- uint32_t *last;
- uint32_t *end;
- struct exec_node *first_samples_node;
- bool in_ctx = true;
- int out_duration = 0;
-
- assert(brw_query->base.Ready);
- assert(obj->oa.map != NULL);
-
- start = last = obj->oa.map;
- end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
-
- if (start[0] != obj->oa.begin_report_id) {
- DBG("Spurious start report id=%"PRIu32"\n", start[0]);
- goto error;
- }
- if (end[0] != (obj->oa.begin_report_id + 1)) {
- DBG("Spurious end report id=%"PRIu32"\n", end[0]);
- goto error;
- }
-
- /* See if we have any periodic reports to accumulate too... */
-
- /* N.B. The oa.samples_head was set when the query began and
- * pointed to the tail of the perf_ctx->sample_buffers list at
- * the time the query started. Since the buffer existed before the
- * first MI_REPORT_PERF_COUNT command was emitted we therefore know
- * that no data in this particular node's buffer can possibly be
- * associated with the query - so skip ahead one...
- */
- first_samples_node = obj->oa.samples_head->next;
-
- foreach_list_typed_from(struct oa_sample_buf, buf, link,
- &brw->perf_ctx.sample_buffers,
- first_samples_node)
- {
- int offset = 0;
-
- while (offset < buf->len) {
- const struct drm_i915_perf_record_header *header =
- (const struct drm_i915_perf_record_header *)(buf->buf + offset);
-
- assert(header->size != 0);
- assert(header->size <= buf->len);
-
- offset += header->size;
-
- switch (header->type) {
- case DRM_I915_PERF_RECORD_SAMPLE: {
- uint32_t *report = (uint32_t *)(header + 1);
- bool add = true;
-
- /* Ignore reports that come before the start marker.
- * (Note: takes care to allow overflow of 32bit timestamps)
- */
- if (gen_device_info_timebase_scale(devinfo,
- report[1] - start[1]) > 5000000000) {
- continue;
- }
-
- /* Ignore reports that come after the end marker.
- * (Note: takes care to allow overflow of 32bit timestamps)
- */
- if (gen_device_info_timebase_scale(devinfo,
- report[1] - end[1]) <= 5000000000) {
- goto end;
- }
-
- /* For Gen8+, since the counters continue while other
- * contexts are running, we need to discount any unrelated
- * deltas. The hardware automatically generates a report
- * on context switch which gives us a new reference point
- * to continue adding deltas from.
- *
- * For Haswell we can rely on the HW to stop the progress
- * of OA counters while any other context is active.
- */
- if (devinfo->gen >= 8) {
- if (in_ctx && report[2] != obj->oa.result.hw_id) {
- DBG("i915 perf: Switch AWAY (observed by ID change)\n");
- in_ctx = false;
- out_duration = 0;
- } else if (in_ctx == false && report[2] == obj->oa.result.hw_id) {
- DBG("i915 perf: Switch TO\n");
- in_ctx = true;
-
- /* From experimentation in IGT, we found that the OA unit
- * might label some report as "idle" (using an invalid
- * context ID), right after a report for a given context.
- * Deltas generated by those reports actually belong to the
- * previous context, even though they're not labelled as
- * such.
- *
- * We didn't *really* Switch AWAY in the case that we e.g.
- * saw a single periodic report while idle...
- */
- if (out_duration >= 1)
- add = false;
- } else if (in_ctx) {
- assert(report[2] == obj->oa.result.hw_id);
- DBG("i915 perf: Continuation IN\n");
- } else {
- assert(report[2] != obj->oa.result.hw_id);
- DBG("i915 perf: Continuation OUT\n");
- add = false;
- out_duration++;
- }
- }
-
- if (add) {
- gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo,
- last, report);
- }
-
- last = report;
-
- break;
- }
-
- case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
- DBG("i915 perf: OA error: all reports lost\n");
- goto error;
- case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
- DBG("i915 perf: OA report lost\n");
- break;
- }
- }
- }
-
-end:
-
- gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo,
- last, end);
-
- DBG("Marking %d accumulated - results gathered\n", brw_query->base.Id);
-
- obj->oa.results_accumulated = true;
- drop_from_unaccumulated_query_list(brw, obj);
- gen_perf_dec_n_users(perf_ctx);
-
- return;
-
-error:
-
- discard_all_queries(brw);
-}
-
/******************************************************************************/
static void
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
/* We can assume the frontend hides mistaken attempts to Begin a
* query object multiple times before its End. Similarly if an
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
DBG("End(%d)\n", o->Id);
gen_perf_end_query(perf_ctx, obj);
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct brw_bo *bo = NULL;
- struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
assert(!o->Ready);
- switch (obj->queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- bo = obj->oa.bo;
- break;
-
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- bo = obj->pipeline_stats.bo;
- break;
-
- default:
- unreachable("Unknown query type");
- break;
- }
-
- if (bo == NULL)
- return;
-
- /* If the current batch references our results bo then we need to
- * flush first...
- */
- if (perf_cfg->vtbl.batch_references(&brw->batch, bo))
- perf_cfg->vtbl.batchbuffer_flush(brw, __FILE__, __LINE__);
-
- perf_cfg->vtbl.bo_wait_rendering(bo);
-
- /* Due to a race condition between the OA unit signaling report
- * availability and the report actually being written into memory,
- * we need to wait for all the reports to come in before we can
- * read them.
- */
- if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
- obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
- while (!read_oa_samples_for_query(brw, obj))
- ;
- }
+ gen_perf_wait_query(brw->perf_ctx, obj, &brw->batch);
}
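A condensed sketch of gen_perf_wait_query(), assuming the deleted wait logic reuses the vtbl hooks installed in brw_init_perf_query_info() and a moved read_oa_samples_for_query():

void
gen_perf_wait_query(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   struct brw_bo *bo = NULL;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = query->oa.bo;
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = query->pipeline_stats.bo;
      break;
   default:
      unreachable("Unknown query type");
   }

   if (bo == NULL)
      return;

   /* Flush first if the current batch references the results BO. */
   if (perf_cfg->vtbl.batch_references(current_batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);

   perf_cfg->vtbl.bo_wait_rendering(bo);

   /* OA/RAW only: wait for all reports to land (see the race-condition
    * comment deleted above).
    */
   if (query->queryinfo->kind != GEN_PERF_QUERY_TYPE_PIPELINE) {
      while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
         ;
   }
}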
static bool
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
if (o->Ready)
return true;
- switch (obj->queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- return (obj->oa.results_accumulated ||
- (obj->oa.bo &&
- !perf_cfg->vtbl.batch_references(&brw->batch, obj->oa.bo) &&
- !brw_bo_busy(obj->oa.bo) &&
- read_oa_samples_for_query(brw, obj)));
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- return (obj->pipeline_stats.bo &&
- !perf_cfg->vtbl.batch_references(&brw->batch, obj->pipeline_stats.bo) &&
- !brw_bo_busy(obj->pipeline_stats.bo));
-
- default:
- unreachable("Unknown query type");
- break;
- }
-
- return false;
-}
-
-static void
-read_slice_unslice_frequencies(struct brw_context *brw,
- struct gen_perf_query_object *obj)
-{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
- uint32_t *begin_report = obj->oa.map, *end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
-
- gen_perf_query_result_read_frequencies(&obj->oa.result,
- devinfo, begin_report, end_report);
-}
-
-static void
-read_gt_frequency(struct brw_context *brw,
- struct gen_perf_query_object *obj)
-{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
- uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
- end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
-
- switch (devinfo->gen) {
- case 7:
- case 8:
- obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
- obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
- break;
- case 9:
- case 10:
- case 11:
- obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
- obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
- break;
- default:
- unreachable("unexpected gen");
- }
-
- /* Put the numbers into Hz. */
- obj->oa.gt_frequency[0] *= 1000000ULL;
- obj->oa.gt_frequency[1] *= 1000000ULL;
-}
-
-static int
-get_oa_counter_data(struct brw_context *brw,
- struct gen_perf_query_object *obj,
- size_t data_size,
- uint8_t *data)
-{
- struct gen_perf_config *perf = brw->perf_ctx.perf;
- const struct gen_perf_query_info *query = obj->queryinfo;
- int n_counters = query->n_counters;
- int written = 0;
-
- for (int i = 0; i < n_counters; i++) {
- const struct gen_perf_query_counter *counter = &query->counters[i];
- uint64_t *out_uint64;
- float *out_float;
- size_t counter_size = gen_perf_query_counter_get_size(counter);
-
- if (counter_size) {
- switch (counter->data_type) {
- case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
- out_uint64 = (uint64_t *)(data + counter->offset);
- *out_uint64 =
- counter->oa_counter_read_uint64(perf, query,
- obj->oa.result.accumulator);
- break;
- case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
- out_float = (float *)(data + counter->offset);
- *out_float =
- counter->oa_counter_read_float(perf, query,
- obj->oa.result.accumulator);
- break;
- default:
- /* So far we aren't using uint32, double or bool32... */
- unreachable("unexpected counter data type");
- }
- written = counter->offset + counter_size;
- }
- }
-
- return written;
-}
-
-static int
-get_pipeline_stats_data(struct brw_context *brw,
- struct gen_perf_query_object *obj,
- size_t data_size,
- uint8_t *data)
-
-{
- const struct gen_perf_query_info *query = obj->queryinfo;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- struct gen_perf_config *perf_cfg = perf_ctx->perf;
- int n_counters = obj->queryinfo->n_counters;
- uint8_t *p = data;
-
- uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, obj->pipeline_stats.bo, MAP_READ);
- uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
-
- for (int i = 0; i < n_counters; i++) {
- const struct gen_perf_query_counter *counter = &query->counters[i];
- uint64_t value = end[i] - start[i];
-
- if (counter->pipeline_stat.numerator !=
- counter->pipeline_stat.denominator) {
- value *= counter->pipeline_stat.numerator;
- value /= counter->pipeline_stat.denominator;
- }
-
- *((uint64_t *)p) = value;
- p += 8;
- }
-
- perf_cfg->vtbl.bo_unmap(obj->pipeline_stats.bo);
-
- return p - data;
+ return gen_perf_is_query_ready(brw->perf_ctx, obj, &brw->batch);
}
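This readiness check is also why the patch adds a bo_busy vtbl hook further down: the deleted code called brw_bo_busy() directly, which the driver-independent gen_perf code cannot. A sketch of the moved check:

bool
gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (query->oa.results_accumulated ||
              (query->oa.bo &&
               !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
               !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
               read_oa_samples_for_query(perf_ctx, query, current_batch)));

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (query->pipeline_stats.bo &&
              !perf_cfg->vtbl.batch_references(current_batch,
                                               query->pipeline_stats.bo) &&
              !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
   }

   return false;
}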
/**
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- int written = 0;
assert(brw_is_perf_query_ready(ctx, o));
*/
assert(o->Ready);
- switch (obj->queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- if (!obj->oa.results_accumulated) {
- read_gt_frequency(brw, obj);
- read_slice_unslice_frequencies(brw, obj);
- accumulate_oa_reports(brw, brw_query);
- assert(obj->oa.results_accumulated);
-
- brw->perf_ctx.perf->vtbl.bo_unmap(obj->oa.bo);
- obj->oa.map = NULL;
- }
- if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
- written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
- } else {
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
-
- written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
- devinfo, &obj->oa.result,
- obj->oa.gt_frequency[0],
- obj->oa.gt_frequency[1]);
- }
- break;
-
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
- break;
-
- default:
- unreachable("Unknown query type");
- break;
- }
-
- if (bytes_written)
- *bytes_written = written;
+ gen_perf_get_query_data(brw->perf_ctx, obj,
+ data_size, data, bytes_written);
}
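A sketch of the dispatch gen_perf_get_query_data() presumably keeps, mirroring the switch deleted above (the gen_perf-side signatures of the moved read_gt_frequency()/accumulate_oa_reports()/etc. helpers and the perf_ctx->devinfo member are assumptions):

void
gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        int data_size, unsigned *data, unsigned *bytes_written)
{
   int written = 0;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!query->oa.results_accumulated) {
         read_gt_frequency(perf_ctx, query);
         read_slice_unslice_frequencies(perf_ctx, query);
         accumulate_oa_reports(perf_ctx, query);
         assert(query->oa.results_accumulated);

         perf_ctx->perf->vtbl.bo_unmap(query->oa.bo);
         query->oa.map = NULL;
      }
      if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(perf_ctx, query, data_size,
                                       (uint8_t *)data);
      } else {
         written = gen_perf_query_result_write_mdapi(
            (uint8_t *)data, data_size, perf_ctx->devinfo, &query->oa.result,
            query->oa.gt_frequency[0], query->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(perf_ctx, query, data_size,
                                        (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
   }

   if (bytes_written)
      *bytes_written = written;
}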
static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
struct brw_context *brw = brw_context(ctx);
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- const struct gen_perf_query_info *queryinfo =
- &perf_ctx->perf->queries[query_index];
- struct gen_perf_query_object *obj =
- calloc(1, sizeof(struct gen_perf_query_object));
-
- if (!obj)
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
+ struct gen_perf_query_object *obj = gen_perf_new_query(perf_ctx, query_index);
+ if (unlikely(!obj))
return NULL;
- obj->queryinfo = queryinfo;
-
- perf_ctx->n_query_instances++;
-
struct brw_perf_query_object *brw_query = calloc(1, sizeof(struct brw_perf_query_object));
- if (unlikely(!brw_query))
+ if (unlikely(!brw_query)) {
+ gen_perf_delete_query(perf_ctx, obj);
return NULL;
+ }
+
brw_query->query = obj;
return &brw_query->base;
}
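gen_perf_new_query() presumably carries over the allocation and instance accounting deleted above:

struct gen_perf_query_object *
gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
{
   const struct gen_perf_query_info *queryinfo =
      &perf_ctx->perf->queries[query_index];
   struct gen_perf_query_object *obj =
      calloc(1, sizeof(struct gen_perf_query_object));

   if (!obj)
      return NULL;

   obj->queryinfo = queryinfo;

   perf_ctx->n_query_instances++;
   return obj;
}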
struct gl_perf_query_object *o)
{
struct brw_context *brw = brw_context(ctx);
- struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
struct brw_perf_query_object *brw_query = brw_perf_query(o);
struct gen_perf_query_object *obj = brw_query->query;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
/* We can assume that the frontend waits for a query to complete
* before ever calling into here, so we don't have to worry about
DBG("Delete(%d)\n", o->Id);
- switch (obj->queryinfo->kind) {
- case GEN_PERF_QUERY_TYPE_OA:
- case GEN_PERF_QUERY_TYPE_RAW:
- if (obj->oa.bo) {
- if (!obj->oa.results_accumulated) {
- drop_from_unaccumulated_query_list(brw, obj);
- gen_perf_dec_n_users(perf_ctx);
- }
-
- perf_cfg->vtbl.bo_unreference(obj->oa.bo);
- obj->oa.bo = NULL;
- }
-
- obj->oa.results_accumulated = false;
- break;
-
- case GEN_PERF_QUERY_TYPE_PIPELINE:
- if (obj->pipeline_stats.bo) {
- perf_cfg->vtbl.bo_unreference(obj->pipeline_stats.bo);
- obj->pipeline_stats.bo = NULL;
- }
- break;
-
- default:
- unreachable("Unknown query type");
- break;
- }
-
- /* The query instance count dropping to zero indicates that the
- * INTEL_performance_query extension is no longer in use, so it's a
- * good time to free our cache of sample buffers and close any
- * current i915-perf stream.
- */
- if (--perf_ctx->n_query_instances == 0) {
- gen_perf_free_sample_bufs(perf_ctx);
- gen_perf_close(perf_ctx, obj->queryinfo);
- }
-
- free(obj);
+ gen_perf_delete_query(perf_ctx, obj);
free(brw_query);
}
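A sketch of gen_perf_delete_query(), assuming the per-kind teardown and n_query_instances bookkeeping deleted above moved over intact (with drop_from_unaccumulated_query_list() now taking the perf context):

void
gen_perf_delete_query(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (query->oa.bo) {
         if (!query->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(perf_ctx, query);
            gen_perf_dec_n_users(perf_ctx);
         }

         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.results_accumulated = false;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
   }

   /* Last instance gone: free the sample-buffer cache and close any
    * current i915-perf stream.
    */
   if (--perf_ctx->n_query_instances == 0) {
      gen_perf_free_sample_bufs(perf_ctx);
      gen_perf_close(perf_ctx, query->queryinfo);
   }

   free(query);
}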
/******************************************************************************/
-
-static void
-init_pipeline_statistic_query_registers(struct brw_context *brw)
-{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
- struct gen_perf_config *perf = brw->perf_ctx.perf;
- struct gen_perf_query_info *query =
- gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);
-
- query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
- query->name = "Pipeline Statistics Registers";
-
- gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
- "N vertices submitted");
- gen_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
- "N primitives submitted");
- gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
- "N vertex shader invocations");
-
- if (devinfo->gen == 6) {
- gen_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
- "SO_PRIM_STORAGE_NEEDED",
- "N geometry shader stream-out primitives (total)");
- gen_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
- "SO_NUM_PRIMS_WRITTEN",
- "N geometry shader stream-out primitives (written)");
- } else {
- gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
- "SO_PRIM_STORAGE_NEEDED (Stream 0)",
- "N stream-out (stream 0) primitives (total)");
- gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
- "SO_PRIM_STORAGE_NEEDED (Stream 1)",
- "N stream-out (stream 1) primitives (total)");
- gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
- "SO_PRIM_STORAGE_NEEDED (Stream 2)",
- "N stream-out (stream 2) primitives (total)");
- gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
- "SO_PRIM_STORAGE_NEEDED (Stream 3)",
- "N stream-out (stream 3) primitives (total)");
- gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
- "SO_NUM_PRIMS_WRITTEN (Stream 0)",
- "N stream-out (stream 0) primitives (written)");
- gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
- "SO_NUM_PRIMS_WRITTEN (Stream 1)",
- "N stream-out (stream 1) primitives (written)");
- gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
- "SO_NUM_PRIMS_WRITTEN (Stream 2)",
- "N stream-out (stream 2) primitives (written)");
- gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
- "SO_NUM_PRIMS_WRITTEN (Stream 3)",
- "N stream-out (stream 3) primitives (written)");
- }
-
- gen_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
- "N TCS shader invocations");
- gen_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
- "N TES shader invocations");
-
- gen_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
- "N geometry shader invocations");
- gen_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
- "N geometry shader primitives emitted");
-
- gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
- "N primitives entering clipping");
- gen_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
- "N primitives leaving clipping");
-
- if (devinfo->is_haswell || devinfo->gen == 8) {
- gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
- "N fragment shader invocations",
- "N fragment shader invocations");
- } else {
- gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
- "N fragment shader invocations");
- }
-
- gen_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT,
- "N z-pass fragments");
-
- if (devinfo->gen >= 7) {
- gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
- "N compute shader invocations");
- }
-
- query->data_size = sizeof(uint64_t) * query->n_counters;
-}
-
/* gen_device_info will have incorrect default topology values for
 * unsupported kernels. Verify kernel support to ensure OA metrics
 * are accurate.
 */
uint32_t reg, uint32_t offset);
typedef bool (*batch_references_t)(void *batch, void *bo);
typedef void (*bo_wait_rendering_t)(void *bo);
-
+typedef bool (*bo_busy_t)(void *bo);
static unsigned
brw_init_perf_query_info(struct gl_context *ctx)
struct brw_context *brw = brw_context(ctx);
const struct gen_device_info *devinfo = &brw->screen->devinfo;
- struct gen_perf_context *perf_ctx = &brw->perf_ctx;
- if (perf_ctx->perf)
- return perf_ctx->perf->n_queries;
+ struct gen_perf_context *perf_ctx = brw->perf_ctx;
+ struct gen_perf_config *perf_cfg = gen_perf_config(perf_ctx);
- perf_ctx->perf = gen_perf_new(brw);
- struct gen_perf_config *perf_cfg = perf_ctx->perf;
+ if (perf_cfg)
+ return perf_cfg->n_queries;
+
+ if (!oa_metrics_kernel_support(brw->screen->driScrnPriv->fd, devinfo))
+ return 0;
+
+ perf_cfg = gen_perf_new(ctx);
perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc;
perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference;
(store_register_mem64_t) brw_store_register_mem64;
perf_cfg->vtbl.batch_references = (batch_references_t)brw_batch_references;
perf_cfg->vtbl.bo_wait_rendering = (bo_wait_rendering_t)brw_bo_wait_rendering;
+ perf_cfg->vtbl.bo_busy = (bo_busy_t)brw_bo_busy;
gen_perf_init_context(perf_ctx, perf_cfg, brw, brw->bufmgr, devinfo,
brw->hw_ctx, brw->screen->driScrnPriv->fd);
-
- init_pipeline_statistic_query_registers(brw);
- gen_perf_query_register_mdapi_statistic_query(devinfo, perf_cfg);
-
- if ((oa_metrics_kernel_support(perf_ctx->drm_fd, devinfo)) &&
- (gen_perf_load_oa_metrics(perf_cfg, perf_ctx->drm_fd, devinfo)))
- gen_perf_query_register_mdapi_oa_query(devinfo, perf_cfg);
+ gen_perf_init_metrics(perf_cfg, devinfo, brw->screen->driScrnPriv->fd);
return perf_cfg->n_queries;
}
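Finally, the metric registration deleted above folds into a single gen_perf entry point; presumably something like (the name of the moved pipeline-statistics helper is an assumption):

void
gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
                      const struct gen_device_info *devinfo,
                      int drm_fd)
{
   /* Assumed: init_pipeline_statistic_query_registers() moved here;
    * the kernel-support check now happens in the caller.
    */
   load_pipeline_statistic_metrics(perf_cfg, devinfo);
   gen_perf_query_register_mdapi_statistic_query(devinfo, perf_cfg);
   if (gen_perf_load_oa_metrics(perf_cfg, drm_fd, devinfo))
      gen_perf_query_register_mdapi_oa_query(devinfo, perf_cfg);
}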