X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fbrw_performance_query.c;h=dc4a833f563eb55408594388f3e5c2bde5e9fa6f;hb=9f84efb452f810494e8ba78a68b56444e343e5f6;hp=08c1db1b1bc8bb0a0a8c326c55dbcc253c16bc28;hpb=8ae6667992ccca41d08884d863b8aeb22a4c4e65;p=mesa.git diff --git a/src/mesa/drivers/dri/i965/brw_performance_query.c b/src/mesa/drivers/dri/i965/brw_performance_query.c index 08c1db1b1bc..dc4a833f563 100644 --- a/src/mesa/drivers/dri/i965/brw_performance_query.c +++ b/src/mesa/drivers/dri/i965/brw_performance_query.c @@ -161,8 +161,9 @@ brw_get_perf_query_info(struct gl_context *ctx, GLuint *n_active) { struct brw_context *brw = brw_context(ctx); + struct gen_perf_context *perf_ctx = &brw->perf_ctx; const struct gen_perf_query_info *query = - &brw->perf_ctx.perf->queries[query_index]; + &perf_ctx->perf->queries[query_index]; *name = query->name; *data_size = query->data_size; @@ -171,11 +172,11 @@ brw_get_perf_query_info(struct gl_context *ctx, switch (query->kind) { case GEN_PERF_QUERY_TYPE_OA: case GEN_PERF_QUERY_TYPE_RAW: - *n_active = brw->perf_ctx.n_active_oa_queries; + *n_active = perf_ctx->n_active_oa_queries; break; case GEN_PERF_QUERY_TYPE_PIPELINE: - *n_active = brw->perf_ctx.n_active_pipeline_stats_queries; + *n_active = perf_ctx->n_active_pipeline_stats_queries; break; default: @@ -243,490 +244,14 @@ brw_get_perf_counter_info(struct gl_context *ctx, *raw_max = counter->raw_max; } -/******************************************************************************/ - -/** - * Emit MI_STORE_REGISTER_MEM commands to capture all of the - * pipeline statistics for the performance query object. - */ -static void -snapshot_statistics_registers(struct brw_context *brw, - struct gen_perf_query_object *obj, - uint32_t offset_in_bytes) -{ - const struct gen_perf_query_info *query = obj->queryinfo; - const int n_counters = query->n_counters; - - for (int i = 0; i < n_counters; i++) { - const struct gen_perf_query_counter *counter = &query->counters[i]; - - assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64); - - brw->perf_ctx.perf->vtbl.store_register_mem64(brw, obj->pipeline_stats.bo, - counter->pipeline_stat.reg, - offset_in_bytes + i * sizeof(uint64_t)); - } -} - -/** - * Add a query to the global list of "unaccumulated queries." - * - * Queries are tracked here until all the associated OA reports have - * been accumulated via accumulate_oa_reports() after the end - * MI_REPORT_PERF_COUNT has landed in query->oa.bo. - */ -static void -add_to_unaccumulated_query_list(struct brw_context *brw, - struct gen_perf_query_object *obj) -{ - if (brw->perf_ctx.unaccumulated_elements >= - brw->perf_ctx.unaccumulated_array_size) - { - brw->perf_ctx.unaccumulated_array_size *= 1.5; - brw->perf_ctx.unaccumulated = - reralloc(brw, brw->perf_ctx.unaccumulated, - struct gen_perf_query_object *, - brw->perf_ctx.unaccumulated_array_size); - } - - brw->perf_ctx.unaccumulated[brw->perf_ctx.unaccumulated_elements++] = obj; -} - -/** - * Remove a query from the global list of unaccumulated queries once - * after successfully accumulating the OA reports associated with the - * query in accumulate_oa_reports() or when discarding unwanted query - * results. 
- */ -static void -drop_from_unaccumulated_query_list(struct brw_context *brw, - struct gen_perf_query_object *obj) -{ - for (int i = 0; i < brw->perf_ctx.unaccumulated_elements; i++) { - if (brw->perf_ctx.unaccumulated[i] == obj) { - int last_elt = --brw->perf_ctx.unaccumulated_elements; - - if (i == last_elt) - brw->perf_ctx.unaccumulated[i] = NULL; - else { - brw->perf_ctx.unaccumulated[i] = - brw->perf_ctx.unaccumulated[last_elt]; - } - - break; - } - } - - /* Drop our samples_head reference so that associated periodic - * sample data buffers can potentially be reaped if they aren't - * referenced by any other queries... - */ - - struct oa_sample_buf *buf = - exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link); - - assert(buf->refcount > 0); - buf->refcount--; - - obj->oa.samples_head = NULL; - - gen_perf_reap_old_sample_buffers(&brw->perf_ctx); -} - -static bool -inc_n_oa_users(struct brw_context *brw) -{ - if (brw->perf_ctx.n_oa_users == 0 && - drmIoctl(brw->perf_ctx.oa_stream_fd, - I915_PERF_IOCTL_ENABLE, 0) < 0) - { - return false; - } - ++brw->perf_ctx.n_oa_users; - - return true; -} - -static void -dec_n_oa_users(struct brw_context *brw) -{ - /* Disabling the i915 perf stream will effectively disable the OA - * counters. Note it's important to be sure there are no outstanding - * MI_RPC commands at this point since they could stall the CS - * indefinitely once OACONTROL is disabled. - */ - --brw->perf_ctx.n_oa_users; - if (brw->perf_ctx.n_oa_users == 0 && - drmIoctl(brw->perf_ctx.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0) - { - DBG("WARNING: Error disabling i915 perf stream: %m\n"); - } -} - -/* In general if we see anything spurious while accumulating results, - * we don't try and continue accumulating the current query, hoping - * for the best, we scrap anything outstanding, and then hope for the - * best with new queries. - */ -static void -discard_all_queries(struct brw_context *brw) -{ - while (brw->perf_ctx.unaccumulated_elements) { - struct gen_perf_query_object *obj = brw->perf_ctx.unaccumulated[0]; - - obj->oa.results_accumulated = true; - drop_from_unaccumulated_query_list(brw, brw->perf_ctx.unaccumulated[0]); - - dec_n_oa_users(brw); - } -} - enum OaReadStatus { OA_READ_STATUS_ERROR, OA_READ_STATUS_UNFINISHED, OA_READ_STATUS_FINISHED, }; -static enum OaReadStatus -read_oa_samples_until(struct brw_context *brw, - uint32_t start_timestamp, - uint32_t end_timestamp) -{ - struct exec_node *tail_node = - exec_list_get_tail(&brw->perf_ctx.sample_buffers); - struct oa_sample_buf *tail_buf = - exec_node_data(struct oa_sample_buf, tail_node, link); - uint32_t last_timestamp = tail_buf->last_timestamp; - - while (1) { - struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(&brw->perf_ctx); - uint32_t offset; - int len; - - while ((len = read(brw->perf_ctx.oa_stream_fd, buf->buf, - sizeof(buf->buf))) < 0 && errno == EINTR) - ; - - if (len <= 0) { - exec_list_push_tail(&brw->perf_ctx.free_sample_buffers, &buf->link); - - if (len < 0) { - if (errno == EAGAIN) - return ((last_timestamp - start_timestamp) >= - (end_timestamp - start_timestamp)) ? - OA_READ_STATUS_FINISHED : - OA_READ_STATUS_UNFINISHED; - else { - DBG("Error reading i915 perf samples: %m\n"); - } - } else - DBG("Spurious EOF reading i915 perf samples\n"); - - return OA_READ_STATUS_ERROR; - } - - buf->len = len; - exec_list_push_tail(&brw->perf_ctx.sample_buffers, &buf->link); - - /* Go through the reports and update the last timestamp. 
*/ - offset = 0; - while (offset < buf->len) { - const struct drm_i915_perf_record_header *header = - (const struct drm_i915_perf_record_header *) &buf->buf[offset]; - uint32_t *report = (uint32_t *) (header + 1); - - if (header->type == DRM_I915_PERF_RECORD_SAMPLE) - last_timestamp = report[1]; - - offset += header->size; - } - - buf->last_timestamp = last_timestamp; - } - - unreachable("not reached"); - return OA_READ_STATUS_ERROR; -} - -/** - * Try to read all the reports until either the delimiting timestamp - * or an error arises. - */ -static bool -read_oa_samples_for_query(struct brw_context *brw, - struct gen_perf_query_object *obj) -{ - uint32_t *start; - uint32_t *last; - uint32_t *end; - - /* We need the MI_REPORT_PERF_COUNT to land before we can start - * accumulate. */ - assert(!brw_batch_references(&brw->batch, obj->oa.bo) && - !brw_bo_busy(obj->oa.bo)); - - /* Map the BO once here and let accumulate_oa_reports() unmap - * it. */ - if (obj->oa.map == NULL) - obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ); - - start = last = obj->oa.map; - end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES; - - if (start[0] != obj->oa.begin_report_id) { - DBG("Spurious start report id=%"PRIu32"\n", start[0]); - return true; - } - if (end[0] != (obj->oa.begin_report_id + 1)) { - DBG("Spurious end report id=%"PRIu32"\n", end[0]); - return true; - } - - /* Read the reports until the end timestamp. */ - switch (read_oa_samples_until(brw, start[1], end[1])) { - case OA_READ_STATUS_ERROR: - /* Fallthrough and let accumulate_oa_reports() deal with the - * error. */ - case OA_READ_STATUS_FINISHED: - return true; - case OA_READ_STATUS_UNFINISHED: - return false; - } - - unreachable("invalid read status"); - return false; -} - -/** - * Accumulate raw OA counter values based on deltas between pairs of - * OA reports. - * - * Accumulation starts from the first report captured via - * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the - * last MI_RPC report requested by brw_end_perf_query(). Between these - * two reports there may also some number of periodically sampled OA - * reports collected via the i915 perf interface - depending on the - * duration of the query. - * - * These periodic snapshots help to ensure we handle counter overflow - * correctly by being frequent enough to ensure we don't miss multiple - * overflows of a counter between snapshots. For Gen8+ the i915 perf - * snapshots provide the extra context-switch reports that let us - * subtract out the progress of counters associated with other - * contexts running on the system. - */ -static void -accumulate_oa_reports(struct brw_context *brw, - struct brw_perf_query_object *brw_query) -{ - const struct gen_device_info *devinfo = &brw->screen->devinfo; - struct gen_perf_query_object *obj = brw_query->query; - uint32_t *start; - uint32_t *last; - uint32_t *end; - struct exec_node *first_samples_node; - bool in_ctx = true; - int out_duration = 0; - - assert(brw_query->base.Ready); - assert(obj->oa.map != NULL); - - start = last = obj->oa.map; - end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES; - - if (start[0] != obj->oa.begin_report_id) { - DBG("Spurious start report id=%"PRIu32"\n", start[0]); - goto error; - } - if (end[0] != (obj->oa.begin_report_id + 1)) { - DBG("Spurious end report id=%"PRIu32"\n", end[0]); - goto error; - } - - /* See if we have any periodic reports to accumulate too... */ - - /* N.B. 
The oa.samples_head was set when the query began and
-    * pointed to the tail of the brw->perf_ctx.sample_buffers list at
-    * the time the query started. Since the buffer existed before the
-    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
-    * that no data in this particular node's buffer can possibly be
-    * associated with the query - so skip ahead one...
-    */
-   first_samples_node = obj->oa.samples_head->next;
-
-   foreach_list_typed_from(struct oa_sample_buf, buf, link,
-                           &brw->perf_ctx.sample_buffers,
-                           first_samples_node)
-   {
-      int offset = 0;
-
-      while (offset < buf->len) {
-         const struct drm_i915_perf_record_header *header =
-            (const struct drm_i915_perf_record_header *)(buf->buf + offset);
-
-         assert(header->size != 0);
-         assert(header->size <= buf->len);
-
-         offset += header->size;
-
-         switch (header->type) {
-         case DRM_I915_PERF_RECORD_SAMPLE: {
-            uint32_t *report = (uint32_t *)(header + 1);
-            bool add = true;
-
-            /* Ignore reports that come before the start marker.
-             * (Note: takes care to allow overflow of 32bit timestamps)
-             */
-            if (gen_device_info_timebase_scale(devinfo,
-                                               report[1] - start[1]) > 5000000000) {
-               continue;
-            }
-
-            /* Ignore reports that come after the end marker.
-             * (Note: takes care to allow overflow of 32bit timestamps)
-             */
-            if (gen_device_info_timebase_scale(devinfo,
-                                               report[1] - end[1]) <= 5000000000) {
-               goto end;
-            }
-
-            /* For Gen8+ since the counters continue while other
-             * contexts are running we need to discount any unrelated
-             * deltas. The hardware automatically generates a report
-             * on context switch which gives us a new reference point
-             * from which to continue adding deltas.
-             *
-             * For Haswell we can rely on the HW to stop the progress
-             * of OA counters while any other context is active.
-             */
-            if (devinfo->gen >= 8) {
-               if (in_ctx && report[2] != obj->oa.result.hw_id) {
-                  DBG("i915 perf: Switch AWAY (observed by ID change)\n");
-                  in_ctx = false;
-                  out_duration = 0;
-               } else if (in_ctx == false && report[2] == obj->oa.result.hw_id) {
-                  DBG("i915 perf: Switch TO\n");
-                  in_ctx = true;
-
-                  /* From experimentation in IGT, we found that the OA unit
-                   * might label some report as "idle" (using an invalid
-                   * context ID), right after a report for a given context.
-                   * Deltas generated by those reports actually belong to the
-                   * previous context, even though they're not labelled as
-                   * such.
-                   *
-                   * We didn't *really* Switch AWAY in the case that we e.g.
-                   * saw a single periodic report while idle...
- */ - if (out_duration >= 1) - add = false; - } else if (in_ctx) { - assert(report[2] == obj->oa.result.hw_id); - DBG("i915 perf: Continuation IN\n"); - } else { - assert(report[2] != obj->oa.result.hw_id); - DBG("i915 perf: Continuation OUT\n"); - add = false; - out_duration++; - } - } - - if (add) { - gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo, - last, report); - } - - last = report; - - break; - } - - case DRM_I915_PERF_RECORD_OA_BUFFER_LOST: - DBG("i915 perf: OA error: all reports lost\n"); - goto error; - case DRM_I915_PERF_RECORD_OA_REPORT_LOST: - DBG("i915 perf: OA report lost\n"); - break; - } - } - } - -end: - - gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo, - last, end); - - DBG("Marking %d accumulated - results gathered\n", brw_query->base.Id); - - obj->oa.results_accumulated = true; - drop_from_unaccumulated_query_list(brw, obj); - dec_n_oa_users(brw); - - return; - -error: - - discard_all_queries(brw); -} - /******************************************************************************/ -static bool -open_i915_perf_oa_stream(struct brw_context *brw, - int metrics_set_id, - int report_format, - int period_exponent, - int drm_fd, - uint32_t ctx_id) -{ - uint64_t properties[] = { - /* Single context sampling */ - DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id, - - /* Include OA reports in samples */ - DRM_I915_PERF_PROP_SAMPLE_OA, true, - - /* OA unit configuration */ - DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id, - DRM_I915_PERF_PROP_OA_FORMAT, report_format, - DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent, - }; - struct drm_i915_perf_open_param param = { - .flags = I915_PERF_FLAG_FD_CLOEXEC | - I915_PERF_FLAG_FD_NONBLOCK | - I915_PERF_FLAG_DISABLED, - .num_properties = ARRAY_SIZE(properties) / 2, - .properties_ptr = (uintptr_t) properties, - }; - int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, ¶m); - if (fd == -1) { - DBG("Error opening i915 perf OA stream: %m\n"); - return false; - } - - brw->perf_ctx.oa_stream_fd = fd; - - brw->perf_ctx.current_oa_metrics_set_id = metrics_set_id; - brw->perf_ctx.current_oa_format = report_format; - - return true; -} - -static void -close_perf(struct brw_context *brw, - const struct gen_perf_query_info *query) -{ - if (brw->perf_ctx.oa_stream_fd != -1) { - close(brw->perf_ctx.oa_stream_fd); - brw->perf_ctx.oa_stream_fd = -1; - } - if (query->kind == GEN_PERF_QUERY_TYPE_RAW) { - struct gen_perf_query_info *raw_query = - (struct gen_perf_query_info *) query; - raw_query->oa_metrics_set_id = 0; - } -} - static void capture_frequency_stat_register(struct brw_context *brw, struct brw_bo *bo, @@ -752,8 +277,7 @@ brw_begin_perf_query(struct gl_context *ctx, struct brw_context *brw = brw_context(ctx); struct brw_perf_query_object *brw_query = brw_perf_query(o); struct gen_perf_query_object *obj = brw_query->query; - const struct gen_perf_query_info *query = obj->queryinfo; - struct gen_perf_config *perf_cfg = brw->perf_ctx.perf; + struct gen_perf_context *perf_ctx = &brw->perf_ctx; /* We can assume the frontend hides mistaken attempts to Begin a * query object multiple times before its End. 
Similarly if an @@ -766,230 +290,7 @@ brw_begin_perf_query(struct gl_context *ctx, DBG("Begin(%d)\n", o->Id); - /* XXX: We have to consider that the command parser unit that parses batch - * buffer commands and is used to capture begin/end counter snapshots isn't - * implicitly synchronized with what's currently running across other GPU - * units (such as the EUs running shaders) that the performance counters are - * associated with. - * - * The intention of performance queries is to measure the work associated - * with commands between the begin/end delimiters and so for that to be the - * case we need to explicitly synchronize the parsing of commands to capture - * Begin/End counter snapshots with what's running across other parts of the - * GPU. - * - * When the command parser reaches a Begin marker it effectively needs to - * drain everything currently running on the GPU until the hardware is idle - * before capturing the first snapshot of counters - otherwise the results - * would also be measuring the effects of earlier commands. - * - * When the command parser reaches an End marker it needs to stall until - * everything currently running on the GPU has finished before capturing the - * end snapshot - otherwise the results won't be a complete representation - * of the work. - * - * Theoretically there could be opportunities to minimize how much of the - * GPU pipeline is drained, or that we stall for, when we know what specific - * units the performance counters being queried relate to but we don't - * currently attempt to be clever here. - * - * Note: with our current simple approach here then for back-to-back queries - * we will redundantly emit duplicate commands to synchronize the command - * streamer with the rest of the GPU pipeline, but we assume that in HW the - * second synchronization is effectively a NOOP. - * - * N.B. The final results are based on deltas of counters between (inside) - * Begin/End markers so even though the total wall clock time of the - * workload is stretched by larger pipeline bubbles the bubbles themselves - * are generally invisible to the query results. Whether that's a good or a - * bad thing depends on the use case. For a lower real-time impact while - * capturing metrics then periodic sampling may be a better choice than - * INTEL_performance_query. - * - * - * This is our Begin synchronization point to drain current work on the - * GPU before we capture our first counter snapshot... - */ - brw_emit_mi_flush(brw); - - switch (query->kind) { - case GEN_PERF_QUERY_TYPE_OA: - case GEN_PERF_QUERY_TYPE_RAW: { - - /* Opening an i915 perf stream implies exclusive access to the OA unit - * which will generate counter reports for a specific counter set with a - * specific layout/format so we can't begin any OA based queries that - * require a different counter set or format unless we get an opportunity - * to close the stream and open a new one... - */ - uint64_t metric_id = gen_perf_query_get_metric_id(brw->perf_ctx.perf, query); - - if (brw->perf_ctx.oa_stream_fd != -1 && - brw->perf_ctx.current_oa_metrics_set_id != metric_id) { - - if (brw->perf_ctx.n_oa_users != 0) { - DBG("WARNING: Begin(%d) failed already using perf config=%i/%"PRIu64"\n", - o->Id, brw->perf_ctx.current_oa_metrics_set_id, metric_id); - return false; - } else - close_perf(brw, query); - } - - /* If the OA counters aren't already on, enable them. 
*/
-      if (brw->perf_ctx.oa_stream_fd == -1) {
-         __DRIscreen *screen = brw->screen->driScrnPriv;
-         const struct gen_device_info *devinfo = &brw->screen->devinfo;
-
-         /* The period_exponent gives a sampling period as follows:
-          *   sample_period = timestamp_period * 2^(period_exponent + 1)
-          *
-          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
-          * ~83ns (GEN8/9).
-          *
-          * The counter overflow period is derived from the EuActive counter
-          * which reads a counter that increments by the number of clock
-          * cycles multiplied by the number of EUs. It can be calculated as:
-          *
-          * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
-          *
-          * (E.g. 40 EUs @ 1GHz = ~53ms)
-          *
-          * We select a sampling period lower than that overflow period to
-          * ensure we cannot see more than 1 counter overflow, otherwise we
-          * could lose information.
-          */
-
-         int a_counter_in_bits = 32;
-         if (devinfo->gen >= 8)
-            a_counter_in_bits = 40;
-
-         uint64_t overflow_period = pow(2, a_counter_in_bits) /
-            (brw->perf_ctx.perf->sys_vars.n_eus *
-             /* drop 1GHz freq to have units in nanoseconds */
-             2);
-
-         DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
-              overflow_period, overflow_period / 1000000ul, brw->perf_ctx.perf->sys_vars.n_eus);
-
-         int period_exponent = 0;
-         uint64_t prev_sample_period, next_sample_period;
-         for (int e = 0; e < 30; e++) {
-            prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
-            next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
-
-            /* Take the previous sampling period, lower than the overflow
-             * period.
-             */
-            if (prev_sample_period < overflow_period &&
-                next_sample_period > overflow_period)
-               period_exponent = e + 1;
-         }
-
-         if (period_exponent == 0) {
-            DBG("WARNING: unable to find a sampling exponent\n");
-            return false;
-         }
-
-         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
-             prev_sample_period / 1000000ul);
-
-         if (!open_i915_perf_oa_stream(brw,
-                                       metric_id,
-                                       query->oa_format,
-                                       period_exponent,
-                                       screen->fd, /* drm fd */
-                                       brw->hw_ctx))
-            return false;
-      } else {
-         assert(brw->perf_ctx.current_oa_metrics_set_id == metric_id &&
-                brw->perf_ctx.current_oa_format == query->oa_format);
-      }
-
-      if (!inc_n_oa_users(brw)) {
-         DBG("WARNING: Error enabling i915 perf stream: %m\n");
-         return false;
-      }
-
-      if (obj->oa.bo) {
-         brw->perf_ctx.perf->vtbl.bo_unreference(obj->oa.bo);
-         obj->oa.bo = NULL;
-      }
-
-      obj->oa.bo =
-         brw->perf_ctx.perf->vtbl.bo_alloc(brw->bufmgr,
-                                           "perf. query OA MI_RPC bo",
-                                           MI_RPC_BO_SIZE);
-#ifdef DEBUG
-      /* Pre-filling the BO helps debug whether writes landed. */
-      void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
-      memset(map, 0x80, MI_RPC_BO_SIZE);
-      brw_bo_unmap(obj->oa.bo);
-#endif
-
-      obj->oa.begin_report_id = brw->perf_ctx.next_query_start_report_id;
-      brw->perf_ctx.next_query_start_report_id += 2;
-
-      /* We flush the batchbuffer here to minimize the chances that MI_RPC
-       * delimiting commands end up in different batchbuffers. If that's the
-       * case, the measurement will include the time it takes for the kernel
-       * scheduler to load a new request into the hardware. This is manifested in
-       * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
-       */
-      perf_cfg->vtbl.batchbuffer_flush(brw, __FILE__, __LINE__);
-
-      /* Take a starting OA counter snapshot.
*/ - brw->perf_ctx.perf->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0, - obj->oa.begin_report_id); - perf_cfg->vtbl.capture_frequency_stat_register(brw, obj->oa.bo, - MI_FREQ_START_OFFSET_BYTES); - - ++brw->perf_ctx.n_active_oa_queries; - - /* No already-buffered samples can possibly be associated with this query - * so create a marker within the list of sample buffers enabling us to - * easily ignore earlier samples when processing this query after - * completion. - */ - assert(!exec_list_is_empty(&brw->perf_ctx.sample_buffers)); - obj->oa.samples_head = exec_list_get_tail(&brw->perf_ctx.sample_buffers); - - struct oa_sample_buf *buf = - exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link); - - /* This reference will ensure that future/following sample - * buffers (that may relate to this query) can't be freed until - * this drops to zero. - */ - buf->refcount++; - - gen_perf_query_result_clear(&obj->oa.result); - obj->oa.results_accumulated = false; - - add_to_unaccumulated_query_list(brw, obj); - break; - } - - case GEN_PERF_QUERY_TYPE_PIPELINE: - if (obj->pipeline_stats.bo) { - brw->perf_ctx.perf->vtbl.bo_unreference(obj->pipeline_stats.bo); - obj->pipeline_stats.bo = NULL; - } - - obj->pipeline_stats.bo = - brw->perf_ctx.perf->vtbl.bo_alloc(brw->bufmgr, - "perf. query pipeline stats bo", - STATS_BO_SIZE); - - /* Take starting snapshots. */ - snapshot_statistics_registers(brw, obj, 0); - - ++brw->perf_ctx.n_active_pipeline_stats_queries; - break; - - default: - unreachable("Unknown query type"); - break; - } + gen_perf_begin_query(perf_ctx, obj); if (INTEL_DEBUG & DEBUG_PERFMON) dump_perf_queries(brw); @@ -1007,54 +308,10 @@ brw_end_perf_query(struct gl_context *ctx, struct brw_context *brw = brw_context(ctx); struct brw_perf_query_object *brw_query = brw_perf_query(o); struct gen_perf_query_object *obj = brw_query->query; - struct gen_perf_config *perf_cfg = brw->perf_ctx.perf; + struct gen_perf_context *perf_ctx = &brw->perf_ctx; DBG("End(%d)\n", o->Id); - - /* Ensure that the work associated with the queried commands will have - * finished before taking our query end counter readings. - * - * For more details see comment in brw_begin_perf_query for - * corresponding flush. - */ - brw_emit_mi_flush(brw); - - switch (obj->queryinfo->kind) { - case GEN_PERF_QUERY_TYPE_OA: - case GEN_PERF_QUERY_TYPE_RAW: - - /* NB: It's possible that the query will have already been marked - * as 'accumulated' if an error was seen while reading samples - * from perf. In this case we mustn't try and emit a closing - * MI_RPC command in case the OA unit has already been disabled - */ - if (!obj->oa.results_accumulated) { - /* Take an ending OA counter snapshot. 
*/ - perf_cfg->vtbl.capture_frequency_stat_register(brw, obj->oa.bo, - MI_FREQ_END_OFFSET_BYTES); - brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, - MI_RPC_BO_END_OFFSET_BYTES, - obj->oa.begin_report_id + 1); - } - - --brw->perf_ctx.n_active_oa_queries; - - /* NB: even though the query has now ended, it can't be accumulated - * until the end MI_REPORT_PERF_COUNT snapshot has been written - * to query->oa.bo - */ - break; - - case GEN_PERF_QUERY_TYPE_PIPELINE: - snapshot_statistics_registers(brw, obj, - STATS_BO_END_OFFSET_BYTES); - --brw->perf_ctx.n_active_pipeline_stats_queries; - break; - - default: - unreachable("Unknown query type"); - break; - } + gen_perf_end_query(perf_ctx, obj); } static void @@ -1063,47 +320,10 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o) struct brw_context *brw = brw_context(ctx); struct brw_perf_query_object *brw_query = brw_perf_query(o); struct gen_perf_query_object *obj = brw_query->query; - struct brw_bo *bo = NULL; - struct gen_perf_config *perf_cfg = brw->perf_ctx.perf; assert(!o->Ready); - switch (obj->queryinfo->kind) { - case GEN_PERF_QUERY_TYPE_OA: - case GEN_PERF_QUERY_TYPE_RAW: - bo = obj->oa.bo; - break; - - case GEN_PERF_QUERY_TYPE_PIPELINE: - bo = obj->pipeline_stats.bo; - break; - - default: - unreachable("Unknown query type"); - break; - } - - if (bo == NULL) - return; - - /* If the current batch references our results bo then we need to - * flush first... - */ - if (brw_batch_references(&brw->batch, bo)) - perf_cfg->vtbl.batchbuffer_flush(brw, __FILE__, __LINE__); - - brw_bo_wait_rendering(bo); - - /* Due to a race condition between the OA unit signaling report - * availability and the report actually being written into memory, - * we need to wait for all the reports to come in before we can - * read them. 
- */ - if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA || - obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) { - while (!read_oa_samples_for_query(brw, obj)) - ; - } + gen_perf_wait_query(&brw->perf_ctx, obj, &brw->batch); } static bool @@ -1117,140 +337,7 @@ brw_is_perf_query_ready(struct gl_context *ctx, if (o->Ready) return true; - switch (obj->queryinfo->kind) { - case GEN_PERF_QUERY_TYPE_OA: - case GEN_PERF_QUERY_TYPE_RAW: - return (obj->oa.results_accumulated || - (obj->oa.bo && - !brw_batch_references(&brw->batch, obj->oa.bo) && - !brw_bo_busy(obj->oa.bo) && - read_oa_samples_for_query(brw, obj))); - case GEN_PERF_QUERY_TYPE_PIPELINE: - return (obj->pipeline_stats.bo && - !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) && - !brw_bo_busy(obj->pipeline_stats.bo)); - - default: - unreachable("Unknown query type"); - break; - } - - return false; -} - -static void -read_slice_unslice_frequencies(struct brw_context *brw, - struct gen_perf_query_object *obj) -{ - const struct gen_device_info *devinfo = &brw->screen->devinfo; - uint32_t *begin_report = obj->oa.map, *end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES; - - gen_perf_query_result_read_frequencies(&obj->oa.result, - devinfo, begin_report, end_report); -} - -static void -read_gt_frequency(struct brw_context *brw, - struct gen_perf_query_object *obj) -{ - const struct gen_device_info *devinfo = &brw->screen->devinfo; - uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)), - end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES)); - - switch (devinfo->gen) { - case 7: - case 8: - obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL; - obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL; - break; - case 9: - case 10: - case 11: - obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL; - obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL; - break; - default: - unreachable("unexpected gen"); - } - - /* Put the numbers into Hz. */ - obj->oa.gt_frequency[0] *= 1000000ULL; - obj->oa.gt_frequency[1] *= 1000000ULL; -} - -static int -get_oa_counter_data(struct brw_context *brw, - struct gen_perf_query_object *obj, - size_t data_size, - uint8_t *data) -{ - struct gen_perf_config *perf = brw->perf_ctx.perf; - const struct gen_perf_query_info *query = obj->queryinfo; - int n_counters = query->n_counters; - int written = 0; - - for (int i = 0; i < n_counters; i++) { - const struct gen_perf_query_counter *counter = &query->counters[i]; - uint64_t *out_uint64; - float *out_float; - size_t counter_size = gen_perf_query_counter_get_size(counter); - - if (counter_size) { - switch (counter->data_type) { - case GEN_PERF_COUNTER_DATA_TYPE_UINT64: - out_uint64 = (uint64_t *)(data + counter->offset); - *out_uint64 = - counter->oa_counter_read_uint64(perf, query, - obj->oa.result.accumulator); - break; - case GEN_PERF_COUNTER_DATA_TYPE_FLOAT: - out_float = (float *)(data + counter->offset); - *out_float = - counter->oa_counter_read_float(perf, query, - obj->oa.result.accumulator); - break; - default: - /* So far we aren't using uint32, double or bool32... 
*/ - unreachable("unexpected counter data type"); - } - written = counter->offset + counter_size; - } - } - - return written; -} - -static int -get_pipeline_stats_data(struct brw_context *brw, - struct gen_perf_query_object *obj, - size_t data_size, - uint8_t *data) - -{ - const struct gen_perf_query_info *query = obj->queryinfo; - int n_counters = obj->queryinfo->n_counters; - uint8_t *p = data; - - uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ); - uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t)); - - for (int i = 0; i < n_counters; i++) { - const struct gen_perf_query_counter *counter = &query->counters[i]; - uint64_t value = end[i] - start[i]; - - if (counter->pipeline_stat.numerator != - counter->pipeline_stat.denominator) { - value *= counter->pipeline_stat.numerator; - value /= counter->pipeline_stat.denominator; - } - - *((uint64_t *)p) = value; - p += 8; - } - - brw_bo_unmap(obj->pipeline_stats.bo); - - return p - data; + return gen_perf_is_query_ready(&brw->perf_ctx, obj, &brw->batch); } /** @@ -1266,7 +353,6 @@ brw_get_perf_query_data(struct gl_context *ctx, struct brw_context *brw = brw_context(ctx); struct brw_perf_query_object *brw_query = brw_perf_query(o); struct gen_perf_query_object *obj = brw_query->query; - int written = 0; assert(brw_is_perf_query_ready(ctx, o)); @@ -1280,49 +366,17 @@ brw_get_perf_query_data(struct gl_context *ctx, */ assert(o->Ready); - switch (obj->queryinfo->kind) { - case GEN_PERF_QUERY_TYPE_OA: - case GEN_PERF_QUERY_TYPE_RAW: - if (!obj->oa.results_accumulated) { - read_gt_frequency(brw, obj); - read_slice_unslice_frequencies(brw, obj); - accumulate_oa_reports(brw, brw_query); - assert(obj->oa.results_accumulated); - - brw_bo_unmap(obj->oa.bo); - obj->oa.map = NULL; - } - if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) { - written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data); - } else { - const struct gen_device_info *devinfo = &brw->screen->devinfo; - - written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size, - devinfo, &obj->oa.result, - obj->oa.gt_frequency[0], - obj->oa.gt_frequency[1]); - } - break; - - case GEN_PERF_QUERY_TYPE_PIPELINE: - written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data); - break; - - default: - unreachable("Unknown query type"); - break; - } - - if (bytes_written) - *bytes_written = written; + gen_perf_get_query_data(&brw->perf_ctx, obj, + data_size, data, bytes_written); } static struct gl_perf_query_object * brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index) { struct brw_context *brw = brw_context(ctx); + struct gen_perf_context *perf_ctx = &brw->perf_ctx; const struct gen_perf_query_info *queryinfo = - &brw->perf_ctx.perf->queries[query_index]; + &perf_ctx->perf->queries[query_index]; struct gen_perf_query_object *obj = calloc(1, sizeof(struct gen_perf_query_object)); @@ -1331,7 +385,7 @@ brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index) obj->queryinfo = queryinfo; - brw->perf_ctx.n_query_instances++; + perf_ctx->n_query_instances++; struct brw_perf_query_object *brw_query = calloc(1, sizeof(struct brw_perf_query_object)); if (unlikely(!brw_query)) @@ -1348,9 +402,9 @@ brw_delete_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o) { struct brw_context *brw = brw_context(ctx); - struct gen_perf_config *perf_cfg = brw->perf_ctx.perf; struct brw_perf_query_object *brw_query = brw_perf_query(o); struct gen_perf_query_object *obj = brw_query->query; + struct 
gen_perf_context *perf_ctx = &brw->perf_ctx; /* We can assume that the frontend waits for a query to complete * before ever calling into here, so we don't have to worry about @@ -1361,44 +415,7 @@ brw_delete_perf_query(struct gl_context *ctx, DBG("Delete(%d)\n", o->Id); - switch (obj->queryinfo->kind) { - case GEN_PERF_QUERY_TYPE_OA: - case GEN_PERF_QUERY_TYPE_RAW: - if (obj->oa.bo) { - if (!obj->oa.results_accumulated) { - drop_from_unaccumulated_query_list(brw, obj); - dec_n_oa_users(brw); - } - - perf_cfg->vtbl.bo_unreference(obj->oa.bo); - obj->oa.bo = NULL; - } - - obj->oa.results_accumulated = false; - break; - - case GEN_PERF_QUERY_TYPE_PIPELINE: - if (obj->pipeline_stats.bo) { - perf_cfg->vtbl.bo_unreference(obj->pipeline_stats.bo); - obj->pipeline_stats.bo = NULL; - } - break; - - default: - unreachable("Unknown query type"); - break; - } - - /* As an indication that the INTEL_performance_query extension is no - * longer in use, it's a good time to free our cache of sample - * buffers and close any current i915-perf stream. - */ - if (--brw->perf_ctx.n_query_instances == 0) { - gen_perf_free_sample_bufs(&brw->perf_ctx); - close_perf(brw, obj->queryinfo); - } - - free(obj); + gen_perf_delete_query(perf_ctx, obj); free(brw_query); } @@ -1552,7 +569,10 @@ brw_oa_emit_mi_report_perf_count(void *c, } typedef void (*bo_unreference_t)(void *); +typedef void *(*bo_map_t)(void *, void *, unsigned flags); +typedef void (*bo_unmap_t)(void *); typedef void (* emit_mi_report_t)(void *, void *, uint32_t, uint32_t); +typedef void (*emit_mi_flush_t)(void *); static void brw_oa_batchbuffer_flush(void *c, const char *file, int line) @@ -1564,22 +584,28 @@ brw_oa_batchbuffer_flush(void *c, const char *file, int line) typedef void (*capture_frequency_stat_register_t)(void *, void *, uint32_t ); typedef void (*store_register_mem64_t)(void *ctx, void *bo, uint32_t reg, uint32_t offset); +typedef bool (*batch_references_t)(void *batch, void *bo); +typedef void (*bo_wait_rendering_t)(void *bo); +typedef int (*bo_busy_t)(void *bo); static unsigned brw_init_perf_query_info(struct gl_context *ctx) { struct brw_context *brw = brw_context(ctx); const struct gen_device_info *devinfo = &brw->screen->devinfo; - __DRIscreen *screen = brw->screen->driScrnPriv; - struct gen_perf_config *perf_cfg = brw->perf_ctx.perf; - if (perf_cfg) - return perf_cfg->n_queries; + struct gen_perf_context *perf_ctx = &brw->perf_ctx; + if (perf_ctx->perf) + return perf_ctx->perf->n_queries; + + perf_ctx->perf = gen_perf_new(brw); + struct gen_perf_config *perf_cfg = perf_ctx->perf; - perf_cfg = gen_perf_new(brw); - brw->perf_ctx.perf = perf_cfg; perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc; perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference; + perf_cfg->vtbl.bo_map = (bo_map_t)brw_bo_map; + perf_cfg->vtbl.bo_unmap = (bo_unmap_t)brw_bo_unmap; + perf_cfg->vtbl.emit_mi_flush = (emit_mi_flush_t)brw_emit_mi_flush; perf_cfg->vtbl.emit_mi_report_perf_count = (emit_mi_report_t)brw_oa_emit_mi_report_perf_count; perf_cfg->vtbl.batchbuffer_flush = brw_oa_batchbuffer_flush; @@ -1587,35 +613,19 @@ brw_init_perf_query_info(struct gl_context *ctx) (capture_frequency_stat_register_t) capture_frequency_stat_register; perf_cfg->vtbl.store_register_mem64 = (store_register_mem64_t) brw_store_register_mem64; + perf_cfg->vtbl.batch_references = (batch_references_t)brw_batch_references; + perf_cfg->vtbl.bo_wait_rendering = (bo_wait_rendering_t)brw_bo_wait_rendering; + perf_cfg->vtbl.bo_busy = (bo_busy_t)brw_bo_busy; - 
init_pipeline_statistic_query_registers(brw); - gen_perf_query_register_mdapi_statistic_query(&brw->screen->devinfo, - brw->perf_ctx.perf); - - if ((oa_metrics_kernel_support(screen->fd, devinfo)) && - (gen_perf_load_oa_metrics(perf_cfg, screen->fd, devinfo))) - gen_perf_query_register_mdapi_oa_query(&brw->screen->devinfo, - brw->perf_ctx.perf); - - brw->perf_ctx.unaccumulated = - ralloc_array(brw, struct gen_perf_query_object *, 2); - brw->perf_ctx.unaccumulated_elements = 0; - brw->perf_ctx.unaccumulated_array_size = 2; - - exec_list_make_empty(&brw->perf_ctx.sample_buffers); - exec_list_make_empty(&brw->perf_ctx.free_sample_buffers); - - /* It's convenient to guarantee that this linked list of sample - * buffers is never empty so we add an empty head so when we - * Begin an OA query we can always take a reference on a buffer - * in this list. - */ - struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(&brw->perf_ctx); - exec_list_push_head(&brw->perf_ctx.sample_buffers, &buf->link); + gen_perf_init_context(perf_ctx, perf_cfg, brw, brw->bufmgr, devinfo, + brw->hw_ctx, brw->screen->driScrnPriv->fd); - brw->perf_ctx.oa_stream_fd = -1; + init_pipeline_statistic_query_registers(brw); + gen_perf_query_register_mdapi_statistic_query(devinfo, perf_cfg); - brw->perf_ctx.next_query_start_report_id = 1000; + if ((oa_metrics_kernel_support(perf_ctx->drm_fd, devinfo)) && + (gen_perf_load_oa_metrics(perf_cfg, perf_ctx->drm_fd, devinfo))) + gen_perf_query_register_mdapi_oa_query(devinfo, perf_cfg); return perf_cfg->n_queries; }
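
The removed add_to_unaccumulated_query_list() above grows its tracking array by 1.5x before appending. A minimal sketch of the same append-with-growth pattern, using plain realloc() rather than Mesa's ralloc allocator (all names here are illustrative, not from the tree):

#include <stdlib.h>

/* Append obj, growing the backing array by 1.5x when full, as the
 * driver does for its list of unaccumulated queries. */
static int
append_with_growth(void ***array, int *len, int *cap, void *obj)
{
   if (*len >= *cap) {
      int new_cap = *cap ? *cap + *cap / 2 : 2; /* driver starts at 2 */
      void **tmp = realloc(*array, new_cap * sizeof(void *));
      if (!tmp)
         return -1;
      *array = tmp;
      *cap = new_cap;
   }
   (*array)[(*len)++] = obj;
   return 0;
}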
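The removed read_oa_samples_until() wraps read() on the i915 perf fd in the usual non-blocking idiom: retry on EINTR, and treat EAGAIN as "no more data buffered yet" rather than an error (the stream is opened with I915_PERF_FLAG_FD_NONBLOCK). The pattern in isolation, with an illustrative return convention:

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/* Returns bytes read, 0 if the non-blocking fd has nothing buffered
 * yet (EAGAIN), or -1 on a real error. */
static ssize_t
read_perf_data(int perf_fd, void *buf, size_t len)
{
   ssize_t n;

   /* read() can be interrupted by a signal; just retry. */
   while ((n = read(perf_fd, buf, len)) < 0 && errno == EINTR)
      ;

   if (n < 0 && errno == EAGAIN)
      return 0;

   return n;
}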
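Both read_oa_samples_for_query() and accumulate_oa_reports() sanity-check the first dword of the begin/end MI_REPORT_PERF_COUNT snapshots against the report IDs handed to the command streamer. That check in isolation (the offsets are stand-ins for the driver's MI_RPC_BO_* constants):

#include <stdbool.h>
#include <stdint.h>

#define BEGIN_OFFSET 0      /* illustrative; the driver uses offset 0 */
#define END_OFFSET   2048   /* illustrative; the driver uses MI_RPC_BO_END_OFFSET_BYTES */

/* Word 0 of each snapshot is the report ID programmed into the
 * MI_REPORT_PERF_COUNT packet, so matching IDs prove that both the
 * begin and end snapshots actually landed in the buffer. */
static bool
snapshots_landed(const uint8_t *bo_map, uint32_t begin_report_id)
{
   const uint32_t *begin = (const uint32_t *)(bo_map + BEGIN_OFFSET);
   const uint32_t *end = (const uint32_t *)(bo_map + END_OFFSET);

   return begin[0] == begin_report_id && end[0] == begin_report_id + 1;
}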
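accumulate_oa_reports() filters periodic reports into the begin/end window using unsigned 32-bit subtraction, so a single wrap of the raw OA timestamp between two samples still produces the correct small delta; anything scaling to more than ~5 seconds is treated as out of window. A sketch of that test, with a fixed 80ns tick standing in for gen_device_info_timebase_scale():

#include <stdbool.h>
#include <stdint.h>

/* Assumed tick period (HSW); the driver scales via the per-gen timebase. */
#define OA_TICK_NS 80ull

/* True if 'ts' falls within ~5s after 'start_ts'. The uint32_t
 * subtraction wraps modulo 2^32, which is what makes a single
 * timestamp overflow between the two samples harmless. */
static bool
within_window(uint32_t ts, uint32_t start_ts)
{
   return (uint32_t)(ts - start_ts) * OA_TICK_NS <= 5000000000ull;
}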
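The Gen8+ context-switch handling in accumulate_oa_reports() is a small state machine: deltas are kept while our context owns the GPU, dropped while another context runs, and the first report after switching back is also dropped if at least one foreign report was seen, because the OA unit can emit a mislabelled "idle" report right after ours. The same logic extracted into a standalone helper (the struct and names are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct ctx_filter {
   uint32_t hw_id;   /* hardware context ID of the query's context */
   bool in_ctx;      /* last report belonged to our context */
   int out_duration; /* consecutive foreign reports observed */
};

/* Returns whether the delta ending at this report should be added. */
static bool
should_accumulate(struct ctx_filter *f, uint32_t report_hw_id)
{
   bool add = true;

   if (f->in_ctx && report_hw_id != f->hw_id) {
      /* Switch AWAY: this delta still ends in our context; keep it. */
      f->in_ctx = false;
      f->out_duration = 0;
   } else if (!f->in_ctx && report_hw_id == f->hw_id) {
      /* Switch TO: drop the delta if we really were away, since it
       * may belong to the previous (foreign or idle) context. */
      f->in_ctx = true;
      if (f->out_duration >= 1)
         add = false;
   } else if (!f->in_ctx) {
      /* Continuation OUT: foreign work, never counted. */
      add = false;
      f->out_duration++;
   }
   /* Continuation IN: keep accumulating. */

   return add;
}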
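The removed open_i915_perf_oa_stream() passes its configuration to DRM_IOCTL_I915_PERF_OPEN as flat (key, value) pairs, which is why num_properties is the array length divided by two. Distilled to a self-contained call (assuming libdrm's headers and include paths):

#include <stdbool.h>
#include <stdint.h>
#include <i915_drm.h>
#include <xf86drm.h>

/* Returns the new i915 perf stream fd, or -1 on failure. The stream
 * comes back disabled (I915_PERF_FLAG_DISABLED) and is switched on
 * later with I915_PERF_IOCTL_ENABLE, as inc_n_oa_users() does. */
static int
open_oa_stream(int drm_fd, uint32_t ctx_id, uint64_t metrics_set,
               uint64_t oa_format, uint64_t period_exponent)
{
   uint64_t properties[] = {
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
      DRM_I915_PERF_PROP_SAMPLE_OA, true,
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
      DRM_I915_PERF_PROP_OA_FORMAT, oa_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = sizeof(properties) / sizeof(properties[0]) / 2,
      .properties_ptr = (uintptr_t)properties,
   };

   return drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}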
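The sampling-exponent comment in brw_begin_perf_query() boils down to: sample_period = timestamp_period * 2^(exponent + 1), and the chosen exponent should give the longest period that still stays below the A-counter overflow period (2^counter_bits / (n_eus * 2GHz), with the 1GHz folded into the factor of 2 when working in nanoseconds). One way to compute that, slightly simplified from the driver's prev/next period loop:

#include <stdint.h>

/* Returns the largest exponent whose sampling period stays below the
 * A-counter overflow period, or -1 if none fits. 'timestamp_hz' is
 * the raw OA timestamp frequency, e.g. 12.5MHz (80ns) on HSW. */
static int
pick_oa_exponent(uint64_t timestamp_hz, uint64_t n_eus, int a_counter_bits)
{
   /* 2^bits / (n_eus * 2), in nanoseconds: e.g. a 32-bit counter and
    * 40 EUs at 1GHz overflows after ~53ms. */
   uint64_t overflow_ns = (1ull << a_counter_bits) / (n_eus * 2);
   int exponent = -1;

   for (int e = 0; e < 30; e++) {
      /* sample_period = timestamp_period * 2^(e + 1) */
      uint64_t period_ns = 1000000000ull * (2ull << e) / timestamp_hz;
      if (period_ns < overflow_ns)
         exponent = e; /* keep the largest exponent that still fits */
      else
         break;
   }
   return exponent;
}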
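Finally, the removed get_pipeline_stats_data() reports each pipeline statistic as the delta between the Begin and End register snapshots, optionally rescaled by a per-counter numerator/denominator for counters whose raw units differ from what the query exposes. The arithmetic in isolation:

#include <stdint.h>

/* Delta of two MI_STORE_REGISTER_MEM snapshots, scaled to the units
 * the counter advertises. The multiply happens before the divide to
 * avoid losing precision on small deltas. */
static uint64_t
pipeline_stat_value(uint64_t begin, uint64_t end,
                    uint64_t numerator, uint64_t denominator)
{
   uint64_t value = end - begin;

   if (numerator != denominator) {
      value *= numerator;
      value /= denominator;
   }
   return value;
}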