/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
26 #include "common/gen_gem.h"
28 #include "dev/gen_debug.h"
29 #include "dev/gen_device_info.h"
31 #include "perf/gen_perf.h"
32 #include "perf/gen_perf_mdapi.h"
33 #include "perf/gen_perf_private.h"
34 #include "perf/gen_perf_query.h"
35 #include "perf/gen_perf_regs.h"
37 #include "drm-uapi/i915_drm.h"
39 #include "util/u_math.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define MI_RPC_BO_SIZE              4096
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_END_OFFSET_BYTES    (3076)

#define MAP_READ  (1 << 0)
#define MAP_WRITE (1 << 1)

/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * perf_ctx->sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                 |
 *                | ________B_________ _____C___________
 *                | |                | |                |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [ 0 ][ 0 ]
 *
 * These must be preserved until the leading ref drops to zero:
 *           [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [ 0 ][ 1 ]
 *        ^_______ Add a reference and store pointer to node in
 *                 A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [ 0 ][ 2 ]
 *        ^_______ Add a reference and store pointer to
 *                 node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
 *        ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [ 1 ][ 0 ][ 0 ][ 0 ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                 |
 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
 *                  ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [ X ][ X ][ X ][ X ]
 *                   keeping -> [ 1 ][ 0 ][ 0 ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
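
/* A minimal sketch (an illustration only, mirroring the code later in this
 * file) of how that reference discipline plays out for one query:
 *
 *    // Begin: pin the current tail so this and later buffers can't be
 *    // reaped while the query might still need them.
 *    struct exec_node *tail = exec_list_get_tail(&perf_ctx->sample_buffers);
 *    struct oa_sample_buf *tail_buf =
 *       exec_node_data(struct oa_sample_buf, tail, link);
 *    tail_buf->refcount++;
 *    query->oa.samples_head = tail;
 *
 *    // End (after accumulating this query's samples): unpin and reap.
 *    struct oa_sample_buf *head_buf =
 *       exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
 *    head_buf->refcount--;
 *    query->oa.samples_head = NULL;
 *    reap_old_sample_buffers(perf_ctx);
 */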

struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};

/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct gen_perf_query_object
{
   const struct gen_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {

         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of the mapping of @bo
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the brw->perfquery.sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query)
          *
          * (See struct brw_oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Frequency of the GT at begin and end of the query.
          */
         uint64_t gt_frequency[2];

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct gen_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};

struct gen_perf_context {
   struct gen_perf_config *perf;

   void * ctx;  /* driver context (eg, brw_context) */
   void * bufmgr;
   const struct gen_device_info *devinfo;

   uint32_t hw_ctx;
   int drm_fd;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the gpu.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended. However, the
    * results have not been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;
};

static bool
inc_n_users(struct gen_perf_context *perf_ctx)
{
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++perf_ctx->n_oa_users;

   return true;
}

static void
dec_n_users(struct gen_perf_context *perf_ctx)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters. Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --perf_ctx->n_oa_users;
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling gen perf stream: %m\n");
   }
}

void
gen_perf_close(struct gen_perf_context *perfquery,
               const struct gen_perf_query_info *query)
{
   if (perfquery->oa_stream_fd != -1) {
      close(perfquery->oa_stream_fd);
      perfquery->oa_stream_fd = -1;
   }
   if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      struct gen_perf_query_info *raw_query =
         (struct gen_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}
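
/* i915 perf properties are passed to the kernel as a flat uint64_t array of
 * (key, value) pairs, so the number of properties is half the array length.
 * Note that gen_perf_open() below drops the trailing
 * DRM_I915_PERF_PROP_GLOBAL_SSEU pair on kernels older than i915 perf
 * version 4, which don't understand it.
 */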
#define NUM_PERF_PROPERTIES(array) (ARRAY_SIZE(array) / 2)

bool
gen_perf_open(struct gen_perf_context *perf_ctx,
              int metrics_set_id,
              int report_format,
              int period_exponent,
              int drm_fd,
              uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,

      /* SSEU configuration */
      DRM_I915_PERF_PROP_GLOBAL_SSEU, to_user_pointer(&perf_ctx->perf->sseu),
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = perf_ctx->perf->i915_perf_version >= 4 ?
                        NUM_PERF_PROPERTIES(properties) :
                        NUM_PERF_PROPERTIES(properties) - 1,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening gen perf OA stream: %m\n");
      return false;
   }

   perf_ctx->oa_stream_fd = fd;

   perf_ctx->current_oa_metrics_set_id = metrics_set_id;
   perf_ctx->current_oa_format = report_format;

   return true;
}

static uint64_t
get_metric_id(struct gen_perf_config *perf,
              const struct gen_perf_query_info *query)
{
   /* These queries are known to never change; their config ID has been
    * loaded upon the first query creation. No need to look them up again.
    */
   if (query->kind == GEN_PERF_QUERY_TYPE_OA)
      return query->oa_metrics_set_id;

   assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);

   /* Raw queries can be reprogrammed by an external application/library.
    * When a raw query is used for the first time its ID is set to a value !=
    * 0. When it stops being used the ID returns to 0. No need to reload the
    * ID when it's already loaded.
    */
   if (query->oa_metrics_set_id != 0) {
      DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
      return query->oa_metrics_set_id;
   }

   struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
   if (!gen_perf_load_metric_id(perf, query->guid,
                                &raw_query->oa_metrics_set_id)) {
      DBG("Unable to read query guid=%s ID, falling back to test config\n",
          query->guid);
      raw_query->oa_metrics_set_id = 1ULL;
   } else {
      DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
   }
   return query->oa_metrics_set_id;
}

static struct oa_sample_buf *
get_free_sample_buf(struct gen_perf_context *perf_ctx)
{
   struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
   struct oa_sample_buf *buf;

   if (node)
      buf = exec_node_data(struct oa_sample_buf, node, link);
   else {
      buf = ralloc_size(perf_ctx->perf, sizeof(*buf));

      exec_node_init(&buf->link);
      buf->refcount = 0;
   }
   buf->len = 0;

   return buf;
}

static void
reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
      } else
         return;
   }
}

static void
free_sample_bufs(struct gen_perf_context *perf_ctx)
{
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&perf_ctx->free_sample_buffers);
}

struct gen_perf_query_object *
gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
{
   const struct gen_perf_query_info *query =
      &perf_ctx->perf->queries[query_index];
   struct gen_perf_query_object *obj =
      calloc(1, sizeof(struct gen_perf_query_object));

   if (!obj)
      return NULL;

   obj->queryinfo = query;

   perf_ctx->n_query_instances++;
   return obj;
}

int
gen_perf_active_queries(struct gen_perf_context *perf_ctx,
                        const struct gen_perf_query_info *query)
{
   assert(perf_ctx->n_active_oa_queries == 0 ||
          perf_ctx->n_active_pipeline_stats_queries == 0);

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return perf_ctx->n_active_oa_queries;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return perf_ctx->n_active_pipeline_stats_queries;

   default:
      unreachable("Unknown query type");
      break;
   }
}

const struct gen_perf_query_info *
gen_perf_query_info(const struct gen_perf_query_object *query)
{
   return query->queryinfo;
}

struct gen_perf_context *
gen_perf_new_context(void *parent)
{
   struct gen_perf_context *ctx = rzalloc(parent, struct gen_perf_context);
   if (!ctx)
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
   return ctx;
}

struct gen_perf_config *
gen_perf_config(struct gen_perf_context *ctx)
{
   return ctx->perf;
}

void
gen_perf_init_context(struct gen_perf_context *perf_ctx,
                      struct gen_perf_config *perf_cfg,
                      void * ctx,  /* driver context (eg, brw_context) */
                      void * bufmgr,  /* eg brw_bufmgr */
                      const struct gen_device_info *devinfo,
                      uint32_t hw_ctx,
                      int drm_fd)
{
   perf_ctx->perf = perf_cfg;
   perf_ctx->ctx = ctx;
   perf_ctx->bufmgr = bufmgr;
   perf_ctx->drm_fd = drm_fd;
   perf_ctx->hw_ctx = hw_ctx;
   perf_ctx->devinfo = devinfo;

   perf_ctx->unaccumulated =
      ralloc_array(ctx, struct gen_perf_query_object *, 2);
   perf_ctx->unaccumulated_elements = 0;
   perf_ctx->unaccumulated_array_size = 2;

   exec_list_make_empty(&perf_ctx->sample_buffers);
   exec_list_make_empty(&perf_ctx->free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
   exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);

   perf_ctx->oa_stream_fd = -1;
   perf_ctx->next_query_start_report_id = 1000;
}
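
/* A minimal usage sketch (an illustration only; "mem_ctx", "driver_ctx",
 * "bufmgr", "hw_ctx" and "drm_fd" are assumed to come from the caller's
 * driver code) of how the entry points in this file fit together:
 *
 *    struct gen_perf_context *perf_ctx = gen_perf_new_context(mem_ctx);
 *    gen_perf_init_context(perf_ctx, perf_cfg, driver_ctx, bufmgr,
 *                          devinfo, hw_ctx, drm_fd);
 *
 *    struct gen_perf_query_object *q = gen_perf_new_query(perf_ctx, 0);
 *    gen_perf_begin_query(perf_ctx, q);
 *    ... emit the draw calls to be measured ...
 *    gen_perf_end_query(perf_ctx, q);
 *    gen_perf_wait_query(perf_ctx, q, current_batch);
 *    gen_perf_get_query_data(perf_ctx, q, data_size, data, &bytes_written);
 *    gen_perf_delete_query(perf_ctx, q);
 */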

/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                struct gen_perf_query_object *obj)
{
   if (perf_ctx->unaccumulated_elements >=
       perf_ctx->unaccumulated_array_size)
   {
      perf_ctx->unaccumulated_array_size *= 1.5;
      perf_ctx->unaccumulated =
         reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
                  struct gen_perf_query_object *,
                  perf_ctx->unaccumulated_array_size);
   }

   perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
}

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(struct gen_perf_context *ctx,
                              struct gen_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
   struct gen_perf_config *perf = ctx->perf;
   const struct gen_perf_query_info *query = obj->queryinfo;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);

      perf->vtbl.store_register_mem(ctx->ctx, obj->pipeline_stats.bo,
                                    counter->pipeline_stat.reg, 8,
                                    offset_in_bytes + i * sizeof(uint64_t));
   }
}
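
/* For reference (matching the offsets used in gen_perf_begin_query() and
 * gen_perf_end_query() below): begin snapshots are written at offset 0 of
 * the pipeline_stats BO, end snapshots at STATS_BO_END_OFFSET_BYTES, and
 * counter i lands at (offset_in_bytes + i * sizeof(uint64_t)), so
 * get_pipeline_stats_data() can compute each result as end[i] - start[i].
 */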

static void
snapshot_freq_register(struct gen_perf_context *ctx,
                       struct gen_perf_query_object *query,
                       uint32_t bo_offset)
{
   struct gen_perf_config *perf = ctx->perf;
   const struct gen_device_info *devinfo = ctx->devinfo;

   if (devinfo->gen == 8 && !devinfo->is_cherryview)
      perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo,
                                    GEN7_RPSTAT1, 4, bo_offset);
   else if (devinfo->gen >= 9)
      perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo,
                                    GEN9_RPSTAT0, 4, bo_offset);
}

bool
gen_perf_begin_query(struct gen_perf_context *perf_ctx,
                     struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * To achieve this, we stall the pipeline at pixel scoreboard (prevent any
    * additional work from being processed by the pipeline until all pixels of
    * the previous draw have been completed).
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics then periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);

   switch (queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);

      if (perf_ctx->oa_stream_fd != -1 &&
          perf_ctx->current_oa_metrics_set_id != metric_id) {

         if (perf_ctx->n_oa_users != 0) {
            DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
                perf_ctx->current_oa_metrics_set_id, metric_id);
            return false;
         } else
            gen_perf_close(perf_ctx, queryinfo);
      }

      /* If the OA counters aren't already on, enable them. */
      if (perf_ctx->oa_stream_fd == -1) {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         /* The period_exponent gives a sampling period as follows:
          *   sample_period = timestamp_period * 2^(period_exponent + 1)
          *
          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
          * ~83ns (GEN8/9).
          *
          * The counter overflow period is derived from the EuActive counter
          * which reads a counter that increments by the number of clock
          * cycles multiplied by the number of EUs. It can be calculated as:
          *
          * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
          *
          * (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We select a sampling period lower than that overflow period to
          * ensure we cannot see more than 1 counter overflow, otherwise we
          * could lose information.
          */
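
         /* A worked example of the selection below, with assumed numbers
          * for illustration only: with a 32-bit A counter, 40 EUs and a
          * 1GHz max frequency the overflow period is
          * 2^32 / (40 * 2) ns ~= 53.7ms. With an 80ns timestamp period
          * (HSW) the loop then latches onto the iteration where
          * 2^19 * 80ns ~= 41.9ms is still below ~53.7ms while
          * 2^20 * 80ns ~= 83.9ms is already above it.
          */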

         int a_counter_in_bits = 32;
         if (devinfo->gen >= 8)
            a_counter_in_bits = 40;

         uint64_t overflow_period = pow(2, a_counter_in_bits) /
            (perf_cfg->sys_vars.n_eus *
             /* drop 1GHz freq to have units in nanoseconds */
             2);

         DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
             overflow_period, overflow_period / 1000000ul,
             perf_cfg->sys_vars.n_eus);

         int period_exponent = 0;
         uint64_t prev_sample_period, next_sample_period;
         for (int e = 0; e < 30; e++) {
            prev_sample_period = 1000000000ull * pow(2, e + 1) /
               devinfo->timestamp_frequency;
            next_sample_period = 1000000000ull * pow(2, e + 2) /
               devinfo->timestamp_frequency;

            /* Take the previous sampling period, lower than the overflow
             * period.
             */
            if (prev_sample_period < overflow_period &&
                next_sample_period > overflow_period)
               period_exponent = e + 1;
         }

         if (period_exponent == 0) {
            DBG("WARNING: unable to find a sampling exponent\n");
            return false;
         }

         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
             prev_sample_period / 1000000ul);

         if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
                            period_exponent, perf_ctx->drm_fd,
                            perf_ctx->hw_ctx))
            return false;
      } else {
         assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
                perf_ctx->current_oa_format == queryinfo->oa_format);
      }

      if (!inc_n_users(perf_ctx)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (query->oa.bo) {
         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                             "perf. query OA MI_RPC bo",
                                             MI_RPC_BO_SIZE);

      /* Pre-filling the BO helps debug whether writes landed. */
      void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
      memset(map, 0x80, MI_RPC_BO_SIZE);
      perf_cfg->vtbl.bo_unmap(query->oa.bo);

      query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
      perf_ctx->next_query_start_report_id += 2;

      /* Take a starting OA counter snapshot. */
      perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
                                               query->oa.begin_report_id);
      snapshot_freq_register(perf_ctx, query, MI_FREQ_START_OFFSET_BYTES);

      ++perf_ctx->n_active_oa_queries;

      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
      query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);

      struct oa_sample_buf *buf =
         exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      gen_perf_query_result_clear(&query->oa.result);
      query->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(perf_ctx, query);
      break;
   }

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }

      query->pipeline_stats.bo =
         perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                 "perf. query pipeline stats bo",
                                 STATS_BO_SIZE);

      /* Take starting snapshots. */
      snapshot_statistics_registers(perf_ctx, query, 0);

      ++perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   return true;
}

void
gen_perf_end_query(struct gen_perf_context *perf_ctx,
                   struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in brw_begin_perf_query for
    * corresponding flush.
    */
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled.
       */
      if (!query->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         snapshot_freq_register(perf_ctx, query, MI_FREQ_END_OFFSET_BYTES);
         perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
                                                  MI_RPC_BO_END_OFFSET_BYTES,
                                                  query->oa.begin_report_id + 1);
      }

      --perf_ctx->n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written
       * to query->oa.bo.
       */
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      snapshot_statistics_registers(perf_ctx, query,
                                    STATS_BO_END_OFFSET_BYTES);
      --perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}

enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};

static enum OaReadStatus
read_oa_samples_until(struct gen_perf_context *perf_ctx,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);
   uint32_t last_timestamp =
      tail_buf->len == 0 ? start_timestamp : tail_buf->last_timestamp;

   while (1) {
      struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
      uint32_t offset;
      int len;

      while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);

         if (len < 0) {
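            /* A short worked note on the unsigned arithmetic below (an
             * illustration, not from the original): OA timestamps are
             * 32-bit and may wrap between Begin and End. E.g. with
             * start_timestamp = 0xffffff00 and last_timestamp = 0x100,
             * (last_timestamp - start_timestamp) wraps to 0x200, so the
             * deltas still compare correctly as long as the query spans
             * less than half the 32-bit range (the INT32_MAX guard).
             */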
            if (errno == EAGAIN) {
               return ((last_timestamp - start_timestamp) < INT32_MAX &&
                       (last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
949 DBG("Error reading i915 perf samples: %m\n");
952 DBG("Spurious EOF reading i915 perf samples\n");
954 return OA_READ_STATUS_ERROR
;
958 exec_list_push_tail(&perf_ctx
->sample_buffers
, &buf
->link
);
960 /* Go through the reports and update the last timestamp. */
962 while (offset
< buf
->len
) {
963 const struct drm_i915_perf_record_header
*header
=
964 (const struct drm_i915_perf_record_header
*) &buf
->buf
[offset
];
965 uint32_t *report
= (uint32_t *) (header
+ 1);
967 if (header
->type
== DRM_I915_PERF_RECORD_SAMPLE
)
968 last_timestamp
= report
[1];
970 offset
+= header
->size
;
973 buf
->last_timestamp
= last_timestamp
;
976 unreachable("not reached");
977 return OA_READ_STATUS_ERROR
;

/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query,
                          void *current_batch)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulating. */
   assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
          !perf_cfg->vtbl.bo_busy(query->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it. */
   if (query->oa.map == NULL)
      query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error. */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}

void
gen_perf_wait_query(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   struct brw_bo *bo = NULL;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = query->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = query->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (perf_cfg->vtbl.batch_references(current_batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);

   perf_cfg->vtbl.bo_wait_rendering(bo);

   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
       query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
      while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
         ;
   }
}

bool
gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (query->oa.results_accumulated ||
              (query->oa.bo &&
               !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
               !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
               read_oa_samples_for_query(perf_ctx, query, current_batch)));
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (query->pipeline_stats.bo &&
              !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
              !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}

/**
 * Remove a query from the global list of unaccumulated queries, either
 * after successfully accumulating the OA reports associated with the
 * query in accumulate_oa_reports() or when discarding unwanted query
 * results.
 */
static void
drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                   struct gen_perf_query_object *query)
{
   for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
      if (perf_ctx->unaccumulated[i] == query) {
         int last_elt = --perf_ctx->unaccumulated_elements;

         if (i == last_elt)
            perf_ctx->unaccumulated[i] = NULL;
         else {
            perf_ctx->unaccumulated[i] =
               perf_ctx->unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */
   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   query->oa.samples_head = NULL;

   reap_old_sample_buffers(perf_ctx);
}

/* In general if we see anything spurious while accumulating results,
 * we don't try to continue accumulating the current query, hoping
 * for the best; we scrap anything outstanding, and then hope for the
 * best with new queries.
 */
static void
discard_all_queries(struct gen_perf_context *perf_ctx)
{
   while (perf_ctx->unaccumulated_elements) {
      struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];

      query->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(perf_ctx, query);

      dec_n_users(perf_ctx);
   }
}

/* Looks for the validity bit of context ID (dword 2) of an OA report. */
static bool
oa_report_ctx_id_valid(const struct gen_device_info *devinfo,
                       const uint32_t *report)
{
   assert(devinfo->gen >= 8);
   if (devinfo->gen == 8)
      return (report[0] & (1 << 25)) != 0;
   return (report[0] & (1 << 16)) != 0;
}

/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots. For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
static void
accumulate_oa_reports(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool last_report_ctx_match = true;
   int out_duration = 0;

   assert(query->oa.map != NULL);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }

   /* On Gen12+ OA reports are sourced from per context counters, so we don't
    * ever have to look at the global OA buffer. Yey \o/
    */
   if (perf_ctx->devinfo->gen >= 12) {
      last = start;
      goto end;
   }

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the perf_ctx->sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = query->oa.samples_head->next;

   foreach_list_typed_from(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool add = true;
            bool report_ctx_match = true;

            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - start[1]) > 5000000000) {
               continue;
            }

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - end[1]) <= 5000000000) {
               goto end;
            }

            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas. The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               /* Consider that the current report matches our context only if
                * the report says the report ID is valid.
                */
               report_ctx_match = oa_report_ctx_id_valid(devinfo, report) &&
                  report[2] == start[2];
               if (report_ctx_match)
                  out_duration = 0;
               else
                  out_duration++;

               /* Only add the delta between <last, report> if the last report
                * was clearly identified as our context, or if we have at most
                * 1 report without a matching ID.
                *
                * The OA unit will sometimes label reports with an invalid
                * context ID when i915 rewrites the execlist submit register
                * with the same context as the one currently running. This
                * happens when i915 wants to notify the HW of ringbuffer tail
                * register update. We have to consider this report as part of
                * our context as the 3d pipeline behind the OACS unit is still
                * processing the operations started at the previous execlist
                * submission.
                */
               add = last_report_ctx_match && out_duration < 2;
            }

            if (add) {
               gen_perf_query_result_accumulate(&query->oa.result,
                                                query->queryinfo,
                                                last, report);
            } else {
               /* We're not adding the delta because we've identified it's not
                * for the context we filter for. We can consider that the
                * query was split.
                */
               query->oa.result.query_disjoint = true;
            }

            last = report;
            last_report_ctx_match = report_ctx_match;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:

   gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
                                    last, end);

   query->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(perf_ctx, query);
   dec_n_users(perf_ctx);

   return;

error:

   discard_all_queries(perf_ctx);
}

void
gen_perf_delete_query(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (query->oa.bo) {
         if (!query->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(perf_ctx, query);
            dec_n_users(perf_ctx);
         }

         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.results_accumulated = false;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--perf_ctx->n_query_instances == 0) {
      free_sample_bufs(perf_ctx);
      gen_perf_close(perf_ctx, query->queryinfo);
   }

   free(query);
}

#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)

static void
read_gt_frequency(struct gen_perf_context *perf_ctx,
                  struct gen_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
      end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));

   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
   case 12:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}
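
/* An illustrative decode of the scaling above (numbers assumed for the
 * example): on Gen9+ the RPSTAT0 frequency field is in units of 50MHz/3
 * (~16.67MHz), so a raw field value of 54 maps to 54 * 50 / 3 = 900MHz,
 * stored as 900000000Hz after the MHz-to-Hz step. On Gen7/8 the unit is
 * 50MHz, so a raw value of 18 likewise maps to 900MHz.
 */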

static int
get_oa_counter_data(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);

      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf_cfg, queryinfo,
                                               query->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf_cfg, queryinfo,
                                              query->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }

         written = counter->offset + counter_size;
      }
   }

   return written;
}

static int
get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        size_t data_size,
                        uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   uint8_t *p = data;

   uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx,
                                           query->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);

   return p - data;
}
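
/* An illustrative example of the numerator/denominator scaling above
 * (numbers assumed, not from the original): a statistic that ticks 4
 * times per event of interest would carry numerator=1, denominator=4,
 * so a raw delta of 100 is reported as 100 * 1 / 4 = 25.
 */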

void
gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        int data_size,
                        unsigned *data,
                        unsigned *bytes_written)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   int written = 0;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!query->oa.results_accumulated) {
         read_gt_frequency(perf_ctx, query);
         uint32_t *begin_report = query->oa.map;
         uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
         gen_perf_query_result_read_frequencies(&query->oa.result,
                                                perf_ctx->devinfo,
                                                begin_report, end_report);
         accumulate_oa_reports(perf_ctx, query);
         assert(query->oa.results_accumulated);

         perf_cfg->vtbl.bo_unmap(query->oa.bo);
         query->oa.map = NULL;
      }
      if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &query->oa.result,
                                                     query->oa.gt_frequency[0],
                                                     query->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   *bytes_written = written;
}

void
gen_perf_dump_query_count(struct gen_perf_context *perf_ctx)
{
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
}

void
gen_perf_dump_query(struct gen_perf_context *ctx,
                    struct gen_perf_query_object *obj,
                    void *current_batch)
{
   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("BO: %-4s OA data: %-10s %-15s\n",
          obj->oa.bo ? "yes," : "no,",
          gen_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("BO: %-4s\n",
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}