/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <dirent.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

#ifndef HAVE_DIRENT_D_TYPE
#include <limits.h> // PATH_MAX
#endif

#include <drm-uapi/i915_drm.h>

#include "common/gen_gem.h"
#include "gen_perf.h"
#include "gen_perf_regs.h"
#include "perf/gen_perf_mdapi.h"
#include "perf/gen_perf_metrics.h"

#include "dev/gen_debug.h"
#include "dev/gen_device_info.h"
#include "util/bitscan.h"
#include "util/mesa-sha1.h"
#include "util/u_math.h"
#define FILE_DEBUG_FLAG DEBUG_PERFMON
#define MI_RPC_BO_SIZE              4096
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
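
/* A sketch of the MI_RPC BO layout these offsets imply, inferred from their
 * use in gen_perf_begin_query() below (the end-report usage follows the same
 * pattern in gen_perf_end_query()):
 *
 *   byte 0:    begin MI_REPORT_PERF_COUNT OA report
 *   byte 2048: end MI_REPORT_PERF_COUNT OA report (MI_RPC_BO_END_OFFSET_BYTES)
 *   byte 3072: begin GT frequency snapshot (MI_FREQ_START_OFFSET_BYTES)
 *   byte 3076: end GT frequency snapshot (MI_FREQ_END_OFFSET_BYTES)
 */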

#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))

#define GEN7_RPSTAT1                    0xA01C
#define GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT 7
#define GEN7_RPSTAT1_CURR_GT_FREQ_MASK  INTEL_MASK(13, 7)
#define GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT 0
#define GEN7_RPSTAT1_PREV_GT_FREQ_MASK  INTEL_MASK(6, 0)

#define GEN9_RPSTAT0                    0xA01C
#define GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT 23
#define GEN9_RPSTAT0_CURR_GT_FREQ_MASK  INTEL_MASK(31, 23)
#define GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT 0
#define GEN9_RPSTAT0_PREV_GT_FREQ_MASK  INTEL_MASK(8, 0)

#define GEN6_SO_PRIM_STORAGE_NEEDED     0x2280
#define GEN7_SO_PRIM_STORAGE_NEEDED(n)  (0x5240 + (n) * 8)
#define GEN6_SO_NUM_PRIMS_WRITTEN       0x2288
#define GEN7_SO_NUM_PRIMS_WRITTEN(n)    (0x5200 + (n) * 8)

#define MAP_READ  (1 << 0)
#define MAP_WRITE (1 << 1)

#define OA_REPORT_INVALID_CTX_ID (0xffffffff)

/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * perf_ctx->sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                 |
 *                | ________B_________ _____C___________
 *                | |                | |           |   |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [  0  ][  0  ]
 *
 * These must be preserved until the leading ref drops to zero:
 *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *
 *                ________________A________
 *                |
 *
 * [  0  ][  1  ]
 *           ^_______ Add a reference and store pointer to node in
 *                    A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [  0  ][  2  ]
 *           ^_______ Add a reference and store pointer to
 *                    node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [  0  ][  1  ][  0  ][  0  ][  0  ]
 *           ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [  1  ][  0  ][  0  ][  0  ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *
 *                ________________A_________________
 *                |                                 |
 *                |                    ____C___
 *                |                    |      |
 * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
 *           ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [  X  ][  X  ][  X  ][  X  ]
 *                  keeping -> [  1  ][  0  ][  0  ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};

/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct gen_perf_query_object
{
   const struct gen_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {

         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of the mapping of @bo
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report, and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the brw->perfquery.sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query)
          *
          * (See struct brw_oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Frequency of the GT at begin and end of the query.
          */
         uint64_t gt_frequency[2];

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct gen_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};

struct gen_perf_context {
   struct gen_perf_config *perf;

   void * ctx;  /* driver context (eg, brw_context) */
   void * bufmgr;
   const struct gen_device_info *devinfo;

   uint32_t hw_ctx;
   int drm_fd;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the gpu.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended. However, the
    * results have not been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;
};

const struct gen_perf_query_info *
gen_perf_query_info(const struct gen_perf_query_object *query)
{
   return query->queryinfo;
}

struct gen_perf_context *
gen_perf_new_context(void *parent)
{
   struct gen_perf_context *ctx = rzalloc(parent, struct gen_perf_context);
   if (!ctx)
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
   return ctx;
}

struct gen_perf_config *
gen_perf_config(struct gen_perf_context *ctx)
{
   return ctx->perf;
}

struct gen_perf_query_object *
gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
{
   const struct gen_perf_query_info *query =
      &perf_ctx->perf->queries[query_index];
   struct gen_perf_query_object *obj =
      calloc(1, sizeof(struct gen_perf_query_object));

   if (!obj)
      return NULL;

   obj->queryinfo = query;

   perf_ctx->n_query_instances++;
   return obj;
}

int
gen_perf_active_queries(struct gen_perf_context *perf_ctx,
                        const struct gen_perf_query_info *query)
{
   assert(perf_ctx->n_active_oa_queries == 0 ||
          perf_ctx->n_active_pipeline_stats_queries == 0);

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return perf_ctx->n_active_oa_queries;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return perf_ctx->n_active_pipeline_stats_queries;

   default:
      unreachable("Unknown query type");
   }
}
static inline uint64_t to_user_pointer(void *ptr)
{
   return (uintptr_t) ptr;
}

static bool
is_dir_or_link(const struct dirent *entry, const char *parent_dir)
{
#ifdef HAVE_DIRENT_D_TYPE
   return entry->d_type == DT_DIR || entry->d_type == DT_LNK;
#else
   struct stat st;
   char path[PATH_MAX + 1];
   snprintf(path, sizeof(path), "%s/%s", parent_dir, entry->d_name);
   lstat(path, &st);
   return S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode);
#endif
}
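
/* Resolve the sysfs directory for the DRM device backing @fd, i.e.
 * /sys/dev/char/<maj>:<min>/device/drm/card<N>, which is where the i915
 * metrics directory and gt_min/max_freq_mhz files are exposed.
 */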
static bool
get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
{
   struct stat sb;
   int min, maj;
   DIR *drmdir;
   struct dirent *drm_entry;
   int len;

   perf->sysfs_dev_dir[0] = '\0';

   if (fstat(fd, &sb)) {
      DBG("Failed to stat DRM fd\n");
      return false;
   }

   maj = major(sb.st_rdev);
   min = minor(sb.st_rdev);

   if (!S_ISCHR(sb.st_mode)) {
      DBG("DRM fd is not a character device as expected\n");
      return false;
   }

   len = snprintf(perf->sysfs_dev_dir,
                  sizeof(perf->sysfs_dev_dir),
                  "/sys/dev/char/%d:%d/device/drm", maj, min);
   if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
      DBG("Failed to concatenate sysfs path to drm device\n");
      return false;
   }

   drmdir = opendir(perf->sysfs_dev_dir);
   if (!drmdir) {
      DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
      return false;
   }

   while ((drm_entry = readdir(drmdir))) {
      if (is_dir_or_link(drm_entry, perf->sysfs_dev_dir) &&
          strncmp(drm_entry->d_name, "card", 4) == 0)
      {
         len = snprintf(perf->sysfs_dev_dir,
                        sizeof(perf->sysfs_dev_dir),
                        "/sys/dev/char/%d:%d/device/drm/%s",
                        maj, min, drm_entry->d_name);
         closedir(drmdir);
         if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
            return false;
         else
            return true;
      }
   }

   closedir(drmdir);

   DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
       maj, min);

   return false;
}

static bool
read_file_uint64(const char *file, uint64_t *val)
{
   char buf[32];
   int fd, n;

   fd = open(file, 0);
   if (fd < 0)
      return false;
   while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
          errno == EINTR);
   close(fd);
   if (n < 0)
      return false;

   buf[n] = '\0';
   *val = strtoull(buf, NULL, 0);

   return true;
}

static bool
read_sysfs_drm_device_file_uint64(struct gen_perf_config *perf,
                                  const char *file,
                                  uint64_t *value)
{
   char buf[512];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate sys filename to read u64 from\n");
      return false;
   }

   return read_file_uint64(buf, value);
}

static inline struct gen_perf_query_info *
append_query_info(struct gen_perf_config *perf, int max_counters)
{
   struct gen_perf_query_info *query;

   perf->queries = reralloc(perf, perf->queries,
                            struct gen_perf_query_info,
                            ++perf->n_queries);
   query = &perf->queries[perf->n_queries - 1];
   memset(query, 0, sizeof(*query));

   if (max_counters > 0) {
      query->max_counters = max_counters;
      query->counters =
         rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
   }

   return query;
}

static void
register_oa_config(struct gen_perf_config *perf,
                   const struct gen_perf_query_info *query,
                   uint64_t config_id)
{
   struct gen_perf_query_info *registered_query = append_query_info(perf, 0);

   *registered_query = *query;
   registered_query->oa_metrics_set_id = config_id;
   DBG("metric set registered: id = %" PRIu64", guid = %s\n",
       registered_query->oa_metrics_set_id, query->guid);
}

static void
enumerate_sysfs_metrics(struct gen_perf_config *perf)
{
   DIR *metricsdir = NULL;
   struct dirent *metric_entry;
   char buf[256];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate path to sysfs metrics/ directory\n");
      return;
   }

   metricsdir = opendir(buf);
   if (!metricsdir) {
      DBG("Failed to open %s: %m\n", buf);
      return;
   }

   while ((metric_entry = readdir(metricsdir))) {
      struct hash_entry *entry;

      if (!is_dir_or_link(metric_entry, buf) ||
          metric_entry->d_name[0] == '.')
         continue;

      DBG("metric set: %s\n", metric_entry->d_name);
      entry = _mesa_hash_table_search(perf->oa_metrics_table,
                                      metric_entry->d_name);
      if (entry) {
         uint64_t id;
         if (!gen_perf_load_metric_id(perf, metric_entry->d_name, &id)) {
            DBG("Failed to read metric set id from %s: %m", buf);
            continue;
         }

         register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
      } else
         DBG("metric set not known by mesa (skipping)\n");
   }

   closedir(metricsdir);
}
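
/* Detect support for loading OA configs at runtime: asking the kernel to
 * remove an invalid config ID fails with ENOENT when the REMOVE_CONFIG
 * ioctl exists (i.e. dynamic configs are supported), and with a different
 * errno on kernels that lack it.
 */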
static bool
kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
{
   uint64_t invalid_config_id = UINT64_MAX;

   return gen_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
                    &invalid_config_id) < 0 && errno == ENOENT;
}

static int
i915_query_items(struct gen_perf_config *perf, int fd,
                 struct drm_i915_query_item *items, uint32_t n_items)
{
   struct drm_i915_query q = {
      .num_items = n_items,
      .items_ptr = to_user_pointer(items),
   };
   return gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
}

static bool
i915_query_perf_config_supported(struct gen_perf_config *perf, int fd)
{
   struct drm_i915_query_item item = {
      .query_id = DRM_I915_QUERY_PERF_CONFIG,
      .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
   };

   return i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0;
}
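
/* Query the OA config matching @guid through DRM_I915_QUERY_PERF_CONFIG:
 * the kernel reads the uuid (and any register-array pointers/sizes) from
 * item_data and writes the matching config description back into it.
 */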
static bool
i915_query_perf_config_data(struct gen_perf_config *perf,
                            int fd, const char *guid,
                            struct drm_i915_perf_oa_config *config)
{
   struct {
      struct drm_i915_query_perf_config query;
      struct drm_i915_perf_oa_config config;
   } item_data;
   struct drm_i915_query_item item = {
      .query_id = DRM_I915_QUERY_PERF_CONFIG,
      .flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
      .data_ptr = to_user_pointer(&item_data),
      .length = sizeof(item_data),
   };

   memset(&item_data, 0, sizeof(item_data));
   memcpy(item_data.query.uuid, guid, sizeof(item_data.query.uuid));
   memcpy(&item_data.config, config, sizeof(item_data.config));

   if (!(i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0))
      return false;

   memcpy(config, &item_data.config, sizeof(item_data.config));

   return true;
}

bool
gen_perf_load_metric_id(struct gen_perf_config *perf_cfg,
                        const char *guid,
                        uint64_t *metric_id)
{
   char config_path[280];

   snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
            perf_cfg->sysfs_dev_dir, guid);

   /* Don't recreate already loaded configs. */
   return read_file_uint64(config_path, metric_id);
}
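
/* Register an OA config with the kernel. On success the ADD_CONFIG ioctl
 * returns the (strictly positive) metric set ID allocated for the config;
 * 0 is returned here on failure.
 */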
static uint64_t
i915_add_config(struct gen_perf_config *perf, int fd,
                const struct gen_perf_registers *config,
                const char *guid)
{
   struct drm_i915_perf_oa_config i915_config = { 0, };

   memcpy(i915_config.uuid, guid, sizeof(i915_config.uuid));

   i915_config.n_mux_regs = config->n_mux_regs;
   i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);

   i915_config.n_boolean_regs = config->n_b_counter_regs;
   i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);

   i915_config.n_flex_regs = config->n_flex_regs;
   i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);

   int ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &i915_config);
   return ret > 0 ? ret : 0;
}

static void
init_oa_configs(struct gen_perf_config *perf, int fd)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      const struct gen_perf_query_info *query = entry->data;
      uint64_t config_id;

      if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
         DBG("metric set: %s (already loaded)\n", query->guid);
         register_oa_config(perf, query, config_id);
         continue;
      }

      int ret = i915_add_config(perf, fd, &query->config, query->guid);
      if (ret == 0) {
         DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
             query->name, query->guid, strerror(errno));
         continue;
      }

      register_oa_config(perf, query, ret);
      DBG("metric set: %s (added)\n", query->guid);
   }
}

static void
compute_topology_builtins(struct gen_perf_config *perf,
                          const struct gen_device_info *devinfo)
{
   perf->sys_vars.slice_mask = devinfo->slice_masks;
   perf->sys_vars.n_eu_slices = devinfo->num_slices;

   for (int i = 0; i < sizeof(devinfo->subslice_masks[i]); i++) {
      perf->sys_vars.n_eu_sub_slices +=
         __builtin_popcount(devinfo->subslice_masks[i]);
   }

   for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
      perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);

   perf->sys_vars.eu_threads_count = devinfo->num_thread_per_eu;

   /* The subslice mask builtin contains bits for all slices. Prior to Gen11
    * it had groups of 3 bits for each slice, on Gen11 it's 8 bits for each
    * slice.
    *
    * Ideally equations would be updated to have a slice/subslice query
    * function/operator.
    */
   perf->sys_vars.subslice_mask = 0;

   int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;

   for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
      for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
         if (gen_device_info_subslice_available(devinfo, s, ss))
            perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
      }
   }
}

static bool
init_oa_sys_vars(struct gen_perf_config *perf, const struct gen_device_info *devinfo)
{
   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
      return false;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
      return false;

   memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
   perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
   perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
   perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
   perf->sys_vars.revision = devinfo->revision;
   compute_topology_builtins(perf, devinfo);

   return true;
}

typedef void (*perf_register_oa_queries_t)(struct gen_perf_config *);

static perf_register_oa_queries_t
get_register_queries_function(const struct gen_device_info *devinfo)
{
   if (devinfo->is_haswell)
      return gen_oa_register_queries_hsw;
   if (devinfo->is_cherryview)
      return gen_oa_register_queries_chv;
   if (devinfo->is_broadwell)
      return gen_oa_register_queries_bdw;
   if (devinfo->is_broxton)
      return gen_oa_register_queries_bxt;
   if (devinfo->is_skylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_sklgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_sklgt3;
      if (devinfo->gt == 4)
         return gen_oa_register_queries_sklgt4;
   }
   if (devinfo->is_kabylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_kblgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_kblgt3;
   }
   if (devinfo->is_geminilake)
      return gen_oa_register_queries_glk;
   if (devinfo->is_coffeelake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_cflgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_cflgt3;
   }
   if (devinfo->is_cannonlake)
      return gen_oa_register_queries_cnl;
   if (devinfo->gen == 11) {
      if (devinfo->is_elkhartlake)
         return gen_oa_register_queries_lkf;
      return gen_oa_register_queries_icl;
   }
   if (devinfo->gen == 12)
      return gen_oa_register_queries_tgl;

   return NULL;
}
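
/* Append one pipeline-statistics counter to @query. @numerator/@denominator
 * scale the raw register delta (e.g. PS_INVOCATION_COUNT is registered with
 * a 1/4 ratio on HSW/Gen8, see load_pipeline_statistic_metrics() below).
 */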
static void
add_stat_reg(struct gen_perf_query_info *query, uint32_t reg,
             uint32_t numerator, uint32_t denominator,
             const char *name, const char *description)
{
   struct gen_perf_query_counter *counter;

   assert(query->n_counters < query->max_counters);

   counter = &query->counters[query->n_counters];
   counter->name = name;
   counter->desc = description;
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
   counter->offset = sizeof(uint64_t) * query->n_counters;
   counter->pipeline_stat.reg = reg;
   counter->pipeline_stat.numerator = numerator;
   counter->pipeline_stat.denominator = denominator;

   query->n_counters++;
}

static void
add_basic_stat_reg(struct gen_perf_query_info *query,
                   uint32_t reg, const char *name)
{
   add_stat_reg(query, reg, 1, 1, name, name);
}

static void
load_pipeline_statistic_metrics(struct gen_perf_config *perf_cfg,
                                const struct gen_device_info *devinfo)
{
   struct gen_perf_query_info *query =
      append_query_info(perf_cfg, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   add_basic_stat_reg(query, IA_VERTICES_COUNT,
                      "N vertices submitted");
   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                      "N primitives submitted");
   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                      "N vertex shader invocations");

   if (devinfo->gen == 6) {
      add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                   "SO_PRIM_STORAGE_NEEDED",
                   "N geometry shader stream-out primitives (total)");
      add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                   "SO_NUM_PRIMS_WRITTEN",
                   "N geometry shader stream-out primitives (written)");
   } else {
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                   "N stream-out (stream 0) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                   "N stream-out (stream 1) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                   "N stream-out (stream 2) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                   "N stream-out (stream 3) primitives (total)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                   "N stream-out (stream 0) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                   "N stream-out (stream 1) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                   "N stream-out (stream 2) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                   "N stream-out (stream 3) primitives (written)");
   }

   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                      "N TCS shader invocations");
   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                      "N TES shader invocations");

   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                      "N geometry shader invocations");
   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                      "N geometry shader primitives emitted");

   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                      "N primitives entering clipping");
   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                      "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8) {
      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                   "N fragment shader invocations",
                   "N fragment shader invocations");
   } else {
      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                         "N fragment shader invocations");
   }

   add_basic_stat_reg(query, PS_DEPTH_COUNT,
                      "N z-pass fragments");

   if (devinfo->gen >= 7) {
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}
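
/* Check whether the i915 perf interface is usable (perf_stream_paranoid
 * sysctl present, paranoid setting compatible with our euid, sysfs device
 * dir found), then index the metric sets mesa knows for this device and
 * register/enumerate them with the kernel. Returns false when OA metrics
 * cannot be exposed.
 */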
static bool
load_oa_metrics(struct gen_perf_config *perf, int fd,
                const struct gen_device_info *devinfo)
{
   perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
   bool i915_perf_oa_available = false;
   struct stat sb;

   perf->i915_query_supported = i915_query_perf_config_supported(perf, fd);

   /* The existence of this sysctl parameter implies the kernel supports
    * the i915 perf interface.
    */
   if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {

      /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
       * metrics unless running as root.
       */
      if (devinfo->is_haswell)
         i915_perf_oa_available = true;
      else {
         uint64_t paranoid = 1;

         read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);

         if (paranoid == 0 || geteuid() == 0)
            i915_perf_oa_available = true;
      }
   }

   if (!i915_perf_oa_available ||
       !oa_register ||
       !get_sysfs_dev_dir(perf, fd) ||
       !init_oa_sys_vars(perf, devinfo))
      return false;

   perf->oa_metrics_table =
      _mesa_hash_table_create(perf, _mesa_key_hash_string,
                              _mesa_key_string_equal);

   /* Index all the metric sets mesa knows about before looking to see what
    * the kernel is advertising.
    */
   oa_register(perf);

   if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
       kernel_has_dynamic_config_support(perf, fd))
      init_oa_configs(perf, fd);

   enumerate_sysfs_metrics(perf);

   return true;
}

struct gen_perf_registers *
gen_perf_load_configuration(struct gen_perf_config *perf_cfg, int fd, const char *guid)
{
   if (!perf_cfg->i915_query_supported)
      return NULL;

   struct drm_i915_perf_oa_config i915_config = { 0, };
   if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config))
      return NULL;

   struct gen_perf_registers *config = rzalloc(NULL, struct gen_perf_registers);
   config->n_flex_regs = i915_config.n_flex_regs;
   config->flex_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_flex_regs);
   config->n_mux_regs = i915_config.n_mux_regs;
   config->mux_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_mux_regs);
   config->n_b_counter_regs = i915_config.n_boolean_regs;
   config->b_counter_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_b_counter_regs);

   /*
    * struct gen_perf_query_register_prog maps exactly to the tuple of
    * (register offset, register value) returned by the i915.
    */
   i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);
   i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);
   i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);
   if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config)) {
      ralloc_free(config);
      return NULL;
   }

   return config;
}

uint64_t
gen_perf_store_configuration(struct gen_perf_config *perf_cfg, int fd,
                             const struct gen_perf_registers *config,
                             const char *guid)
{
   if (guid)
      return i915_add_config(perf_cfg, fd, config, guid);

   struct mesa_sha1 sha1_ctx;
   _mesa_sha1_init(&sha1_ctx);

   if (config->flex_regs) {
      _mesa_sha1_update(&sha1_ctx, config->flex_regs,
                        sizeof(config->flex_regs[0]) *
                        config->n_flex_regs);
   }
   if (config->mux_regs) {
      _mesa_sha1_update(&sha1_ctx, config->mux_regs,
                        sizeof(config->mux_regs[0]) *
                        config->n_mux_regs);
   }
   if (config->b_counter_regs) {
      _mesa_sha1_update(&sha1_ctx, config->b_counter_regs,
                        sizeof(config->b_counter_regs[0]) *
                        config->n_b_counter_regs);
   }

   uint8_t hash[20];
   _mesa_sha1_final(&sha1_ctx, hash);

   char formatted_hash[41];
   _mesa_sha1_format(formatted_hash, hash);

   char generated_guid[37];
   snprintf(generated_guid, sizeof(generated_guid),
            "%.8s-%.4s-%.4s-%.4s-%.12s",
            &formatted_hash[0], &formatted_hash[8],
            &formatted_hash[8 + 4], &formatted_hash[8 + 4 + 4],
            &formatted_hash[8 + 4 + 4 + 4]);

   /* Check if already present. */
   uint64_t id;
   if (gen_perf_load_metric_id(perf_cfg, generated_guid, &id))
      return id;

   return i915_add_config(perf_cfg, fd, config, generated_guid);
}

/* Accumulate 32bits OA counters */
static inline void
accumulate_uint32(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += (uint32_t)(*report1 - *report0);
}
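
/* In the A32u40 report format the low 32 bits of the 40-bit A counters live
 * in dwords 4..35 of the report, and the high 8 bits are packed one byte per
 * counter starting at dword 40 (byte offset 160), which is why the code
 * below indexes report + 40 as bytes and a_index + 4 as dwords. Deltas must
 * also account for at most one wrap of the 40-bit value.
 */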
/* Accumulate 40bits OA counters */
static inline void
accumulate_uint40(int a_index,
                  const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
   const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
   uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
   uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
   uint64_t value0 = report0[a_index + 4] | high0;
   uint64_t value1 = report1[a_index + 4] | high1;
   uint64_t delta;

   if (value0 > value1)
      delta = (1ULL << 40) + value1 - value0;
   else
      delta = value1 - value0;

   *accumulator += delta;
}

static void
gen8_read_report_clock_ratios(const uint32_t *report,
                              uint64_t *slice_freq_hz,
                              uint64_t *unslice_freq_hz)
{
   /* The RPT_ID field of the OA reports contains a snapshot of the bits
    * coming from the RP_FREQ_NORMAL register, divided this way:
    *
    * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
    * RPT_ID[10:9]:  RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
    * RPT_ID[8:0]:   RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
    *
    * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    *
    * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    */

   uint32_t unslice_freq = report[0] & 0x1ff;
   uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
   uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
   uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);

   *slice_freq_hz = slice_freq * 16666667ULL;
   *unslice_freq_hz = unslice_freq * 16666667ULL;
}

void
gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                       const struct gen_device_info *devinfo,
                                       const uint32_t *start,
                                       const uint32_t *end)
{
   /* Slice/Unslice frequency is only available in the OA reports when the
    * "Disable OA reports due to clock ratio change" field in
    * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
    * global register (see drivers/gpu/drm/i915/i915_perf.c)
    *
    * Documentation says this should be available on Gen9+ but experimentation
    * shows that Gen8 reports similar values, so we enable it there too.
    */
   if (devinfo->gen < 8)
      return;

   gen8_read_report_clock_ratios(start,
                                 &result->slice_frequency[0],
                                 &result->unslice_frequency[0]);
   gen8_read_report_clock_ratios(end,
                                 &result->slice_frequency[1],
                                 &result->unslice_frequency[1]);
}

void
gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                 const struct gen_perf_query_info *query,
                                 const uint32_t *start,
                                 const uint32_t *end)
{
   int i, idx = 0;

   if (result->hw_id == OA_REPORT_INVALID_CTX_ID &&
       start[2] != OA_REPORT_INVALID_CTX_ID)
      result->hw_id = start[2];
   if (result->reports_accumulated == 0)
      result->begin_timestamp = start[1];
   result->reports_accumulated++;

   switch (query->oa_format) {
   case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
      accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */

      /* 32x 40bit A counters... */
      for (i = 0; i < 32; i++)
         accumulate_uint40(i, start, end, result->accumulator + idx++);

      /* 4x 32bit A counters... */
      for (i = 0; i < 4; i++)
         accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);

      /* 8x 32bit B counters + 8x 32bit C counters... */
      for (i = 0; i < 16; i++)
         accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
      break;

   case I915_OA_FORMAT_A45_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */

      for (i = 0; i < 61; i++)
         accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
      break;

   default:
      unreachable("Can't accumulate OA counters in unknown format");
   }
}

void
gen_perf_query_result_clear(struct gen_perf_query_result *result)
{
   memset(result, 0, sizeof(*result));
   result->hw_id = OA_REPORT_INVALID_CTX_ID; /* invalid */
}

static void
register_mdapi_statistic_query(struct gen_perf_config *perf_cfg,
                               const struct gen_device_info *devinfo)
{
   if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
      return;

   struct gen_perf_query_info *query =
      append_query_info(perf_cfg, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Intel_Raw_Pipeline_Statistics_Query";

   /* The order has to match mdapi_pipeline_metrics. */
   add_basic_stat_reg(query, IA_VERTICES_COUNT,
                      "N vertices submitted");
   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                      "N primitives submitted");
   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                      "N vertex shader invocations");
   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                      "N geometry shader invocations");
   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                      "N geometry shader primitives emitted");
   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                      "N primitives entering clipping");
   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                      "N primitives leaving clipping");
   if (devinfo->is_haswell || devinfo->gen == 8) {
      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                   "N fragment shader invocations",
                   "N fragment shader invocations");
   } else {
      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                         "N fragment shader invocations");
   }
   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                      "N TCS shader invocations");
   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                      "N TES shader invocations");
   if (devinfo->gen >= 7) {
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "N compute shader invocations");
   }

   if (devinfo->gen >= 10) {
      /* Reuse existing CS invocation register until we can expose this new
       * one.
       */
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "Reserved1");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}

static void
fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
                              const char *name,
                              uint32_t data_offset,
                              uint32_t data_size,
                              enum gen_perf_counter_data_type data_type)
{
   struct gen_perf_query_counter *counter = &query->counters[query->n_counters];

   assert(query->n_counters <= query->max_counters);

   counter->name = name;
   counter->desc = "Raw counter value";
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = data_type;
   counter->offset = data_offset;

   query->n_counters++;

   assert(counter->offset + gen_perf_query_counter_get_size(counter) <= query->data_size);
}

#define MDAPI_QUERY_ADD_COUNTER(query, struct_name, field_name, type_name) \
   fill_mdapi_perf_query_counter(query, #field_name,                   \
                                 (uint8_t *) &struct_name.field_name - \
                                 (uint8_t *) &struct_name,             \
                                 sizeof(struct_name.field_name),       \
                                 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
#define MDAPI_QUERY_ADD_ARRAY_COUNTER(ctx, query, struct_name, field_name, idx, type_name) \
   fill_mdapi_perf_query_counter(query,                                \
                                 ralloc_asprintf(ctx, "%s%i", #field_name, idx), \
                                 (uint8_t *) &struct_name.field_name[idx] - \
                                 (uint8_t *) &struct_name,             \
                                 sizeof(struct_name.field_name[0]),    \
                                 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
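
/* Expansion sketch (not part of the original source): e.g.
 *
 *    MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
 *
 * becomes a fill_mdapi_perf_query_counter() call with the name "TotalTime",
 * the field's byte offset within the MDAPI metrics struct, its size, and
 * GEN_PERF_COUNTER_DATA_TYPE_UINT64.
 */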

static void
register_mdapi_oa_query(const struct gen_device_info *devinfo,
                        struct gen_perf_config *perf)
{
   struct gen_perf_query_info *query = NULL;

   /* MDAPI requires different structures for pretty much every generation
    * (right now we have definitions for gen 7 to 11).
    */
   if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
      return;

   switch (devinfo->gen) {
   case 7: {
      query = append_query_info(perf, 1 + 45 + 16 + 7);
      query->oa_format = I915_OA_FORMAT_A45_B8_C8;

      struct gen7_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.ACounters); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, ACounters, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NOACounters); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NOACounters, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      break;
   }
   case 8: {
      query = append_query_info(perf, 2 + 36 + 16 + 16);
      query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;

      struct gen8_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, OaCntr, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NoaCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      break;
   }
   case 9:
   case 10:
   case 11: {
      query = append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
      query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;

      struct gen9_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, OaCntr, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NoaCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      for (int i = 0; i < ARRAY_SIZE(metric_data.UserCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, UserCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UserCntrCfgId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved4, UINT32);
      break;
   }
   default:
      unreachable("Unsupported gen");
      break;
   }

   query->kind = GEN_PERF_QUERY_TYPE_RAW;
   query->name = "Intel_Raw_Hardware_Counters_Set_0_Query";
   query->guid = GEN_PERF_QUERY_GUID_MDAPI;

   {
      /* Accumulation buffer offsets copied from an actual query... */
      const struct gen_perf_query_info *copy_query =
         &perf->queries[0];

      query->gpu_time_offset = copy_query->gpu_time_offset;
      query->gpu_clock_offset = copy_query->gpu_clock_offset;
      query->a_offset = copy_query->a_offset;
      query->b_offset = copy_query->b_offset;
      query->c_offset = copy_query->c_offset;
   }
}

static uint64_t
get_metric_id(struct gen_perf_config *perf,
              const struct gen_perf_query_info *query)
{
   /* These queries are known never to change; their config ID has been
    * loaded upon the first query creation. No need to look them up again.
    */
   if (query->kind == GEN_PERF_QUERY_TYPE_OA)
      return query->oa_metrics_set_id;

   assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);

   /* Raw queries can be reprogrammed by an external application/library.
    * When a raw query is used for the first time its ID is set to a value !=
    * 0. When it stops being used the ID returns to 0. No need to reload the
    * ID when it's already loaded.
    */
   if (query->oa_metrics_set_id != 0) {
      DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
      return query->oa_metrics_set_id;
   }

   struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
   if (!gen_perf_load_metric_id(perf, query->guid,
                                &raw_query->oa_metrics_set_id)) {
      DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
      raw_query->oa_metrics_set_id = 1ULL;
   } else {
      DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
   }
   return query->oa_metrics_set_id;
}

static struct oa_sample_buf *
get_free_sample_buf(struct gen_perf_context *perf_ctx)
{
   struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
   struct oa_sample_buf *buf;

   if (node)
      buf = exec_node_data(struct oa_sample_buf, node, link);
   else {
      buf = ralloc_size(perf_ctx->perf, sizeof(*buf));

      exec_node_init(&buf->link);
      buf->refcount = 0;
   }
   buf->len = 0;

   return buf;
}

static void
reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
      } else
         return;
   }
}

static void
free_sample_bufs(struct gen_perf_context *perf_ctx)
{
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&perf_ctx->free_sample_buffers);
}

/******************************************************************************/

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(struct gen_perf_context *ctx,
                              struct gen_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
   struct gen_perf_config *perf = ctx->perf;
   const struct gen_perf_query_info *query = obj->queryinfo;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);

      perf->vtbl.store_register_mem(ctx->ctx, obj->pipeline_stats.bo,
                                    counter->pipeline_stat.reg, 8,
                                    offset_in_bytes + i * sizeof(uint64_t));
   }
}

static void
snapshot_freq_register(struct gen_perf_context *ctx,
                       struct gen_perf_query_object *query,
                       uint32_t bo_offset)
{
   struct gen_perf_config *perf = ctx->perf;
   const struct gen_device_info *devinfo = ctx->devinfo;

   if (devinfo->gen == 8 && !devinfo->is_cherryview)
      perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo, GEN7_RPSTAT1, 4, bo_offset);
   else if (devinfo->gen >= 9)
      perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo, GEN9_RPSTAT0, 4, bo_offset);
}

static void
gen_perf_close(struct gen_perf_context *perfquery,
               const struct gen_perf_query_info *query)
{
   if (perfquery->oa_stream_fd != -1) {
      close(perfquery->oa_stream_fd);
      perfquery->oa_stream_fd = -1;
   }
   if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      struct gen_perf_query_info *raw_query =
         (struct gen_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}

static bool
gen_perf_open(struct gen_perf_context *perf_ctx,
              int metrics_set_id,
              int report_format,
              int period_exponent,
              int drm_fd,
              uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = ARRAY_SIZE(properties) / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening gen perf OA stream: %m\n");
      return false;
   }

   perf_ctx->oa_stream_fd = fd;

   perf_ctx->current_oa_metrics_set_id = metrics_set_id;
   perf_ctx->current_oa_format = report_format;

   return true;
}
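
/* The i915 perf stream is opened in a disabled state (I915_PERF_FLAG_DISABLED
 * above); the first user enables it here and the last one disables it again
 * in dec_n_users() below.
 */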
static bool
inc_n_users(struct gen_perf_context *perf_ctx)
{
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++perf_ctx->n_oa_users;

   return true;
}

static void
dec_n_users(struct gen_perf_context *perf_ctx)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters. Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --perf_ctx->n_oa_users;
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling gen perf stream: %m\n");
   }
}
void
gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
                      const struct gen_device_info *devinfo,
                      int drm_fd)
{
   load_pipeline_statistic_metrics(perf_cfg, devinfo);
   register_mdapi_statistic_query(perf_cfg, devinfo);
   if (load_oa_metrics(perf_cfg, drm_fd, devinfo))
      register_mdapi_oa_query(devinfo, perf_cfg);
}

void
gen_perf_init_context(struct gen_perf_context *perf_ctx,
                      struct gen_perf_config *perf_cfg,
                      void * ctx,  /* driver context (eg, brw_context) */
                      void * bufmgr,  /* eg brw_bufmgr */
                      const struct gen_device_info *devinfo,
                      uint32_t hw_ctx,
                      int drm_fd)
{
   perf_ctx->perf = perf_cfg;
   perf_ctx->ctx = ctx;
   perf_ctx->bufmgr = bufmgr;
   perf_ctx->drm_fd = drm_fd;
   perf_ctx->hw_ctx = hw_ctx;
   perf_ctx->devinfo = devinfo;

   perf_ctx->unaccumulated =
      ralloc_array(ctx, struct gen_perf_query_object *, 2);
   perf_ctx->unaccumulated_elements = 0;
   perf_ctx->unaccumulated_array_size = 2;

   exec_list_make_empty(&perf_ctx->sample_buffers);
   exec_list_make_empty(&perf_ctx->free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
   exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);

   perf_ctx->oa_stream_fd = -1;
   perf_ctx->next_query_start_report_id = 1000;
}

/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                struct gen_perf_query_object *obj)
{
   if (perf_ctx->unaccumulated_elements >=
       perf_ctx->unaccumulated_array_size)
   {
      perf_ctx->unaccumulated_array_size *= 1.5;
      perf_ctx->unaccumulated =
         reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
                  struct gen_perf_query_object *,
                  perf_ctx->unaccumulated_array_size);
   }

   perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
}

bool
gen_perf_begin_query(struct gen_perf_context *perf_ctx,
                     struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * To achieve this, we stall the pipeline at pixel scoreboard (prevent any
    * additional work from being processed by the pipeline until all pixels of
    * the previous draw have been completed).
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics, periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);

   switch (queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);

      if (perf_ctx->oa_stream_fd != -1 &&
          perf_ctx->current_oa_metrics_set_id != metric_id) {

         if (perf_ctx->n_oa_users != 0) {
            DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
                perf_ctx->current_oa_metrics_set_id, metric_id);
            return false;
         } else
            gen_perf_close(perf_ctx, queryinfo);
      }
1791 /* If the OA counters aren't already on, enable them. */
1792 if (perf_ctx
->oa_stream_fd
== -1) {
1793 const struct gen_device_info
*devinfo
= perf_ctx
->devinfo
;
1795 /* The period_exponent gives a sampling period as follows:
1796 * sample_period = timestamp_period * 2^(period_exponent + 1)
1798 * The timestamps increments every 80ns (HSW), ~52ns (GEN9LP) or
1801 * The counter overflow period is derived from the EuActive counter
1802 * which reads a counter that increments by the number of clock
1803 * cycles multiplied by the number of EUs. It can be calculated as:
1805 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1807 * (E.g. 40 EUs @ 1GHz = ~53ms)
1809 * We select a sampling period inferior to that overflow period to
1810 * ensure we cannot see more than 1 counter overflow, otherwise we
1811 * could loose information.
1814 int a_counter_in_bits
= 32;
1815 if (devinfo
->gen
>= 8)
1816 a_counter_in_bits
= 40;
1818 uint64_t overflow_period
= pow(2, a_counter_in_bits
) / (perf_cfg
->sys_vars
.n_eus
*
1819 /* drop 1GHz freq to have units in nanoseconds */
1822 DBG("A counter overflow period: %"PRIu64
"ns, %"PRIu64
"ms (n_eus=%"PRIu64
")\n",
1823 overflow_period
, overflow_period
/ 1000000ul, perf_cfg
->sys_vars
.n_eus
);
         int period_exponent = 0;
         uint64_t prev_sample_period, next_sample_period;
         for (int e = 0; e < 30; e++) {
            prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
            next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;

            /* Take the previous sampling period, lower than the overflow
             * period.
             */
            if (prev_sample_period < overflow_period &&
                next_sample_period > overflow_period)
               period_exponent = e + 1;
         }

         if (period_exponent == 0) {
            DBG("WARNING: unable to find a sampling exponent\n");
            return false;
         }

         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
             prev_sample_period / 1000000ul);
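         /* Worked example (illustrative numbers only, nothing here is
          * computed at runtime): on Haswell the timestamp period is 80ns
          * (12.5MHz) and, with 40 EUs, the 32 bit A counters overflow after
          * 2^32 / (40 * 2) = ~53,687,091ns (~53ms). The loop above then
          * finds prev_sample_period = 2^19 * 80ns = ~41.9ms and
          * next_sample_period = 2^20 * 80ns = ~83.9ms straddling that
          * overflow period, and settles on period_exponent = 19.
          */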
         if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
                            period_exponent, perf_ctx->drm_fd,
                            perf_ctx->hw_ctx))
            return false;
      } else {
         assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
                perf_ctx->current_oa_format == queryinfo->oa_format);
      }
      if (!inc_n_users(perf_ctx)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (query->oa.bo) {
         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                             "perf. query OA MI_RPC bo",
                                             MI_RPC_BO_SIZE);

#ifdef DEBUG
      /* Pre-filling the BO helps debug whether writes landed. */
      void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
      memset(map, 0x80, MI_RPC_BO_SIZE);
      perf_cfg->vtbl.bo_unmap(query->oa.bo);
#endif
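      /* For reference, a sketch of how the MI_RPC BO is laid out given the
       * offsets defined at the top of this file (the report id pairing is
       * set up just below):
       *
       *   byte 0                            : begin OA report (begin_report_id)
       *   MI_RPC_BO_END_OFFSET_BYTES (2048) : end OA report (begin_report_id + 1)
       *   MI_FREQ_START_OFFSET_BYTES (3072) : RPSTAT freq register at Begin
       *   MI_FREQ_END_OFFSET_BYTES (3076)   : RPSTAT freq register at End
       */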
      query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
      perf_ctx->next_query_start_report_id += 2;

      /* Take a starting OA counter snapshot. */
      perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
                                               query->oa.begin_report_id);
      snapshot_freq_register(perf_ctx, query, MI_FREQ_START_OFFSET_BYTES);

      ++perf_ctx->n_active_oa_queries;
      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
      query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);

      struct oa_sample_buf *buf =
         exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      gen_perf_query_result_clear(&query->oa.result);
      query->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(perf_ctx, query);
      break;
   }
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }

      query->pipeline_stats.bo =
         perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                 "perf. query pipeline stats bo",
                                 STATS_BO_SIZE);

      /* Take starting snapshots. */
      snapshot_statistics_registers(perf_ctx, query, 0);

      ++perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   return true;
}
void
gen_perf_end_query(struct gen_perf_context *perf_ctx,
                   struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in brw_begin_perf_query for
    * corresponding flush.
    */
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled.
       */
      if (!query->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         snapshot_freq_register(perf_ctx, query, MI_FREQ_END_OFFSET_BYTES);
         perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
                                                  MI_RPC_BO_END_OFFSET_BYTES,
                                                  query->oa.begin_report_id + 1);
      }

      --perf_ctx->n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written to
       * query->oa.bo.
       */
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      snapshot_statistics_registers(perf_ctx, query,
                                    STATS_BO_END_OFFSET_BYTES);
      --perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}

enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};
static enum OaReadStatus
read_oa_samples_until(struct gen_perf_context *perf_ctx,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);
   uint32_t last_timestamp =
      tail_buf->len == 0 ? start_timestamp : tail_buf->last_timestamp;

   while (1) {
      struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
      uint32_t offset;
      int len;

      while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);

         if (len < 0) {
            if (errno == EAGAIN) {
               return ((last_timestamp - start_timestamp) < INT32_MAX &&
                       (last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
            } else {
               DBG("Error reading i915 perf samples: %m\n");
            }
         } else {
            DBG("Spurious EOF reading i915 perf samples\n");
         }

         return OA_READ_STATUS_ERROR;
      }

      buf->len = len;
      exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);

      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}
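/* A minimal sketch of the unsigned wrap-around arithmetic relied on in the
 * EAGAIN case above (illustrative values): with 32 bit timestamps,
 * start_timestamp = 0xfffffff0 and last_timestamp = 0x00000010 give
 * last_timestamp - start_timestamp = 0x20, a small positive delta even
 * though the raw counter wrapped, so the "have we read far enough" test
 * stays correct across overflow provided the query spans less than
 * INT32_MAX timestamp ticks.
 */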
/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query,
                          void *current_batch)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulating. */
   assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
          !perf_cfg->vtbl.bo_busy(query->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it. */
   if (query->oa.map == NULL)
      query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error. */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}
void
gen_perf_wait_query(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   struct brw_bo *bo = NULL;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = query->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = query->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (perf_cfg->vtbl.batch_references(current_batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);

   perf_cfg->vtbl.bo_wait_rendering(bo);

   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
       query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
      while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
         ;
   }
}
bool
gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (query->oa.results_accumulated ||
              (query->oa.bo &&
               !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
               !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
               read_oa_samples_for_query(perf_ctx, query, current_batch)));
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (query->pipeline_stats.bo &&
              !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
              !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}
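/* A minimal usage sketch (illustrative only, not part of this file): a
 * frontend would typically poll gen_perf_is_query_ready() and only read the
 * results back once it returns true, e.g.:
 *
 *    if (gen_perf_is_query_ready(perf_ctx, query, current_batch)) {
 *       unsigned written = 0;
 *       gen_perf_get_query_data(perf_ctx, query, data_size,
 *                               data, &written);
 *    }
 *
 * gen_perf_wait_query() is the blocking alternative to this polling.
 */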
/**
 * Remove a query from the global list of unaccumulated queries, either
 * after successfully accumulating the OA reports associated with the
 * query in accumulate_oa_reports() or when discarding unwanted query
 * results.
 */
static void
drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                   struct gen_perf_query_object *query)
{
   for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
      if (perf_ctx->unaccumulated[i] == query) {
         int last_elt = --perf_ctx->unaccumulated_elements;

         if (i == last_elt)
            perf_ctx->unaccumulated[i] = NULL;
         else {
            perf_ctx->unaccumulated[i] =
               perf_ctx->unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */
   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   query->oa.samples_head = NULL;

   reap_old_sample_buffers(perf_ctx);
}
/* In general if we see anything spurious while accumulating results, we
 * don't try to continue accumulating the current query hoping for the
 * best; we scrap anything outstanding, and then hope for the best with
 * new queries.
 */
static void
discard_all_queries(struct gen_perf_context *perf_ctx)
{
   while (perf_ctx->unaccumulated_elements) {
      struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];

      query->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(perf_ctx, query);

      dec_n_users(perf_ctx);
   }
}
/* Looks for the validity bit of the context ID (dword 2) of an OA report. */
static bool
oa_report_ctx_id_valid(const struct gen_device_info *devinfo,
                       const uint32_t *report)
{
   assert(devinfo->gen >= 8);
   if (devinfo->gen == 8)
      return (report[0] & (1 << 25)) != 0;
   return (report[0] & (1 << 16)) != 0;
}
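/* For context, the report dwords consumed here and in accumulate_oa_reports()
 * (as used by this file; see the hardware documentation for the
 * authoritative layout):
 *
 *    report[0]: report id / reason, including the context ID valid bit
 *               tested above (bit 25 on Gen8, bit 16 on Gen9+)
 *    report[1]: GPU timestamp
 *    report[2]: context ID
 */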
/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots. For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
static void
accumulate_oa_reports(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool last_report_ctx_match = true;
   int out_duration = 0;

   assert(query->oa.map != NULL);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }
   /* On Gen12+ OA reports are sourced from per context counters, so we don't
    * ever have to look at the global OA buffer. Yey \o/
    */
   if (perf_ctx->devinfo->gen >= 12) {
      last = start;
      goto end;
   }

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the perf_ctx->sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = query->oa.samples_head->next;
   foreach_list_typed_from(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool report_ctx_match = true;
            bool add = true;

            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - start[1]) > 5000000000) {
               continue;
            }

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - end[1]) <= 5000000000) {
               goto end;
            }

            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas. The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               /* Consider that the current report matches our context only if
                * the report says the report ID is valid.
                */
               report_ctx_match = oa_report_ctx_id_valid(devinfo, report) &&
                                  report[2] == start[2];
               if (report_ctx_match)
                  out_duration = 0;
               else
                  out_duration++;

               /* Only add the delta between <last, report> if the last report
                * was clearly identified as our context, or if we have at most
                * 1 report without a matching ID.
                *
                * The OA unit will sometimes label reports with an invalid
                * context ID when i915 rewrites the execlist submit register
                * with the same context as the one currently running. This
                * happens when i915 wants to notify the HW of a ringbuffer
                * tail register update. We have to consider this report as
                * part of our context as the 3d pipeline behind the OACS unit
                * is still processing the operations started at the previous
                * execlist submission.
                */
               add = last_report_ctx_match && out_duration < 2;
            }

            if (add) {
               gen_perf_query_result_accumulate(&query->oa.result,
                                                query->queryinfo,
                                                last, report);
            } else {
               /* We're not adding the delta because we've identified it's not
                * for the context we filter for. We can consider that the
                * query was split.
                */
               query->oa.result.query_disjoint = true;
            }

            last = report;
            last_report_ctx_match = report_ctx_match;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:

   gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
                                    last, end);

   query->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(perf_ctx, query);
   dec_n_users(perf_ctx);

   return;

error:

   discard_all_queries(perf_ctx);
}
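/* Illustrative trace of the Gen8+ context filtering above (an assumed
 * report stream, not taken from real hardware): begin report B, periodic
 * reports P1 (our context), P2 (emitted on the switch away to another
 * context), P3 (back on our context), end report E. The loop accumulates
 * B->P1 and P1->P2 (time our context was running), drops P2->P3 (another
 * context's progress, marking the query disjoint), and the final
 * accumulate at the end label adds P3->E.
 */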
void
gen_perf_delete_query(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (query->oa.bo) {
         if (!query->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(perf_ctx, query);
            dec_n_users(perf_ctx);
         }

         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.results_accumulated = false;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--perf_ctx->n_query_instances == 0) {
      free_sample_bufs(perf_ctx);
      gen_perf_close(perf_ctx, query->queryinfo);
   }

   free(query);
}
#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)

static void
read_gt_frequency(struct gen_perf_context *perf_ctx,
                  struct gen_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
            end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));

   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}
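/* Worked example (illustrative raw value): on Gen9+ the RPSTAT0 frequency
 * field counts in 50MHz/3 units, so a raw field value of 20 decodes as
 * 20 * 50 / 3 = 333 (MHz), then 333 * 1000000 expresses it in Hz. On
 * Gen7/8 the RPSTAT1 fields count in plain 50MHz units.
 */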
static int
get_oa_counter_data(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);

      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf_cfg, queryinfo,
                                               query->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf_cfg, queryinfo,
                                              query->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter_size;
      }
   }

   return written;
}
static int
get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        size_t data_size,
                        uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   uint8_t *p = data;

   uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);

   return p - data;
}
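/* Worked example of the numerator/denominator scaling above (hypothetical
 * counter): a statistic the HW accumulates in units of 4-wide groups but
 * that is exposed to the application as single items could use
 * numerator = 4, denominator = 1, turning a raw delta of 100 into 400.
 * Counters with numerator == denominator skip the scaling entirely.
 */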
void
gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        int data_size,
                        unsigned *data,
                        unsigned *bytes_written)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   int written = 0;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!query->oa.results_accumulated) {
         read_gt_frequency(perf_ctx, query);
         uint32_t *begin_report = query->oa.map;
         uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
         gen_perf_query_result_read_frequencies(&query->oa.result,
                                                perf_ctx->devinfo,
                                                begin_report, end_report);
         accumulate_oa_reports(perf_ctx, query);
         assert(query->oa.results_accumulated);

         perf_cfg->vtbl.bo_unmap(query->oa.bo);
         query->oa.map = NULL;
      }
      if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &query->oa.result,
                                                     query->oa.gt_frequency[0],
                                                     query->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}
void
gen_perf_dump_query_count(struct gen_perf_context *perf_ctx)
{
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
}
void
gen_perf_dump_query(struct gen_perf_context *ctx,
                    struct gen_perf_query_object *obj,
                    void *current_batch)
{
   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("BO: %-4s OA data: %-10s %-15s\n",
          obj->oa.bo ? "yes," : "no,",
          gen_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("BO: %-4s\n",
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}