/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <dirent.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

#include <drm-uapi/i915_drm.h>

#include "common/gen_gem.h"
#include "perf/gen_perf.h"
#include "perf/gen_perf_mdapi.h"
#include "perf/gen_perf_metrics.h"

#include "dev/gen_debug.h"
#include "dev/gen_device_info.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#define FILE_DEBUG_FLAG DEBUG_PERFMON
#define MI_RPC_BO_SIZE              4096
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))

#define GEN7_RPSTAT1                       0xA01C
#define  GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT   7
#define  GEN7_RPSTAT1_CURR_GT_FREQ_MASK    INTEL_MASK(13, 7)
#define  GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT   0
#define  GEN7_RPSTAT1_PREV_GT_FREQ_MASK    INTEL_MASK(6, 0)

#define GEN9_RPSTAT0                       0xA01C
#define  GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT   23
#define  GEN9_RPSTAT0_CURR_GT_FREQ_MASK    INTEL_MASK(31, 23)
#define  GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT   0
#define  GEN9_RPSTAT0_PREV_GT_FREQ_MASK    INTEL_MASK(8, 0)

#define GEN6_SO_PRIM_STORAGE_NEEDED        0x2280
#define GEN7_SO_PRIM_STORAGE_NEEDED(n)     (0x5240 + (n) * 8)
#define GEN6_SO_NUM_PRIMS_WRITTEN          0x2288
#define GEN7_SO_NUM_PRIMS_WRITTEN(n)       (0x5200 + (n) * 8)

#define MAP_READ  (1 << 0)
#define MAP_WRITE (1 << 1)
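
/* Worked example (an illustrative sketch, not driver code): INTEL_MASK(13, 7)
 * expands to ((1u << ((13)-(7)+1)) - 1) << 7 == 0x7f << 7 == 0x3f80, so given
 * a hypothetical raw register read `rpstat1`, the current GT frequency field
 * would be extracted as:
 *
 *    uint32_t curr_freq_field = (rpstat1 & GEN7_RPSTAT1_CURR_GT_FREQ_MASK) >>
 *                               GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT;
 */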
/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * perf_ctx->sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                |
 *                | ________B_________ _____C___________
 *                | |                | |                |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [  0  ][  0  ]
 *
 * These must be preserved until the leading ref drops to zero:
 *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [  0  ][  1  ]
 *           ^_______ Add a reference and store pointer to node in
 *                    A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                |                       ________B___
 *                |                       |
 *
 * [  0  ][  2  ]
 *           ^_______ Add a reference and store pointer to
 *                    node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                |                        ______B___________
 *                |                        |                |
 * [  0  ][  1  ][  0  ][  0  ][  0  ]
 *           ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [  1  ][  0  ][  0  ][  0  ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                |
 *                | ______B___________ ____C___________
 *                | |                | |              |
 * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
 *           ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [  X  ][  X  ][  X  ][  X  ]
 *                  keeping -> [  1  ][  0  ][  0  ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to the perf_ctx->free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};
/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct gen_perf_query_object
{
   const struct gen_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {

         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of the mapped @bo.
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the perf_ctx->sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query)
          *
          * (See struct oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Frequency of the GT at begin and end of the query.
          */
         uint64_t gt_frequency[2];

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct gen_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};
struct gen_perf_context {
   struct gen_perf_config *perf;

   void * ctx;  /* driver context (eg, brw_context) */
   void * bufmgr;
   const struct gen_device_info *devinfo;

   uint32_t hw_ctx;
   int drm_fd;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the gpu.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended. However, the
    * results have not been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;
};
const struct gen_perf_query_info*
gen_perf_query_info(const struct gen_perf_query_object *query)
{
   return query->queryinfo;
}
struct gen_perf_context *
gen_perf_new_context(void *parent)
{
   struct gen_perf_context *ctx = rzalloc(parent, struct gen_perf_context);
   if (!ctx)
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
   return ctx;
}
struct gen_perf_config *
gen_perf_config(struct gen_perf_context *ctx)
{
   return ctx->perf;
}
struct gen_perf_query_object *
gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
{
   const struct gen_perf_query_info *query =
      &perf_ctx->perf->queries[query_index];
   struct gen_perf_query_object *obj =
      calloc(1, sizeof(struct gen_perf_query_object));

   if (!obj)
      return NULL;

   obj->queryinfo = query;

   perf_ctx->n_query_instances++;
   return obj;
}
int
gen_perf_active_queries(struct gen_perf_context *perf_ctx,
                        const struct gen_perf_query_info *query)
{
   assert(perf_ctx->n_active_oa_queries == 0 || perf_ctx->n_active_pipeline_stats_queries == 0);

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return perf_ctx->n_active_oa_queries;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
static bool
get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
{
   struct stat sb;
   int min, maj;
   DIR *drmdir;
   struct dirent *drm_entry;
   int len;

   perf->sysfs_dev_dir[0] = '\0';

   if (fstat(fd, &sb)) {
      DBG("Failed to stat DRM fd\n");
      return false;
   }

   maj = major(sb.st_rdev);
   min = minor(sb.st_rdev);

   if (!S_ISCHR(sb.st_mode)) {
      DBG("DRM fd is not a character device as expected\n");
      return false;
   }

   len = snprintf(perf->sysfs_dev_dir,
                  sizeof(perf->sysfs_dev_dir),
                  "/sys/dev/char/%d:%d/device/drm", maj, min);
   if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
      DBG("Failed to concatenate sysfs path to drm device\n");
      return false;
   }

   drmdir = opendir(perf->sysfs_dev_dir);
   if (!drmdir) {
      DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
      return false;
   }

   while ((drm_entry = readdir(drmdir))) {
      if ((drm_entry->d_type == DT_DIR ||
           drm_entry->d_type == DT_LNK) &&
           strncmp(drm_entry->d_name, "card", 4) == 0)
      {
         len = snprintf(perf->sysfs_dev_dir,
                        sizeof(perf->sysfs_dev_dir),
                        "/sys/dev/char/%d:%d/device/drm/%s",
                        maj, min, drm_entry->d_name);
         closedir(drmdir);
         if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
            return false;
         else
            return true;
      }
   }

   closedir(drmdir);

   DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
       maj, min);

   return false;
}
static bool
read_file_uint64(const char *file, uint64_t *val)
{
   char buf[32];
   int fd, n;

   fd = open(file, 0);
   if (fd < 0)
      return false;
   while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
          errno == EINTR);
   close(fd);
   if (n < 0)
      return false;

   buf[n] = '\0';
   *val = strtoull(buf, NULL, 0);

   return true;
}
static bool
read_sysfs_drm_device_file_uint64(struct gen_perf_config *perf,
                                  const char *file,
                                  uint64_t *value)
{
   char buf[512];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate sys filename to read u64 from\n");
      return false;
   }

   return read_file_uint64(buf, value);
}
static inline struct gen_perf_query_info *
append_query_info(struct gen_perf_config *perf, int max_counters)
{
   struct gen_perf_query_info *query;

   perf->queries = reralloc(perf, perf->queries,
                            struct gen_perf_query_info,
                            ++perf->n_queries);
   query = &perf->queries[perf->n_queries - 1];
   memset(query, 0, sizeof(*query));

   if (max_counters > 0) {
      query->max_counters = max_counters;
      query->counters =
         rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
   }

   return query;
}
static void
register_oa_config(struct gen_perf_config *perf,
                   const struct gen_perf_query_info *query,
                   uint64_t config_id)
{
   struct gen_perf_query_info *registered_query = append_query_info(perf, 0);

   *registered_query = *query;
   registered_query->oa_metrics_set_id = config_id;
   DBG("metric set registered: id = %" PRIu64", guid = %s\n",
       registered_query->oa_metrics_set_id, query->guid);
}
static void
enumerate_sysfs_metrics(struct gen_perf_config *perf)
{
   DIR *metricsdir = NULL;
   struct dirent *metric_entry;
   char buf[256];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate path to sysfs metrics/ directory\n");
      return;
   }

   metricsdir = opendir(buf);
   if (!metricsdir) {
      DBG("Failed to open %s: %m\n", buf);
      return;
   }

   while ((metric_entry = readdir(metricsdir))) {
      struct hash_entry *entry;

      if ((metric_entry->d_type != DT_DIR &&
           metric_entry->d_type != DT_LNK) ||
          metric_entry->d_name[0] == '.')
         continue;

      DBG("metric set: %s\n", metric_entry->d_name);
      entry = _mesa_hash_table_search(perf->oa_metrics_table,
                                      metric_entry->d_name);
      if (entry) {
         uint64_t id;

         len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
                        perf->sysfs_dev_dir, metric_entry->d_name);
         if (len < 0 || len >= sizeof(buf)) {
            DBG("Failed to concatenate path to sysfs metric id file\n");
            continue;
         }

         if (!read_file_uint64(buf, &id)) {
            DBG("Failed to read metric set id from %s: %m", buf);
            continue;
         }

         register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
      } else
         DBG("metric set not known by mesa (skipping)\n");
   }

   closedir(metricsdir);
}
static bool
kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
{
   uint64_t invalid_config_id = UINT64_MAX;

   return gen_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
                    &invalid_config_id) < 0 && errno == ENOENT;
}
bool
gen_perf_load_metric_id(struct gen_perf_config *perf_cfg,
                        const char *guid,
                        uint64_t *metric_id)
{
   char config_path[280];

   snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
            perf_cfg->sysfs_dev_dir, guid);

   /* Don't recreate already loaded configs. */
   return read_file_uint64(config_path, metric_id);
}
static void
init_oa_configs(struct gen_perf_config *perf, int fd)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      const struct gen_perf_query_info *query = entry->data;
      struct drm_i915_perf_oa_config config;
      uint64_t config_id;
      int ret;

      if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
         DBG("metric set: %s (already loaded)\n", query->guid);
         register_oa_config(perf, query, config_id);
         continue;
      }

      memset(&config, 0, sizeof(config));

      memcpy(config.uuid, query->guid, sizeof(config.uuid));

      config.n_mux_regs = query->config.n_mux_regs;
      config.mux_regs_ptr = (uintptr_t) query->config.mux_regs;

      config.n_boolean_regs = query->config.n_b_counter_regs;
      config.boolean_regs_ptr = (uintptr_t) query->config.b_counter_regs;

      config.n_flex_regs = query->config.n_flex_regs;
      config.flex_regs_ptr = (uintptr_t) query->config.flex_regs;

      ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
      if (ret < 0) {
         DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
             query->name, query->guid, strerror(errno));
         continue;
      }

      register_oa_config(perf, query, ret);
      DBG("metric set: %s (added)\n", query->guid);
   }
}
static void
compute_topology_builtins(struct gen_perf_config *perf,
                          const struct gen_device_info *devinfo)
{
   perf->sys_vars.slice_mask = devinfo->slice_masks;
   perf->sys_vars.n_eu_slices = devinfo->num_slices;

   for (int i = 0; i < sizeof(devinfo->subslice_masks[i]); i++) {
      perf->sys_vars.n_eu_sub_slices +=
         __builtin_popcount(devinfo->subslice_masks[i]);
   }

   for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
      perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);

   perf->sys_vars.eu_threads_count = devinfo->num_thread_per_eu;

   /* The subslice mask builtin contains bits for all slices. Prior to Gen11
    * it had groups of 3bits for each slice, on Gen11 it's 8bits for each
    * slice.
    *
    * Ideally equations would be updated to have a slice/subslice query
    * function/operator.
    */
   perf->sys_vars.subslice_mask = 0;

   int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;

   for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
      for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
         if (gen_device_info_subslice_available(devinfo, s, ss))
            perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
      }
   }
}
static bool
init_oa_sys_vars(struct gen_perf_config *perf, const struct gen_device_info *devinfo)
{
   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
      return false;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
      return false;

   memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
   perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
   perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
   perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
   perf->sys_vars.revision = devinfo->revision;
   compute_topology_builtins(perf, devinfo);

   return true;
}
typedef void (*perf_register_oa_queries_t)(struct gen_perf_config *);

static perf_register_oa_queries_t
get_register_queries_function(const struct gen_device_info *devinfo)
{
   if (devinfo->is_haswell)
      return gen_oa_register_queries_hsw;
   if (devinfo->is_cherryview)
      return gen_oa_register_queries_chv;
   if (devinfo->is_broadwell)
      return gen_oa_register_queries_bdw;
   if (devinfo->is_broxton)
      return gen_oa_register_queries_bxt;
   if (devinfo->is_skylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_sklgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_sklgt3;
      if (devinfo->gt == 4)
         return gen_oa_register_queries_sklgt4;
   }
   if (devinfo->is_kabylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_kblgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_kblgt3;
   }
   if (devinfo->is_geminilake)
      return gen_oa_register_queries_glk;
   if (devinfo->is_coffeelake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_cflgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_cflgt3;
   }
   if (devinfo->is_cannonlake)
      return gen_oa_register_queries_cnl;
   if (devinfo->gen == 11)
      return gen_oa_register_queries_icl;

   return NULL;
}
static void
add_stat_reg(struct gen_perf_query_info *query, uint32_t reg,
             uint32_t numerator, uint32_t denominator,
             const char *name, const char *description)
{
   struct gen_perf_query_counter *counter;

   assert(query->n_counters < query->max_counters);

   counter = &query->counters[query->n_counters];
   counter->name = name;
   counter->desc = description;
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
   counter->offset = sizeof(uint64_t) * query->n_counters;
   counter->pipeline_stat.reg = reg;
   counter->pipeline_stat.numerator = numerator;
   counter->pipeline_stat.denominator = denominator;

   query->n_counters++;
}
static void
add_basic_stat_reg(struct gen_perf_query_info *query,
                   uint32_t reg, const char *name)
{
   add_stat_reg(query, reg, 1, 1, name, name);
}
static void
load_pipeline_statistic_metrics(struct gen_perf_config *perf_cfg,
                                const struct gen_device_info *devinfo)
{
   struct gen_perf_query_info *query =
      append_query_info(perf_cfg, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   add_basic_stat_reg(query, IA_VERTICES_COUNT,
                      "N vertices submitted");
   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                      "N primitives submitted");
   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                      "N vertex shader invocations");

   if (devinfo->gen == 6) {
      add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                   "SO_PRIM_STORAGE_NEEDED",
                   "N geometry shader stream-out primitives (total)");
      add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                   "SO_NUM_PRIMS_WRITTEN",
                   "N geometry shader stream-out primitives (written)");
   } else {
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                   "N stream-out (stream 0) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                   "N stream-out (stream 1) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                   "N stream-out (stream 2) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                   "N stream-out (stream 3) primitives (total)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                   "N stream-out (stream 0) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                   "N stream-out (stream 1) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                   "N stream-out (stream 2) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                   "N stream-out (stream 3) primitives (written)");
   }

   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                      "N TCS shader invocations");
   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                      "N TES shader invocations");

   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                      "N geometry shader invocations");
   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                      "N geometry shader primitives emitted");

   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                      "N primitives entering clipping");
   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                      "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8) {
      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                   "N fragment shader invocations",
                   "N fragment shader invocations");
   } else {
      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                         "N fragment shader invocations");
   }

   add_basic_stat_reg(query, PS_DEPTH_COUNT,
                      "N z-pass fragments");

   if (devinfo->gen >= 7) {
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}
static bool
load_oa_metrics(struct gen_perf_config *perf, int fd,
                const struct gen_device_info *devinfo)
{
   perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
   bool i915_perf_oa_available = false;
   struct stat sb;

   /* The existence of this sysctl parameter implies the kernel supports
    * the i915 perf interface.
    */
   if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {

      /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
       * metrics unless running as root.
       */
      if (devinfo->is_haswell)
         i915_perf_oa_available = true;
      else {
         uint64_t paranoid = 1;

         read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);

         if (paranoid == 0 || geteuid() == 0)
            i915_perf_oa_available = true;
      }
   }

   if (!i915_perf_oa_available ||
       !oa_register ||
       !get_sysfs_dev_dir(perf, fd) ||
       !init_oa_sys_vars(perf, devinfo))
      return false;

   perf->oa_metrics_table =
      _mesa_hash_table_create(perf, _mesa_key_hash_string,
                              _mesa_key_string_equal);

   /* Index all the metric sets mesa knows about before looking to see what
    * the kernel is advertising.
    */
   oa_register(perf);

   if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
       kernel_has_dynamic_config_support(perf, fd))
      init_oa_configs(perf, fd);

   enumerate_sysfs_metrics(perf);

   return true;
}
/* Accumulate 32bits OA counters */
static inline void
accumulate_uint32(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += (uint32_t)(*report1 - *report0);
}
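
/* Worked example (illustrative only): if *report0 == 0xfffffff0 and
 * *report1 == 0x00000010, the subtraction underflows and the (uint32_t)
 * cast yields 0x20 -- exactly the 0x20 ticks that elapsed across the
 * 2^32 wraparound -- so the accumulated delta stays correct.
 */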
/* Accumulate 40bits OA counters */
static inline void
accumulate_uint40(int a_index,
                  const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
   const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
   uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
   uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
   uint64_t value0 = report0[a_index + 4] | high0;
   uint64_t value1 = report1[a_index + 4] | high1;
   uint64_t delta;

   if (value0 > value1)
      delta = (1ULL << 40) + value1 - value0;
   else
      delta = value1 - value0;

   *accumulator += delta;
}
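
/* Worked example (illustrative only): with value0 == 0xfffffffff0 (just
 * below 2^40) and value1 == 0x10, value0 > value1 so the wraparound branch
 * computes delta = 2^40 + 0x10 - 0xfffffffff0 = 0x20, matching the 0x20
 * ticks that actually elapsed across the 40 bit rollover.
 */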
static void
gen8_read_report_clock_ratios(const uint32_t *report,
                              uint64_t *slice_freq_hz,
                              uint64_t *unslice_freq_hz)
{
   /* The lower 16bits of the RPT_ID field of the OA reports contains a
    * snapshot of the bits coming from the RP_FREQ_NORMAL register and is
    * divided this way :
    *
    * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
    * RPT_ID[10:9]:  RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
    * RPT_ID[8:0]:   RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
    *
    * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    *
    * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    */

   uint32_t unslice_freq = report[0] & 0x1ff;
   uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
   uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
   uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);

   *slice_freq_hz = slice_freq * 16666667ULL;
   *unslice_freq_hz = unslice_freq * 16666667ULL;
}
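
/* Worked example (illustrative numbers only): a report[0] value decoding to
 * slice_freq_low == 0x24 and slice_freq_high == 0 gives slice_freq == 36,
 * so *slice_freq_hz = 36 * 16666667 ~= 600 MHz -- each unit representing one
 * multiple of the ~16.67MHz 1xclk derived from the 33.33MHz 2xclk.
 */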
void
gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                       const struct gen_device_info *devinfo,
                                       const uint32_t *start,
                                       const uint32_t *end)
{
   /* Slice/Unslice frequency is only available in the OA reports when the
    * "Disable OA reports due to clock ratio change" field in
    * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
    * global register (see drivers/gpu/drm/i915/i915_perf.c)
    *
    * Documentation says this should be available on Gen9+ but experimentation
    * shows that Gen8 reports similar values, so we enable it there too.
    */
   if (devinfo->gen < 8)
      return;

   gen8_read_report_clock_ratios(start,
                                 &result->slice_frequency[0],
                                 &result->unslice_frequency[0]);
   gen8_read_report_clock_ratios(end,
                                 &result->slice_frequency[1],
                                 &result->unslice_frequency[1]);
}
void
gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                 const struct gen_perf_query_info *query,
                                 const uint32_t *start,
                                 const uint32_t *end)
{
   int i, idx = 0;

   result->hw_id = start[2];
   result->reports_accumulated++;

   switch (query->oa_format) {
   case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
      accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */

      /* 32x 40bit A counters... */
      for (i = 0; i < 32; i++)
         accumulate_uint40(i, start, end, result->accumulator + idx++);

      /* 4x 32bit A counters... */
      for (i = 0; i < 4; i++)
         accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);

      /* 8x 32bit B counters + 8x 32bit C counters... */
      for (i = 0; i < 16; i++)
         accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
      break;

   case I915_OA_FORMAT_A45_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */

      for (i = 0; i < 61; i++)
         accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
      break;

   default:
      unreachable("Can't accumulate OA counters in unknown format");
   }
}
void
gen_perf_query_result_clear(struct gen_perf_query_result *result)
{
   memset(result, 0, sizeof(*result));
   result->hw_id = 0xffffffff; /* invalid */
}
static void
register_mdapi_statistic_query(struct gen_perf_config *perf_cfg,
                               const struct gen_device_info *devinfo)
{
   if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
      return;

   struct gen_perf_query_info *query =
      append_query_info(perf_cfg, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Intel_Raw_Pipeline_Statistics_Query";

   /* The order has to match mdapi_pipeline_metrics. */
   add_basic_stat_reg(query, IA_VERTICES_COUNT,
                      "N vertices submitted");
   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                      "N primitives submitted");
   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                      "N vertex shader invocations");
   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                      "N geometry shader invocations");
   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                      "N geometry shader primitives emitted");
   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                      "N primitives entering clipping");
   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                      "N primitives leaving clipping");
   if (devinfo->is_haswell || devinfo->gen == 8) {
      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                   "N fragment shader invocations",
                   "N fragment shader invocations");
   } else {
      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                         "N fragment shader invocations");
   }
   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                      "N TCS shader invocations");
   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                      "N TES shader invocations");
   if (devinfo->gen >= 7) {
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "N compute shader invocations");
   }

   if (devinfo->gen >= 10) {
      /* Reuse existing CS invocation register until we can expose this new
       * one.
       */
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "Reserved1");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}
static void
fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
                              const char *name,
                              uint32_t data_offset,
                              uint32_t data_size,
                              enum gen_perf_counter_data_type data_type)
{
   struct gen_perf_query_counter *counter = &query->counters[query->n_counters];

   assert(query->n_counters <= query->max_counters);

   counter->name = name;
   counter->desc = "Raw counter value";
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = data_type;
   counter->offset = data_offset;

   query->n_counters++;

   assert(counter->offset + gen_perf_query_counter_get_size(counter) <= query->data_size);
}
#define MDAPI_QUERY_ADD_COUNTER(query, struct_name, field_name, type_name) \
   fill_mdapi_perf_query_counter(query, #field_name,                   \
                                 (uint8_t *) &struct_name.field_name - \
                                 (uint8_t *) &struct_name,             \
                                 sizeof(struct_name.field_name),       \
                                 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
#define MDAPI_QUERY_ADD_ARRAY_COUNTER(ctx, query, struct_name, field_name, idx, type_name) \
   fill_mdapi_perf_query_counter(query,                                 \
                                 ralloc_asprintf(ctx, "%s%i", #field_name, idx), \
                                 (uint8_t *) &struct_name.field_name[idx] - \
                                 (uint8_t *) &struct_name,              \
                                 sizeof(struct_name.field_name[0]),     \
                                 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
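
/* For example (a sketch of the expansion, assuming a local gen8 metric_data):
 *
 *    MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
 *
 * expands to:
 *
 *    fill_mdapi_perf_query_counter(query, "GPUTicks",
 *                                  (uint8_t *) &metric_data.GPUTicks -
 *                                  (uint8_t *) &metric_data,
 *                                  sizeof(metric_data.GPUTicks),
 *                                  GEN_PERF_COUNTER_DATA_TYPE_UINT64);
 */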
static void
register_mdapi_oa_query(const struct gen_device_info *devinfo,
                        struct gen_perf_config *perf)
{
   struct gen_perf_query_info *query = NULL;

   /* MDAPI requires different structures for pretty much every generation
    * (right now we have definitions for gen 7 to 11).
    */
   if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
      return;

   switch (devinfo->gen) {
   case 7: {
      query = append_query_info(perf, 1 + 45 + 16 + 7);
      query->oa_format = I915_OA_FORMAT_A45_B8_C8;

      struct gen7_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.ACounters); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, ACounters, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NOACounters); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NOACounters, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      break;
   }
   case 8: {
      query = append_query_info(perf, 2 + 36 + 16 + 16);
      query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;

      struct gen8_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, OaCntr, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NoaCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      break;
   }
   case 9:
   case 10:
   case 11: {
      query = append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
      query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;

      struct gen9_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, OaCntr, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NoaCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      for (int i = 0; i < ARRAY_SIZE(metric_data.UserCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, UserCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UserCntrCfgId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved4, UINT32);
      break;
   }
   default:
      unreachable("Unsupported gen");
      break;
   }

   query->kind = GEN_PERF_QUERY_TYPE_RAW;
   query->name = "Intel_Raw_Hardware_Counters_Set_0_Query";
   query->guid = GEN_PERF_QUERY_GUID_MDAPI;

   {
      /* Accumulation buffer offsets copied from an actual query... */
      const struct gen_perf_query_info *copy_query =
         &perf->queries[0];

      query->gpu_time_offset = copy_query->gpu_time_offset;
      query->gpu_clock_offset = copy_query->gpu_clock_offset;
      query->a_offset = copy_query->a_offset;
      query->b_offset = copy_query->b_offset;
      query->c_offset = copy_query->c_offset;
   }
}
static uint64_t
get_metric_id(struct gen_perf_config *perf,
              const struct gen_perf_query_info *query)
{
   /* These queries are known not to ever change, their config ID has been
    * loaded upon the first query creation. No need to look them up again.
    */
   if (query->kind == GEN_PERF_QUERY_TYPE_OA)
      return query->oa_metrics_set_id;

   assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);

   /* Raw queries can be reprogrammed by an external application/library.
    * When a raw query is used for the first time its ID is set to a value !=
    * 0. When it stops being used the ID returns to 0. No need to reload the
    * ID when it's already loaded.
    */
   if (query->oa_metrics_set_id != 0) {
      DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
      return query->oa_metrics_set_id;
   }

   struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
   if (!gen_perf_load_metric_id(perf, query->guid,
                                &raw_query->oa_metrics_set_id)) {
      DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
      raw_query->oa_metrics_set_id = 1ULL;
   } else {
      DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
   }
   return query->oa_metrics_set_id;
}
static struct oa_sample_buf *
get_free_sample_buf(struct gen_perf_context *perf_ctx)
{
   struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
   struct oa_sample_buf *buf;

   if (node)
      buf = exec_node_data(struct oa_sample_buf, node, link);
   else {
      buf = ralloc_size(perf_ctx->perf, sizeof(*buf));

      exec_node_init(&buf->link);
      buf->refcount = 0;
      buf->len = 0;
   }

   return buf;
}
static void
reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
      } else
         return;
   }
}
static void
free_sample_bufs(struct gen_perf_context *perf_ctx)
{
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&perf_ctx->free_sample_buffers);
}
/******************************************************************************/

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(void *context,
                              struct gen_perf_config *perf,
                              struct gen_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
   const struct gen_perf_query_info *query = obj->queryinfo;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);

      perf->vtbl.store_register_mem64(context, obj->pipeline_stats.bo,
                                      counter->pipeline_stat.reg,
                                      offset_in_bytes + i * sizeof(uint64_t));
   }
}
static void
gen_perf_close(struct gen_perf_context *perfquery,
               const struct gen_perf_query_info *query)
{
   if (perfquery->oa_stream_fd != -1) {
      close(perfquery->oa_stream_fd);
      perfquery->oa_stream_fd = -1;
   }
   if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      struct gen_perf_query_info *raw_query =
         (struct gen_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}
static bool
gen_perf_open(struct gen_perf_context *perf_ctx,
              uint64_t metrics_set_id,
              int report_format,
              int period_exponent,
              int drm_fd,
              uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = ARRAY_SIZE(properties) / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening gen perf OA stream: %m\n");
      return false;
   }

   perf_ctx->oa_stream_fd = fd;

   perf_ctx->current_oa_metrics_set_id = metrics_set_id;
   perf_ctx->current_oa_format = report_format;

   return true;
}
static bool
inc_n_users(struct gen_perf_context *perf_ctx)
{
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++perf_ctx->n_oa_users;

   return true;
}
static void
dec_n_users(struct gen_perf_context *perf_ctx)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters. Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --perf_ctx->n_oa_users;
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling gen perf stream: %m\n");
   }
}
void
gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
                      const struct gen_device_info *devinfo,
                      int drm_fd)
{
   load_pipeline_statistic_metrics(perf_cfg, devinfo);
   register_mdapi_statistic_query(perf_cfg, devinfo);
   if (load_oa_metrics(perf_cfg, drm_fd, devinfo))
      register_mdapi_oa_query(devinfo, perf_cfg);
}
void
gen_perf_init_context(struct gen_perf_context *perf_ctx,
                      struct gen_perf_config *perf_cfg,
                      void * ctx,  /* driver context (eg, brw_context) */
                      void * bufmgr,  /* eg brw_bufmgr */
                      const struct gen_device_info *devinfo,
                      uint32_t hw_ctx,
                      int drm_fd)
{
   perf_ctx->perf = perf_cfg;
   perf_ctx->ctx = ctx;
   perf_ctx->bufmgr = bufmgr;
   perf_ctx->drm_fd = drm_fd;
   perf_ctx->hw_ctx = hw_ctx;
   perf_ctx->devinfo = devinfo;

   perf_ctx->unaccumulated =
      ralloc_array(ctx, struct gen_perf_query_object *, 2);
   perf_ctx->unaccumulated_elements = 0;
   perf_ctx->unaccumulated_array_size = 2;

   exec_list_make_empty(&perf_ctx->sample_buffers);
   exec_list_make_empty(&perf_ctx->free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
   exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);

   perf_ctx->oa_stream_fd = -1;
   perf_ctx->next_query_start_report_id = 1000;
}
/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                struct gen_perf_query_object *obj)
{
   if (perf_ctx->unaccumulated_elements >=
       perf_ctx->unaccumulated_array_size)
   {
      perf_ctx->unaccumulated_array_size *= 1.5;
      perf_ctx->unaccumulated =
         reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
                  struct gen_perf_query_object *,
                  perf_ctx->unaccumulated_array_size);
   }

   perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
}
bool
gen_perf_begin_query(struct gen_perf_context *perf_ctx,
                     struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * Theoretically there could be opportunities to minimize how much of the
    * GPU pipeline is drained, or that we stall for, when we know what specific
    * units the performance counters being queried relate to but we don't
    * currently attempt to be clever here.
    *
    * Note: with our current simple approach here then for back-to-back queries
    * we will redundantly emit duplicate commands to synchronize the command
    * streamer with the rest of the GPU pipeline, but we assume that in HW the
    * second synchronization is effectively a NOOP.
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics then periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   perf_cfg->vtbl.emit_mi_flush(perf_ctx->ctx);

   switch (queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);

      if (perf_ctx->oa_stream_fd != -1 &&
          perf_ctx->current_oa_metrics_set_id != metric_id) {

         if (perf_ctx->n_oa_users != 0) {
            DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
                perf_ctx->current_oa_metrics_set_id, metric_id);
            return false;
         } else
            gen_perf_close(perf_ctx, queryinfo);
      }

      /* If the OA counters aren't already on, enable them. */
      if (perf_ctx->oa_stream_fd == -1) {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         /* The period_exponent gives a sampling period as follows:
          *   sample_period = timestamp_period * 2^(period_exponent + 1)
          *
          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
          * ~83ns (GEN8/9).
          *
          * The counter overflow period is derived from the EuActive counter
          * which reads a counter that increments by the number of clock
          * cycles multiplied by the number of EUs. It can be calculated as:
          *
          * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
          *
          * (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We select a sampling period inferior to that overflow period to
          * ensure we cannot see more than 1 counter overflow, otherwise we
          * could lose information.
          */
= 32;
1642 if (devinfo
->gen
>= 8)
1643 a_counter_in_bits
= 40;
1645 uint64_t overflow_period
= pow(2, a_counter_in_bits
) / (perf_cfg
->sys_vars
.n_eus
*
1646 /* drop 1GHz freq to have units in nanoseconds */
1649 DBG("A counter overflow period: %"PRIu64
"ns, %"PRIu64
"ms (n_eus=%"PRIu64
")\n",
1650 overflow_period
, overflow_period
/ 1000000ul, perf_cfg
->sys_vars
.n_eus
);
1652 int period_exponent
= 0;
1653 uint64_t prev_sample_period
, next_sample_period
;
1654 for (int e
= 0; e
< 30; e
++) {
1655 prev_sample_period
= 1000000000ull * pow(2, e
+ 1) / devinfo
->timestamp_frequency
;
1656 next_sample_period
= 1000000000ull * pow(2, e
+ 2) / devinfo
->timestamp_frequency
;
1658 /* Take the previous sampling period, lower than the overflow
1661 if (prev_sample_period
< overflow_period
&&
1662 next_sample_period
> overflow_period
)
1663 period_exponent
= e
+ 1;
1666 if (period_exponent
== 0) {
1667 DBG("WARNING: enable to find a sampling exponent\n");
1671 DBG("OA sampling exponent: %i ~= %"PRIu64
"ms\n", period_exponent
,
1672 prev_sample_period
/ 1000000ul);
1674 if (!gen_perf_open(perf_ctx
, metric_id
, queryinfo
->oa_format
,
1675 period_exponent
, perf_ctx
->drm_fd
,
1679 assert(perf_ctx
->current_oa_metrics_set_id
== metric_id
&&
1680 perf_ctx
->current_oa_format
== queryinfo
->oa_format
);
1683 if (!inc_n_users(perf_ctx
)) {
1684 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1689 perf_cfg
->vtbl
.bo_unreference(query
->oa
.bo
);
1690 query
->oa
.bo
= NULL
;
1693 query
->oa
.bo
= perf_cfg
->vtbl
.bo_alloc(perf_ctx
->bufmgr
,
1694 "perf. query OA MI_RPC bo",
1697 /* Pre-filling the BO helps debug whether writes landed. */
1698 void *map
= perf_cfg
->vtbl
.bo_map(perf_ctx
->ctx
, query
->oa
.bo
, MAP_WRITE
);
1699 memset(map
, 0x80, MI_RPC_BO_SIZE
);
1700 perf_cfg
->vtbl
.bo_unmap(query
->oa
.bo
);
1703 query
->oa
.begin_report_id
= perf_ctx
->next_query_start_report_id
;
1704 perf_ctx
->next_query_start_report_id
+= 2;
1706 /* We flush the batchbuffer here to minimize the chances that MI_RPC
1707 * delimiting commands end up in different batchbuffers. If that's the
1708 * case, the measurement will include the time it takes for the kernel
1709 * scheduler to load a new request into the hardware. This is manifested in
1710 * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
1712 perf_cfg
->vtbl
.batchbuffer_flush(perf_ctx
->ctx
, __FILE__
, __LINE__
);
1714 /* Take a starting OA counter snapshot. */
1715 perf_cfg
->vtbl
.emit_mi_report_perf_count(perf_ctx
->ctx
, query
->oa
.bo
, 0,
1716 query
->oa
.begin_report_id
);
1717 perf_cfg
->vtbl
.capture_frequency_stat_register(perf_ctx
->ctx
, query
->oa
.bo
,
1718 MI_FREQ_START_OFFSET_BYTES
);
1720 ++perf_ctx
->n_active_oa_queries
;
1722 /* No already-buffered samples can possibly be associated with this query
1723 * so create a marker within the list of sample buffers enabling us to
1724 * easily ignore earlier samples when processing this query after
1727 assert(!exec_list_is_empty(&perf_ctx
->sample_buffers
));
1728 query
->oa
.samples_head
= exec_list_get_tail(&perf_ctx
->sample_buffers
);
1730 struct oa_sample_buf
*buf
=
1731 exec_node_data(struct oa_sample_buf
, query
->oa
.samples_head
, link
);
1733 /* This reference will ensure that future/following sample
1734 * buffers (that may relate to this query) can't be freed until
1735 * this drops to zero.
1739 gen_perf_query_result_clear(&query
->oa
.result
);
1740 query
->oa
.results_accumulated
= false;
1742 add_to_unaccumulated_query_list(perf_ctx
, query
);
1746 case GEN_PERF_QUERY_TYPE_PIPELINE
:
1747 if (query
->pipeline_stats
.bo
) {
1748 perf_cfg
->vtbl
.bo_unreference(query
->pipeline_stats
.bo
);
1749 query
->pipeline_stats
.bo
= NULL
;
1752 query
->pipeline_stats
.bo
=
1753 perf_cfg
->vtbl
.bo_alloc(perf_ctx
->bufmgr
,
1754 "perf. query pipeline stats bo",
1757 /* Take starting snapshots. */
1758 snapshot_statistics_registers(perf_ctx
->ctx
, perf_cfg
, query
, 0);
1760 ++perf_ctx
->n_active_pipeline_stats_queries
;
1764 unreachable("Unknown query type");
void
gen_perf_end_query(struct gen_perf_context *perf_ctx,
                   struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in gen_perf_begin_query for
    * corresponding flush.
    */
   perf_cfg->vtbl.emit_mi_flush(perf_ctx->ctx);

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled
       */
      if (!query->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         perf_cfg->vtbl.capture_frequency_stat_register(perf_ctx->ctx, query->oa.bo,
                                                        MI_FREQ_END_OFFSET_BYTES);
         perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
                                                  MI_RPC_BO_END_OFFSET_BYTES,
                                                  query->oa.begin_report_id + 1);
      }

      --perf_ctx->n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written
       * to query->oa.bo
       */
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      snapshot_statistics_registers(perf_ctx->ctx, perf_cfg, query,
                                    STATS_BO_END_OFFSET_BYTES);
      --perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
1824 OA_READ_STATUS_ERROR
,
1825 OA_READ_STATUS_UNFINISHED
,
1826 OA_READ_STATUS_FINISHED
,

static enum OaReadStatus
read_oa_samples_until(struct gen_perf_context *perf_ctx,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);
   uint32_t last_timestamp = tail_buf->last_timestamp;

   while (1) {
      struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
      uint32_t offset;
      int len;

      while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);

         if (len < 0) {
            if (errno == EAGAIN) {
               return ((last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
            } else {
               DBG("Error reading i915 perf samples: %m\n");
            }
         } else {
            DBG("Spurious EOF reading i915 perf samples\n");
         }

         return OA_READ_STATUS_ERROR;
      }

      buf->len = len;
      exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);

      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}
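
/* Illustrative note (not from the original source): the EAGAIN path above
 * compares 32-bit timestamp *distances* rather than raw values, so it stays
 * correct when the GPU timestamp wraps between the start and end reports.
 * With hypothetical values:
 *
 *    uint32_t start_timestamp = 0xfffffff0;   // shortly before the wrap
 *    uint32_t end_timestamp   = 0x00000020;   // shortly after the wrap
 *    uint32_t last_timestamp  = 0x00000010;
 *
 *    last_timestamp - start_timestamp == 0x20   // modular arithmetic
 *    end_timestamp  - start_timestamp == 0x30
 *
 * 0x20 < 0x30, so the status is OA_READ_STATUS_UNFINISHED even though a
 * direct last_timestamp < start_timestamp comparison would claim otherwise.
 */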

/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query,
                          void *current_batch)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulating. */
   assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
          !perf_cfg->vtbl.bo_busy(query->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it. */
   if (query->oa.map == NULL)
      query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error. */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}

void
gen_perf_wait_query(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   struct brw_bo *bo = NULL;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = query->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = query->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (perf_cfg->vtbl.batch_references(current_batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);

   perf_cfg->vtbl.bo_wait_rendering(bo);

   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
       query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
      while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
         ;
   }
}

bool
gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (query->oa.results_accumulated ||
              (query->oa.bo &&
               !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
               !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
               read_oa_samples_for_query(perf_ctx, query, current_batch)));
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (query->pipeline_stats.bo &&
              !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
              !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
   }

   return false;
}
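
/* Hypothetical usage sketch (not from the original source); the ordering
 * below is implied by the functions in this file, while the 'query', 'batch',
 * 'data' and 'written' names are illustrative assumptions:
 *
 *    gen_perf_begin_query(perf_ctx, query);
 *    ... emit the workload to be measured ...
 *    gen_perf_end_query(perf_ctx, query);
 *
 *    while (!gen_perf_is_query_ready(perf_ctx, query, batch))
 *       ;   // or block in gen_perf_wait_query(perf_ctx, query, batch)
 *
 *    gen_perf_get_query_data(perf_ctx, query, data_size, data, &written);
 *    gen_perf_delete_query(perf_ctx, query);
 */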

/**
 * Remove a query from the global list of unaccumulated queries, either
 * after successfully accumulating the OA reports associated with the
 * query in accumulate_oa_reports() or when discarding unwanted query
 * results.
 */
static void
drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                   struct gen_perf_query_object *query)
{
   for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
      if (perf_ctx->unaccumulated[i] == query) {
         int last_elt = --perf_ctx->unaccumulated_elements;

         if (i == last_elt)
            perf_ctx->unaccumulated[i] = NULL;
         else {
            perf_ctx->unaccumulated[i] =
               perf_ctx->unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */

   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   query->oa.samples_head = NULL;

   reap_old_sample_buffers(perf_ctx);
}
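
/* Illustrative note (not from the original source): the loop above removes an
 * element from the unordered unaccumulated[] array in O(1) by overwriting it
 * with the last element instead of shifting the tail down. E.g. removing B
 * from a hypothetical array of four queries:
 *
 *    [A][B][C][D]   unaccumulated_elements == 4
 *    [A][D][C]      unaccumulated_elements == 3 (D copied over B)
 *
 * Ordering isn't preserved, which is fine because the array is only ever
 * scanned in full.
 */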

/* In general if we see anything spurious while accumulating results,
 * we don't try to continue accumulating the current query, hoping for
 * the best; we scrap anything outstanding, and then hope for the best
 * with new queries.
 */
static void
discard_all_queries(struct gen_perf_context *perf_ctx)
{
   while (perf_ctx->unaccumulated_elements) {
      struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];

      query->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(perf_ctx, query);

      dec_n_users(perf_ctx);
   }
}

/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots. For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
static void
accumulate_oa_reports(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool in_ctx = true;
   int out_duration = 0;

   assert(query->oa.map != NULL);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the perf_ctx->sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = query->oa.samples_head->next;

   foreach_list_typed_from(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers,
                           first_samples_node) {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool add = true;

            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - start[1]) > 5000000000) {
               continue;
            }

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - end[1]) <= 5000000000) {
               goto end;
            }
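
            /* Illustrative note (not from the original source): both checks
             * rely on unsigned 32-bit subtraction, so a report taken slightly
             * *before* the start marker wraps to a huge tick delta that
             * scales far beyond the 5 second (5000000000 ns) window and gets
             * skipped. E.g. for a hypothetical report two ticks before the
             * start report:
             *
             *    report[1] - start[1] == 0xfffffffe   // wrapped delta
             *
             * which gen_device_info_timebase_scale() turns into hundreds of
             * seconds, while a report genuinely inside the query window
             * yields a delta scaling to well under 5 seconds.
             */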

            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas. The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               if (in_ctx && report[2] != query->oa.result.hw_id) {
                  DBG("i915 perf: Switch AWAY (observed by ID change)\n");
                  in_ctx = false;
                  out_duration = 0;
               } else if (in_ctx == false && report[2] == query->oa.result.hw_id) {
                  DBG("i915 perf: Switch TO\n");
                  in_ctx = true;

                  /* From experimentation in IGT, we found that the OA unit
                   * might label some report as "idle" (using an invalid
                   * context ID), right after a report for a given context.
                   * Deltas generated by those reports actually belong to the
                   * previous context, even though they're not labelled as
                   * such.
                   *
                   * We didn't *really* Switch AWAY in the case that we e.g.
                   * saw a single periodic report while idle...
                   */
                  if (out_duration >= 1)
                     add = false;
               } else if (in_ctx) {
                  assert(report[2] == query->oa.result.hw_id);
                  DBG("i915 perf: Continuation IN\n");
               } else {
                  assert(report[2] != query->oa.result.hw_id);
                  DBG("i915 perf: Continuation OUT\n");
                  add = false;
                  out_duration++;
               }
            }

            if (add) {
               gen_perf_query_result_accumulate(&query->oa.result,
                                                query->queryinfo,
                                                last, report);
            }

            last = report;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:

   gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
                                    last, end);

   query->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(perf_ctx, query);
   dec_n_users(perf_ctx);

   return;

error:

   discard_all_queries(perf_ctx);
}
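
/* Illustrative timeline (not from the original source), with Q marking
 * periodic reports tagged with the query's hw_id and X marking reports from
 * other contexts:
 *
 *    [begin]---Q----Q----X----X----Q---[end]
 *           add  add  add  skip skip  add
 *
 * The Q->X "switch away" delta is still added because the hardware writes
 * that report at the switch point, so it only covers our own progress. The
 * X->X and X->Q deltas are dropped (add == false), and the final Q->[end]
 * delta is added by the closing gen_perf_query_result_accumulate(..., last,
 * end) call above.
 */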

void
gen_perf_delete_query(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!query->oa.results_accumulated) {
         drop_from_unaccumulated_query_list(perf_ctx, query);
         dec_n_users(perf_ctx);
      }

      if (query->oa.bo) {
         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.results_accumulated = false;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
   }

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--perf_ctx->n_query_instances == 0) {
      free_sample_bufs(perf_ctx);
      gen_perf_close(perf_ctx, query->queryinfo);
   }

   free(query);
}

#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)

static void
read_gt_frequency(struct gen_perf_context *perf_ctx,
                  struct gen_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
      end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));

   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}
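
/* Worked example (not from the original source): GET_FIELD extracts the
 * current GT frequency field, which on Gen9+ occupies bits 31:23 of RPSTAT0
 * and counts in units of 50/3 MHz. For a hypothetical register snapshot:
 *
 *    start == 0x2d800000
 *    GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) == 91
 *    91 * 50ULL / 3ULL == 1516            // integer division
 *    1516 * 1000000ULL == 1516000000      // ~1.5 GHz in Hz
 */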

static int
get_oa_counter_data(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);

      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf_cfg, queryinfo,
                                               query->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf_cfg, queryinfo,
                                              query->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter_size;
      }
   }

   return written;
}

static int
get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        size_t data_size,
                        uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   uint8_t *p = data;

   uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);

   return p - data;
}
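
/* Illustrative note (not from the original source): the numerator/denominator
 * pair rescales a raw HW counter delta into API units. For a hypothetical
 * counter that ticks once per four primitives:
 *
 *    end[i] - start[i] == 25              // raw delta from the BO snapshots
 *    value = 25 * 4 / 1 == 100            // numerator == 4, denominator == 1
 *
 * Counters with numerator == denominator skip the multiply/divide entirely,
 * which is the common 1:1 case.
 */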

void
gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        int data_size,
                        unsigned *data,
                        unsigned *bytes_written)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   int written = 0;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!query->oa.results_accumulated) {
         read_gt_frequency(perf_ctx, query);
         uint32_t *begin_report = query->oa.map;
         uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
         gen_perf_query_result_read_frequencies(&query->oa.result,
                                                perf_ctx->devinfo,
                                                begin_report, end_report);
         accumulate_oa_reports(perf_ctx, query);
         assert(query->oa.results_accumulated);

         perf_cfg->vtbl.bo_unmap(query->oa.bo);
         query->oa.map = NULL;
      }
      if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &query->oa.result,
                                                     query->oa.gt_frequency[0],
                                                     query->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
   }

   if (bytes_written)
      *bytes_written = written;
}

void
gen_perf_dump_query_count(struct gen_perf_context *perf_ctx)
{
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
}

void
gen_perf_dump_query(struct gen_perf_context *ctx,
                    struct gen_perf_query_object *obj,
                    void *current_batch)
{
   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("BO: %-4s OA data: %-10s %-15s\n",
          obj->oa.bo ? "yes," : "no,",
          gen_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("BO: %-4s\n",
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
   }
}