/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <dirent.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include <drm-uapi/i915_drm.h>

#include "common/gen_gem.h"

#include "gen_perf.h"
#include "gen_perf_regs.h"
#include "perf/gen_perf_mdapi.h"
#include "perf/gen_perf_metrics.h"

#include "dev/gen_debug.h"
#include "dev/gen_device_info.h"
#include "util/bitscan.h"
#include "util/mesa-sha1.h"
#include "util/u_math.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define MI_RPC_BO_SIZE              4096
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
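
/* Layout of the MI_RPC BO implied by the offsets above and their use in
 * gen_perf_begin_query()/gen_perf_end_query() below: the begin OA report is
 * written at offset 0, the end OA report at MI_RPC_BO_END_OFFSET_BYTES, and
 * the begin/end RPSTAT frequency snapshots at MI_FREQ_START_OFFSET_BYTES and
 * MI_FREQ_END_OFFSET_BYTES respectively.
 */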

#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))

#define GEN7_RPSTAT1                       0xA01C
#define  GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT   7
#define  GEN7_RPSTAT1_CURR_GT_FREQ_MASK    INTEL_MASK(13, 7)
#define  GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT   0
#define  GEN7_RPSTAT1_PREV_GT_FREQ_MASK    INTEL_MASK(6, 0)

#define GEN9_RPSTAT0                       0xA01C
#define  GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT   23
#define  GEN9_RPSTAT0_CURR_GT_FREQ_MASK    INTEL_MASK(31, 23)
#define  GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT   0
#define  GEN9_RPSTAT0_PREV_GT_FREQ_MASK    INTEL_MASK(8, 0)
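
/* A minimal sketch of decoding a captured RPSTAT snapshot with the masks
 * above (the 50 MHz scaling of the ratio field is an assumption for
 * illustration, not something defined in this file):
 *
 *    uint32_t rpstat = ...; // value stored by snapshot_freq_register()
 *    uint32_t ratio = (rpstat & GEN9_RPSTAT0_CURR_GT_FREQ_MASK) >>
 *                     GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT;
 *    uint64_t freq_hz = ratio * 50ULL * 1000000ULL; // assumed 50MHz units
 */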

#define GEN6_SO_PRIM_STORAGE_NEEDED     0x2280
#define GEN7_SO_PRIM_STORAGE_NEEDED(n)  (0x5240 + (n) * 8)
#define GEN6_SO_NUM_PRIMS_WRITTEN       0x2288
#define GEN7_SO_NUM_PRIMS_WRITTEN(n)    (0x5200 + (n) * 8)

#define MAP_READ  (1 << 0)
#define MAP_WRITE (1 << 1)

#define OA_REPORT_INVALID_CTX_ID (0xffffffff)

/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * perf_ctx->sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                |
 *                | ________B_________ _____C___________
 *                | |                | |               |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [  0  ][  0  ]
 *
 * These must be preserved until the leading ref drops to zero:
 *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [  0  ][  1  ]
 *           ^_______ Add a reference and store pointer to node in
 *                    A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [  0  ][  2  ]
 *           ^_______ Add a reference and store pointer to
 *                    node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [  0  ][  1  ][  0  ][  0  ][  0  ]
 *           ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [  1  ][  0  ][  0  ][  0  ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                |
 *                | ______B_____  ______C___________
 *                | |          |  |                |
 * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
 *           ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [  X  ][  X  ][  X  ][  X  ]
 *                  keeping -> [  1  ][  0  ][  0  ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
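
/* A minimal sketch (not a verbatim copy) of the take/drop pattern described
 * above; the take side matches gen_perf_begin_query() later in this file,
 * the drop side sketches what the accumulation path does once the End
 * report has been processed:
 *
 *    // Begin: pin the current tail so any later buffers stay alive.
 *    query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);
 *    exec_node_data(struct oa_sample_buf,
 *                   query->oa.samples_head, link)->refcount++;
 *
 *    // After accumulating results: unpin and reap unreferenced buffers.
 *    exec_node_data(struct oa_sample_buf,
 *                   query->oa.samples_head, link)->refcount--;
 *    reap_old_sample_buffers(perf_ctx);
 */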
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};

/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct gen_perf_query_object
{
   const struct gen_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {
         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of mapped of @bo
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the brw->perfquery.sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query)
          *
          * (See struct brw_oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Frequency of the GT at begin and end of the query.
          */
         uint64_t gt_frequency[2];

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct gen_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};

struct gen_perf_context {
   struct gen_perf_config *perf;

   void * ctx;  /* driver context (eg, brw_context) */
   void * bufmgr;
   const struct gen_device_info *devinfo;

   uint32_t hw_ctx;
   int drm_fd;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the gpu.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended.  However, the
    * results have not been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;
};

const struct gen_perf_query_info *
gen_perf_query_info(const struct gen_perf_query_object *query)
{
   return query->queryinfo;
}

struct gen_perf_context *
gen_perf_new_context(void *parent)
{
   struct gen_perf_context *ctx = rzalloc(parent, struct gen_perf_context);
   if (!ctx)
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
   return ctx;
}

struct gen_perf_config *
gen_perf_config(struct gen_perf_context *ctx)
{
   return ctx->perf;
}

struct gen_perf_query_object *
gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
{
   const struct gen_perf_query_info *query =
      &perf_ctx->perf->queries[query_index];
   struct gen_perf_query_object *obj =
      calloc(1, sizeof(struct gen_perf_query_object));

   if (!obj)
      return NULL;

   obj->queryinfo = query;

   perf_ctx->n_query_instances++;
   return obj;
}

int
gen_perf_active_queries(struct gen_perf_context *perf_ctx,
                        const struct gen_perf_query_info *query)
{
   assert(perf_ctx->n_active_oa_queries == 0 || perf_ctx->n_active_pipeline_stats_queries == 0);

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return perf_ctx->n_active_oa_queries;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}

static inline uint64_t to_user_pointer(void *ptr)
{
   return (uintptr_t) ptr;
}

static bool
get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
{
   struct stat sb;
   int min, maj;
   DIR *drmdir;
   struct dirent *drm_entry;
   int len;

   perf->sysfs_dev_dir[0] = '\0';

   if (fstat(fd, &sb)) {
      DBG("Failed to stat DRM fd\n");
      return false;
   }

   maj = major(sb.st_rdev);
   min = minor(sb.st_rdev);

   if (!S_ISCHR(sb.st_mode)) {
      DBG("DRM fd is not a character device as expected\n");
      return false;
   }

   len = snprintf(perf->sysfs_dev_dir,
                  sizeof(perf->sysfs_dev_dir),
                  "/sys/dev/char/%d:%d/device/drm", maj, min);
   if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
      DBG("Failed to concatenate sysfs path to drm device\n");
      return false;
   }

   drmdir = opendir(perf->sysfs_dev_dir);
   if (!drmdir) {
      DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
      return false;
   }

   while ((drm_entry = readdir(drmdir))) {
      if ((drm_entry->d_type == DT_DIR ||
           drm_entry->d_type == DT_LNK) &&
          strncmp(drm_entry->d_name, "card", 4) == 0)
      {
         len = snprintf(perf->sysfs_dev_dir,
                        sizeof(perf->sysfs_dev_dir),
                        "/sys/dev/char/%d:%d/device/drm/%s",
                        maj, min, drm_entry->d_name);
         closedir(drmdir);
         if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
            return false;
         else
            return true;
      }
   }

   closedir(drmdir);

   DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
       maj, min);

   return false;
}

static bool
read_file_uint64(const char *file, uint64_t *val)
{
   char buf[32];
   int fd, n;

   fd = open(file, 0);
   if (fd < 0)
      return false;
   while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
          errno == EINTR);
   close(fd);
   if (n < 0)
      return false;

   buf[n] = '\0';
   *val = strtoull(buf, NULL, 0);

   return true;
}

static bool
read_sysfs_drm_device_file_uint64(struct gen_perf_config *perf,
                                  const char *file,
                                  uint64_t *value)
{
   char buf[512];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate sys filename to read u64 from\n");
      return false;
   }

   return read_file_uint64(buf, value);
}

static inline struct gen_perf_query_info *
append_query_info(struct gen_perf_config *perf, int max_counters)
{
   struct gen_perf_query_info *query;

   perf->queries = reralloc(perf, perf->queries,
                            struct gen_perf_query_info,
                            ++perf->n_queries);
   query = &perf->queries[perf->n_queries - 1];
   memset(query, 0, sizeof(*query));

   if (max_counters > 0) {
      query->max_counters = max_counters;
      query->counters =
         rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
   }

   return query;
}

static void
register_oa_config(struct gen_perf_config *perf,
                   const struct gen_perf_query_info *query,
                   uint64_t config_id)
{
   struct gen_perf_query_info *registered_query = append_query_info(perf, 0);

   *registered_query = *query;
   registered_query->oa_metrics_set_id = config_id;
   DBG("metric set registered: id = %" PRIu64", guid = %s\n",
       registered_query->oa_metrics_set_id, query->guid);
}

static void
enumerate_sysfs_metrics(struct gen_perf_config *perf)
{
   DIR *metricsdir = NULL;
   struct dirent *metric_entry;
   char buf[256];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate path to sysfs metrics/ directory\n");
      return;
   }

   metricsdir = opendir(buf);
   if (!metricsdir) {
      DBG("Failed to open %s: %m\n", buf);
      return;
   }

   while ((metric_entry = readdir(metricsdir))) {
      struct hash_entry *entry;

      if ((metric_entry->d_type != DT_DIR &&
           metric_entry->d_type != DT_LNK) ||
          metric_entry->d_name[0] == '.')
         continue;

      DBG("metric set: %s\n", metric_entry->d_name);
      entry = _mesa_hash_table_search(perf->oa_metrics_table,
                                      metric_entry->d_name);
      if (entry) {
         uint64_t id;
         if (!gen_perf_load_metric_id(perf, metric_entry->d_name, &id)) {
            DBG("Failed to read metric set id from %s: %m", buf);
            continue;
         }

         register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
      } else
         DBG("metric set not known by mesa (skipping)\n");
   }

   closedir(metricsdir);
}

static bool
kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
{
   uint64_t invalid_config_id = UINT64_MAX;

   return gen_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
                    &invalid_config_id) < 0 && errno == ENOENT;
}

static int
i915_query_items(struct gen_perf_config *perf, int fd,
                 struct drm_i915_query_item *items, uint32_t n_items)
{
   struct drm_i915_query q = {
      .num_items = n_items,
      .items_ptr = to_user_pointer(items),
   };
   return gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
}

static bool
i915_query_perf_config_supported(struct gen_perf_config *perf, int fd)
{
   struct drm_i915_query_item item = {
      .query_id = DRM_I915_QUERY_PERF_CONFIG,
      .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
   };

   return i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0;
}

static bool
i915_query_perf_config_data(struct gen_perf_config *perf,
                            int fd, const char *guid,
                            struct drm_i915_perf_oa_config *config)
{
   struct {
      struct drm_i915_query_perf_config query;
      struct drm_i915_perf_oa_config config;
   } item_data;
   struct drm_i915_query_item item = {
      .query_id = DRM_I915_QUERY_PERF_CONFIG,
      .flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
      .data_ptr = to_user_pointer(&item_data),
      .length = sizeof(item_data),
   };

   memset(&item_data, 0, sizeof(item_data));
   memcpy(item_data.query.uuid, guid, sizeof(item_data.query.uuid));
   memcpy(&item_data.config, config, sizeof(item_data.config));

   if (!(i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0))
      return false;

   memcpy(config, &item_data.config, sizeof(item_data.config));

   return true;
}

bool
gen_perf_load_metric_id(struct gen_perf_config *perf_cfg,
                        const char *guid,
                        uint64_t *metric_id)
{
   char config_path[280];

   snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
            perf_cfg->sysfs_dev_dir, guid);

   /* Don't recreate already loaded configs. */
   return read_file_uint64(config_path, metric_id);
}

static uint64_t
i915_add_config(struct gen_perf_config *perf, int fd,
                const struct gen_perf_registers *config,
                const char *guid)
{
   struct drm_i915_perf_oa_config i915_config = { 0, };

   memcpy(i915_config.uuid, guid, sizeof(i915_config.uuid));

   i915_config.n_mux_regs = config->n_mux_regs;
   i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);

   i915_config.n_boolean_regs = config->n_b_counter_regs;
   i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);

   i915_config.n_flex_regs = config->n_flex_regs;
   i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);

   int ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &i915_config);
   return ret > 0 ? ret : 0;
}

static void
init_oa_configs(struct gen_perf_config *perf, int fd)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      const struct gen_perf_query_info *query = entry->data;
      uint64_t config_id;

      if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
         DBG("metric set: %s (already loaded)\n", query->guid);
         register_oa_config(perf, query, config_id);
         continue;
      }

      int ret = i915_add_config(perf, fd, &query->config, query->guid);
      if (ret < 0) {
         DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
             query->name, query->guid, strerror(errno));
         continue;
      }

      register_oa_config(perf, query, ret);
      DBG("metric set: %s (added)\n", query->guid);
   }
}

static void
compute_topology_builtins(struct gen_perf_config *perf,
                          const struct gen_device_info *devinfo)
{
   perf->sys_vars.slice_mask = devinfo->slice_masks;
   perf->sys_vars.n_eu_slices = devinfo->num_slices;

   for (int i = 0; i < sizeof(devinfo->subslice_masks[i]); i++) {
      perf->sys_vars.n_eu_sub_slices +=
         __builtin_popcount(devinfo->subslice_masks[i]);
   }

   for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
      perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);

   perf->sys_vars.eu_threads_count = devinfo->num_thread_per_eu;

   /* The subslice mask builtin contains bits for all slices. Prior to Gen11
    * it had groups of 3bits for each slice, on Gen11 it's 8bits for each
    * slice.
    *
    * Ideally equations would be updated to have a slice/subslice query
    * function/operator.
    */
   perf->sys_vars.subslice_mask = 0;

   int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;

   for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
      for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
         if (gen_device_info_subslice_available(devinfo, s, ss))
            perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
      }
   }
}

static bool
init_oa_sys_vars(struct gen_perf_config *perf, const struct gen_device_info *devinfo)
{
   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
      return false;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
      return false;

   memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
   perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
   perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
   perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
   perf->sys_vars.revision = devinfo->revision;
   compute_topology_builtins(perf, devinfo);

   return true;
}

typedef void (*perf_register_oa_queries_t)(struct gen_perf_config *);

static perf_register_oa_queries_t
get_register_queries_function(const struct gen_device_info *devinfo)
{
   if (devinfo->is_haswell)
      return gen_oa_register_queries_hsw;
   if (devinfo->is_cherryview)
      return gen_oa_register_queries_chv;
   if (devinfo->is_broadwell)
      return gen_oa_register_queries_bdw;
   if (devinfo->is_broxton)
      return gen_oa_register_queries_bxt;
   if (devinfo->is_skylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_sklgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_sklgt3;
      if (devinfo->gt == 4)
         return gen_oa_register_queries_sklgt4;
   }
   if (devinfo->is_kabylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_kblgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_kblgt3;
   }
   if (devinfo->is_geminilake)
      return gen_oa_register_queries_glk;
   if (devinfo->is_coffeelake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_cflgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_cflgt3;
   }
   if (devinfo->is_cannonlake)
      return gen_oa_register_queries_cnl;
   if (devinfo->gen == 11) {
      if (devinfo->is_elkhartlake)
         return gen_oa_register_queries_lkf;
      return gen_oa_register_queries_icl;
   }
   if (devinfo->gen == 12)
      return gen_oa_register_queries_tgl;

   return NULL;
}

static void
add_stat_reg(struct gen_perf_query_info *query, uint32_t reg,
             uint32_t numerator, uint32_t denominator,
             const char *name, const char *description)
{
   struct gen_perf_query_counter *counter;

   assert(query->n_counters < query->max_counters);

   counter = &query->counters[query->n_counters];
   counter->name = name;
   counter->desc = description;
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
   counter->offset = sizeof(uint64_t) * query->n_counters;
   counter->pipeline_stat.reg = reg;
   counter->pipeline_stat.numerator = numerator;
   counter->pipeline_stat.denominator = denominator;

   query->n_counters++;
}

static void
add_basic_stat_reg(struct gen_perf_query_info *query,
                   uint32_t reg, const char *name)
{
   add_stat_reg(query, reg, 1, 1, name, name);
}

static void
load_pipeline_statistic_metrics(struct gen_perf_config *perf_cfg,
                                const struct gen_device_info *devinfo)
{
   struct gen_perf_query_info *query =
      append_query_info(perf_cfg, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   add_basic_stat_reg(query, IA_VERTICES_COUNT,
                      "N vertices submitted");
   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                      "N primitives submitted");
   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                      "N vertex shader invocations");

   if (devinfo->gen == 6) {
      add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                   "SO_PRIM_STORAGE_NEEDED",
                   "N geometry shader stream-out primitives (total)");
      add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                   "SO_NUM_PRIMS_WRITTEN",
                   "N geometry shader stream-out primitives (written)");
   } else {
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                   "N stream-out (stream 0) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                   "N stream-out (stream 1) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                   "N stream-out (stream 2) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                   "N stream-out (stream 3) primitives (total)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                   "N stream-out (stream 0) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                   "N stream-out (stream 1) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                   "N stream-out (stream 2) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                   "N stream-out (stream 3) primitives (written)");
   }

   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                      "N TCS shader invocations");
   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                      "N TES shader invocations");

   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                      "N geometry shader invocations");
   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                      "N geometry shader primitives emitted");

   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                      "N primitives entering clipping");
   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                      "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8) {
      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                   "N fragment shader invocations",
                   "N fragment shader invocations");
   } else {
      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                         "N fragment shader invocations");
   }

   add_basic_stat_reg(query, PS_DEPTH_COUNT,
                      "N z-pass fragments");

   if (devinfo->gen >= 7) {
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}

static bool
load_oa_metrics(struct gen_perf_config *perf, int fd,
                const struct gen_device_info *devinfo)
{
   perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
   bool i915_perf_oa_available = false;
   struct stat sb;

   perf->i915_query_supported = i915_query_perf_config_supported(perf, fd);

   /* The existence of this sysctl parameter implies the kernel supports
    * the i915 perf interface.
    */
   if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {

      /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
       * metrics unless running as root.
       */
      if (devinfo->is_haswell)
         i915_perf_oa_available = true;
      else {
         uint64_t paranoid = 1;

         read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);

         if (paranoid == 0 || geteuid() == 0)
            i915_perf_oa_available = true;
      }
   }

   if (!i915_perf_oa_available ||
       !oa_register ||
       !get_sysfs_dev_dir(perf, fd) ||
       !init_oa_sys_vars(perf, devinfo))
      return false;

   perf->oa_metrics_table =
      _mesa_hash_table_create(perf, _mesa_key_hash_string,
                              _mesa_key_string_equal);

   /* Index all the metric sets mesa knows about before looking to see what
    * the kernel is advertising.
    */
   oa_register(perf);

   if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
       kernel_has_dynamic_config_support(perf, fd))
      init_oa_configs(perf, fd);
   else
      enumerate_sysfs_metrics(perf);

   return true;
}

struct gen_perf_registers *
gen_perf_load_configuration(struct gen_perf_config *perf_cfg, int fd, const char *guid)
{
   if (!perf_cfg->i915_query_supported)
      return NULL;

   struct drm_i915_perf_oa_config i915_config = { 0, };
   if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config))
      return NULL;

   struct gen_perf_registers *config = rzalloc(NULL, struct gen_perf_registers);
   config->n_flex_regs = i915_config.n_flex_regs;
   config->flex_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_flex_regs);
   config->n_mux_regs = i915_config.n_mux_regs;
   config->mux_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_mux_regs);
   config->n_b_counter_regs = i915_config.n_boolean_regs;
   config->b_counter_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_b_counter_regs);

   /*
    * struct gen_perf_query_register_prog maps exactly to the tuple of
    * (register offset, register value) returned by the i915.
    */
   i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);
   i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);
   i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);
   if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config)) {
      ralloc_free(config);
      return NULL;
   }

   return config;
}

uint64_t
gen_perf_store_configuration(struct gen_perf_config *perf_cfg, int fd,
                             const struct gen_perf_registers *config,
                             const char *guid)
{
   if (guid)
      return i915_add_config(perf_cfg, fd, config, guid);

   struct mesa_sha1 sha1_ctx;
   _mesa_sha1_init(&sha1_ctx);

   if (config->flex_regs) {
      _mesa_sha1_update(&sha1_ctx, config->flex_regs,
                        sizeof(config->flex_regs[0]) *
                        config->n_flex_regs);
   }
   if (config->mux_regs) {
      _mesa_sha1_update(&sha1_ctx, config->mux_regs,
                        sizeof(config->mux_regs[0]) *
                        config->n_mux_regs);
   }
   if (config->b_counter_regs) {
      _mesa_sha1_update(&sha1_ctx, config->b_counter_regs,
                        sizeof(config->b_counter_regs[0]) *
                        config->n_b_counter_regs);
   }

   uint8_t hash[20];
   _mesa_sha1_final(&sha1_ctx, hash);

   char formatted_hash[41];
   _mesa_sha1_format(formatted_hash, hash);

   char generated_guid[37];
   snprintf(generated_guid, sizeof(generated_guid),
            "%.8s-%.4s-%.4s-%.4s-%.12s",
            &formatted_hash[0], &formatted_hash[8],
            &formatted_hash[8 + 4], &formatted_hash[8 + 4 + 4],
            &formatted_hash[8 + 4 + 4 + 4]);

   /* Check if already present. */
   uint64_t id;
   if (gen_perf_load_metric_id(perf_cfg, generated_guid, &id))
      return id;

   return i915_add_config(perf_cfg, fd, config, generated_guid);
}

/* Accumulate 32bits OA counters */
static inline void
accumulate_uint32(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += (uint32_t)(*report1 - *report0);
}

/* Accumulate 40bits OA counters */
static inline void
accumulate_uint40(int a_index,
                  const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
   const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
   uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
   uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
   uint64_t value0 = report0[a_index + 4] | high0;
   uint64_t value1 = report1[a_index + 4] | high1;
   uint64_t delta;

   if (value0 > value1)
      delta = (1ULL << 40) + value1 - value0;
   else
      delta = value1 - value0;

   *accumulator += delta;
}

static void
gen8_read_report_clock_ratios(const uint32_t *report,
                              uint64_t *slice_freq_hz,
                              uint64_t *unslice_freq_hz)
{
   /* The lower 16bits of the RPT_ID field of the OA reports contains a
    * snapshot of the bits coming from the RP_FREQ_NORMAL register and is
    * divided this way :
    *
    * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
    * RPT_ID[10:9]:  RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
    * RPT_ID[8:0]:   RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
    *
    * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    *
    * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    */

   uint32_t unslice_freq = report[0] & 0x1ff;
   uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
   uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
   uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);

   *slice_freq_hz = slice_freq * 16666667ULL;
   *unslice_freq_hz = unslice_freq * 16666667ULL;
}

void
gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                       const struct gen_device_info *devinfo,
                                       const uint32_t *start,
                                       const uint32_t *end)
{
   /* Slice/Unslice frequency is only available in the OA reports when the
    * "Disable OA reports due to clock ratio change" field in
    * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
    * global register (see drivers/gpu/drm/i915/i915_perf.c)
    *
    * Documentation says this should be available on Gen9+ but experimentation
    * shows that Gen8 reports similar values, so we enable it there too.
    */
   if (devinfo->gen < 8)
      return;

   gen8_read_report_clock_ratios(start,
                                 &result->slice_frequency[0],
                                 &result->unslice_frequency[0]);
   gen8_read_report_clock_ratios(end,
                                 &result->slice_frequency[1],
                                 &result->unslice_frequency[1]);
}

void
gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                 const struct gen_perf_query_info *query,
                                 const uint32_t *start,
                                 const uint32_t *end)
{
   int i, idx = 0;

   if (result->hw_id == OA_REPORT_INVALID_CTX_ID &&
       start[2] != OA_REPORT_INVALID_CTX_ID)
      result->hw_id = start[2];
   if (result->reports_accumulated == 0)
      result->begin_timestamp = start[1];
   result->reports_accumulated++;

   switch (query->oa_format) {
   case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
      accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */

      /* 32x 40bit A counters... */
      for (i = 0; i < 32; i++)
         accumulate_uint40(i, start, end, result->accumulator + idx++);

      /* 4x 32bit A counters... */
      for (i = 0; i < 4; i++)
         accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);

      /* 8x 32bit B counters + 8x 32bit C counters... */
      for (i = 0; i < 16; i++)
         accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
      break;

   case I915_OA_FORMAT_A45_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */

      for (i = 0; i < 61; i++)
         accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
      break;

   default:
      unreachable("Can't accumulate OA counters in unknown format");
   }
}

void
gen_perf_query_result_clear(struct gen_perf_query_result *result)
{
   memset(result, 0, sizeof(*result));
   result->hw_id = OA_REPORT_INVALID_CTX_ID; /* invalid */
}

static void
register_mdapi_statistic_query(struct gen_perf_config *perf_cfg,
                               const struct gen_device_info *devinfo)
{
   if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
      return;

   struct gen_perf_query_info *query =
      append_query_info(perf_cfg, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Intel_Raw_Pipeline_Statistics_Query";

   /* The order has to match mdapi_pipeline_metrics. */
   add_basic_stat_reg(query, IA_VERTICES_COUNT,
                      "N vertices submitted");
   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                      "N primitives submitted");
   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                      "N vertex shader invocations");
   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                      "N geometry shader invocations");
   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                      "N geometry shader primitives emitted");
   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                      "N primitives entering clipping");
   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                      "N primitives leaving clipping");
   if (devinfo->is_haswell || devinfo->gen == 8) {
      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                   "N fragment shader invocations",
                   "N fragment shader invocations");
   } else {
      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                         "N fragment shader invocations");
   }
   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                      "N TCS shader invocations");
   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                      "N TES shader invocations");
   if (devinfo->gen >= 7) {
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "N compute shader invocations");
   }

   if (devinfo->gen >= 10) {
      /* Reuse existing CS invocation register until we can expose this new
       * one.
       */
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "Reserved1");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}

static void
fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
                              const char *name,
                              uint32_t data_offset,
                              uint32_t data_size,
                              enum gen_perf_counter_data_type data_type)
{
   struct gen_perf_query_counter *counter = &query->counters[query->n_counters];

   assert(query->n_counters <= query->max_counters);

   counter->name = name;
   counter->desc = "Raw counter value";
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = data_type;
   counter->offset = data_offset;

   query->n_counters++;

   assert(counter->offset + gen_perf_query_counter_get_size(counter) <= query->data_size);
}

#define MDAPI_QUERY_ADD_COUNTER(query, struct_name, field_name, type_name) \
   fill_mdapi_perf_query_counter(query, #field_name,                   \
                                 (uint8_t *) &struct_name.field_name - \
                                 (uint8_t *) &struct_name,             \
                                 sizeof(struct_name.field_name),       \
                                 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
#define MDAPI_QUERY_ADD_ARRAY_COUNTER(ctx, query, struct_name, field_name, idx, type_name) \
   fill_mdapi_perf_query_counter(query,                                \
                                 ralloc_asprintf(ctx, "%s%i", #field_name, idx), \
                                 (uint8_t *) &struct_name.field_name[idx] - \
                                 (uint8_t *) &struct_name,             \
                                 sizeof(struct_name.field_name[0]),    \
                                 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
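
/* For illustration, the counter registered by
 *
 *    MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
 *
 * is named "TotalTime", its offset and size are computed from the field's
 * position within the given struct, and its data type is
 * GEN_PERF_COUNTER_DATA_TYPE_UINT64.
 */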

static void
register_mdapi_oa_query(const struct gen_device_info *devinfo,
                        struct gen_perf_config *perf)
{
   struct gen_perf_query_info *query = NULL;

   /* MDAPI requires different structures for pretty much every generation
    * (right now we have definitions for gen 7 to 11).
    */
   if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
      return;

   switch (devinfo->gen) {
   case 7: {
      query = append_query_info(perf, 1 + 45 + 16 + 7);
      query->oa_format = I915_OA_FORMAT_A45_B8_C8;

      struct gen7_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.ACounters); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, ACounters, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NOACounters); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NOACounters, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      break;
   }
   case 8: {
      query = append_query_info(perf, 2 + 36 + 16 + 16);
      query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;

      struct gen8_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, OaCntr, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NoaCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      break;
   }
   case 9:
   case 10:
   case 11: {
      query = append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
      query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;

      struct gen9_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, OaCntr, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NoaCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      for (int i = 0; i < ARRAY_SIZE(metric_data.UserCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, UserCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UserCntrCfgId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved4, UINT32);
      break;
   }
   default:
      unreachable("Unsupported gen");
   }

   query->kind = GEN_PERF_QUERY_TYPE_RAW;
   query->name = "Intel_Raw_Hardware_Counters_Set_0_Query";
   query->guid = GEN_PERF_QUERY_GUID_MDAPI;

   {
      /* Accumulation buffer offsets copied from an actual query... */
      const struct gen_perf_query_info *copy_query =
         &perf->queries[0];

      query->gpu_time_offset = copy_query->gpu_time_offset;
      query->gpu_clock_offset = copy_query->gpu_clock_offset;
      query->a_offset = copy_query->a_offset;
      query->b_offset = copy_query->b_offset;
      query->c_offset = copy_query->c_offset;
   }
}

static uint64_t
get_metric_id(struct gen_perf_config *perf,
              const struct gen_perf_query_info *query)
{
   /* These queries are known never to change; their config ID has been
    * loaded upon the first query creation. No need to look them up again.
    */
   if (query->kind == GEN_PERF_QUERY_TYPE_OA)
      return query->oa_metrics_set_id;

   assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);

   /* Raw queries can be reprogrammed by an external application/library.
    * When a raw query is used for the first time its ID is set to a value !=
    * 0. When it stops being used the ID returns to 0. No need to reload the
    * ID when it's already loaded.
    */
   if (query->oa_metrics_set_id != 0) {
      DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
      return query->oa_metrics_set_id;
   }

   struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
   if (!gen_perf_load_metric_id(perf, query->guid,
                                &raw_query->oa_metrics_set_id)) {
      DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
      raw_query->oa_metrics_set_id = 1ULL;
   } else {
      DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
   }
   return query->oa_metrics_set_id;
}

static struct oa_sample_buf *
get_free_sample_buf(struct gen_perf_context *perf_ctx)
{
   struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
   struct oa_sample_buf *buf;

   if (node)
      buf = exec_node_data(struct oa_sample_buf, node, link);
   else {
      buf = ralloc_size(perf_ctx->perf, sizeof(*buf));

      exec_node_init(&buf->link);
      buf->refcount = 0;
   }
   buf->len = 0;

   return buf;
}

static void
reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
      } else
         return;
   }
}

static void
free_sample_bufs(struct gen_perf_context *perf_ctx)
{
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&perf_ctx->free_sample_buffers);
}

/******************************************************************************/

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(struct gen_perf_context *ctx,
                              struct gen_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
   struct gen_perf_config *perf = ctx->perf;
   const struct gen_perf_query_info *query = obj->queryinfo;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);

      perf->vtbl.store_register_mem(ctx->ctx, obj->pipeline_stats.bo,
                                    counter->pipeline_stat.reg, 8,
                                    offset_in_bytes + i * sizeof(uint64_t));
   }
}

static void
snapshot_freq_register(struct gen_perf_context *ctx,
                       struct gen_perf_query_object *query,
                       uint32_t bo_offset)
{
   struct gen_perf_config *perf = ctx->perf;
   const struct gen_device_info *devinfo = ctx->devinfo;

   if (devinfo->gen == 8 && !devinfo->is_cherryview)
      perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo, GEN7_RPSTAT1, 4, bo_offset);
   else if (devinfo->gen >= 9)
      perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo, GEN9_RPSTAT0, 4, bo_offset);
}

static void
gen_perf_close(struct gen_perf_context *perfquery,
               const struct gen_perf_query_info *query)
{
   if (perfquery->oa_stream_fd != -1) {
      close(perfquery->oa_stream_fd);
      perfquery->oa_stream_fd = -1;
   }
   if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      struct gen_perf_query_info *raw_query =
         (struct gen_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}

static bool
gen_perf_open(struct gen_perf_context *perf_ctx,
              uint64_t metrics_set_id,
              int report_format,
              int period_exponent,
              int drm_fd,
              uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = ARRAY_SIZE(properties) / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening gen perf OA stream: %m\n");
      return false;
   }

   perf_ctx->oa_stream_fd = fd;

   perf_ctx->current_oa_metrics_set_id = metrics_set_id;
   perf_ctx->current_oa_format = report_format;

   return true;
}

static bool
inc_n_users(struct gen_perf_context *perf_ctx)
{
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++perf_ctx->n_oa_users;

   return true;
}

static void
dec_n_users(struct gen_perf_context *perf_ctx)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters.  Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --perf_ctx->n_oa_users;
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling gen perf stream: %m\n");
   }
}

void
gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
                      const struct gen_device_info *devinfo,
                      int drm_fd)
{
   load_pipeline_statistic_metrics(perf_cfg, devinfo);
   register_mdapi_statistic_query(perf_cfg, devinfo);
   if (load_oa_metrics(perf_cfg, drm_fd, devinfo))
      register_mdapi_oa_query(devinfo, perf_cfg);
}

void
gen_perf_init_context(struct gen_perf_context *perf_ctx,
                      struct gen_perf_config *perf_cfg,
                      void * ctx,  /* driver context (eg, brw_context) */
                      void * bufmgr,  /* eg brw_bufmgr */
                      const struct gen_device_info *devinfo,
                      uint32_t hw_ctx,
                      int drm_fd)
{
   perf_ctx->perf = perf_cfg;
   perf_ctx->ctx = ctx;
   perf_ctx->bufmgr = bufmgr;
   perf_ctx->drm_fd = drm_fd;
   perf_ctx->hw_ctx = hw_ctx;
   perf_ctx->devinfo = devinfo;

   perf_ctx->unaccumulated =
      ralloc_array(ctx, struct gen_perf_query_object *, 2);
   perf_ctx->unaccumulated_elements = 0;
   perf_ctx->unaccumulated_array_size = 2;

   exec_list_make_empty(&perf_ctx->sample_buffers);
   exec_list_make_empty(&perf_ctx->free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
   exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);

   perf_ctx->oa_stream_fd = -1;
   perf_ctx->next_query_start_report_id = 1000;
}

/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                struct gen_perf_query_object *obj)
{
   if (perf_ctx->unaccumulated_elements >=
       perf_ctx->unaccumulated_array_size)
   {
      perf_ctx->unaccumulated_array_size *= 1.5;
      perf_ctx->unaccumulated =
         reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
                  struct gen_perf_query_object *,
                  perf_ctx->unaccumulated_array_size);
   }

   perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
}

bool
gen_perf_begin_query(struct gen_perf_context *perf_ctx,
                     struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * To achieve this, we stall the pipeline at pixel scoreboard (prevent any
    * additional work to be processed by the pipeline until all pixels of the
    * previous draw have been completed).
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics, periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);

   switch (queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);

      if (perf_ctx->oa_stream_fd != -1 &&
          perf_ctx->current_oa_metrics_set_id != metric_id) {

         if (perf_ctx->n_oa_users != 0) {
            DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
                perf_ctx->current_oa_metrics_set_id, metric_id);
            return false;
         } else
            gen_perf_close(perf_ctx, queryinfo);
      }

      /* If the OA counters aren't already on, enable them. */
      if (perf_ctx->oa_stream_fd == -1) {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         /* The period_exponent gives a sampling period as follows:
          *   sample_period = timestamp_period * 2^(period_exponent + 1)
          *
          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
          * ~83ns (GEN8/9).
          *
          * The counter overflow period is derived from the EuActive counter
          * which reads a counter that increments by the number of clock
          * cycles multiplied by the number of EUs. It can be calculated as:
          *
          * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
          *
          * (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We select a sampling period lower than that overflow period to
          * ensure we cannot see more than 1 counter overflow, otherwise we
          * could lose information.
          */
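
         /* Worked example of the formula above, assuming HSW's 80ns
          * timestamp ticks (12.5MHz) and the ~53ms overflow period of a
          * 32bit A counter on 40 EUs @ 1GHz: period_exponent = 18 gives
          * sample_period = 80ns * 2^(18 + 1) ~= 42ms, which stays below
          * the overflow period as required.
          */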
1799 int a_counter_in_bits
= 32;
1800 if (devinfo
->gen
>= 8)
1801 a_counter_in_bits
= 40;
1803 uint64_t overflow_period
= pow(2, a_counter_in_bits
) / (perf_cfg
->sys_vars
.n_eus
*
1804 /* drop 1GHz freq to have units in nanoseconds */
1807 DBG("A counter overflow period: %"PRIu64
"ns, %"PRIu64
"ms (n_eus=%"PRIu64
")\n",
1808 overflow_period
, overflow_period
/ 1000000ul, perf_cfg
->sys_vars
.n_eus
);
1810 int period_exponent
= 0;
1811 uint64_t prev_sample_period
, next_sample_period
;
1812 for (int e
= 0; e
< 30; e
++) {
1813 prev_sample_period
= 1000000000ull * pow(2, e
+ 1) / devinfo
->timestamp_frequency
;
1814 next_sample_period
= 1000000000ull * pow(2, e
+ 2) / devinfo
->timestamp_frequency
;
1816 /* Take the previous sampling period, lower than the overflow
1819 if (prev_sample_period
< overflow_period
&&
1820 next_sample_period
> overflow_period
)
1821 period_exponent
= e
+ 1;
         if (period_exponent == 0) {
            DBG("WARNING: unable to find a sampling exponent\n");
            return false;
         }

         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
             prev_sample_period / 1000000ul);
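         /* Worked example (illustrative): with an 80ns timestamp period
          * (HSW) and a ~53ms overflow period, the loop above settles on
          * period_exponent = 19, since 80ns * 2^19 ~= 42ms is the largest
          * candidate below 53ms (80ns * 2^20 ~= 84ms would overshoot).
          */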
         if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
                            period_exponent, perf_ctx->drm_fd,
                            perf_ctx->hw_ctx))
            return false;
      } else {
         assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
                perf_ctx->current_oa_format == queryinfo->oa_format);
      }
      if (!inc_n_users(perf_ctx)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (query->oa.bo) {
         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }
      query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                             "perf. query OA MI_RPC bo",
                                             MI_RPC_BO_SIZE);

      /* Pre-filling the BO helps debug whether writes landed. */
      void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
      memset(map, 0x80, MI_RPC_BO_SIZE);
      perf_cfg->vtbl.bo_unmap(query->oa.bo);
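      /* Each query consumes a pair of report IDs: the Begin snapshot below
       * is tagged with begin_report_id and the End snapshot written by
       * gen_perf_end_query() with begin_report_id + 1, which is why the
       * counter advances by 2 per query.
       */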
      query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
      perf_ctx->next_query_start_report_id += 2;

      /* Take a starting OA counter snapshot. */
      perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
                                               query->oa.begin_report_id);
      snapshot_freq_register(perf_ctx, query, MI_FREQ_START_OFFSET_BYTES);

      ++perf_ctx->n_active_oa_queries;
      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
      query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);

      struct oa_sample_buf *buf =
         exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      gen_perf_query_result_clear(&query->oa.result);
      query->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(perf_ctx, query);
      break;
   }
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }

      query->pipeline_stats.bo =
         perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                 "perf. query pipeline stats bo",
                                 STATS_BO_SIZE);

      /* Take starting snapshots. */
      snapshot_statistics_registers(perf_ctx, query, 0);

      ++perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   return true;
}

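/* Illustrative call sequence (a sketch of the expected driver-side usage,
 * not code from this file; the real wiring lives in the GL frontend):
 *
 *    gen_perf_begin_query(perf_ctx, query);
 *    ... emit draw calls ...
 *    gen_perf_end_query(perf_ctx, query);
 *    while (!gen_perf_is_query_ready(perf_ctx, query, batch))
 *       gen_perf_wait_query(perf_ctx, query, batch);
 *    gen_perf_get_query_data(perf_ctx, query, size, data, &written);
 *    gen_perf_delete_query(perf_ctx, query);
 */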
void
gen_perf_end_query(struct gen_perf_context *perf_ctx,
                   struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see the comment in gen_perf_begin_query for the
    * corresponding flush.
    */
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled.
       */
      if (!query->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         snapshot_freq_register(perf_ctx, query, MI_FREQ_END_OFFSET_BYTES);
         perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
                                                  MI_RPC_BO_END_OFFSET_BYTES,
                                                  query->oa.begin_report_id + 1);
      }

      --perf_ctx->n_active_oa_queries;
      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written to
       * query->oa.bo.
       */
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      snapshot_statistics_registers(perf_ctx, query,
                                    STATS_BO_END_OFFSET_BYTES);
      --perf_ctx->n_active_pipeline_stats_queries;
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}

enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};

static enum OaReadStatus
read_oa_samples_until(struct gen_perf_context *perf_ctx,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);
   uint32_t last_timestamp =
      tail_buf->len == 0 ? start_timestamp : tail_buf->last_timestamp;

   while (1) {
      struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
      uint32_t offset;
      int len;
      while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);

         if (len < 0) {
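            /* Note: the subtractions below are between uint32_t GPU
             * timestamps and so wrap modulo 2^32; e.g. start_timestamp =
             * 0xfffffff0 and last_timestamp = 0x10 yields a delta of 0x20,
             * correctly placing 'last' after 'start'.
             */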
            if (errno == EAGAIN) {
               return ((last_timestamp - start_timestamp) < INT32_MAX &&
                       (last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
            } else {
               DBG("Error reading i915 perf samples: %m\n");
            }
         } else {
            DBG("Spurious EOF reading i915 perf samples\n");
         }

         return OA_READ_STATUS_ERROR;
      }
2018 exec_list_push_tail(&perf_ctx
->sample_buffers
, &buf
->link
);
      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}
/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query,
                          void *current_batch)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulating the results.
    */
   assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
          !perf_cfg->vtbl.bo_busy(query->oa.bo));
   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it.
    */
   if (query->oa.map == NULL)
      query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }
   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error.
       */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}
void
gen_perf_wait_query(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   struct brw_bo *bo = NULL;
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = query->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = query->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (perf_cfg->vtbl.batch_references(current_batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);

   perf_cfg->vtbl.bo_wait_rendering(bo);
   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
       query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
      while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
         ;
   }
}
bool
gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (query->oa.results_accumulated ||
              (query->oa.bo &&
               !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
               !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
               read_oa_samples_for_query(perf_ctx, query, current_batch)));
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (query->pipeline_stats.bo &&
              !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
              !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));
   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}
/**
 * Remove a query from the global list of unaccumulated queries, either
 * after successfully accumulating the OA reports associated with the
 * query in accumulate_oa_reports() or when discarding unwanted query
 * results.
 */
static void
drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                   struct gen_perf_query_object *query)
{
   for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
      if (perf_ctx->unaccumulated[i] == query) {
         int last_elt = --perf_ctx->unaccumulated_elements;

         if (i == last_elt)
            perf_ctx->unaccumulated[i] = NULL;
         else {
            perf_ctx->unaccumulated[i] =
               perf_ctx->unaccumulated[last_elt];
         }

         break;
      }
   }
   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */
   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   query->oa.samples_head = NULL;

   reap_old_sample_buffers(perf_ctx);
}
/* In general, if we see anything spurious while accumulating results we
 * don't try to continue accumulating the current query; we scrap anything
 * outstanding and then hope for the best with new queries.
 */
static void
discard_all_queries(struct gen_perf_context *perf_ctx)
{
   while (perf_ctx->unaccumulated_elements) {
      struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];

      query->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(perf_ctx, query);

      dec_n_users(perf_ctx);
   }
}
/* Looks for the validity bit of the context ID (dword 2) of an OA report. */
static bool
oa_report_ctx_id_valid(const struct gen_device_info *devinfo,
                       const uint32_t *report)
{
   assert(devinfo->gen >= 8);
   if (devinfo->gen == 8)
      return (report[0] & (1 << 25)) != 0;
   return (report[0] & (1 << 16)) != 0;
}
/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by gen_perf_begin_query() until the
 * last MI_RPC report requested by gen_perf_end_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots. For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
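/* Conceptually (a sketch, not literal code from this file) the accumulation
 * walks the ordered report sequence between the Begin/End snapshots:
 *
 *    for each adjacent pair (last, report):
 *       for each counter c:
 *          result.accumulator[c] += delta(last[c], report[c]);
 *
 * with deltas taken modulo the counter width, so a single overflow between
 * two reports is still accounted correctly.
 */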
static void
accumulate_oa_reports(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool last_report_ctx_match = true;
   int out_duration = 0;
   assert(query->oa.map != NULL);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }
   /* On Gen12+ OA reports are sourced from per context counters, so we don't
    * ever have to look at the global OA buffer. Yey \o/
    */
   if (perf_ctx->devinfo->gen >= 12) {
      last = start;
      goto end;
   }
   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the perf_ctx->sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = query->oa.samples_head->next;

   foreach_list_typed_from(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers,
                           first_samples_node)
   {
      int offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool report_ctx_match = true;
            bool add = true;
            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - start[1]) > 5000000000) {
               continue;
            }

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - end[1]) <= 5000000000) {
               goto end;
            }
            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas. The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               /* Consider that the current report matches our context only if
                * the report says the report ID is valid.
                */
               report_ctx_match = oa_report_ctx_id_valid(devinfo, report) &&
                  report[2] == start[2];
               if (report_ctx_match)
                  out_duration = 0;
               else
                  out_duration++;

               /* Only add the delta between <last, report> if the last report
                * was clearly identified as our context, or if we have at most
                * 1 report without a matching ID.
                *
                * The OA unit will sometimes label reports with an invalid
                * context ID when i915 rewrites the execlist submit register
                * with the same context as the one currently running. This
                * happens when i915 wants to notify the HW of a ringbuffer
                * tail register update. We have to consider this report as
                * part of our context as the 3d pipeline behind the OACS unit
                * is still processing the operations started at the previous
                * execlist submission.
                */
               add = last_report_ctx_match && out_duration < 2;
            }
2372 gen_perf_query_result_accumulate(&query
->oa
.result
,
2378 last_report_ctx_match
= report_ctx_match
;
            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:
2395 gen_perf_query_result_accumulate(&query
->oa
.result
, query
->queryinfo
,
2398 query
->oa
.results_accumulated
= true;
2399 drop_from_unaccumulated_query_list(perf_ctx
, query
);
2400 dec_n_users(perf_ctx
);
2406 discard_all_queries(perf_ctx
);
void
gen_perf_delete_query(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (query->oa.bo) {
         if (!query->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(perf_ctx, query);
            dec_n_users(perf_ctx);
         }

         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.results_accumulated = false;
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--perf_ctx->n_query_instances == 0) {
      free_sample_bufs(perf_ctx);
      gen_perf_close(perf_ctx, query->queryinfo);
   }

   free(query);
}
#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
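/* For example, GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) expands to
 * ((start & GEN7_RPSTAT1_CURR_GT_FREQ_MASK) >> GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT),
 * extracting the current GT frequency bitfield from the register snapshot.
 */
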
static void
read_gt_frequency(struct gen_perf_context *perf_ctx,
                  struct gen_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
            end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
   case 12:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }
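   /* Illustrative decode: on gen7/8 the frequency field is in units of
    * 50MHz, so a raw field value of 24 reads as 24 * 50 = 1200MHz; gen9+
    * encodes units of 50/3 MHz, hence the extra division above.
    */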
   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}
static int
get_oa_counter_data(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   int written = 0;
   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);
      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf_cfg, queryinfo,
                                               query->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf_cfg, queryinfo,
                                              query->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter_size;
      }
   }

   return written;
}
static int
get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        size_t data_size,
                        uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   uint8_t *p = data;
   uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }
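      /* E.g. a hypothetical counter whose HW register ticks once per four
       * primitives could expose numerator=4, denominator=1 so the delta is
       * reported in primitives; when the two are equal the raw delta is
       * already in the right units and the rescale above is skipped.
       */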
      *((uint64_t *)p) = value;
      p += 8;
   }

   perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);

   return p - data;
}
void
gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        int data_size,
                        unsigned *data,
                        unsigned *bytes_written)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   int written = 0;
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!query->oa.results_accumulated) {
         read_gt_frequency(perf_ctx, query);
         uint32_t *begin_report = query->oa.map;
         uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
         gen_perf_query_result_read_frequencies(&query->oa.result,
                                                perf_ctx->devinfo,
                                                begin_report, end_report);
         accumulate_oa_reports(perf_ctx, query);
         assert(query->oa.results_accumulated);

         perf_cfg->vtbl.bo_unmap(query->oa.bo);
         query->oa.map = NULL;
      }
      if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &query->oa.result,
                                                     query->oa.gt_frequency[0],
                                                     query->oa.gt_frequency[1]);
      }
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}
void
gen_perf_dump_query_count(struct gen_perf_context *perf_ctx)
{
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
}
void
gen_perf_dump_query(struct gen_perf_context *ctx,
                    struct gen_perf_query_object *obj,
                    void *current_batch)
{
   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("BO: %-4s OA data: %-10s %-15s\n",
          obj->oa.bo ? "yes," : "no,",
          gen_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("BO: %-4s\n",
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}