intel/perf: add EHL performance query support
[mesa.git] src/intel/perf/gen_perf.c
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <dirent.h>
25
26 #include <sys/types.h>
27 #include <sys/stat.h>
28 #include <fcntl.h>
29 #include <unistd.h>
30 #include <errno.h>
31
32 #include <drm-uapi/i915_drm.h>
33
34 #include "common/gen_gem.h"
35 #include "gen_perf.h"
36 #include "gen_perf_regs.h"
37 #include "perf/gen_perf_mdapi.h"
38 #include "perf/gen_perf_metrics.h"
39
40 #include "dev/gen_debug.h"
41 #include "dev/gen_device_info.h"
42 #include "util/bitscan.h"
43 #include "util/mesa-sha1.h"
44 #include "util/u_math.h"
45
46 #define FILE_DEBUG_FLAG DEBUG_PERFMON
47 #define MI_RPC_BO_SIZE 4096
48 #define MI_FREQ_START_OFFSET_BYTES (3072)
49 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
50 #define MI_FREQ_END_OFFSET_BYTES (3076)
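/* Rough layout of the MI_RPC buffer implied by the offsets above (an
 * illustration, not a spec): the Begin OA report is written at offset 0,
 * the End OA report at MI_RPC_BO_END_OFFSET_BYTES (2048), and a 32bit
 * frequency register snapshot is stored at 3072 (Begin) and 3076 (End).
 */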
51
52 #define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
53
54 #define GEN7_RPSTAT1 0xA01C
55 #define GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT 7
56 #define GEN7_RPSTAT1_CURR_GT_FREQ_MASK INTEL_MASK(13, 7)
57 #define GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT 0
58 #define GEN7_RPSTAT1_PREV_GT_FREQ_MASK INTEL_MASK(6, 0)
59
60 #define GEN9_RPSTAT0 0xA01C
61 #define GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT 23
62 #define GEN9_RPSTAT0_CURR_GT_FREQ_MASK INTEL_MASK(31, 23)
63 #define GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT 0
64 #define GEN9_RPSTAT0_PREV_GT_FREQ_MASK INTEL_MASK(8, 0)
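/* Worked example of the field helpers above (illustrative values):
 * INTEL_MASK(13, 7) == ((1u << 7) - 1) << 7 == 0x3f80, so a GT frequency
 * field can be extracted from a register snapshot with something like:
 *
 *    uint32_t ratio = (rpstat & GEN7_RPSTAT1_CURR_GT_FREQ_MASK) >>
 *                     GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT;
 *
 * where "rpstat" is a hypothetical 32bit read of the GEN7_RPSTAT1 register.
 */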
65
66 #define GEN6_SO_PRIM_STORAGE_NEEDED 0x2280
67 #define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)
68 #define GEN6_SO_NUM_PRIMS_WRITTEN 0x2288
69 #define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
70
71 #define MAP_READ (1 << 0)
72 #define MAP_WRITE (1 << 1)
73
74 /**
75 * Periodic OA samples are read() into these buffer structures via the
76 * i915 perf kernel interface and appended to the
77 * perf_ctx->sample_buffers linked list. When we process the
78 * results of an OA metrics query we need to consider all the periodic
79 * samples between the Begin and End MI_REPORT_PERF_COUNT command
80 * markers.
81 *
82 * 'Periodic' is a simplification as there are other automatic reports
83 * written by the hardware also buffered here.
84 *
85 * Considering three queries, A, B and C:
86 *
87 * Time ---->
88 * ________________A_________________
89 * | |
90 * | ________B_________ _____C___________
91 * | | | | | |
92 *
93 * And an illustration of sample buffers read over this time frame:
94 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
95 *
96 * These nodes may hold samples for query A:
97 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
98 *
99 * These nodes may hold samples for query B:
100 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
101 *
102 * These nodes may hold samples for query C:
103 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
104 *
105 * The illustration assumes we have an even distribution of periodic
106 * samples so all nodes have the same size plotted against time:
107 *
108 * Note, to simplify code, the list is never empty.
109 *
110 * With overlapping queries we can see that periodic OA reports may
111  * relate to multiple queries and care needs to be taken to keep
112 * track of sample buffers until there are no queries that might
113 * depend on their contents.
114 *
115 * We use a node ref counting system where a reference ensures that a
116 * node and all following nodes can't be freed/recycled until the
117 * reference drops to zero.
118 *
119 * E.g. with a ref of one here:
120 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
121 *
122 * These nodes could be freed or recycled ("reaped"):
123 * [ 0 ][ 0 ]
124 *
125 * These must be preserved until the leading ref drops to zero:
126 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
127 *
128 * When a query starts we take a reference on the current tail of
129 * the list, knowing that no already-buffered samples can possibly
130 * relate to the newly-started query. A pointer to this node is
131 * also saved in the query object's ->oa.samples_head.
132 *
133 * E.g. starting query A while there are two nodes in .sample_buffers:
134 * ________________A________
135 * |
136 *
137 * [ 0 ][ 1 ]
138 * ^_______ Add a reference and store pointer to node in
139 * A->oa.samples_head
140 *
141 * Moving forward to when the B query starts with no new buffer nodes:
142 * (for reference, i915 perf reads() are only done when queries finish)
143 * ________________A_______
144 * | ________B___
145 * | |
146 *
147 * [ 0 ][ 2 ]
148 * ^_______ Add a reference and store pointer to
149 * node in B->oa.samples_head
150 *
151 * Once a query is finished, after an OA query has become 'Ready',
152  * once the End OA report has landed and after we have processed
153 * all the intermediate periodic samples then we drop the
154 * ->oa.samples_head reference we took at the start.
155 *
156 * So when the B query has finished we have:
157 * ________________A________
158 * | ______B___________
159 * | | |
160 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
161 * ^_______ Drop B->oa.samples_head reference
162 *
163 * We still can't free these due to the A->oa.samples_head ref:
164 * [ 1 ][ 0 ][ 0 ][ 0 ]
165 *
166 * When the A query finishes: (note there's a new ref for C's samples_head)
167 * ________________A_________________
168 * | |
169 * | _____C_________
170 * | | |
171 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
172 * ^_______ Drop A->oa.samples_head reference
173 *
174 * And we can now reap these nodes up to the C->oa.samples_head:
175 * [ X ][ X ][ X ][ X ]
176 * keeping -> [ 1 ][ 0 ][ 0 ]
177 *
178 * We reap old sample buffers each time we finish processing an OA
179 * query by iterating the sample_buffers list from the head until we
180 * find a referenced node and stop.
181 *
182 * Reaped buffers move to a perfquery.free_sample_buffers list and
183 * when we come to read() we first look to recycle a buffer from the
184 * free_sample_buffers list before allocating a new buffer.
185 */
186 struct oa_sample_buf {
187 struct exec_node link;
188 int refcount;
189 int len;
190 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
191 uint32_t last_timestamp;
192 };
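/* Condensed sketch of the ref-counting life cycle described above (the real
 * logic lives in the query Begin/End paths and reap_old_sample_buffers()
 * further down):
 *
 *    // Begin: pin the current tail so later reads stay reachable.
 *    query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);
 *    exec_node_data(struct oa_sample_buf,
 *                   query->oa.samples_head, link)->refcount++;
 *
 *    // Once the query's reports have been accumulated: drop the reference
 *    // and reap unreferenced buffers from the head of the list.
 *    exec_node_data(struct oa_sample_buf,
 *                   query->oa.samples_head, link)->refcount--;
 *    reap_old_sample_buffers(perf_ctx);
 */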
193
194 /**
195 * gen representation of a performance query object.
196 *
197 * NB: We want to keep this structure relatively lean considering that
198 * applications may expect to allocate enough objects to be able to
199 * query around all draw calls in a frame.
200 */
201 struct gen_perf_query_object
202 {
203 const struct gen_perf_query_info *queryinfo;
204
205 /* See query->kind to know which state below is in use... */
206 union {
207 struct {
208
209 /**
210 * BO containing OA counter snapshots at query Begin/End time.
211 */
212 void *bo;
213
214 /**
215          * Address of the mapped @bo
216 */
217 void *map;
218
219 /**
220 * The MI_REPORT_PERF_COUNT command lets us specify a unique
221 * ID that will be reflected in the resulting OA report
222 * that's written by the GPU. This is the ID we're expecting
223 * in the begin report and the the end report should be
224          * in the begin report and the end report should be
225 */
226 int begin_report_id;
227
228 /**
229 * Reference the head of the brw->perfquery.sample_buffers
230 * list at the time that the query started (so we only need
231 * to look at nodes after this point when looking for samples
232 * related to this query)
233 *
234 * (See struct brw_oa_sample_buf description for more details)
235 */
236 struct exec_node *samples_head;
237
238 /**
239 * false while in the unaccumulated_elements list, and set to
240 * true when the final, end MI_RPC snapshot has been
241 * accumulated.
242 */
243 bool results_accumulated;
244
245 /**
246 * Frequency of the GT at begin and end of the query.
247 */
248 uint64_t gt_frequency[2];
249
250 /**
251 * Accumulated OA results between begin and end of the query.
252 */
253 struct gen_perf_query_result result;
254 } oa;
255
256 struct {
257 /**
258 * BO containing starting and ending snapshots for the
259 * statistics counters.
260 */
261 void *bo;
262 } pipeline_stats;
263 };
264 };
265
266 struct gen_perf_context {
267 struct gen_perf_config *perf;
268
269 void * ctx; /* driver context (eg, brw_context) */
270 void * bufmgr;
271 const struct gen_device_info *devinfo;
272
273 uint32_t hw_ctx;
274 int drm_fd;
275
276 /* The i915 perf stream we open to setup + enable the OA counters */
277 int oa_stream_fd;
278
279 /* An i915 perf stream fd gives exclusive access to the OA unit that will
280 * report counter snapshots for a specific counter set/profile in a
281 * specific layout/format so we can only start OA queries that are
282 * compatible with the currently open fd...
283 */
284 int current_oa_metrics_set_id;
285 int current_oa_format;
286
287 /* List of buffers containing OA reports */
288 struct exec_list sample_buffers;
289
290 /* Cached list of empty sample buffers */
291 struct exec_list free_sample_buffers;
292
293 int n_active_oa_queries;
294 int n_active_pipeline_stats_queries;
295
296 /* The number of queries depending on running OA counters which
297 * extends beyond brw_end_perf_query() since we need to wait until
298     * the last MI_RPC command has been parsed by the GPU.
299 *
300 * Accurate accounting is important here as emitting an
301 * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
302 * effectively hang the gpu.
303 */
304 int n_oa_users;
305
306    /* To help catch a spurious problem with the hardware or perf
307 * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
308 * with a unique ID that we can explicitly check for...
309 */
310 int next_query_start_report_id;
311
312 /**
313 * An array of queries whose results haven't yet been assembled
314 * based on the data in buffer objects.
315 *
316 * These may be active, or have already ended. However, the
317 * results have not been requested.
318 */
319 struct gen_perf_query_object **unaccumulated;
320 int unaccumulated_elements;
321 int unaccumulated_array_size;
322
323 /* The total number of query objects so we can relinquish
324 * our exclusive access to perf if the application deletes
325 * all of its objects. (NB: We only disable perf while
326 * there are no active queries)
327 */
328 int n_query_instances;
329 };
330
331 const struct gen_perf_query_info*
332 gen_perf_query_info(const struct gen_perf_query_object *query)
333 {
334 return query->queryinfo;
335 }
336
337 struct gen_perf_context *
338 gen_perf_new_context(void *parent)
339 {
340 struct gen_perf_context *ctx = rzalloc(parent, struct gen_perf_context);
341    if (!ctx)
342 fprintf(stderr, "%s: failed to alloc context\n", __func__);
343 return ctx;
344 }
345
346 struct gen_perf_config *
347 gen_perf_config(struct gen_perf_context *ctx)
348 {
349 return ctx->perf;
350 }
351
352 struct gen_perf_query_object *
353 gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
354 {
355 const struct gen_perf_query_info *query =
356 &perf_ctx->perf->queries[query_index];
357 struct gen_perf_query_object *obj =
358 calloc(1, sizeof(struct gen_perf_query_object));
359
360 if (!obj)
361 return NULL;
362
363 obj->queryinfo = query;
364
365 perf_ctx->n_query_instances++;
366 return obj;
367 }
368
369 int
370 gen_perf_active_queries(struct gen_perf_context *perf_ctx,
371 const struct gen_perf_query_info *query)
372 {
373 assert(perf_ctx->n_active_oa_queries == 0 || perf_ctx->n_active_pipeline_stats_queries == 0);
374
375 switch (query->kind) {
376 case GEN_PERF_QUERY_TYPE_OA:
377 case GEN_PERF_QUERY_TYPE_RAW:
378 return perf_ctx->n_active_oa_queries;
379 break;
380
381 case GEN_PERF_QUERY_TYPE_PIPELINE:
382 return perf_ctx->n_active_pipeline_stats_queries;
383 break;
384
385 default:
386 unreachable("Unknown query type");
387 break;
388 }
389 }
390
391 static inline uint64_t to_user_pointer(void *ptr)
392 {
393 return (uintptr_t) ptr;
394 }
395
396 static bool
397 get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
398 {
399 struct stat sb;
400 int min, maj;
401 DIR *drmdir;
402 struct dirent *drm_entry;
403 int len;
404
405 perf->sysfs_dev_dir[0] = '\0';
406
407 if (fstat(fd, &sb)) {
408 DBG("Failed to stat DRM fd\n");
409 return false;
410 }
411
412 maj = major(sb.st_rdev);
413 min = minor(sb.st_rdev);
414
415 if (!S_ISCHR(sb.st_mode)) {
416 DBG("DRM fd is not a character device as expected\n");
417 return false;
418 }
419
420 len = snprintf(perf->sysfs_dev_dir,
421 sizeof(perf->sysfs_dev_dir),
422 "/sys/dev/char/%d:%d/device/drm", maj, min);
423 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
424 DBG("Failed to concatenate sysfs path to drm device\n");
425 return false;
426 }
427
428 drmdir = opendir(perf->sysfs_dev_dir);
429 if (!drmdir) {
430 DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
431 return false;
432 }
433
434 while ((drm_entry = readdir(drmdir))) {
435 if ((drm_entry->d_type == DT_DIR ||
436 drm_entry->d_type == DT_LNK) &&
437 strncmp(drm_entry->d_name, "card", 4) == 0)
438 {
439 len = snprintf(perf->sysfs_dev_dir,
440 sizeof(perf->sysfs_dev_dir),
441 "/sys/dev/char/%d:%d/device/drm/%s",
442 maj, min, drm_entry->d_name);
443 closedir(drmdir);
444 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
445 return false;
446 else
447 return true;
448 }
449 }
450
451 closedir(drmdir);
452
453 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
454 maj, min);
455
456 return false;
457 }
458
459 static bool
460 read_file_uint64(const char *file, uint64_t *val)
461 {
462 char buf[32];
463 int fd, n;
464
465 fd = open(file, 0);
466 if (fd < 0)
467 return false;
468 while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
469 errno == EINTR);
470 close(fd);
471 if (n < 0)
472 return false;
473
474 buf[n] = '\0';
475 *val = strtoull(buf, NULL, 0);
476
477 return true;
478 }
479
480 static bool
481 read_sysfs_drm_device_file_uint64(struct gen_perf_config *perf,
482 const char *file,
483 uint64_t *value)
484 {
485 char buf[512];
486 int len;
487
488 len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
489 if (len < 0 || len >= sizeof(buf)) {
490 DBG("Failed to concatenate sys filename to read u64 from\n");
491 return false;
492 }
493
494 return read_file_uint64(buf, value);
495 }
496
497 static inline struct gen_perf_query_info *
498 append_query_info(struct gen_perf_config *perf, int max_counters)
499 {
500 struct gen_perf_query_info *query;
501
502 perf->queries = reralloc(perf, perf->queries,
503 struct gen_perf_query_info,
504 ++perf->n_queries);
505 query = &perf->queries[perf->n_queries - 1];
506 memset(query, 0, sizeof(*query));
507
508 if (max_counters > 0) {
509 query->max_counters = max_counters;
510 query->counters =
511 rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
512 }
513
514 return query;
515 }
516
517 static void
518 register_oa_config(struct gen_perf_config *perf,
519 const struct gen_perf_query_info *query,
520 uint64_t config_id)
521 {
522 struct gen_perf_query_info *registered_query = append_query_info(perf, 0);
523
524 *registered_query = *query;
525 registered_query->oa_metrics_set_id = config_id;
526 DBG("metric set registered: id = %" PRIu64", guid = %s\n",
527 registered_query->oa_metrics_set_id, query->guid);
528 }
529
530 static void
531 enumerate_sysfs_metrics(struct gen_perf_config *perf)
532 {
533 DIR *metricsdir = NULL;
534 struct dirent *metric_entry;
535 char buf[256];
536 int len;
537
538 len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
539 if (len < 0 || len >= sizeof(buf)) {
540 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
541 return;
542 }
543
544 metricsdir = opendir(buf);
545 if (!metricsdir) {
546 DBG("Failed to open %s: %m\n", buf);
547 return;
548 }
549
550 while ((metric_entry = readdir(metricsdir))) {
551 struct hash_entry *entry;
552
553 if ((metric_entry->d_type != DT_DIR &&
554 metric_entry->d_type != DT_LNK) ||
555 metric_entry->d_name[0] == '.')
556 continue;
557
558 DBG("metric set: %s\n", metric_entry->d_name);
559 entry = _mesa_hash_table_search(perf->oa_metrics_table,
560 metric_entry->d_name);
561 if (entry) {
562 uint64_t id;
563 if (!gen_perf_load_metric_id(perf, metric_entry->d_name, &id)) {
564 DBG("Failed to read metric set id from %s: %m", buf);
565 continue;
566 }
567
568 register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
569 } else
570 DBG("metric set not known by mesa (skipping)\n");
571 }
572
573 closedir(metricsdir);
574 }
575
576 static bool
577 kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
578 {
579 uint64_t invalid_config_id = UINT64_MAX;
580
581 return gen_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
582 &invalid_config_id) < 0 && errno == ENOENT;
583 }
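/* Note on the probe above: UINT64_MAX can never be a valid config id, so a
 * kernel that implements DRM_IOCTL_I915_PERF_REMOVE_CONFIG answers with
 * ENOENT, while an older kernel rejects the ioctl itself with a different
 * errno.
 */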
584
585 static int
586 i915_query_items(struct gen_perf_config *perf, int fd,
587 struct drm_i915_query_item *items, uint32_t n_items)
588 {
589 struct drm_i915_query q = {
590 .num_items = n_items,
591 .items_ptr = to_user_pointer(items),
592 };
593 return gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
594 }
595
596 static bool
597 i915_query_perf_config_supported(struct gen_perf_config *perf, int fd)
598 {
599 struct drm_i915_query_item item = {
600 .query_id = DRM_I915_QUERY_PERF_CONFIG,
601 .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
602 };
603
604 return i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0;
605 }
606
607 static bool
608 i915_query_perf_config_data(struct gen_perf_config *perf,
609 int fd, const char *guid,
610 struct drm_i915_perf_oa_config *config)
611 {
612 struct {
613 struct drm_i915_query_perf_config query;
614 struct drm_i915_perf_oa_config config;
615 } item_data;
616 struct drm_i915_query_item item = {
617 .query_id = DRM_I915_QUERY_PERF_CONFIG,
618 .flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
619 .data_ptr = to_user_pointer(&item_data),
620 .length = sizeof(item_data),
621 };
622
623 memset(&item_data, 0, sizeof(item_data));
624 memcpy(item_data.query.uuid, guid, sizeof(item_data.query.uuid));
625 memcpy(&item_data.config, config, sizeof(item_data.config));
626
627 if (!(i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0))
628 return false;
629
630 memcpy(config, &item_data.config, sizeof(item_data.config));
631
632 return true;
633 }
634
635 bool
636 gen_perf_load_metric_id(struct gen_perf_config *perf_cfg,
637 const char *guid,
638 uint64_t *metric_id)
639 {
640 char config_path[280];
641
642 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
643 perf_cfg->sysfs_dev_dir, guid);
644
645 /* Don't recreate already loaded configs. */
646 return read_file_uint64(config_path, metric_id);
647 }
648
649 static uint64_t
650 i915_add_config(struct gen_perf_config *perf, int fd,
651 const struct gen_perf_registers *config,
652 const char *guid)
653 {
654 struct drm_i915_perf_oa_config i915_config = { 0, };
655
656 memcpy(i915_config.uuid, guid, sizeof(i915_config.uuid));
657
658 i915_config.n_mux_regs = config->n_mux_regs;
659 i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);
660
661 i915_config.n_boolean_regs = config->n_b_counter_regs;
662 i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);
663
664 i915_config.n_flex_regs = config->n_flex_regs;
665 i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);
666
667 int ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &i915_config);
668 return ret > 0 ? ret : 0;
669 }
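/* On success DRM_IOCTL_I915_PERF_ADD_CONFIG returns the id of the newly
 * registered metric set (a strictly positive value); that id is what later
 * gets passed as DRM_I915_PERF_PROP_OA_METRICS_SET when opening the stream.
 * 0 is used here as the failure marker.
 */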
670
671 static void
672 init_oa_configs(struct gen_perf_config *perf, int fd)
673 {
674 hash_table_foreach(perf->oa_metrics_table, entry) {
675 const struct gen_perf_query_info *query = entry->data;
676 uint64_t config_id;
677
678 if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
679 DBG("metric set: %s (already loaded)\n", query->guid);
680 register_oa_config(perf, query, config_id);
681 continue;
682 }
683
684 int ret = i915_add_config(perf, fd, &query->config, query->guid);
685 if (ret < 0) {
686 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
687 query->name, query->guid, strerror(errno));
688 continue;
689 }
690
691 register_oa_config(perf, query, ret);
692 DBG("metric set: %s (added)\n", query->guid);
693 }
694 }
695
696 static void
697 compute_topology_builtins(struct gen_perf_config *perf,
698 const struct gen_device_info *devinfo)
699 {
700 perf->sys_vars.slice_mask = devinfo->slice_masks;
701 perf->sys_vars.n_eu_slices = devinfo->num_slices;
702
703 for (int i = 0; i < sizeof(devinfo->subslice_masks[i]); i++) {
704 perf->sys_vars.n_eu_sub_slices +=
705 __builtin_popcount(devinfo->subslice_masks[i]);
706 }
707
708 for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
709 perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);
710
711 perf->sys_vars.eu_threads_count = devinfo->num_thread_per_eu;
712
713 /* The subslice mask builtin contains bits for all slices. Prior to Gen11
714     * it had groups of 3 bits for each slice; on Gen11 it's 8 bits for each
715 * slice.
716 *
717 * Ideally equations would be updated to have a slice/subslice query
718 * function/operator.
719 */
720 perf->sys_vars.subslice_mask = 0;
721
722 int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;
723
724 for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
725 for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
726 if (gen_device_info_subslice_available(devinfo, s, ss))
727 perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
728 }
729 }
730 }
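/* Example of the resulting subslice_mask packing (hypothetical topology,
 * for illustration only): with 3 bits per slice on pre-Gen11, a part where
 * slices 0 and 1 each expose subslices 0-1 ends up with
 *
 *    subslice_mask = 0x1b   (slice 1 in bits 5:3, slice 0 in bits 2:0)
 *
 * while the same topology on Gen11, at 8 bits per slice, gives 0x303.
 */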
731
732 static bool
733 init_oa_sys_vars(struct gen_perf_config *perf, const struct gen_device_info *devinfo)
734 {
735 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
736
737 if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
738 return false;
739
740 if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
741 return false;
742
743 memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
744 perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
745 perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
746 perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
747 perf->sys_vars.revision = devinfo->revision;
748 compute_topology_builtins(perf, devinfo);
749
750 return true;
751 }
752
753 typedef void (*perf_register_oa_queries_t)(struct gen_perf_config *);
754
755 static perf_register_oa_queries_t
756 get_register_queries_function(const struct gen_device_info *devinfo)
757 {
758 if (devinfo->is_haswell)
759 return gen_oa_register_queries_hsw;
760 if (devinfo->is_cherryview)
761 return gen_oa_register_queries_chv;
762 if (devinfo->is_broadwell)
763 return gen_oa_register_queries_bdw;
764 if (devinfo->is_broxton)
765 return gen_oa_register_queries_bxt;
766 if (devinfo->is_skylake) {
767 if (devinfo->gt == 2)
768 return gen_oa_register_queries_sklgt2;
769 if (devinfo->gt == 3)
770 return gen_oa_register_queries_sklgt3;
771 if (devinfo->gt == 4)
772 return gen_oa_register_queries_sklgt4;
773 }
774 if (devinfo->is_kabylake) {
775 if (devinfo->gt == 2)
776 return gen_oa_register_queries_kblgt2;
777 if (devinfo->gt == 3)
778 return gen_oa_register_queries_kblgt3;
779 }
780 if (devinfo->is_geminilake)
781 return gen_oa_register_queries_glk;
782 if (devinfo->is_coffeelake) {
783 if (devinfo->gt == 2)
784 return gen_oa_register_queries_cflgt2;
785 if (devinfo->gt == 3)
786 return gen_oa_register_queries_cflgt3;
787 }
788 if (devinfo->is_cannonlake)
789 return gen_oa_register_queries_cnl;
790 if (devinfo->gen == 11) {
791 if (devinfo->is_elkhartlake)
792 return gen_oa_register_queries_lkf;
793 return gen_oa_register_queries_icl;
794 }
795 if (devinfo->gen == 12)
796 return gen_oa_register_queries_tgl;
797
798 return NULL;
799 }
800
801 static inline void
802 add_stat_reg(struct gen_perf_query_info *query, uint32_t reg,
803 uint32_t numerator, uint32_t denominator,
804 const char *name, const char *description)
805 {
806 struct gen_perf_query_counter *counter;
807
808 assert(query->n_counters < query->max_counters);
809
810 counter = &query->counters[query->n_counters];
811 counter->name = name;
812 counter->desc = description;
813 counter->type = GEN_PERF_COUNTER_TYPE_RAW;
814 counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
815 counter->offset = sizeof(uint64_t) * query->n_counters;
816 counter->pipeline_stat.reg = reg;
817 counter->pipeline_stat.numerator = numerator;
818 counter->pipeline_stat.denominator = denominator;
819
820 query->n_counters++;
821 }
822
823 static inline void
824 add_basic_stat_reg(struct gen_perf_query_info *query,
825 uint32_t reg, const char *name)
826 {
827 add_stat_reg(query, reg, 1, 1, name, name);
828 }
829
830 static void
831 load_pipeline_statistic_metrics(struct gen_perf_config *perf_cfg,
832 const struct gen_device_info *devinfo)
833 {
834 struct gen_perf_query_info *query =
835 append_query_info(perf_cfg, MAX_STAT_COUNTERS);
836
837 query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
838 query->name = "Pipeline Statistics Registers";
839
840 add_basic_stat_reg(query, IA_VERTICES_COUNT,
841 "N vertices submitted");
842 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
843 "N primitives submitted");
844 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
845 "N vertex shader invocations");
846
847 if (devinfo->gen == 6) {
848 add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
849 "SO_PRIM_STORAGE_NEEDED",
850 "N geometry shader stream-out primitives (total)");
851 add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
852 "SO_NUM_PRIMS_WRITTEN",
853 "N geometry shader stream-out primitives (written)");
854 } else {
855 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
856 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
857 "N stream-out (stream 0) primitives (total)");
858 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
859 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
860 "N stream-out (stream 1) primitives (total)");
861 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
862 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
863 "N stream-out (stream 2) primitives (total)");
864 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
865 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
866 "N stream-out (stream 3) primitives (total)");
867 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
868 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
869 "N stream-out (stream 0) primitives (written)");
870 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
871 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
872 "N stream-out (stream 1) primitives (written)");
873 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
874 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
875 "N stream-out (stream 2) primitives (written)");
876 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
877 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
878 "N stream-out (stream 3) primitives (written)");
879 }
880
881 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
882 "N TCS shader invocations");
883 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
884 "N TES shader invocations");
885
886 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
887 "N geometry shader invocations");
888 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
889 "N geometry shader primitives emitted");
890
891 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
892 "N primitives entering clipping");
893 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
894 "N primitives leaving clipping");
895
896 if (devinfo->is_haswell || devinfo->gen == 8) {
897 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
898 "N fragment shader invocations",
899 "N fragment shader invocations");
900 } else {
901 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
902 "N fragment shader invocations");
903 }
904
905 add_basic_stat_reg(query, PS_DEPTH_COUNT,
906 "N z-pass fragments");
907
908 if (devinfo->gen >= 7) {
909 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
910 "N compute shader invocations");
911 }
912
913 query->data_size = sizeof(uint64_t) * query->n_counters;
914 }
915
916 static bool
917 load_oa_metrics(struct gen_perf_config *perf, int fd,
918 const struct gen_device_info *devinfo)
919 {
920 perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
921 bool i915_perf_oa_available = false;
922 struct stat sb;
923
924 perf->i915_query_supported = i915_query_perf_config_supported(perf, fd);
925
926 /* The existence of this sysctl parameter implies the kernel supports
927 * the i915 perf interface.
928 */
929 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
930
931 /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
932 * metrics unless running as root.
933 */
934 if (devinfo->is_haswell)
935 i915_perf_oa_available = true;
936 else {
937 uint64_t paranoid = 1;
938
939 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
940
941 if (paranoid == 0 || geteuid() == 0)
942 i915_perf_oa_available = true;
943 }
944 }
945
946 if (!i915_perf_oa_available ||
947 !oa_register ||
948 !get_sysfs_dev_dir(perf, fd) ||
949 !init_oa_sys_vars(perf, devinfo))
950 return false;
951
952 perf->oa_metrics_table =
953 _mesa_hash_table_create(perf, _mesa_key_hash_string,
954 _mesa_key_string_equal);
955
956 /* Index all the metric sets mesa knows about before looking to see what
957 * the kernel is advertising.
958 */
959 oa_register(perf);
960
961 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
962 kernel_has_dynamic_config_support(perf, fd))
963 init_oa_configs(perf, fd);
964 else
965 enumerate_sysfs_metrics(perf);
966
967 return true;
968 }
969
970 struct gen_perf_registers *
971 gen_perf_load_configuration(struct gen_perf_config *perf_cfg, int fd, const char *guid)
972 {
973 if (!perf_cfg->i915_query_supported)
974 return NULL;
975
976 struct drm_i915_perf_oa_config i915_config = { 0, };
977 if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config))
978 return NULL;
979
980 struct gen_perf_registers *config = rzalloc(NULL, struct gen_perf_registers);
981 config->n_flex_regs = i915_config.n_flex_regs;
982 config->flex_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_flex_regs);
983 config->n_mux_regs = i915_config.n_mux_regs;
984 config->mux_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_mux_regs);
985 config->n_b_counter_regs = i915_config.n_boolean_regs;
986 config->b_counter_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_b_counter_regs);
987
988 /*
989 * struct gen_perf_query_register_prog maps exactly to the tuple of
990 * (register offset, register value) returned by the i915.
991 */
992 i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);
993 i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);
994 i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);
995 if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config)) {
996 ralloc_free(config);
997 return NULL;
998 }
999
1000 return config;
1001 }
1002
1003 uint64_t
1004 gen_perf_store_configuration(struct gen_perf_config *perf_cfg, int fd,
1005 const struct gen_perf_registers *config,
1006 const char *guid)
1007 {
1008 if (guid)
1009 return i915_add_config(perf_cfg, fd, config, guid);
1010
1011 struct mesa_sha1 sha1_ctx;
1012 _mesa_sha1_init(&sha1_ctx);
1013
1014 if (config->flex_regs) {
1015 _mesa_sha1_update(&sha1_ctx, config->flex_regs,
1016 sizeof(config->flex_regs[0]) *
1017 config->n_flex_regs);
1018 }
1019 if (config->mux_regs) {
1020 _mesa_sha1_update(&sha1_ctx, config->mux_regs,
1021 sizeof(config->mux_regs[0]) *
1022 config->n_mux_regs);
1023 }
1024 if (config->b_counter_regs) {
1025 _mesa_sha1_update(&sha1_ctx, config->b_counter_regs,
1026 sizeof(config->b_counter_regs[0]) *
1027 config->n_b_counter_regs);
1028 }
1029
1030 uint8_t hash[20];
1031 _mesa_sha1_final(&sha1_ctx, hash);
1032
1033 char formatted_hash[41];
1034 _mesa_sha1_format(formatted_hash, hash);
1035
1036 char generated_guid[37];
1037 snprintf(generated_guid, sizeof(generated_guid),
1038 "%.8s-%.4s-%.4s-%.4s-%.12s",
1039 &formatted_hash[0], &formatted_hash[8],
1040 &formatted_hash[8 + 4], &formatted_hash[8 + 4 + 4],
1041 &formatted_hash[8 + 4 + 4 + 4]);
1042
1043 /* Check if already present. */
1044 uint64_t id;
1045 if (gen_perf_load_metric_id(perf_cfg, generated_guid, &id))
1046 return id;
1047
1048 return i915_add_config(perf_cfg, fd, config, generated_guid);
1049 }
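/* Worked example of the guid derivation above: _mesa_sha1_format() produces
 * 40 hex characters and the "%.8s-%.4s-%.4s-%.4s-%.12s" format consumes the
 * first 32 of them in the usual 8-4-4-4-12 UUID layout, i.e. characters
 * [0,7]-[8,11]-[12,15]-[16,19]-[20,31]. Identical register lists therefore
 * hash to the same guid, which is what makes the gen_perf_load_metric_id()
 * check above a cheap "already registered" test.
 */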
1050
1051 /* Accumulate 32bits OA counters */
1052 static inline void
1053 accumulate_uint32(const uint32_t *report0,
1054 const uint32_t *report1,
1055 uint64_t *accumulator)
1056 {
1057 *accumulator += (uint32_t)(*report1 - *report0);
1058 }
1059
1060 /* Accumulate 40bits OA counters */
1061 static inline void
1062 accumulate_uint40(int a_index,
1063 const uint32_t *report0,
1064 const uint32_t *report1,
1065 uint64_t *accumulator)
1066 {
1067 const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
1068 const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
1069 uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
1070 uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
1071 uint64_t value0 = report0[a_index + 4] | high0;
1072 uint64_t value1 = report1[a_index + 4] | high1;
1073 uint64_t delta;
1074
1075 if (value0 > value1)
1076 delta = (1ULL << 40) + value1 - value0;
1077 else
1078 delta = value1 - value0;
1079
1080 *accumulator += delta;
1081 }
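/* Layout/overflow example for the 40bit accumulation above (illustrative
 * values): the low 32 bits of A-counter <i> live in dword 4 + i of the
 * report and the high byte at byte offset 160 + i (report + 40 dwords).
 * With value0 = 0xffffffff00 in the begin report and value1 = 0x100 in the
 * end report the counter wrapped, so:
 *
 *    delta = (1ULL << 40) + 0x100 - 0xffffffff00 = 0x200
 */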
1082
1083 static void
1084 gen8_read_report_clock_ratios(const uint32_t *report,
1085 uint64_t *slice_freq_hz,
1086 uint64_t *unslice_freq_hz)
1087 {
1088    /* The lower 16 bits of the RPT_ID field of the OA reports contain a
1089     * snapshot of the bits coming from the RP_FREQ_NORMAL register, divided
1090     * this way:
1091 *
1092 * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
1093 * RPT_ID[10:9]: RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
1094 * RPT_ID[8:0]: RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
1095 *
1096 * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
1097     *                    Multiple of 33.33MHz 2xclk (16.67MHz 1xclk)
1098 *
1099 * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
1100     *                    Multiple of 33.33MHz 2xclk (16.67MHz 1xclk)
1101 */
1102
1103 uint32_t unslice_freq = report[0] & 0x1ff;
1104 uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
1105 uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
1106 uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);
1107
1108 *slice_freq_hz = slice_freq * 16666667ULL;
1109 *unslice_freq_hz = unslice_freq * 16666667ULL;
1110 }
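/* For example (made-up report value): a squashed_unslice_clock_frequency of
 * 18 decodes to 18 * 16666667 ~= 300MHz for the unslice clock; the slice
 * ratio is reassembled from its low 7 bits and high 2 bits before being
 * scaled the same way.
 */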
1111
1112 void
1113 gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
1114 const struct gen_device_info *devinfo,
1115 const uint32_t *start,
1116 const uint32_t *end)
1117 {
1118 /* Slice/Unslice frequency is only available in the OA reports when the
1119 * "Disable OA reports due to clock ratio change" field in
1120 * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
1121 * global register (see drivers/gpu/drm/i915/i915_perf.c)
1122 *
1123 * Documentation says this should be available on Gen9+ but experimentation
1124 * shows that Gen8 reports similar values, so we enable it there too.
1125 */
1126 if (devinfo->gen < 8)
1127 return;
1128
1129 gen8_read_report_clock_ratios(start,
1130 &result->slice_frequency[0],
1131 &result->unslice_frequency[0]);
1132 gen8_read_report_clock_ratios(end,
1133 &result->slice_frequency[1],
1134 &result->unslice_frequency[1]);
1135 }
1136
1137 void
1138 gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
1139 const struct gen_perf_query_info *query,
1140 const uint32_t *start,
1141 const uint32_t *end)
1142 {
1143 int i, idx = 0;
1144
1145 result->hw_id = start[2];
1146 result->reports_accumulated++;
1147
1148 switch (query->oa_format) {
1149 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
1150 accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
1151 accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */
1152
1153 /* 32x 40bit A counters... */
1154 for (i = 0; i < 32; i++)
1155 accumulate_uint40(i, start, end, result->accumulator + idx++);
1156
1157 /* 4x 32bit A counters... */
1158 for (i = 0; i < 4; i++)
1159 accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);
1160
1161 /* 8x 32bit B counters + 8x 32bit C counters... */
1162 for (i = 0; i < 16; i++)
1163 accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
1164 break;
1165
1166 case I915_OA_FORMAT_A45_B8_C8:
1167 accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */
1168
1169 for (i = 0; i < 61; i++)
1170 accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
1171 break;
1172
1173 default:
1174 unreachable("Can't accumulate OA counters in unknown format");
1175 }
1176
1177 }
1178
1179 void
1180 gen_perf_query_result_clear(struct gen_perf_query_result *result)
1181 {
1182 memset(result, 0, sizeof(*result));
1183 result->hw_id = 0xffffffff; /* invalid */
1184 }
1185
1186 static void
1187 register_mdapi_statistic_query(struct gen_perf_config *perf_cfg,
1188 const struct gen_device_info *devinfo)
1189 {
1190 if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
1191 return;
1192
1193 struct gen_perf_query_info *query =
1194 append_query_info(perf_cfg, MAX_STAT_COUNTERS);
1195
1196 query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
1197 query->name = "Intel_Raw_Pipeline_Statistics_Query";
1198
1199 /* The order has to match mdapi_pipeline_metrics. */
1200 add_basic_stat_reg(query, IA_VERTICES_COUNT,
1201 "N vertices submitted");
1202 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1203 "N primitives submitted");
1204 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1205 "N vertex shader invocations");
1206 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1207 "N geometry shader invocations");
1208 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1209 "N geometry shader primitives emitted");
1210 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1211 "N primitives entering clipping");
1212 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1213 "N primitives leaving clipping");
1214 if (devinfo->is_haswell || devinfo->gen == 8) {
1215 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1216 "N fragment shader invocations",
1217 "N fragment shader invocations");
1218 } else {
1219 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1220 "N fragment shader invocations");
1221 }
1222 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1223 "N TCS shader invocations");
1224 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1225 "N TES shader invocations");
1226 if (devinfo->gen >= 7) {
1227 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1228 "N compute shader invocations");
1229 }
1230
1231 if (devinfo->gen >= 10) {
1232 /* Reuse existing CS invocation register until we can expose this new
1233 * one.
1234 */
1235 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1236 "Reserved1");
1237 }
1238
1239 query->data_size = sizeof(uint64_t) * query->n_counters;
1240 }
1241
1242 static void
1243 fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
1244 const char *name,
1245 uint32_t data_offset,
1246 uint32_t data_size,
1247 enum gen_perf_counter_data_type data_type)
1248 {
1249 struct gen_perf_query_counter *counter = &query->counters[query->n_counters];
1250
1251 assert(query->n_counters <= query->max_counters);
1252
1253 counter->name = name;
1254 counter->desc = "Raw counter value";
1255 counter->type = GEN_PERF_COUNTER_TYPE_RAW;
1256 counter->data_type = data_type;
1257 counter->offset = data_offset;
1258
1259 query->n_counters++;
1260
1261 assert(counter->offset + gen_perf_query_counter_get_size(counter) <= query->data_size);
1262 }
1263
1264 #define MDAPI_QUERY_ADD_COUNTER(query, struct_name, field_name, type_name) \
1265 fill_mdapi_perf_query_counter(query, #field_name, \
1266 (uint8_t *) &struct_name.field_name - \
1267 (uint8_t *) &struct_name, \
1268 sizeof(struct_name.field_name), \
1269 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
1270 #define MDAPI_QUERY_ADD_ARRAY_COUNTER(ctx, query, struct_name, field_name, idx, type_name) \
1271 fill_mdapi_perf_query_counter(query, \
1272 ralloc_asprintf(ctx, "%s%i", #field_name, idx), \
1273 (uint8_t *) &struct_name.field_name[idx] - \
1274 (uint8_t *) &struct_name, \
1275 sizeof(struct_name.field_name[0]), \
1276 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
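/* For illustration, MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime,
 * UINT64) expands to roughly (gen7 case):
 *
 *    fill_mdapi_perf_query_counter(query, "TotalTime",
 *                                  offsetof(struct gen7_mdapi_metrics, TotalTime),
 *                                  sizeof(metric_data.TotalTime),
 *                                  GEN_PERF_COUNTER_DATA_TYPE_UINT64);
 *
 * the pointer subtraction on a local struct instance is just an offsetof()
 * in disguise.
 */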
1277
1278 static void
1279 register_mdapi_oa_query(const struct gen_device_info *devinfo,
1280 struct gen_perf_config *perf)
1281 {
1282 struct gen_perf_query_info *query = NULL;
1283
1284 /* MDAPI requires different structures for pretty much every generation
1285 * (right now we have definitions for gen 7 to 11).
1286 */
1287 if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
1288 return;
1289
1290 switch (devinfo->gen) {
1291 case 7: {
1292 query = append_query_info(perf, 1 + 45 + 16 + 7);
1293 query->oa_format = I915_OA_FORMAT_A45_B8_C8;
1294
1295 struct gen7_mdapi_metrics metric_data;
1296 query->data_size = sizeof(metric_data);
1297
1298 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1299 for (int i = 0; i < ARRAY_SIZE(metric_data.ACounters); i++) {
1300 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1301 metric_data, ACounters, i, UINT64);
1302 }
1303 for (int i = 0; i < ARRAY_SIZE(metric_data.NOACounters); i++) {
1304 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1305 metric_data, NOACounters, i, UINT64);
1306 }
1307 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1308 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1309 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1310 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1311 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1312 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1313 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1314 break;
1315 }
1316 case 8: {
1317 query = append_query_info(perf, 2 + 36 + 16 + 16);
1318 query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
1319
1320 struct gen8_mdapi_metrics metric_data;
1321 query->data_size = sizeof(metric_data);
1322
1323 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1324 MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
1325 for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
1326 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1327 metric_data, OaCntr, i, UINT64);
1328 }
1329 for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
1330 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1331 metric_data, NoaCntr, i, UINT64);
1332 }
1333 MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
1334 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
1335 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
1336 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
1337 MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
1338 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
1339 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
1340 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
1341 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
1342 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1343 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1344 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1345 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1346 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1347 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1348 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1349 break;
1350 }
1351 case 9:
1352 case 10:
1353 case 11: {
1354 query = append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
1355 query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
1356
1357 struct gen9_mdapi_metrics metric_data;
1358 query->data_size = sizeof(metric_data);
1359
1360 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1361 MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
1362 for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
1363 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1364 metric_data, OaCntr, i, UINT64);
1365 }
1366 for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
1367 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1368 metric_data, NoaCntr, i, UINT64);
1369 }
1370 MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
1371 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
1372 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
1373 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
1374 MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
1375 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
1376 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
1377 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
1378 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
1379 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1380 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1381 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1382 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1383 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1384 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1385 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1386 for (int i = 0; i < ARRAY_SIZE(metric_data.UserCntr); i++) {
1387 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1388 metric_data, UserCntr, i, UINT64);
1389 }
1390 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UserCntrCfgId, UINT32);
1391 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved4, UINT32);
1392 break;
1393 }
1394 default:
1395 unreachable("Unsupported gen");
1396 break;
1397 }
1398
1399 query->kind = GEN_PERF_QUERY_TYPE_RAW;
1400 query->name = "Intel_Raw_Hardware_Counters_Set_0_Query";
1401 query->guid = GEN_PERF_QUERY_GUID_MDAPI;
1402
1403 {
1404 /* Accumulation buffer offsets copied from an actual query... */
1405 const struct gen_perf_query_info *copy_query =
1406 &perf->queries[0];
1407
1408 query->gpu_time_offset = copy_query->gpu_time_offset;
1409 query->gpu_clock_offset = copy_query->gpu_clock_offset;
1410 query->a_offset = copy_query->a_offset;
1411 query->b_offset = copy_query->b_offset;
1412 query->c_offset = copy_query->c_offset;
1413 }
1414 }
1415
1416 static uint64_t
1417 get_metric_id(struct gen_perf_config *perf,
1418 const struct gen_perf_query_info *query)
1419 {
1420    /* These queries are known to never change; their config ID was loaded
1421     * when the query was first created. No need to look them up again.
1422 */
1423 if (query->kind == GEN_PERF_QUERY_TYPE_OA)
1424 return query->oa_metrics_set_id;
1425
1426 assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);
1427
1428    /* Raw queries can be reprogrammed by an external application/library.
1429     * When a raw query is used for the first time its ID is set to a value !=
1430     * 0. When it stops being used the ID returns to 0. No need to reload the
1431 * ID when it's already loaded.
1432 */
1433 if (query->oa_metrics_set_id != 0) {
1434 DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
1435 query->name, query->guid, query->oa_metrics_set_id);
1436 return query->oa_metrics_set_id;
1437 }
1438
1439 struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
1440 if (!gen_perf_load_metric_id(perf, query->guid,
1441 &raw_query->oa_metrics_set_id)) {
1442 DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
1443 raw_query->oa_metrics_set_id = 1ULL;
1444 } else {
1445       DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
1446 query->name, query->guid, query->oa_metrics_set_id);
1447 }
1448 return query->oa_metrics_set_id;
1449 }
1450
1451 static struct oa_sample_buf *
1452 get_free_sample_buf(struct gen_perf_context *perf_ctx)
1453 {
1454 struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
1455 struct oa_sample_buf *buf;
1456
1457 if (node)
1458 buf = exec_node_data(struct oa_sample_buf, node, link);
1459 else {
1460 buf = ralloc_size(perf_ctx->perf, sizeof(*buf));
1461
1462 exec_node_init(&buf->link);
1463 buf->refcount = 0;
1464 buf->len = 0;
1465 }
1466
1467 return buf;
1468 }
1469
1470 static void
1471 reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
1472 {
1473 struct exec_node *tail_node =
1474 exec_list_get_tail(&perf_ctx->sample_buffers);
1475 struct oa_sample_buf *tail_buf =
1476 exec_node_data(struct oa_sample_buf, tail_node, link);
1477
1478 /* Remove all old, unreferenced sample buffers walking forward from
1479 * the head of the list, except always leave at least one node in
1480 * the list so we always have a node to reference when we Begin
1481 * a new query.
1482 */
1483 foreach_list_typed_safe(struct oa_sample_buf, buf, link,
1484 &perf_ctx->sample_buffers)
1485 {
1486 if (buf->refcount == 0 && buf != tail_buf) {
1487 exec_node_remove(&buf->link);
1488 exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
1489 } else
1490 return;
1491 }
1492 }
1493
1494 static void
1495 free_sample_bufs(struct gen_perf_context *perf_ctx)
1496 {
1497 foreach_list_typed_safe(struct oa_sample_buf, buf, link,
1498 &perf_ctx->free_sample_buffers)
1499 ralloc_free(buf);
1500
1501 exec_list_make_empty(&perf_ctx->free_sample_buffers);
1502 }
1503
1504 /******************************************************************************/
1505
1506 /**
1507 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
1508 * pipeline statistics for the performance query object.
1509 */
1510 static void
1511 snapshot_statistics_registers(void *context,
1512 struct gen_perf_config *perf,
1513 struct gen_perf_query_object *obj,
1514 uint32_t offset_in_bytes)
1515 {
1516 const struct gen_perf_query_info *query = obj->queryinfo;
1517 const int n_counters = query->n_counters;
1518
1519 for (int i = 0; i < n_counters; i++) {
1520 const struct gen_perf_query_counter *counter = &query->counters[i];
1521
1522 assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);
1523
1524 perf->vtbl.store_register_mem64(context, obj->pipeline_stats.bo,
1525 counter->pipeline_stat.reg,
1526 offset_in_bytes + i * sizeof(uint64_t));
1527 }
1528 }
1529
1530 static void
1531 gen_perf_close(struct gen_perf_context *perfquery,
1532 const struct gen_perf_query_info *query)
1533 {
1534 if (perfquery->oa_stream_fd != -1) {
1535 close(perfquery->oa_stream_fd);
1536 perfquery->oa_stream_fd = -1;
1537 }
1538 if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
1539 struct gen_perf_query_info *raw_query =
1540 (struct gen_perf_query_info *) query;
1541 raw_query->oa_metrics_set_id = 0;
1542 }
1543 }
1544
1545 static bool
1546 gen_perf_open(struct gen_perf_context *perf_ctx,
1547 int metrics_set_id,
1548 int report_format,
1549 int period_exponent,
1550 int drm_fd,
1551 uint32_t ctx_id)
1552 {
1553 uint64_t properties[] = {
1554 /* Single context sampling */
1555 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
1556
1557 /* Include OA reports in samples */
1558 DRM_I915_PERF_PROP_SAMPLE_OA, true,
1559
1560 /* OA unit configuration */
1561 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
1562 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
1563 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
1564 };
1565 struct drm_i915_perf_open_param param = {
1566 .flags = I915_PERF_FLAG_FD_CLOEXEC |
1567 I915_PERF_FLAG_FD_NONBLOCK |
1568 I915_PERF_FLAG_DISABLED,
1569 .num_properties = ARRAY_SIZE(properties) / 2,
1570 .properties_ptr = (uintptr_t) properties,
1571 };
1572 int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
1573 if (fd == -1) {
1574 DBG("Error opening gen perf OA stream: %m\n");
1575 return false;
1576 }
1577
1578 perf_ctx->oa_stream_fd = fd;
1579
1580 perf_ctx->current_oa_metrics_set_id = metrics_set_id;
1581 perf_ctx->current_oa_format = report_format;
1582
1583 return true;
1584 }
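/* The properties array above is a flat list of (key, value) pairs, hence
 * num_properties = ARRAY_SIZE() / 2. As a rough example of the exponent
 * parameter (using the formula quoted in gen_perf_begin_query() below):
 * with an ~80ns timestamp period, period_exponent = 15 requests a periodic
 * sample roughly every 80ns * 2^16 ~= 5.2ms.
 */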
1585
1586 static bool
1587 inc_n_users(struct gen_perf_context *perf_ctx)
1588 {
1589 if (perf_ctx->n_oa_users == 0 &&
1590 gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
1591 {
1592 return false;
1593 }
1594 ++perf_ctx->n_oa_users;
1595
1596 return true;
1597 }
1598
1599 static void
1600 dec_n_users(struct gen_perf_context *perf_ctx)
1601 {
1602 /* Disabling the i915 perf stream will effectively disable the OA
1603 * counters. Note it's important to be sure there are no outstanding
1604 * MI_RPC commands at this point since they could stall the CS
1605 * indefinitely once OACONTROL is disabled.
1606 */
1607 --perf_ctx->n_oa_users;
1608 if (perf_ctx->n_oa_users == 0 &&
1609 gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
1610 {
1611 DBG("WARNING: Error disabling gen perf stream: %m\n");
1612 }
1613 }
1614
1615 void
1616 gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
1617 const struct gen_device_info *devinfo,
1618 int drm_fd)
1619 {
1620 load_pipeline_statistic_metrics(perf_cfg, devinfo);
1621 register_mdapi_statistic_query(perf_cfg, devinfo);
1622 if (load_oa_metrics(perf_cfg, drm_fd, devinfo))
1623 register_mdapi_oa_query(devinfo, perf_cfg);
1624 }
1625
1626 void
1627 gen_perf_init_context(struct gen_perf_context *perf_ctx,
1628 struct gen_perf_config *perf_cfg,
1629 void * ctx, /* driver context (eg, brw_context) */
1630 void * bufmgr, /* eg brw_bufmgr */
1631 const struct gen_device_info *devinfo,
1632 uint32_t hw_ctx,
1633 int drm_fd)
1634 {
1635 perf_ctx->perf = perf_cfg;
1636 perf_ctx->ctx = ctx;
1637 perf_ctx->bufmgr = bufmgr;
1638 perf_ctx->drm_fd = drm_fd;
1639 perf_ctx->hw_ctx = hw_ctx;
1640 perf_ctx->devinfo = devinfo;
1641
1642 perf_ctx->unaccumulated =
1643 ralloc_array(ctx, struct gen_perf_query_object *, 2);
1644 perf_ctx->unaccumulated_elements = 0;
1645 perf_ctx->unaccumulated_array_size = 2;
1646
1647 exec_list_make_empty(&perf_ctx->sample_buffers);
1648 exec_list_make_empty(&perf_ctx->free_sample_buffers);
1649
1650 /* It's convenient to guarantee that this linked list of sample
1651    * buffers is never empty, so we add an empty head; then when we
1652    * Begin an OA query we can always take a reference on a buffer
1653 * in this list.
1654 */
1655 struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
1656 exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);
1657
1658 perf_ctx->oa_stream_fd = -1;
1659 perf_ctx->next_query_start_report_id = 1000;
1660 }
1661
1662 /**
1663 * Add a query to the global list of "unaccumulated queries."
1664 *
1665 * Queries are tracked here until all the associated OA reports have
1666 * been accumulated via accumulate_oa_reports() after the end
1667 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
1668 */
1669 static void
1670 add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
1671 struct gen_perf_query_object *obj)
1672 {
1673 if (perf_ctx->unaccumulated_elements >=
1674 perf_ctx->unaccumulated_array_size)
1675 {
1676 perf_ctx->unaccumulated_array_size *= 1.5;
1677 perf_ctx->unaccumulated =
1678 reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
1679 struct gen_perf_query_object *,
1680 perf_ctx->unaccumulated_array_size);
1681 }
1682
1683 perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
1684 }
1685
1686 bool
1687 gen_perf_begin_query(struct gen_perf_context *perf_ctx,
1688 struct gen_perf_query_object *query)
1689 {
1690 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1691 const struct gen_perf_query_info *queryinfo = query->queryinfo;
1692
1693 /* XXX: We have to consider that the command parser unit that parses batch
1694 * buffer commands and is used to capture begin/end counter snapshots isn't
1695 * implicitly synchronized with what's currently running across other GPU
1696 * units (such as the EUs running shaders) that the performance counters are
1697 * associated with.
1698 *
1699 * The intention of performance queries is to measure the work associated
1700 * with commands between the begin/end delimiters and so for that to be the
1701 * case we need to explicitly synchronize the parsing of commands to capture
1702 * Begin/End counter snapshots with what's running across other parts of the
1703 * GPU.
1704 *
1705 * When the command parser reaches a Begin marker it effectively needs to
1706 * drain everything currently running on the GPU until the hardware is idle
1707 * before capturing the first snapshot of counters - otherwise the results
1708 * would also be measuring the effects of earlier commands.
1709 *
1710 * When the command parser reaches an End marker it needs to stall until
1711 * everything currently running on the GPU has finished before capturing the
1712 * end snapshot - otherwise the results won't be a complete representation
1713 * of the work.
1714 *
1715 * Theoretically there could be opportunities to minimize how much of the
1716 * GPU pipeline is drained, or that we stall for, when we know what specific
1717 * units the performance counters being queried relate to but we don't
1718 * currently attempt to be clever here.
1719 *
1720    * Note: with our current simple approach, back-to-back queries will
1721    * redundantly emit duplicate commands to synchronize the command
1722    * streamer with the rest of the GPU pipeline, but we assume that in HW
1723    * the second synchronization is effectively a NOOP.
1724    *
1725    * N.B. The final results are based on deltas of counters between (inside)
1726    * Begin/End markers so even though the total wall clock time of the
1727    * workload is stretched by larger pipeline bubbles, the bubbles themselves
1728    * are generally invisible to the query results. Whether that's a good or a
1729    * bad thing depends on the use case. For a lower real-time impact while
1730    * capturing metrics, periodic sampling may be a better choice than
1731    * INTEL_performance_query.
1732 *
1733 *
1734 * This is our Begin synchronization point to drain current work on the
1735 * GPU before we capture our first counter snapshot...
1736 */
1737 perf_cfg->vtbl.emit_mi_flush(perf_ctx->ctx);
1738
1739 switch (queryinfo->kind) {
1740 case GEN_PERF_QUERY_TYPE_OA:
1741 case GEN_PERF_QUERY_TYPE_RAW: {
1742
1743 /* Opening an i915 perf stream implies exclusive access to the OA unit
1744 * which will generate counter reports for a specific counter set with a
1745 * specific layout/format so we can't begin any OA based queries that
1746 * require a different counter set or format unless we get an opportunity
1747 * to close the stream and open a new one...
1748 */
1749 uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);
1750
1751 if (perf_ctx->oa_stream_fd != -1 &&
1752 perf_ctx->current_oa_metrics_set_id != metric_id) {
1753
1754 if (perf_ctx->n_oa_users != 0) {
1755             DBG("WARNING: Begin failed, already using perf config=%"PRIu64"/%"PRIu64"\n",
1756                 perf_ctx->current_oa_metrics_set_id, metric_id);
1757 return false;
1758 } else
1759 gen_perf_close(perf_ctx, queryinfo);
1760 }
1761
1762 /* If the OA counters aren't already on, enable them. */
1763 if (perf_ctx->oa_stream_fd == -1) {
1764 const struct gen_device_info *devinfo = perf_ctx->devinfo;
1765
1766 /* The period_exponent gives a sampling period as follows:
1767 * sample_period = timestamp_period * 2^(period_exponent + 1)
1768 *
1769              * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1770              * ~83ns (GEN8/9).
1771              *
1772              * The counter overflow period is derived from the EuActive counter,
1773              * which increments by the number of clock cycles multiplied by the
1774              * number of EUs. It can be calculated as:
1775 *
1776 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1777 *
1778 * (E.g. 40 EUs @ 1GHz = ~53ms)
1779 *
1780              * We select a sampling period shorter than that overflow period so
1781              * that we cannot miss more than 1 counter overflow between samples,
1782              * otherwise we could lose information.
1783 */
1784
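         /* Worked example using only the numbers quoted above (illustrative,
          * not tied to a particular SKU): a 32bit A counter with 40 EUs at
          * 1GHz overflows after 2^32 / (40 * 2) ns ~= 53.7ms. With HSW's
          * 80ns timestamp period the loop below then settles on
          * period_exponent = 19, since 80ns * 2^19 ~= 41.9ms is the largest
          * prev_sample_period still below that overflow period, while
          * 80ns * 2^20 ~= 83.9ms would already exceed it.
          */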
1785 int a_counter_in_bits = 32;
1786 if (devinfo->gen >= 8)
1787 a_counter_in_bits = 40;
1788
1789 uint64_t overflow_period = pow(2, a_counter_in_bits) / (perf_cfg->sys_vars.n_eus *
1790 /* drop 1GHz freq to have units in nanoseconds */
1791 2);
1792
1793 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1794 overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);
1795
1796 int period_exponent = 0;
1797 uint64_t prev_sample_period, next_sample_period;
1798 for (int e = 0; e < 30; e++) {
1799 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1800 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1801
1802             /* Take the largest sampling period that is still lower than
1803              * the overflow period.
1804              */
1805 if (prev_sample_period < overflow_period &&
1806 next_sample_period > overflow_period)
1807 period_exponent = e + 1;
1808 }
1809
1810 if (period_exponent == 0) {
1811             DBG("WARNING: unable to find a sampling exponent\n");
1812 return false;
1813 }
1814
1815 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1816 prev_sample_period / 1000000ul);
1817
1818 if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
1819 period_exponent, perf_ctx->drm_fd,
1820 perf_ctx->hw_ctx))
1821 return false;
1822 } else {
1823 assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
1824 perf_ctx->current_oa_format == queryinfo->oa_format);
1825 }
1826
1827 if (!inc_n_users(perf_ctx)) {
1828 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1829 return false;
1830 }
1831
1832 if (query->oa.bo) {
1833 perf_cfg->vtbl.bo_unreference(query->oa.bo);
1834 query->oa.bo = NULL;
1835 }
1836
1837 query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
1838 "perf. query OA MI_RPC bo",
1839 MI_RPC_BO_SIZE);
1840 #ifdef DEBUG
1841 /* Pre-filling the BO helps debug whether writes landed. */
1842 void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
1843 memset(map, 0x80, MI_RPC_BO_SIZE);
1844 perf_cfg->vtbl.bo_unmap(query->oa.bo);
1845 #endif
1846
1847 query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
1848 perf_ctx->next_query_start_report_id += 2;
1849
1850 /* We flush the batchbuffer here to minimize the chances that MI_RPC
1851 * delimiting commands end up in different batchbuffers. If that's the
1852 * case, the measurement will include the time it takes for the kernel
1853 * scheduler to load a new request into the hardware. This is manifested in
1854 * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
1855 */
1856 perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
1857
1858 /* Take a starting OA counter snapshot. */
1859 perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
1860 query->oa.begin_report_id);
1861 perf_cfg->vtbl.capture_frequency_stat_register(perf_ctx->ctx, query->oa.bo,
1862 MI_FREQ_START_OFFSET_BYTES);
1863
1864 ++perf_ctx->n_active_oa_queries;
1865
1866 /* No already-buffered samples can possibly be associated with this query
1867 * so create a marker within the list of sample buffers enabling us to
1868 * easily ignore earlier samples when processing this query after
1869 * completion.
1870 */
1871 assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
1872 query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);
1873
1874 struct oa_sample_buf *buf =
1875 exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
1876
1877 /* This reference will ensure that future/following sample
1878 * buffers (that may relate to this query) can't be freed until
1879 * this drops to zero.
1880 */
1881 buf->refcount++;
1882
1883 gen_perf_query_result_clear(&query->oa.result);
1884 query->oa.results_accumulated = false;
1885
1886 add_to_unaccumulated_query_list(perf_ctx, query);
1887 break;
1888 }
1889
1890 case GEN_PERF_QUERY_TYPE_PIPELINE:
1891 if (query->pipeline_stats.bo) {
1892 perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
1893 query->pipeline_stats.bo = NULL;
1894 }
1895
1896 query->pipeline_stats.bo =
1897 perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
1898 "perf. query pipeline stats bo",
1899 STATS_BO_SIZE);
1900
1901 /* Take starting snapshots. */
1902       snapshot_statistics_registers(perf_ctx->ctx, perf_cfg, query, 0);
1903
1904 ++perf_ctx->n_active_pipeline_stats_queries;
1905 break;
1906
1907 default:
1908 unreachable("Unknown query type");
1909 break;
1910 }
1911
1912 return true;
1913 }
1914
1915 void
1916 gen_perf_end_query(struct gen_perf_context *perf_ctx,
1917 struct gen_perf_query_object *query)
1918 {
1919 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1920
1921 /* Ensure that the work associated with the queried commands will have
1922 * finished before taking our query end counter readings.
1923 *
1924 * For more details see comment in brw_begin_perf_query for
1925 * corresponding flush.
1926 */
1927 perf_cfg->vtbl.emit_mi_flush(perf_ctx->ctx);
1928
1929 switch (query->queryinfo->kind) {
1930 case GEN_PERF_QUERY_TYPE_OA:
1931 case GEN_PERF_QUERY_TYPE_RAW:
1932
1933       /* NB: It's possible that the query will have already been marked
1934        * as 'accumulated' if an error was seen while reading samples
1935        * from perf. In this case we mustn't try to emit a closing
1936        * MI_RPC command in case the OA unit has already been disabled.
1937        */
1938 if (!query->oa.results_accumulated) {
1939 /* Take an ending OA counter snapshot. */
1940 perf_cfg->vtbl.capture_frequency_stat_register(perf_ctx->ctx, query->oa.bo,
1941 MI_FREQ_END_OFFSET_BYTES);
1942 perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
1943 MI_RPC_BO_END_OFFSET_BYTES,
1944 query->oa.begin_report_id + 1);
1945 }
1946
1947 --perf_ctx->n_active_oa_queries;
1948
1949 /* NB: even though the query has now ended, it can't be accumulated
1950 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1951 * to query->oa.bo
1952 */
1953 break;
1954
1955 case GEN_PERF_QUERY_TYPE_PIPELINE:
1956 snapshot_statistics_registers(perf_ctx->ctx, perf_cfg, query,
1957 STATS_BO_END_OFFSET_BYTES);
1958 --perf_ctx->n_active_pipeline_stats_queries;
1959 break;
1960
1961 default:
1962 unreachable("Unknown query type");
1963 break;
1964 }
1965 }
1966
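/* Status of a read_oa_samples_until() pass: ERROR means reading from the
 * i915 perf stream failed, UNFINISHED means the end timestamp hasn't been
 * reached yet and the caller should read again later, FINISHED means all
 * reports up to the end timestamp have been buffered.
 */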
1967 enum OaReadStatus {
1968 OA_READ_STATUS_ERROR,
1969 OA_READ_STATUS_UNFINISHED,
1970 OA_READ_STATUS_FINISHED,
1971 };
1972
1973 static enum OaReadStatus
1974 read_oa_samples_until(struct gen_perf_context *perf_ctx,
1975 uint32_t start_timestamp,
1976 uint32_t end_timestamp)
1977 {
1978 struct exec_node *tail_node =
1979 exec_list_get_tail(&perf_ctx->sample_buffers);
1980 struct oa_sample_buf *tail_buf =
1981 exec_node_data(struct oa_sample_buf, tail_node, link);
1982 uint32_t last_timestamp = tail_buf->last_timestamp;
1983
1984 while (1) {
1985 struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
1986 uint32_t offset;
1987 int len;
1988
1989 while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
1990 sizeof(buf->buf))) < 0 && errno == EINTR)
1991 ;
1992
1993 if (len <= 0) {
1994 exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
1995
1996 if (len < 0) {
1997 if (errno == EAGAIN)
1998 return ((last_timestamp - start_timestamp) >=
1999 (end_timestamp - start_timestamp)) ?
2000 OA_READ_STATUS_FINISHED :
2001 OA_READ_STATUS_UNFINISHED;
2002 else {
2003 DBG("Error reading i915 perf samples: %m\n");
2004 }
2005 } else
2006 DBG("Spurious EOF reading i915 perf samples\n");
2007
2008 return OA_READ_STATUS_ERROR;
2009 }
2010
2011 buf->len = len;
2012 exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);
2013
2014 /* Go through the reports and update the last timestamp. */
2015 offset = 0;
2016 while (offset < buf->len) {
2017 const struct drm_i915_perf_record_header *header =
2018 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
2019 uint32_t *report = (uint32_t *) (header + 1);
2020
2021 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
2022 last_timestamp = report[1];
2023
2024 offset += header->size;
2025 }
2026
2027 buf->last_timestamp = last_timestamp;
2028 }
2029
2030 unreachable("not reached");
2031 return OA_READ_STATUS_ERROR;
2032 }
2033
2034 /**
2035  * Try to read all the reports up to the delimiting end timestamp; returns
2036  * false while more reports remain, true once finished or on error.
2037  */
2038 static bool
2039 read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
2040 struct gen_perf_query_object *query,
2041 void *current_batch)
2042 {
2043 uint32_t *start;
2044 uint32_t *last;
2045 uint32_t *end;
2046 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2047
2048    /* We need the MI_REPORT_PERF_COUNT to land before we can start
2049     * accumulating. */
2050 assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
2051 !perf_cfg->vtbl.bo_busy(query->oa.bo));
2052
2053 /* Map the BO once here and let accumulate_oa_reports() unmap
2054 * it. */
2055 if (query->oa.map == NULL)
2056 query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);
2057
2058 start = last = query->oa.map;
2059 end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2060
2061 if (start[0] != query->oa.begin_report_id) {
2062 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
2063 return true;
2064 }
2065 if (end[0] != (query->oa.begin_report_id + 1)) {
2066 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
2067 return true;
2068 }
2069
2070 /* Read the reports until the end timestamp. */
2071 switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
2072 case OA_READ_STATUS_ERROR:
2073 /* Fallthrough and let accumulate_oa_reports() deal with the
2074 * error. */
2075 case OA_READ_STATUS_FINISHED:
2076 return true;
2077 case OA_READ_STATUS_UNFINISHED:
2078 return false;
2079 }
2080
2081 unreachable("invalid read status");
2082 return false;
2083 }
2084
2085 void
2086 gen_perf_wait_query(struct gen_perf_context *perf_ctx,
2087 struct gen_perf_query_object *query,
2088 void *current_batch)
2089 {
2090 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2091 struct brw_bo *bo = NULL;
2092
2093 switch (query->queryinfo->kind) {
2094 case GEN_PERF_QUERY_TYPE_OA:
2095 case GEN_PERF_QUERY_TYPE_RAW:
2096 bo = query->oa.bo;
2097 break;
2098
2099 case GEN_PERF_QUERY_TYPE_PIPELINE:
2100 bo = query->pipeline_stats.bo;
2101 break;
2102
2103 default:
2104 unreachable("Unknown query type");
2105 break;
2106 }
2107
2108 if (bo == NULL)
2109 return;
2110
2111 /* If the current batch references our results bo then we need to
2112 * flush first...
2113 */
2114 if (perf_cfg->vtbl.batch_references(current_batch, bo))
2115 perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
2116
2117 perf_cfg->vtbl.bo_wait_rendering(bo);
2118
2119 /* Due to a race condition between the OA unit signaling report
2120 * availability and the report actually being written into memory,
2121 * we need to wait for all the reports to come in before we can
2122 * read them.
2123 */
2124 if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
2125 query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
2126 while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
2127 ;
2128 }
2129 }
2130
2131 bool
2132 gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
2133 struct gen_perf_query_object *query,
2134 void *current_batch)
2135 {
2136 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2137
2138 switch (query->queryinfo->kind) {
2139 case GEN_PERF_QUERY_TYPE_OA:
2140 case GEN_PERF_QUERY_TYPE_RAW:
2141 return (query->oa.results_accumulated ||
2142 (query->oa.bo &&
2143 !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
2144 !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
2145 read_oa_samples_for_query(perf_ctx, query, current_batch)));
2146 case GEN_PERF_QUERY_TYPE_PIPELINE:
2147 return (query->pipeline_stats.bo &&
2148 !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
2149 !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));
2150
2151 default:
2152 unreachable("Unknown query type");
2153 break;
2154 }
2155
2156 return false;
2157 }
2158
2159 /**
2160  * Remove a query from the global list of unaccumulated queries, either
2161  * after successfully accumulating the OA reports associated with the
2162 * query in accumulate_oa_reports() or when discarding unwanted query
2163 * results.
2164 */
2165 static void
2166 drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
2167 struct gen_perf_query_object *query)
2168 {
2169 for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
2170 if (perf_ctx->unaccumulated[i] == query) {
2171 int last_elt = --perf_ctx->unaccumulated_elements;
2172
2173 if (i == last_elt)
2174 perf_ctx->unaccumulated[i] = NULL;
2175 else {
2176 perf_ctx->unaccumulated[i] =
2177 perf_ctx->unaccumulated[last_elt];
2178 }
2179
2180 break;
2181 }
2182 }
2183
2184 /* Drop our samples_head reference so that associated periodic
2185 * sample data buffers can potentially be reaped if they aren't
2186 * referenced by any other queries...
2187 */
2188
2189 struct oa_sample_buf *buf =
2190 exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
2191
2192 assert(buf->refcount > 0);
2193 buf->refcount--;
2194
2195 query->oa.samples_head = NULL;
2196
2197 reap_old_sample_buffers(perf_ctx);
2198 }
2199
2200 /* In general, if we see anything spurious while accumulating results we
2201  * don't try to continue accumulating the current query while hoping for
2202  * the best; we scrap everything outstanding and then start afresh with
2203  * new queries.
2204  */
2205 static void
2206 discard_all_queries(struct gen_perf_context *perf_ctx)
2207 {
2208 while (perf_ctx->unaccumulated_elements) {
2209 struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];
2210
2211 query->oa.results_accumulated = true;
2212 drop_from_unaccumulated_query_list(perf_ctx, query);
2213
2214 dec_n_users(perf_ctx);
2215 }
2216 }
2217
2218 /**
2219 * Accumulate raw OA counter values based on deltas between pairs of
2220 * OA reports.
2221 *
2222 * Accumulation starts from the first report captured via
2223 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
2224 * last MI_RPC report requested by brw_end_perf_query(). Between these
2225  * two reports there may also be some number of periodically sampled OA
2226 * reports collected via the i915 perf interface - depending on the
2227 * duration of the query.
2228 *
2229 * These periodic snapshots help to ensure we handle counter overflow
2230 * correctly by being frequent enough to ensure we don't miss multiple
2231 * overflows of a counter between snapshots. For Gen8+ the i915 perf
2232 * snapshots provide the extra context-switch reports that let us
2233 * subtract out the progress of counters associated with other
2234 * contexts running on the system.
2235 */
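/* A conceptual sketch of the accumulation (the heavy lifting happens in
 * gen_perf_query_result_accumulate()): for each consecutive pair of reports
 * (last, report) that we decide belongs to this query, every raw counter is
 * advanced by its delta, roughly
 *
 *    result.accumulator[i] += counter_field(report, i) -
 *                             counter_field(last, i);
 *
 * with a final delta taken against the end MI_RPC snapshot. counter_field()
 * is only a stand-in name here for however the active OA report format lays
 * out counter i.
 */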
2236 static void
2237 accumulate_oa_reports(struct gen_perf_context *perf_ctx,
2238 struct gen_perf_query_object *query)
2239 {
2240 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2241 uint32_t *start;
2242 uint32_t *last;
2243 uint32_t *end;
2244 struct exec_node *first_samples_node;
2245 bool in_ctx = true;
2246 int out_duration = 0;
2247
2248 assert(query->oa.map != NULL);
2249
2250 start = last = query->oa.map;
2251 end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2252
2253 if (start[0] != query->oa.begin_report_id) {
2254 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
2255 goto error;
2256 }
2257 if (end[0] != (query->oa.begin_report_id + 1)) {
2258 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
2259 goto error;
2260 }
2261
2262 /* On Gen12+ OA reports are sourced from per context counters, so we don't
2263 * ever have to look at the global OA buffer. Yey \o/
2264 */
2265 if (perf_ctx->devinfo->gen >= 12) {
2266 last = start;
2267 goto end;
2268 }
2269
2270 /* See if we have any periodic reports to accumulate too... */
2271
2272 /* N.B. The oa.samples_head was set when the query began and
2273 * pointed to the tail of the perf_ctx->sample_buffers list at
2274 * the time the query started. Since the buffer existed before the
2275 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
2276 * that no data in this particular node's buffer can possibly be
2277 * associated with the query - so skip ahead one...
2278 */
2279 first_samples_node = query->oa.samples_head->next;
2280
2281 foreach_list_typed_from(struct oa_sample_buf, buf, link,
2282                           &perf_ctx->sample_buffers,
2283 first_samples_node)
2284 {
2285 int offset = 0;
2286
2287 while (offset < buf->len) {
2288 const struct drm_i915_perf_record_header *header =
2289 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
2290
2291 assert(header->size != 0);
2292 assert(header->size <= buf->len);
2293
2294 offset += header->size;
2295
2296 switch (header->type) {
2297 case DRM_I915_PERF_RECORD_SAMPLE: {
2298 uint32_t *report = (uint32_t *)(header + 1);
2299 bool add = true;
2300
2301 /* Ignore reports that come before the start marker.
2302 * (Note: takes care to allow overflow of 32bit timestamps)
2303 */
2304 if (gen_device_info_timebase_scale(devinfo,
2305 report[1] - start[1]) > 5000000000) {
2306 continue;
2307 }
2308
2309 /* Ignore reports that come after the end marker.
2310 * (Note: takes care to allow overflow of 32bit timestamps)
2311 */
2312 if (gen_device_info_timebase_scale(devinfo,
2313 report[1] - end[1]) <= 5000000000) {
2314 goto end;
2315 }
2316
2317 /* For Gen8+ since the counters continue while other
2318 * contexts are running we need to discount any unrelated
2319 * deltas. The hardware automatically generates a report
2320 * on context switch which gives us a new reference point
2321 * to continuing adding deltas from.
2322 *
2323 * For Haswell we can rely on the HW to stop the progress
2324              * of OA counters while any other context is active.
2325 */
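            /* Summary of the hw_id filtering below (conceptual; the code is
             * authoritative):
             *
             *   in_ctx,  id == ours : continuation IN   -> add the delta
             *   in_ctx,  id != ours : switch AWAY       -> still add this
             *                         delta (the work up to the switch was
             *                         ours), then stop adding
             *   !in_ctx, id != ours : continuation OUT  -> skip and bump
             *                         out_duration
             *   !in_ctx, id == ours : switch back TO us -> skip the first
             *                         delta if other reports were seen
             *                         while away (out_duration >= 1)
             */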
2326 if (devinfo->gen >= 8) {
2327 if (in_ctx && report[2] != query->oa.result.hw_id) {
2328 DBG("i915 perf: Switch AWAY (observed by ID change)\n");
2329 in_ctx = false;
2330 out_duration = 0;
2331 } else if (in_ctx == false && report[2] == query->oa.result.hw_id) {
2332 DBG("i915 perf: Switch TO\n");
2333 in_ctx = true;
2334
2335 /* From experimentation in IGT, we found that the OA unit
2336 * might label some report as "idle" (using an invalid
2337 * context ID), right after a report for a given context.
2338 * Deltas generated by those reports actually belong to the
2339 * previous context, even though they're not labelled as
2340 * such.
2341 *
2342 * We didn't *really* Switch AWAY in the case that we e.g.
2343 * saw a single periodic report while idle...
2344 */
2345 if (out_duration >= 1)
2346 add = false;
2347 } else if (in_ctx) {
2348 assert(report[2] == query->oa.result.hw_id);
2349 DBG("i915 perf: Continuation IN\n");
2350 } else {
2351 assert(report[2] != query->oa.result.hw_id);
2352 DBG("i915 perf: Continuation OUT\n");
2353 add = false;
2354 out_duration++;
2355 }
2356 }
2357
2358 if (add) {
2359 gen_perf_query_result_accumulate(&query->oa.result,
2360 query->queryinfo,
2361 last, report);
2362 }
2363
2364 last = report;
2365
2366 break;
2367 }
2368
2369 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
2370 DBG("i915 perf: OA error: all reports lost\n");
2371 goto error;
2372 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
2373 DBG("i915 perf: OA report lost\n");
2374 break;
2375 }
2376 }
2377 }
2378
2379 end:
2380
2381 gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
2382 last, end);
2383
2384 query->oa.results_accumulated = true;
2385 drop_from_unaccumulated_query_list(perf_ctx, query);
2386 dec_n_users(perf_ctx);
2387
2388 return;
2389
2390 error:
2391
2392 discard_all_queries(perf_ctx);
2393 }
2394
2395 void
2396 gen_perf_delete_query(struct gen_perf_context *perf_ctx,
2397 struct gen_perf_query_object *query)
2398 {
2399 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2400
2401 /* We can assume that the frontend waits for a query to complete
2402 * before ever calling into here, so we don't have to worry about
2403 * deleting an in-flight query object.
2404 */
2405 switch (query->queryinfo->kind) {
2406 case GEN_PERF_QUERY_TYPE_OA:
2407 case GEN_PERF_QUERY_TYPE_RAW:
2408 if (query->oa.bo) {
2409 if (!query->oa.results_accumulated) {
2410 drop_from_unaccumulated_query_list(perf_ctx, query);
2411 dec_n_users(perf_ctx);
2412 }
2413
2414 perf_cfg->vtbl.bo_unreference(query->oa.bo);
2415 query->oa.bo = NULL;
2416 }
2417
2418 query->oa.results_accumulated = false;
2419 break;
2420
2421 case GEN_PERF_QUERY_TYPE_PIPELINE:
2422 if (query->pipeline_stats.bo) {
2423 perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
2424 query->pipeline_stats.bo = NULL;
2425 }
2426 break;
2427
2428 default:
2429 unreachable("Unknown query type");
2430 break;
2431 }
2432
2433    /* Once the last query instance is deleted, the INTEL_performance_query
2434     * extension is presumably no longer in use, so it's a good time to free
2435     * our cache of sample buffers and close any current i915-perf stream.
2436     */
2437 if (--perf_ctx->n_query_instances == 0) {
2438 free_sample_bufs(perf_ctx);
2439 gen_perf_close(perf_ctx, query->queryinfo);
2440 }
2441
2442 free(query);
2443 }
2444
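/* Extract a register field using the <FIELD>_MASK / <FIELD>_SHIFT pairs
 * defined at the top of this file, e.g.
 * GET_FIELD(value, GEN9_RPSTAT0_CURR_GT_FREQ).
 */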
2445 #define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
2446
2447 static void
2448 read_gt_frequency(struct gen_perf_context *perf_ctx,
2449 struct gen_perf_query_object *obj)
2450 {
2451 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2452 uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
2453 end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
2454
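   /* Illustrative decoding (register values assumed, not from real
    * hardware): on gen9+ a CURR_GT_FREQ field value of 24 becomes
    * 24 * 50 / 3 = 400, i.e. 400MHz, which the scaling at the end of this
    * function turns into 400000000Hz. On gen7/8 the field is in units of
    * 50MHz instead.
    */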
2455 switch (devinfo->gen) {
2456 case 7:
2457 case 8:
2458 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
2459 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
2460 break;
2461 case 9:
2462 case 10:
2463 case 11:
2464 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
2465 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
2466 break;
2467 default:
2468 unreachable("unexpected gen");
2469 }
2470
2471 /* Put the numbers into Hz. */
2472 obj->oa.gt_frequency[0] *= 1000000ULL;
2473 obj->oa.gt_frequency[1] *= 1000000ULL;
2474 }
2475
2476 static int
2477 get_oa_counter_data(struct gen_perf_context *perf_ctx,
2478 struct gen_perf_query_object *query,
2479 size_t data_size,
2480 uint8_t *data)
2481 {
2482 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2483 const struct gen_perf_query_info *queryinfo = query->queryinfo;
2484 int n_counters = queryinfo->n_counters;
2485 int written = 0;
2486
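   /* Counters are written at their fixed queryinfo offsets, so "written"
    * ends up being the offset just past the last counter written, i.e. how
    * many bytes of "data" the caller should treat as valid.
    */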
2487 for (int i = 0; i < n_counters; i++) {
2488 const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
2489 uint64_t *out_uint64;
2490 float *out_float;
2491 size_t counter_size = gen_perf_query_counter_get_size(counter);
2492
2493 if (counter_size) {
2494 switch (counter->data_type) {
2495 case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
2496 out_uint64 = (uint64_t *)(data + counter->offset);
2497 *out_uint64 =
2498 counter->oa_counter_read_uint64(perf_cfg, queryinfo,
2499 query->oa.result.accumulator);
2500 break;
2501 case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
2502 out_float = (float *)(data + counter->offset);
2503 *out_float =
2504 counter->oa_counter_read_float(perf_cfg, queryinfo,
2505 query->oa.result.accumulator);
2506 break;
2507 default:
2508 /* So far we aren't using uint32, double or bool32... */
2509 unreachable("unexpected counter data type");
2510 }
2511 written = counter->offset + counter_size;
2512 }
2513 }
2514
2515 return written;
2516 }
2517
2518 static int
2519 get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
2520 struct gen_perf_query_object *query,
2521 size_t data_size,
2522 uint8_t *data)
2523
2524 {
2525 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2526 const struct gen_perf_query_info *queryinfo = query->queryinfo;
2527 int n_counters = queryinfo->n_counters;
2528 uint8_t *p = data;
2529
2530 uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
2531 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
2532
2533 for (int i = 0; i < n_counters; i++) {
2534 const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
2535 uint64_t value = end[i] - start[i];
2536
2537 if (counter->pipeline_stat.numerator !=
2538 counter->pipeline_stat.denominator) {
2539 value *= counter->pipeline_stat.numerator;
2540 value /= counter->pipeline_stat.denominator;
2541 }
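      /* E.g. with numerator = 1 and denominator = 4 the scaling above turns
       * a raw delta of 100 into a reported value of 25 (illustrative
       * numbers only).
       */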
2542
2543 *((uint64_t *)p) = value;
2544 p += 8;
2545 }
2546
2547 perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);
2548
2549 return p - data;
2550 }
2551
2552 void
2553 gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
2554 struct gen_perf_query_object *query,
2555 int data_size,
2556 unsigned *data,
2557 unsigned *bytes_written)
2558 {
2559 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2560 int written = 0;
2561
2562 switch (query->queryinfo->kind) {
2563 case GEN_PERF_QUERY_TYPE_OA:
2564 case GEN_PERF_QUERY_TYPE_RAW:
2565 if (!query->oa.results_accumulated) {
2566 read_gt_frequency(perf_ctx, query);
2567 uint32_t *begin_report = query->oa.map;
2568 uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2569 gen_perf_query_result_read_frequencies(&query->oa.result,
2570 perf_ctx->devinfo,
2571 begin_report,
2572 end_report);
2573 accumulate_oa_reports(perf_ctx, query);
2574 assert(query->oa.results_accumulated);
2575
2576 perf_cfg->vtbl.bo_unmap(query->oa.bo);
2577 query->oa.map = NULL;
2578 }
2579 if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
2580 written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
2581 } else {
2582 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2583
2584 written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
2585 devinfo, &query->oa.result,
2586 query->oa.gt_frequency[0],
2587 query->oa.gt_frequency[1]);
2588 }
2589 break;
2590
2591 case GEN_PERF_QUERY_TYPE_PIPELINE:
2592 written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
2593 break;
2594
2595 default:
2596 unreachable("Unknown query type");
2597 break;
2598 }
2599
2600 if (bytes_written)
2601 *bytes_written = written;
2602 }
2603
2604 void
2605 gen_perf_dump_query_count(struct gen_perf_context *perf_ctx)
2606 {
2607 DBG("Queries: (Open queries = %d, OA users = %d)\n",
2608 perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
2609 }
2610
2611 void
2612 gen_perf_dump_query(struct gen_perf_context *ctx,
2613 struct gen_perf_query_object *obj,
2614 void *current_batch)
2615 {
2616 switch (obj->queryinfo->kind) {
2617 case GEN_PERF_QUERY_TYPE_OA:
2618 case GEN_PERF_QUERY_TYPE_RAW:
2619 DBG("BO: %-4s OA data: %-10s %-15s\n",
2620 obj->oa.bo ? "yes," : "no,",
2621 gen_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
2622 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
2623 break;
2624 case GEN_PERF_QUERY_TYPE_PIPELINE:
2625 DBG("BO: %-4s\n",
2626 obj->pipeline_stats.bo ? "yes" : "no");
2627 break;
2628 default:
2629 unreachable("Unknown query type");
2630 break;
2631 }
2632 }