intel/perf: move register definition to special file
[mesa.git] / src / intel / perf / gen_perf.c
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <dirent.h>
25
26 #include <sys/types.h>
27 #include <sys/stat.h>
28 #include <fcntl.h>
29 #include <unistd.h>
30 #include <errno.h>
31
32 #ifndef HAVE_DIRENT_D_TYPE
33 #include <limits.h> // PATH_MAX
34 #endif
35
36 #include <drm-uapi/i915_drm.h>
37
38 #include "common/gen_gem.h"
39 #include "gen_perf.h"
40 #include "gen_perf_regs.h"
41 #include "perf/gen_perf_mdapi.h"
42 #include "perf/gen_perf_metrics.h"
43
44 #include "dev/gen_debug.h"
45 #include "dev/gen_device_info.h"
46 #include "util/bitscan.h"
47 #include "util/mesa-sha1.h"
48 #include "util/u_math.h"
49
50 #define FILE_DEBUG_FLAG DEBUG_PERFMON
51 #define MI_RPC_BO_SIZE 4096
52 #define MI_FREQ_START_OFFSET_BYTES (3072)
53 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
54 #define MI_FREQ_END_OFFSET_BYTES (3076)
55
56 #define MAP_READ (1 << 0)
57 #define MAP_WRITE (1 << 1)
58
59 #define OA_REPORT_INVALID_CTX_ID (0xffffffff)
60
61 /**
62 * Periodic OA samples are read() into these buffer structures via the
63 * i915 perf kernel interface and appended to the
64 * perf_ctx->sample_buffers linked list. When we process the
65 * results of an OA metrics query we need to consider all the periodic
66 * samples between the Begin and End MI_REPORT_PERF_COUNT command
67 * markers.
68 *
69 * 'Periodic' is a simplification as there are other automatic reports
70 * written by the hardware also buffered here.
71 *
72 * Considering three queries, A, B and C:
73 *
74 * Time ---->
75 * ________________A_________________
76 * | |
77 * | ________B_________ _____C___________
78 * | | | | | |
79 *
80 * And an illustration of sample buffers read over this time frame:
81 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
82 *
83 * These nodes may hold samples for query A:
84 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
85 *
86 * These nodes may hold samples for query B:
87 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
88 *
89 * These nodes may hold samples for query C:
90 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
91 *
92 * The illustration assumes we have an even distribution of periodic
93 * samples so all nodes have the same size plotted against time:
94 *
95 * Note, to simplify code, the list is never empty.
96 *
97 * With overlapping queries we can see that periodic OA reports may
98 * relate to multiple queries and care needs to be taken to keep
99 * track of sample buffers until there are no queries that might
100 * depend on their contents.
101 *
102 * We use a node ref counting system where a reference ensures that a
103 * node and all following nodes can't be freed/recycled until the
104 * reference drops to zero.
105 *
106 * E.g. with a ref of one here:
107 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
108 *
109 * These nodes could be freed or recycled ("reaped"):
110 * [ 0 ][ 0 ]
111 *
112 * These must be preserved until the leading ref drops to zero:
113 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
114 *
115 * When a query starts we take a reference on the current tail of
116 * the list, knowing that no already-buffered samples can possibly
117 * relate to the newly-started query. A pointer to this node is
118 * also saved in the query object's ->oa.samples_head.
119 *
120 * E.g. starting query A while there are two nodes in .sample_buffers:
121 * ________________A________
122 * |
123 *
124 * [ 0 ][ 1 ]
125 * ^_______ Add a reference and store pointer to node in
126 * A->oa.samples_head
127 *
128 * Moving forward to when the B query starts with no new buffer nodes:
129 * (for reference, i915 perf reads() are only done when queries finish)
130 * ________________A_______
131 * | ________B___
132 * | |
133 *
134 * [ 0 ][ 2 ]
135 * ^_______ Add a reference and store pointer to
136 * node in B->oa.samples_head
137 *
138 * Once a query is finished, after an OA query has become 'Ready',
139 * once the End OA report has landed and after we have processed
140 * all the intermediate periodic samples then we drop the
141 * ->oa.samples_head reference we took at the start.
142 *
143 * So when the B query has finished we have:
144 * ________________A________
145 * | ______B___________
146 * | | |
147 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
148 * ^_______ Drop B->oa.samples_head reference
149 *
150 * We still can't free these due to the A->oa.samples_head ref:
151 * [ 1 ][ 0 ][ 0 ][ 0 ]
152 *
153 * When the A query finishes: (note there's a new ref for C's samples_head)
154 * ________________A_________________
155 * | |
156 * | _____C_________
157 * | | |
158 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
159 * ^_______ Drop A->oa.samples_head reference
160 *
161 * And we can now reap these nodes up to the C->oa.samples_head:
162 * [ X ][ X ][ X ][ X ]
163 * keeping -> [ 1 ][ 0 ][ 0 ]
164 *
165 * We reap old sample buffers each time we finish processing an OA
166 * query by iterating the sample_buffers list from the head until we
167 * find a referenced node and stop.
168 *
169 * Reaped buffers move to a perfquery.free_sample_buffers list and
170 * when we come to read() we first look to recycle a buffer from the
171 * free_sample_buffers list before allocating a new buffer.
172 */
173 struct oa_sample_buf {
174 struct exec_node link;
175 int refcount;
176 int len;
177 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
178 uint32_t last_timestamp;
179 };
180
181 /**
182 * gen representation of a performance query object.
183 *
184 * NB: We want to keep this structure relatively lean considering that
185 * applications may expect to allocate enough objects to be able to
186 * query around all draw calls in a frame.
187 */
188 struct gen_perf_query_object
189 {
190 const struct gen_perf_query_info *queryinfo;
191
192 /* See query->kind to know which state below is in use... */
193 union {
194 struct {
195
196 /**
197 * BO containing OA counter snapshots at query Begin/End time.
198 */
199 void *bo;
200
201 /**
202 * Address of the mapping of @bo
203 */
204 void *map;
205
206 /**
207 * The MI_REPORT_PERF_COUNT command lets us specify a unique
208 * ID that will be reflected in the resulting OA report
209 * that's written by the GPU. This is the ID we're expecting
210 * in the begin report and the the end report should be
211 * @begin_report_id + 1.
212 */
213 int begin_report_id;
214
215 /**
216 * Reference the head of the perf_ctx->sample_buffers
217 * list at the time that the query started (so we only need
218 * to look at nodes after this point when looking for samples
219 * related to this query)
220 *
221 * (See struct oa_sample_buf description for more details)
222 */
223 struct exec_node *samples_head;
224
225 /**
226 * false while in the unaccumulated_elements list, and set to
227 * true when the final, end MI_RPC snapshot has been
228 * accumulated.
229 */
230 bool results_accumulated;
231
232 /**
233 * Frequency of the GT at begin and end of the query.
234 */
235 uint64_t gt_frequency[2];
236
237 /**
238 * Accumulated OA results between begin and end of the query.
239 */
240 struct gen_perf_query_result result;
241 } oa;
242
243 struct {
244 /**
245 * BO containing starting and ending snapshots for the
246 * statistics counters.
247 */
248 void *bo;
249 } pipeline_stats;
250 };
251 };
252
253 struct gen_perf_context {
254 struct gen_perf_config *perf;
255
256 void * ctx; /* driver context (eg, brw_context) */
257 void * bufmgr;
258 const struct gen_device_info *devinfo;
259
260 uint32_t hw_ctx;
261 int drm_fd;
262
263 /* The i915 perf stream we open to setup + enable the OA counters */
264 int oa_stream_fd;
265
266 /* An i915 perf stream fd gives exclusive access to the OA unit that will
267 * report counter snapshots for a specific counter set/profile in a
268 * specific layout/format so we can only start OA queries that are
269 * compatible with the currently open fd...
270 */
271 int current_oa_metrics_set_id;
272 int current_oa_format;
273
274 /* List of buffers containing OA reports */
275 struct exec_list sample_buffers;
276
277 /* Cached list of empty sample buffers */
278 struct exec_list free_sample_buffers;
279
280 int n_active_oa_queries;
281 int n_active_pipeline_stats_queries;
282
283 /* The number of queries depending on running OA counters which
284 * extend beyond gen_perf_end_query() since we need to wait until
285 * the last MI_RPC command has been parsed by the GPU.
286 *
287 * Accurate accounting is important here as emitting an
288 * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
289 * effectively hang the gpu.
290 */
291 int n_oa_users;
292
293 /* To help catch a spurious problem with the hardware or perf
294 * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
295 * with a unique ID that we can explicitly check for...
296 */
297 int next_query_start_report_id;
298
299 /**
300 * An array of queries whose results haven't yet been assembled
301 * based on the data in buffer objects.
302 *
303 * These may be active, or have already ended. However, the
304 * results have not been requested.
305 */
306 struct gen_perf_query_object **unaccumulated;
307 int unaccumulated_elements;
308 int unaccumulated_array_size;
309
310 /* The total number of query objects so we can relinquish
311 * our exclusive access to perf if the application deletes
312 * all of its objects. (NB: We only disable perf while
313 * there are no active queries)
314 */
315 int n_query_instances;
316 };
317
318 const struct gen_perf_query_info*
319 gen_perf_query_info(const struct gen_perf_query_object *query)
320 {
321 return query->queryinfo;
322 }
323
324 struct gen_perf_context *
325 gen_perf_new_context(void *parent)
326 {
327 struct gen_perf_context *ctx = rzalloc(parent, struct gen_perf_context);
328 if (!ctx)
329 fprintf(stderr, "%s: failed to alloc context\n", __func__);
330 return ctx;
331 }
332
333 struct gen_perf_config *
334 gen_perf_config(struct gen_perf_context *ctx)
335 {
336 return ctx->perf;
337 }
338
339 struct gen_perf_query_object *
340 gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
341 {
342 const struct gen_perf_query_info *query =
343 &perf_ctx->perf->queries[query_index];
344 struct gen_perf_query_object *obj =
345 calloc(1, sizeof(struct gen_perf_query_object));
346
347 if (!obj)
348 return NULL;
349
350 obj->queryinfo = query;
351
352 perf_ctx->n_query_instances++;
353 return obj;
354 }
355
356 int
357 gen_perf_active_queries(struct gen_perf_context *perf_ctx,
358 const struct gen_perf_query_info *query)
359 {
360 assert(perf_ctx->n_active_oa_queries == 0 || perf_ctx->n_active_pipeline_stats_queries == 0);
361
362 switch (query->kind) {
363 case GEN_PERF_QUERY_TYPE_OA:
364 case GEN_PERF_QUERY_TYPE_RAW:
365 return perf_ctx->n_active_oa_queries;
366 break;
367
368 case GEN_PERF_QUERY_TYPE_PIPELINE:
369 return perf_ctx->n_active_pipeline_stats_queries;
370 break;
371
372 default:
373 unreachable("Unknown query type");
374 break;
375 }
376 }
377
378 static inline uint64_t to_user_pointer(void *ptr)
379 {
380 return (uintptr_t) ptr;
381 }
382
383 static bool
384 is_dir_or_link(const struct dirent *entry, const char *parent_dir)
385 {
386 #ifdef HAVE_DIRENT_D_TYPE
387 return entry->d_type == DT_DIR || entry->d_type == DT_LNK;
388 #else
389 struct stat st;
390 char path[PATH_MAX + 1];
391 snprintf(path, sizeof(path), "%s/%s", parent_dir, entry->d_name);
392 lstat(path, &st);
393 return S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode);
394 #endif
395 }
396
397 static bool
398 get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
399 {
400 struct stat sb;
401 int min, maj;
402 DIR *drmdir;
403 struct dirent *drm_entry;
404 int len;
405
406 perf->sysfs_dev_dir[0] = '\0';
407
408 if (fstat(fd, &sb)) {
409 DBG("Failed to stat DRM fd\n");
410 return false;
411 }
412
413 maj = major(sb.st_rdev);
414 min = minor(sb.st_rdev);
415
416 if (!S_ISCHR(sb.st_mode)) {
417 DBG("DRM fd is not a character device as expected\n");
418 return false;
419 }
420
421 len = snprintf(perf->sysfs_dev_dir,
422 sizeof(perf->sysfs_dev_dir),
423 "/sys/dev/char/%d:%d/device/drm", maj, min);
424 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
425 DBG("Failed to concatenate sysfs path to drm device\n");
426 return false;
427 }
428
429 drmdir = opendir(perf->sysfs_dev_dir);
430 if (!drmdir) {
431 DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
432 return false;
433 }
434
435 while ((drm_entry = readdir(drmdir))) {
436 if (is_dir_or_link(drm_entry, perf->sysfs_dev_dir) &&
437 strncmp(drm_entry->d_name, "card", 4) == 0)
438 {
439 len = snprintf(perf->sysfs_dev_dir,
440 sizeof(perf->sysfs_dev_dir),
441 "/sys/dev/char/%d:%d/device/drm/%s",
442 maj, min, drm_entry->d_name);
443 closedir(drmdir);
444 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
445 return false;
446 else
447 return true;
448 }
449 }
450
451 closedir(drmdir);
452
453 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
454 maj, min);
455
456 return false;
457 }
458
459 static bool
460 read_file_uint64(const char *file, uint64_t *val)
461 {
462 char buf[32];
463 int fd, n;
464
465 fd = open(file, O_RDONLY);
466 if (fd < 0)
467 return false;
468 while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
469 errno == EINTR);
470 close(fd);
471 if (n < 0)
472 return false;
473
474 buf[n] = '\0';
475 *val = strtoull(buf, NULL, 0);
476
477 return true;
478 }
479
480 static bool
481 read_sysfs_drm_device_file_uint64(struct gen_perf_config *perf,
482 const char *file,
483 uint64_t *value)
484 {
485 char buf[512];
486 int len;
487
488 len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
489 if (len < 0 || len >= sizeof(buf)) {
490 DBG("Failed to concatenate sys filename to read u64 from\n");
491 return false;
492 }
493
494 return read_file_uint64(buf, value);
495 }
496
497 static inline struct gen_perf_query_info *
498 append_query_info(struct gen_perf_config *perf, int max_counters)
499 {
500 struct gen_perf_query_info *query;
501
502 perf->queries = reralloc(perf, perf->queries,
503 struct gen_perf_query_info,
504 ++perf->n_queries);
505 query = &perf->queries[perf->n_queries - 1];
506 memset(query, 0, sizeof(*query));
507
508 if (max_counters > 0) {
509 query->max_counters = max_counters;
510 query->counters =
511 rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
512 }
513
514 return query;
515 }
516
517 static void
518 register_oa_config(struct gen_perf_config *perf,
519 const struct gen_perf_query_info *query,
520 uint64_t config_id)
521 {
522 struct gen_perf_query_info *registered_query = append_query_info(perf, 0);
523
524 *registered_query = *query;
525 registered_query->oa_metrics_set_id = config_id;
526 DBG("metric set registered: id = %" PRIu64", guid = %s\n",
527 registered_query->oa_metrics_set_id, query->guid);
528 }
529
530 static void
531 enumerate_sysfs_metrics(struct gen_perf_config *perf)
532 {
533 DIR *metricsdir = NULL;
534 struct dirent *metric_entry;
535 char buf[256];
536 int len;
537
538 len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
539 if (len < 0 || len >= sizeof(buf)) {
540 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
541 return;
542 }
543
544 metricsdir = opendir(buf);
545 if (!metricsdir) {
546 DBG("Failed to open %s: %m\n", buf);
547 return;
548 }
549
550 while ((metric_entry = readdir(metricsdir))) {
551 struct hash_entry *entry;
552 if (!is_dir_or_link(metric_entry, buf) ||
553 metric_entry->d_name[0] == '.')
554 continue;
555
556 DBG("metric set: %s\n", metric_entry->d_name);
557 entry = _mesa_hash_table_search(perf->oa_metrics_table,
558 metric_entry->d_name);
559 if (entry) {
560 uint64_t id;
561 if (!gen_perf_load_metric_id(perf, metric_entry->d_name, &id)) {
562 DBG("Failed to read metric set id from %s: %m", buf);
563 continue;
564 }
565
566 register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
567 } else
568 DBG("metric set not known by mesa (skipping)\n");
569 }
570
571 closedir(metricsdir);
572 }
573
574 static bool
575 kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
576 {
577 uint64_t invalid_config_id = UINT64_MAX;
578
579 return gen_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
580 &invalid_config_id) < 0 && errno == ENOENT;
581 }
582
583 static int
584 i915_query_items(struct gen_perf_config *perf, int fd,
585 struct drm_i915_query_item *items, uint32_t n_items)
586 {
587 struct drm_i915_query q = {
588 .num_items = n_items,
589 .items_ptr = to_user_pointer(items),
590 };
591 return gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
592 }
593
594 static bool
595 i915_query_perf_config_supported(struct gen_perf_config *perf, int fd)
596 {
597 struct drm_i915_query_item item = {
598 .query_id = DRM_I915_QUERY_PERF_CONFIG,
599 .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
600 };
601
602 return i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0;
603 }
604
605 static bool
606 i915_query_perf_config_data(struct gen_perf_config *perf,
607 int fd, const char *guid,
608 struct drm_i915_perf_oa_config *config)
609 {
610 struct {
611 struct drm_i915_query_perf_config query;
612 struct drm_i915_perf_oa_config config;
613 } item_data;
614 struct drm_i915_query_item item = {
615 .query_id = DRM_I915_QUERY_PERF_CONFIG,
616 .flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
617 .data_ptr = to_user_pointer(&item_data),
618 .length = sizeof(item_data),
619 };
620
621 memset(&item_data, 0, sizeof(item_data));
622 memcpy(item_data.query.uuid, guid, sizeof(item_data.query.uuid));
623 memcpy(&item_data.config, config, sizeof(item_data.config));
624
625 if (!(i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0))
626 return false;
627
628 memcpy(config, &item_data.config, sizeof(item_data.config));
629
630 return true;
631 }
632
633 bool
634 gen_perf_load_metric_id(struct gen_perf_config *perf_cfg,
635 const char *guid,
636 uint64_t *metric_id)
637 {
638 char config_path[280];
639
640 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
641 perf_cfg->sysfs_dev_dir, guid);
642
643 /* Don't recreate already loaded configs. */
644 return read_file_uint64(config_path, metric_id);
645 }
646
647 static uint64_t
648 i915_add_config(struct gen_perf_config *perf, int fd,
649 const struct gen_perf_registers *config,
650 const char *guid)
651 {
652 struct drm_i915_perf_oa_config i915_config = { 0, };
653
654 memcpy(i915_config.uuid, guid, sizeof(i915_config.uuid));
655
656 i915_config.n_mux_regs = config->n_mux_regs;
657 i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);
658
659 i915_config.n_boolean_regs = config->n_b_counter_regs;
660 i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);
661
662 i915_config.n_flex_regs = config->n_flex_regs;
663 i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);
664
665 int ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &i915_config);
666 return ret > 0 ? ret : 0;
667 }
668
669 static void
670 init_oa_configs(struct gen_perf_config *perf, int fd)
671 {
672 hash_table_foreach(perf->oa_metrics_table, entry) {
673 const struct gen_perf_query_info *query = entry->data;
674 uint64_t config_id;
675
676 if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
677 DBG("metric set: %s (already loaded)\n", query->guid);
678 register_oa_config(perf, query, config_id);
679 continue;
680 }
681
682 int ret = i915_add_config(perf, fd, &query->config, query->guid);
683 if (ret < 0) {
684 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
685 query->name, query->guid, strerror(errno));
686 continue;
687 }
688
689 register_oa_config(perf, query, ret);
690 DBG("metric set: %s (added)\n", query->guid);
691 }
692 }
693
694 static void
695 compute_topology_builtins(struct gen_perf_config *perf,
696 const struct gen_device_info *devinfo)
697 {
698 perf->sys_vars.slice_mask = devinfo->slice_masks;
699 perf->sys_vars.n_eu_slices = devinfo->num_slices;
700
701 for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
702 perf->sys_vars.n_eu_sub_slices +=
703 __builtin_popcount(devinfo->subslice_masks[i]);
704 }
705
706 for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
707 perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);
708
709 perf->sys_vars.eu_threads_count = devinfo->num_thread_per_eu;
710
711 /* The subslice mask builtin contains bits for all slices. Prior to Gen11
712 * it had groups of 3bits for each slice, on Gen11 it's 8bits for each
713 * slice.
714 *
715 * Ideally equations would be updated to have a slice/subslice query
716 * function/operator.
717 */
718 perf->sys_vars.subslice_mask = 0;
719
720 int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;
721
722 for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
723 for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
724 if (gen_device_info_subslice_available(devinfo, s, ss))
725 perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
726 }
727 }
728 }
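/* Illustrative note (not part of the original file): with the packing above,
 * subslice ss=2 of slice s=1 lands at bit 1 * 8 + 2 = 10 of subslice_mask on
 * Gen11 (8 bits per slice), and at bit 1 * 3 + 2 = 5 on earlier gens
 * (3 bits per slice).
 */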
729
730 static bool
731 init_oa_sys_vars(struct gen_perf_config *perf, const struct gen_device_info *devinfo)
732 {
733 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
734
735 if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
736 return false;
737
738 if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
739 return false;
740
741 memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
742 perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
743 perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
744 perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
745 perf->sys_vars.revision = devinfo->revision;
746 compute_topology_builtins(perf, devinfo);
747
748 return true;
749 }
750
751 typedef void (*perf_register_oa_queries_t)(struct gen_perf_config *);
752
753 static perf_register_oa_queries_t
754 get_register_queries_function(const struct gen_device_info *devinfo)
755 {
756 if (devinfo->is_haswell)
757 return gen_oa_register_queries_hsw;
758 if (devinfo->is_cherryview)
759 return gen_oa_register_queries_chv;
760 if (devinfo->is_broadwell)
761 return gen_oa_register_queries_bdw;
762 if (devinfo->is_broxton)
763 return gen_oa_register_queries_bxt;
764 if (devinfo->is_skylake) {
765 if (devinfo->gt == 2)
766 return gen_oa_register_queries_sklgt2;
767 if (devinfo->gt == 3)
768 return gen_oa_register_queries_sklgt3;
769 if (devinfo->gt == 4)
770 return gen_oa_register_queries_sklgt4;
771 }
772 if (devinfo->is_kabylake) {
773 if (devinfo->gt == 2)
774 return gen_oa_register_queries_kblgt2;
775 if (devinfo->gt == 3)
776 return gen_oa_register_queries_kblgt3;
777 }
778 if (devinfo->is_geminilake)
779 return gen_oa_register_queries_glk;
780 if (devinfo->is_coffeelake) {
781 if (devinfo->gt == 2)
782 return gen_oa_register_queries_cflgt2;
783 if (devinfo->gt == 3)
784 return gen_oa_register_queries_cflgt3;
785 }
786 if (devinfo->is_cannonlake)
787 return gen_oa_register_queries_cnl;
788 if (devinfo->gen == 11) {
789 if (devinfo->is_elkhartlake)
790 return gen_oa_register_queries_lkf;
791 return gen_oa_register_queries_icl;
792 }
793 if (devinfo->gen == 12)
794 return gen_oa_register_queries_tgl;
795
796 return NULL;
797 }
798
799 static inline void
800 add_stat_reg(struct gen_perf_query_info *query, uint32_t reg,
801 uint32_t numerator, uint32_t denominator,
802 const char *name, const char *description)
803 {
804 struct gen_perf_query_counter *counter;
805
806 assert(query->n_counters < query->max_counters);
807
808 counter = &query->counters[query->n_counters];
809 counter->name = name;
810 counter->desc = description;
811 counter->type = GEN_PERF_COUNTER_TYPE_RAW;
812 counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
813 counter->offset = sizeof(uint64_t) * query->n_counters;
814 counter->pipeline_stat.reg = reg;
815 counter->pipeline_stat.numerator = numerator;
816 counter->pipeline_stat.denominator = denominator;
817
818 query->n_counters++;
819 }
820
821 static inline void
822 add_basic_stat_reg(struct gen_perf_query_info *query,
823 uint32_t reg, const char *name)
824 {
825 add_stat_reg(query, reg, 1, 1, name, name);
826 }
827
828 static void
829 load_pipeline_statistic_metrics(struct gen_perf_config *perf_cfg,
830 const struct gen_device_info *devinfo)
831 {
832 struct gen_perf_query_info *query =
833 append_query_info(perf_cfg, MAX_STAT_COUNTERS);
834
835 query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
836 query->name = "Pipeline Statistics Registers";
837
838 add_basic_stat_reg(query, IA_VERTICES_COUNT,
839 "N vertices submitted");
840 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
841 "N primitives submitted");
842 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
843 "N vertex shader invocations");
844
845 if (devinfo->gen == 6) {
846 add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
847 "SO_PRIM_STORAGE_NEEDED",
848 "N geometry shader stream-out primitives (total)");
849 add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
850 "SO_NUM_PRIMS_WRITTEN",
851 "N geometry shader stream-out primitives (written)");
852 } else {
853 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
854 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
855 "N stream-out (stream 0) primitives (total)");
856 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
857 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
858 "N stream-out (stream 1) primitives (total)");
859 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
860 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
861 "N stream-out (stream 2) primitives (total)");
862 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
863 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
864 "N stream-out (stream 3) primitives (total)");
865 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
866 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
867 "N stream-out (stream 0) primitives (written)");
868 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
869 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
870 "N stream-out (stream 1) primitives (written)");
871 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
872 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
873 "N stream-out (stream 2) primitives (written)");
874 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
875 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
876 "N stream-out (stream 3) primitives (written)");
877 }
878
879 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
880 "N TCS shader invocations");
881 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
882 "N TES shader invocations");
883
884 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
885 "N geometry shader invocations");
886 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
887 "N geometry shader primitives emitted");
888
889 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
890 "N primitives entering clipping");
891 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
892 "N primitives leaving clipping");
893
894 if (devinfo->is_haswell || devinfo->gen == 8) {
895 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
896 "N fragment shader invocations",
897 "N fragment shader invocations");
898 } else {
899 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
900 "N fragment shader invocations");
901 }
902
903 add_basic_stat_reg(query, PS_DEPTH_COUNT,
904 "N z-pass fragments");
905
906 if (devinfo->gen >= 7) {
907 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
908 "N compute shader invocations");
909 }
910
911 query->data_size = sizeof(uint64_t) * query->n_counters;
912 }
913
914 static bool
915 load_oa_metrics(struct gen_perf_config *perf, int fd,
916 const struct gen_device_info *devinfo)
917 {
918 perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
919 bool i915_perf_oa_available = false;
920 struct stat sb;
921
922 perf->i915_query_supported = i915_query_perf_config_supported(perf, fd);
923
924 /* The existence of this sysctl parameter implies the kernel supports
925 * the i915 perf interface.
926 */
927 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
928
929 /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
930 * metrics unless running as root.
931 */
932 if (devinfo->is_haswell)
933 i915_perf_oa_available = true;
934 else {
935 uint64_t paranoid = 1;
936
937 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
938
939 if (paranoid == 0 || geteuid() == 0)
940 i915_perf_oa_available = true;
941 }
942 }
943
944 if (!i915_perf_oa_available ||
945 !oa_register ||
946 !get_sysfs_dev_dir(perf, fd) ||
947 !init_oa_sys_vars(perf, devinfo))
948 return false;
949
950 perf->oa_metrics_table =
951 _mesa_hash_table_create(perf, _mesa_hash_string,
952 _mesa_key_string_equal);
953
954 /* Index all the metric sets mesa knows about before looking to see what
955 * the kernel is advertising.
956 */
957 oa_register(perf);
958
959 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
960 kernel_has_dynamic_config_support(perf, fd))
961 init_oa_configs(perf, fd);
962 else
963 enumerate_sysfs_metrics(perf);
964
965 return true;
966 }
967
968 struct gen_perf_registers *
969 gen_perf_load_configuration(struct gen_perf_config *perf_cfg, int fd, const char *guid)
970 {
971 if (!perf_cfg->i915_query_supported)
972 return NULL;
973
974 struct drm_i915_perf_oa_config i915_config = { 0, };
975 if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config))
976 return NULL;
977
978 struct gen_perf_registers *config = rzalloc(NULL, struct gen_perf_registers);
979 config->n_flex_regs = i915_config.n_flex_regs;
980 config->flex_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_flex_regs);
981 config->n_mux_regs = i915_config.n_mux_regs;
982 config->mux_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_mux_regs);
983 config->n_b_counter_regs = i915_config.n_boolean_regs;
984 config->b_counter_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_b_counter_regs);
985
986 /*
987 * struct gen_perf_query_register_prog maps exactly to the tuple of
988 * (register offset, register value) returned by the i915.
989 */
990 i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);
991 i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);
992 i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);
993 if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config)) {
994 ralloc_free(config);
995 return NULL;
996 }
997
998 return config;
999 }
1000
1001 uint64_t
1002 gen_perf_store_configuration(struct gen_perf_config *perf_cfg, int fd,
1003 const struct gen_perf_registers *config,
1004 const char *guid)
1005 {
1006 if (guid)
1007 return i915_add_config(perf_cfg, fd, config, guid);
1008
1009 struct mesa_sha1 sha1_ctx;
1010 _mesa_sha1_init(&sha1_ctx);
1011
1012 if (config->flex_regs) {
1013 _mesa_sha1_update(&sha1_ctx, config->flex_regs,
1014 sizeof(config->flex_regs[0]) *
1015 config->n_flex_regs);
1016 }
1017 if (config->mux_regs) {
1018 _mesa_sha1_update(&sha1_ctx, config->mux_regs,
1019 sizeof(config->mux_regs[0]) *
1020 config->n_mux_regs);
1021 }
1022 if (config->b_counter_regs) {
1023 _mesa_sha1_update(&sha1_ctx, config->b_counter_regs,
1024 sizeof(config->b_counter_regs[0]) *
1025 config->n_b_counter_regs);
1026 }
1027
1028 uint8_t hash[20];
1029 _mesa_sha1_final(&sha1_ctx, hash);
1030
1031 char formatted_hash[41];
1032 _mesa_sha1_format(formatted_hash, hash);
1033
1034 char generated_guid[37];
1035 snprintf(generated_guid, sizeof(generated_guid),
1036 "%.8s-%.4s-%.4s-%.4s-%.12s",
1037 &formatted_hash[0], &formatted_hash[8],
1038 &formatted_hash[8 + 4], &formatted_hash[8 + 4 + 4],
1039 &formatted_hash[8 + 4 + 4 + 4]);
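/* Worked example (hypothetical hash, for illustration only): a formatted_hash
 * of "0123456789abcdef0123456789abcdef01234567" is split by the snprintf
 * above into the GUID "01234567-89ab-cdef-0123-456789abcdef".
 */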
1040
1041 /* Check if already present. */
1042 uint64_t id;
1043 if (gen_perf_load_metric_id(perf_cfg, generated_guid, &id))
1044 return id;
1045
1046 return i915_add_config(perf_cfg, fd, config, generated_guid);
1047 }
1048
1049 /* Accumulate 32bits OA counters */
1050 static inline void
1051 accumulate_uint32(const uint32_t *report0,
1052 const uint32_t *report1,
1053 uint64_t *accumulator)
1054 {
1055 *accumulator += (uint32_t)(*report1 - *report0);
1056 }
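/* Illustrative example (made-up values): with *report0 = 0xfffffff0 and
 * *report1 = 0x10 the 32bit counter wrapped between the two reports, and the
 * unsigned 32bit subtraction above still yields the correct delta of 0x20.
 */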
1057
1058 /* Accumulate 40bits OA counters */
1059 static inline void
1060 accumulate_uint40(int a_index,
1061 const uint32_t *report0,
1062 const uint32_t *report1,
1063 uint64_t *accumulator)
1064 {
1065 const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
1066 const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
1067 uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
1068 uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
1069 uint64_t value0 = report0[a_index + 4] | high0;
1070 uint64_t value1 = report1[a_index + 4] | high1;
1071 uint64_t delta;
1072
1073 if (value0 > value1)
1074 delta = (1ULL << 40) + value1 - value0;
1075 else
1076 delta = value1 - value0;
1077
1078 *accumulator += delta;
1079 }
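/* Illustrative example (made-up values): with value0 = 0xfffffffff0 (just
 * below the 40bit limit) and value1 = 0x10, the counter wrapped, so the first
 * branch above computes delta = (1ULL << 40) + 0x10 - 0xfffffffff0 = 0x20.
 */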
1080
1081 static void
1082 gen8_read_report_clock_ratios(const uint32_t *report,
1083 uint64_t *slice_freq_hz,
1084 uint64_t *unslice_freq_hz)
1085 {
1086 /* The RPT_ID field (dword 0) of the OA reports contains a snapshot of
1087 * bits coming from the RP_FREQ_NORMAL register and is divided this
1088 * way:
1089 *
1090 * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
1091 * RPT_ID[10:9]: RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
1092 * RPT_ID[8:0]: RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
1093 *
1094 * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
1095 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
1096 *
1097 * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
1098 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
1099 */
1100
1101 uint32_t unslice_freq = report[0] & 0x1ff;
1102 uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
1103 uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
1104 uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);
1105
1106 *slice_freq_hz = slice_freq * 16666667ULL;
1107 *unslice_freq_hz = unslice_freq * 16666667ULL;
1108 }
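/* Worked example (made-up report value): report[0] = 0x24000012 decodes to
 * unslice_freq = 0x012 = 18 and slice_freq = 18 (low bits 0x12, high bits 0),
 * so both frequencies come out as 18 * 16666667 ~= 300MHz.
 */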
1109
1110 void
1111 gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
1112 const struct gen_device_info *devinfo,
1113 const uint32_t *start,
1114 const uint32_t *end)
1115 {
1116 /* Slice/Unslice frequency is only available in the OA reports when the
1117 * "Disable OA reports due to clock ratio change" field in
1118 * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
1119 * global register (see drivers/gpu/drm/i915/i915_perf.c)
1120 *
1121 * Documentation says this should be available on Gen9+ but experimentation
1122 * shows that Gen8 reports similar values, so we enable it there too.
1123 */
1124 if (devinfo->gen < 8)
1125 return;
1126
1127 gen8_read_report_clock_ratios(start,
1128 &result->slice_frequency[0],
1129 &result->unslice_frequency[0]);
1130 gen8_read_report_clock_ratios(end,
1131 &result->slice_frequency[1],
1132 &result->unslice_frequency[1]);
1133 }
1134
1135 void
1136 gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
1137 const struct gen_perf_query_info *query,
1138 const uint32_t *start,
1139 const uint32_t *end)
1140 {
1141 int i, idx = 0;
1142
1143 if (result->hw_id == OA_REPORT_INVALID_CTX_ID &&
1144 start[2] != OA_REPORT_INVALID_CTX_ID)
1145 result->hw_id = start[2];
1146 if (result->reports_accumulated == 0)
1147 result->begin_timestamp = start[1];
1148 result->reports_accumulated++;
1149
1150 switch (query->oa_format) {
1151 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
1152 accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
1153 accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */
1154
1155 /* 32x 40bit A counters... */
1156 for (i = 0; i < 32; i++)
1157 accumulate_uint40(i, start, end, result->accumulator + idx++);
1158
1159 /* 4x 32bit A counters... */
1160 for (i = 0; i < 4; i++)
1161 accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);
1162
1163 /* 8x 32bit B counters + 8x 32bit C counters... */
1164 for (i = 0; i < 16; i++)
1165 accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
1166 break;
1167
1168 case I915_OA_FORMAT_A45_B8_C8:
1169 accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */
1170
1171 for (i = 0; i < 61; i++)
1172 accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
1173 break;
1174
1175 default:
1176 unreachable("Can't accumulate OA counters in unknown format");
1177 }
1178
1179 }
1180
1181 void
1182 gen_perf_query_result_clear(struct gen_perf_query_result *result)
1183 {
1184 memset(result, 0, sizeof(*result));
1185 result->hw_id = OA_REPORT_INVALID_CTX_ID; /* invalid */
1186 }
1187
1188 static void
1189 register_mdapi_statistic_query(struct gen_perf_config *perf_cfg,
1190 const struct gen_device_info *devinfo)
1191 {
1192 if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
1193 return;
1194
1195 struct gen_perf_query_info *query =
1196 append_query_info(perf_cfg, MAX_STAT_COUNTERS);
1197
1198 query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
1199 query->name = "Intel_Raw_Pipeline_Statistics_Query";
1200
1201 /* The order has to match mdapi_pipeline_metrics. */
1202 add_basic_stat_reg(query, IA_VERTICES_COUNT,
1203 "N vertices submitted");
1204 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1205 "N primitives submitted");
1206 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1207 "N vertex shader invocations");
1208 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1209 "N geometry shader invocations");
1210 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1211 "N geometry shader primitives emitted");
1212 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1213 "N primitives entering clipping");
1214 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1215 "N primitives leaving clipping");
1216 if (devinfo->is_haswell || devinfo->gen == 8) {
1217 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1218 "N fragment shader invocations",
1219 "N fragment shader invocations");
1220 } else {
1221 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1222 "N fragment shader invocations");
1223 }
1224 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1225 "N TCS shader invocations");
1226 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1227 "N TES shader invocations");
1228 if (devinfo->gen >= 7) {
1229 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1230 "N compute shader invocations");
1231 }
1232
1233 if (devinfo->gen >= 10) {
1234 /* Reuse existing CS invocation register until we can expose this new
1235 * one.
1236 */
1237 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1238 "Reserved1");
1239 }
1240
1241 query->data_size = sizeof(uint64_t) * query->n_counters;
1242 }
1243
1244 static void
1245 fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
1246 const char *name,
1247 uint32_t data_offset,
1248 uint32_t data_size,
1249 enum gen_perf_counter_data_type data_type)
1250 {
1251 struct gen_perf_query_counter *counter = &query->counters[query->n_counters];
1252
1253 assert(query->n_counters <= query->max_counters);
1254
1255 counter->name = name;
1256 counter->desc = "Raw counter value";
1257 counter->type = GEN_PERF_COUNTER_TYPE_RAW;
1258 counter->data_type = data_type;
1259 counter->offset = data_offset;
1260
1261 query->n_counters++;
1262
1263 assert(counter->offset + gen_perf_query_counter_get_size(counter) <= query->data_size);
1264 }
1265
1266 #define MDAPI_QUERY_ADD_COUNTER(query, struct_name, field_name, type_name) \
1267 fill_mdapi_perf_query_counter(query, #field_name, \
1268 (uint8_t *) &struct_name.field_name - \
1269 (uint8_t *) &struct_name, \
1270 sizeof(struct_name.field_name), \
1271 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
1272 #define MDAPI_QUERY_ADD_ARRAY_COUNTER(ctx, query, struct_name, field_name, idx, type_name) \
1273 fill_mdapi_perf_query_counter(query, \
1274 ralloc_asprintf(ctx, "%s%i", #field_name, idx), \
1275 (uint8_t *) &struct_name.field_name[idx] - \
1276 (uint8_t *) &struct_name, \
1277 sizeof(struct_name.field_name[0]), \
1278 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
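/* For illustration, in the Gen7 block below
 * MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64) expands to
 * roughly:
 *
 *   fill_mdapi_perf_query_counter(query, "TotalTime",
 *                                 offsetof(struct gen7_mdapi_metrics, TotalTime),
 *                                 sizeof(metric_data.TotalTime),
 *                                 GEN_PERF_COUNTER_DATA_TYPE_UINT64);
 *
 * (the macro computes the offset with pointer arithmetic rather than
 * offsetof(), and #field_name stringifies the field name).
 */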
1279
1280 static void
1281 register_mdapi_oa_query(const struct gen_device_info *devinfo,
1282 struct gen_perf_config *perf)
1283 {
1284 struct gen_perf_query_info *query = NULL;
1285
1286 /* MDAPI requires different structures for pretty much every generation
1287 * (right now we have definitions for gen 7 to 11).
1288 */
1289 if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
1290 return;
1291
1292 switch (devinfo->gen) {
1293 case 7: {
1294 query = append_query_info(perf, 1 + 45 + 16 + 7);
1295 query->oa_format = I915_OA_FORMAT_A45_B8_C8;
1296
1297 struct gen7_mdapi_metrics metric_data;
1298 query->data_size = sizeof(metric_data);
1299
1300 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1301 for (int i = 0; i < ARRAY_SIZE(metric_data.ACounters); i++) {
1302 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1303 metric_data, ACounters, i, UINT64);
1304 }
1305 for (int i = 0; i < ARRAY_SIZE(metric_data.NOACounters); i++) {
1306 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1307 metric_data, NOACounters, i, UINT64);
1308 }
1309 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1310 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1311 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1312 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1313 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1314 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1315 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1316 break;
1317 }
1318 case 8: {
1319 query = append_query_info(perf, 2 + 36 + 16 + 16);
1320 query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
1321
1322 struct gen8_mdapi_metrics metric_data;
1323 query->data_size = sizeof(metric_data);
1324
1325 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1326 MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
1327 for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
1328 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1329 metric_data, OaCntr, i, UINT64);
1330 }
1331 for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
1332 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1333 metric_data, NoaCntr, i, UINT64);
1334 }
1335 MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
1336 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
1337 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
1338 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
1339 MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
1340 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
1341 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
1342 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
1343 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
1344 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1345 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1346 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1347 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1348 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1349 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1350 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1351 break;
1352 }
1353 case 9:
1354 case 10:
1355 case 11: {
1356 query = append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
1357 query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
1358
1359 struct gen9_mdapi_metrics metric_data;
1360 query->data_size = sizeof(metric_data);
1361
1362 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1363 MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
1364 for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
1365 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1366 metric_data, OaCntr, i, UINT64);
1367 }
1368 for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
1369 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1370 metric_data, NoaCntr, i, UINT64);
1371 }
1372 MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
1373 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
1374 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
1375 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
1376 MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
1377 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
1378 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
1379 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
1380 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
1381 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1382 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1383 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1384 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1385 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1386 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1387 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1388 for (int i = 0; i < ARRAY_SIZE(metric_data.UserCntr); i++) {
1389 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1390 metric_data, UserCntr, i, UINT64);
1391 }
1392 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UserCntrCfgId, UINT32);
1393 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved4, UINT32);
1394 break;
1395 }
1396 default:
1397 unreachable("Unsupported gen");
1398 break;
1399 }
1400
1401 query->kind = GEN_PERF_QUERY_TYPE_RAW;
1402 query->name = "Intel_Raw_Hardware_Counters_Set_0_Query";
1403 query->guid = GEN_PERF_QUERY_GUID_MDAPI;
1404
1405 {
1406 /* Accumulation buffer offsets copied from an actual query... */
1407 const struct gen_perf_query_info *copy_query =
1408 &perf->queries[0];
1409
1410 query->gpu_time_offset = copy_query->gpu_time_offset;
1411 query->gpu_clock_offset = copy_query->gpu_clock_offset;
1412 query->a_offset = copy_query->a_offset;
1413 query->b_offset = copy_query->b_offset;
1414 query->c_offset = copy_query->c_offset;
1415 }
1416 }
1417
1418 static uint64_t
1419 get_metric_id(struct gen_perf_config *perf,
1420 const struct gen_perf_query_info *query)
1421 {
1422 /* These queries are known never to change; their config ID was loaded
1423 * when the first query was created. No need to look them up again.
1424 */
1425 if (query->kind == GEN_PERF_QUERY_TYPE_OA)
1426 return query->oa_metrics_set_id;
1427
1428 assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);
1429
1430 /* Raw queries can be reprogrammed by an external application/library.
1431 * When a raw query is used for the first time its ID is set to a value !=
1432 * 0. When it stops being used the ID returns to 0. No need to reload the
1433 * ID when it's already loaded.
1434 */
1435 if (query->oa_metrics_set_id != 0) {
1436 DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
1437 query->name, query->guid, query->oa_metrics_set_id);
1438 return query->oa_metrics_set_id;
1439 }
1440
1441 struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
1442 if (!gen_perf_load_metric_id(perf, query->guid,
1443 &raw_query->oa_metrics_set_id)) {
1444 DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
1445 raw_query->oa_metrics_set_id = 1ULL;
1446 } else {
1447 DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
1448 query->name, query->guid, query->oa_metrics_set_id);
1449 }
1450 return query->oa_metrics_set_id;
1451 }
1452
1453 static struct oa_sample_buf *
1454 get_free_sample_buf(struct gen_perf_context *perf_ctx)
1455 {
1456 struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
1457 struct oa_sample_buf *buf;
1458
1459 if (node)
1460 buf = exec_node_data(struct oa_sample_buf, node, link);
1461 else {
1462 buf = ralloc_size(perf_ctx->perf, sizeof(*buf));
1463
1464 exec_node_init(&buf->link);
1465 buf->refcount = 0;
1466 }
1467 buf->len = 0;
1468
1469 return buf;
1470 }
1471
1472 static void
1473 reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
1474 {
1475 struct exec_node *tail_node =
1476 exec_list_get_tail(&perf_ctx->sample_buffers);
1477 struct oa_sample_buf *tail_buf =
1478 exec_node_data(struct oa_sample_buf, tail_node, link);
1479
1480 /* Remove all old, unreferenced sample buffers walking forward from
1481 * the head of the list, except always leave at least one node in
1482 * the list so we always have a node to reference when we Begin
1483 * a new query.
1484 */
1485 foreach_list_typed_safe(struct oa_sample_buf, buf, link,
1486 &perf_ctx->sample_buffers)
1487 {
1488 if (buf->refcount == 0 && buf != tail_buf) {
1489 exec_node_remove(&buf->link);
1490 exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
1491 } else
1492 return;
1493 }
1494 }
1495
1496 static void
1497 free_sample_bufs(struct gen_perf_context *perf_ctx)
1498 {
1499 foreach_list_typed_safe(struct oa_sample_buf, buf, link,
1500 &perf_ctx->free_sample_buffers)
1501 ralloc_free(buf);
1502
1503 exec_list_make_empty(&perf_ctx->free_sample_buffers);
1504 }
1505
1506 /******************************************************************************/
1507
1508 /**
1509 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
1510 * pipeline statistics for the performance query object.
1511 */
1512 static void
1513 snapshot_statistics_registers(struct gen_perf_context *ctx,
1514 struct gen_perf_query_object *obj,
1515 uint32_t offset_in_bytes)
1516 {
1517 struct gen_perf_config *perf = ctx->perf;
1518 const struct gen_perf_query_info *query = obj->queryinfo;
1519 const int n_counters = query->n_counters;
1520
1521 for (int i = 0; i < n_counters; i++) {
1522 const struct gen_perf_query_counter *counter = &query->counters[i];
1523
1524 assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);
1525
1526 perf->vtbl.store_register_mem(ctx->ctx, obj->pipeline_stats.bo,
1527 counter->pipeline_stat.reg, 8,
1528 offset_in_bytes + i * sizeof(uint64_t));
1529 }
1530 }
1531
1532 static void
1533 snapshot_freq_register(struct gen_perf_context *ctx,
1534 struct gen_perf_query_object *query,
1535 uint32_t bo_offset)
1536 {
1537 struct gen_perf_config *perf = ctx->perf;
1538 const struct gen_device_info *devinfo = ctx->devinfo;
1539
1540 if (devinfo->gen == 8 && !devinfo->is_cherryview)
1541 perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo, GEN7_RPSTAT1, 4, bo_offset);
1542 else if (devinfo->gen >= 9)
1543 perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo, GEN9_RPSTAT0, 4, bo_offset);
1544 }
1545
1546 static void
1547 gen_perf_close(struct gen_perf_context *perfquery,
1548 const struct gen_perf_query_info *query)
1549 {
1550 if (perfquery->oa_stream_fd != -1) {
1551 close(perfquery->oa_stream_fd);
1552 perfquery->oa_stream_fd = -1;
1553 }
1554 if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
1555 struct gen_perf_query_info *raw_query =
1556 (struct gen_perf_query_info *) query;
1557 raw_query->oa_metrics_set_id = 0;
1558 }
1559 }
1560
1561 static bool
1562 gen_perf_open(struct gen_perf_context *perf_ctx,
1563 int metrics_set_id,
1564 int report_format,
1565 int period_exponent,
1566 int drm_fd,
1567 uint32_t ctx_id)
1568 {
1569 uint64_t properties[] = {
1570 /* Single context sampling */
1571 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
1572
1573 /* Include OA reports in samples */
1574 DRM_I915_PERF_PROP_SAMPLE_OA, true,
1575
1576 /* OA unit configuration */
1577 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
1578 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
1579 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
1580 };
1581 struct drm_i915_perf_open_param param = {
1582 .flags = I915_PERF_FLAG_FD_CLOEXEC |
1583 I915_PERF_FLAG_FD_NONBLOCK |
1584 I915_PERF_FLAG_DISABLED,
1585 .num_properties = ARRAY_SIZE(properties) / 2,
1586 .properties_ptr = (uintptr_t) properties,
1587 };
1588 int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
1589 if (fd == -1) {
1590 DBG("Error opening gen perf OA stream: %m\n");
1591 return false;
1592 }
1593
1594 perf_ctx->oa_stream_fd = fd;
1595
1596 perf_ctx->current_oa_metrics_set_id = metrics_set_id;
1597 perf_ctx->current_oa_format = report_format;
1598
1599 return true;
1600 }
1601
1602 static bool
1603 inc_n_users(struct gen_perf_context *perf_ctx)
1604 {
1605 if (perf_ctx->n_oa_users == 0 &&
1606 gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
1607 {
1608 return false;
1609 }
1610 ++perf_ctx->n_oa_users;
1611
1612 return true;
1613 }
1614
1615 static void
1616 dec_n_users(struct gen_perf_context *perf_ctx)
1617 {
1618 /* Disabling the i915 perf stream will effectively disable the OA
1619 * counters. Note it's important to be sure there are no outstanding
1620 * MI_RPC commands at this point since they could stall the CS
1621 * indefinitely once OACONTROL is disabled.
1622 */
1623 --perf_ctx->n_oa_users;
1624 if (perf_ctx->n_oa_users == 0 &&
1625 gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
1626 {
1627 DBG("WARNING: Error disabling gen perf stream: %m\n");
1628 }
1629 }
1630
1631 void
1632 gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
1633 const struct gen_device_info *devinfo,
1634 int drm_fd)
1635 {
1636 load_pipeline_statistic_metrics(perf_cfg, devinfo);
1637 register_mdapi_statistic_query(perf_cfg, devinfo);
1638 if (load_oa_metrics(perf_cfg, drm_fd, devinfo))
1639 register_mdapi_oa_query(devinfo, perf_cfg);
1640 }
1641
1642 void
1643 gen_perf_init_context(struct gen_perf_context *perf_ctx,
1644 struct gen_perf_config *perf_cfg,
1645 void * ctx, /* driver context (eg, brw_context) */
1646 void * bufmgr, /* eg brw_bufmgr */
1647 const struct gen_device_info *devinfo,
1648 uint32_t hw_ctx,
1649 int drm_fd)
1650 {
1651 perf_ctx->perf = perf_cfg;
1652 perf_ctx->ctx = ctx;
1653 perf_ctx->bufmgr = bufmgr;
1654 perf_ctx->drm_fd = drm_fd;
1655 perf_ctx->hw_ctx = hw_ctx;
1656 perf_ctx->devinfo = devinfo;
1657
1658 perf_ctx->unaccumulated =
1659 ralloc_array(ctx, struct gen_perf_query_object *, 2);
1660 perf_ctx->unaccumulated_elements = 0;
1661 perf_ctx->unaccumulated_array_size = 2;
1662
1663 exec_list_make_empty(&perf_ctx->sample_buffers);
1664 exec_list_make_empty(&perf_ctx->free_sample_buffers);
1665
1666 /* It's convenient to guarantee that this linked list of sample
1667 * buffers is never empty, so we add an empty head node; that way, when we
1668 * Begin an OA query we can always take a reference on a buffer
1669 * in this list.
1670 */
1671 struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
1672 exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);
1673
1674 perf_ctx->oa_stream_fd = -1;
1675 perf_ctx->next_query_start_report_id = 1000;
1676 }
1677
1678 /**
1679 * Add a query to the global list of "unaccumulated queries."
1680 *
1681 * Queries are tracked here until all the associated OA reports have
1682 * been accumulated via accumulate_oa_reports() after the end
1683 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
1684 */
1685 static void
1686 add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
1687 struct gen_perf_query_object *obj)
1688 {
1689 if (perf_ctx->unaccumulated_elements >=
1690 perf_ctx->unaccumulated_array_size)
1691 {
1692 perf_ctx->unaccumulated_array_size *= 1.5;
1693 perf_ctx->unaccumulated =
1694 reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
1695 struct gen_perf_query_object *,
1696 perf_ctx->unaccumulated_array_size);
1697 }
1698
1699 perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
1700 }
1701
1702 bool
1703 gen_perf_begin_query(struct gen_perf_context *perf_ctx,
1704 struct gen_perf_query_object *query)
1705 {
1706 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1707 const struct gen_perf_query_info *queryinfo = query->queryinfo;
1708
1709 /* XXX: We have to consider that the command parser unit that parses batch
1710 * buffer commands and is used to capture begin/end counter snapshots isn't
1711 * implicitly synchronized with what's currently running across other GPU
1712 * units (such as the EUs running shaders) that the performance counters are
1713 * associated with.
1714 *
1715 * The intention of performance queries is to measure the work associated
1716 * with commands between the begin/end delimiters and so for that to be the
1717 * case we need to explicitly synchronize the parsing of commands to capture
1718 * Begin/End counter snapshots with what's running across other parts of the
1719 * GPU.
1720 *
1721 * When the command parser reaches a Begin marker it effectively needs to
1722 * drain everything currently running on the GPU until the hardware is idle
1723 * before capturing the first snapshot of counters - otherwise the results
1724 * would also be measuring the effects of earlier commands.
1725 *
1726 * When the command parser reaches an End marker it needs to stall until
1727 * everything currently running on the GPU has finished before capturing the
1728 * end snapshot - otherwise the results won't be a complete representation
1729 * of the work.
1730 *
1731    * To achieve this, we stall the pipeline at pixel scoreboard (preventing
1732    * any additional work from being processed by the pipeline until all
1733    * pixels of the previous draw have completed).
1734    *
1735    * N.B. The final results are based on deltas of counters between (inside)
1736    * Begin/End markers, so even though the total wall clock time of the
1737    * workload is stretched by larger pipeline bubbles, the bubbles themselves
1738    * are generally invisible to the query results. Whether that's a good or a
1739    * bad thing depends on the use case. For a lower real-time impact while
1740    * capturing metrics, periodic sampling may be a better choice than
1741    * INTEL_performance_query.
1742 *
1743 *
1744 * This is our Begin synchronization point to drain current work on the
1745 * GPU before we capture our first counter snapshot...
1746 */
1747 perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
1748
1749 switch (queryinfo->kind) {
1750 case GEN_PERF_QUERY_TYPE_OA:
1751 case GEN_PERF_QUERY_TYPE_RAW: {
1752
1753 /* Opening an i915 perf stream implies exclusive access to the OA unit
1754 * which will generate counter reports for a specific counter set with a
1755 * specific layout/format so we can't begin any OA based queries that
1756 * require a different counter set or format unless we get an opportunity
1757 * to close the stream and open a new one...
1758 */
1759 uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);
1760
1761 if (perf_ctx->oa_stream_fd != -1 &&
1762 perf_ctx->current_oa_metrics_set_id != metric_id) {
1763
1764 if (perf_ctx->n_oa_users != 0) {
1765 DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
1766 perf_ctx->current_oa_metrics_set_id, metric_id);
1767 return false;
1768 } else
1769 gen_perf_close(perf_ctx, queryinfo);
1770 }
1771
1772 /* If the OA counters aren't already on, enable them. */
1773 if (perf_ctx->oa_stream_fd == -1) {
1774 const struct gen_device_info *devinfo = perf_ctx->devinfo;
1775
1776 /* The period_exponent gives a sampling period as follows:
1777 * sample_period = timestamp_period * 2^(period_exponent + 1)
1778 *
1779             * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1780 * ~83ns (GEN8/9).
1781 *
1782             * The counter overflow period is derived from the EuActive counter,
1783             * which increments by the number of clock cycles multiplied by the
1784             * number of EUs. It can be calculated as:
1785 *
1786 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1787 *
1788 * (E.g. 40 EUs @ 1GHz = ~53ms)
1789 *
1790             * We select a sampling period shorter than that overflow period to
1791             * ensure we cannot see more than 1 counter overflow; otherwise we
1792             * could lose information.
1793 */
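            /* For example, with a 40-bit A counter (Gen8+) and 40 EUs the
             * overflow period computed below is roughly
             * 2^40 / (40 * 2) ns ~= 13.7s.
             */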
1794
1795 int a_counter_in_bits = 32;
1796 if (devinfo->gen >= 8)
1797 a_counter_in_bits = 40;
1798
1799 uint64_t overflow_period = pow(2, a_counter_in_bits) / (perf_cfg->sys_vars.n_eus *
1800 /* drop 1GHz freq to have units in nanoseconds */
1801 2);
1802
1803 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1804 overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);
1805
1806 int period_exponent = 0;
1807 uint64_t prev_sample_period, next_sample_period;
1808 for (int e = 0; e < 30; e++) {
1809 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1810 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1811
1812           /* Take the largest sampling period that is still lower than
1813            * the overflow period.
1814            */
1815 if (prev_sample_period < overflow_period &&
1816 next_sample_period > overflow_period)
1817 period_exponent = e + 1;
1818 }
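         /* For example, on HSW (80ns timestamp period) with the ~53ms
          * overflow period above, the largest period below the overflow is
          * 2^19 * 80ns ~= 42ms, i.e. period_exponent = 19.
          */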
1819
1820 if (period_exponent == 0) {
1821            DBG("WARNING: unable to find a sampling exponent\n");
1822 return false;
1823 }
1824
1825 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1826 prev_sample_period / 1000000ul);
1827
1828 if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
1829 period_exponent, perf_ctx->drm_fd,
1830 perf_ctx->hw_ctx))
1831 return false;
1832 } else {
1833 assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
1834 perf_ctx->current_oa_format == queryinfo->oa_format);
1835 }
1836
1837 if (!inc_n_users(perf_ctx)) {
1838 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1839 return false;
1840 }
1841
1842 if (query->oa.bo) {
1843 perf_cfg->vtbl.bo_unreference(query->oa.bo);
1844 query->oa.bo = NULL;
1845 }
1846
1847 query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
1848 "perf. query OA MI_RPC bo",
1849 MI_RPC_BO_SIZE);
1850 #ifdef DEBUG
1851 /* Pre-filling the BO helps debug whether writes landed. */
1852 void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
1853 memset(map, 0x80, MI_RPC_BO_SIZE);
1854 perf_cfg->vtbl.bo_unmap(query->oa.bo);
1855 #endif
1856
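      /* Each query consumes two report IDs: begin_report_id for the Begin
       * snapshot and begin_report_id + 1 for the End snapshot (see
       * gen_perf_end_query()).
       */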
1857 query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
1858 perf_ctx->next_query_start_report_id += 2;
1859
1860 /* Take a starting OA counter snapshot. */
1861 perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
1862 query->oa.begin_report_id);
1863 snapshot_freq_register(perf_ctx, query, MI_FREQ_START_OFFSET_BYTES);
1864
1865 ++perf_ctx->n_active_oa_queries;
1866
1867 /* No already-buffered samples can possibly be associated with this query
1868 * so create a marker within the list of sample buffers enabling us to
1869 * easily ignore earlier samples when processing this query after
1870 * completion.
1871 */
1872 assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
1873 query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);
1874
1875 struct oa_sample_buf *buf =
1876 exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
1877
1878 /* This reference will ensure that future/following sample
1879 * buffers (that may relate to this query) can't be freed until
1880         * this refcount drops to zero.
1881 */
1882 buf->refcount++;
1883
1884 gen_perf_query_result_clear(&query->oa.result);
1885 query->oa.results_accumulated = false;
1886
1887 add_to_unaccumulated_query_list(perf_ctx, query);
1888 break;
1889 }
1890
1891 case GEN_PERF_QUERY_TYPE_PIPELINE:
1892 if (query->pipeline_stats.bo) {
1893 perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
1894 query->pipeline_stats.bo = NULL;
1895 }
1896
1897 query->pipeline_stats.bo =
1898 perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
1899 "perf. query pipeline stats bo",
1900 STATS_BO_SIZE);
1901
1902 /* Take starting snapshots. */
1903 snapshot_statistics_registers(perf_ctx, query, 0);
1904
1905 ++perf_ctx->n_active_pipeline_stats_queries;
1906 break;
1907
1908 default:
1909 unreachable("Unknown query type");
1910 break;
1911 }
1912
1913 return true;
1914 }
1915
1916 void
1917 gen_perf_end_query(struct gen_perf_context *perf_ctx,
1918 struct gen_perf_query_object *query)
1919 {
1920 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1921
1922 /* Ensure that the work associated with the queried commands will have
1923 * finished before taking our query end counter readings.
1924 *
1925     * For more details see the comment in gen_perf_begin_query() for the
1926     * corresponding flush.
1927 */
1928 perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
1929
1930 switch (query->queryinfo->kind) {
1931 case GEN_PERF_QUERY_TYPE_OA:
1932 case GEN_PERF_QUERY_TYPE_RAW:
1933
1934 /* NB: It's possible that the query will have already been marked
1935 * as 'accumulated' if an error was seen while reading samples
1936         * from perf. In this case we mustn't try to emit a closing
1937         * MI_RPC command in case the OA unit has already been disabled.
1938 */
1939 if (!query->oa.results_accumulated) {
1940 /* Take an ending OA counter snapshot. */
1941 snapshot_freq_register(perf_ctx, query, MI_FREQ_END_OFFSET_BYTES);
1942 perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
1943 MI_RPC_BO_END_OFFSET_BYTES,
1944 query->oa.begin_report_id + 1);
1945 }
1946
1947 --perf_ctx->n_active_oa_queries;
1948
1949 /* NB: even though the query has now ended, it can't be accumulated
1950 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1951 * to query->oa.bo
1952 */
1953 break;
1954
1955 case GEN_PERF_QUERY_TYPE_PIPELINE:
1956 snapshot_statistics_registers(perf_ctx, query,
1957 STATS_BO_END_OFFSET_BYTES);
1958 --perf_ctx->n_active_pipeline_stats_queries;
1959 break;
1960
1961 default:
1962 unreachable("Unknown query type");
1963 break;
1964 }
1965 }
1966
1967 enum OaReadStatus {
1968 OA_READ_STATUS_ERROR,
1969 OA_READ_STATUS_UNFINISHED,
1970 OA_READ_STATUS_FINISHED,
1971 };
1972
1973 static enum OaReadStatus
1974 read_oa_samples_until(struct gen_perf_context *perf_ctx,
1975 uint32_t start_timestamp,
1976 uint32_t end_timestamp)
1977 {
1978 struct exec_node *tail_node =
1979 exec_list_get_tail(&perf_ctx->sample_buffers);
1980 struct oa_sample_buf *tail_buf =
1981 exec_node_data(struct oa_sample_buf, tail_node, link);
1982 uint32_t last_timestamp =
1983 tail_buf->len == 0 ? start_timestamp : tail_buf->last_timestamp;
1984
1985 while (1) {
1986 struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
1987 uint32_t offset;
1988 int len;
1989
1990 while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
1991 sizeof(buf->buf))) < 0 && errno == EINTR)
1992 ;
1993
1994 if (len <= 0) {
1995 exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
1996
1997 if (len < 0) {
1998 if (errno == EAGAIN) {
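               /* EAGAIN means there is currently no more data to read.
                * The deltas below are computed in unsigned 32-bit space so
                * that timestamp wraparound between start and end is
                * tolerated; once the last report read is at or past the end
                * timestamp we consider the read finished.
                */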
1999 return ((last_timestamp - start_timestamp) < INT32_MAX &&
2000 (last_timestamp - start_timestamp) >=
2001 (end_timestamp - start_timestamp)) ?
2002 OA_READ_STATUS_FINISHED :
2003 OA_READ_STATUS_UNFINISHED;
2004 } else {
2005 DBG("Error reading i915 perf samples: %m\n");
2006 }
2007 } else
2008 DBG("Spurious EOF reading i915 perf samples\n");
2009
2010 return OA_READ_STATUS_ERROR;
2011 }
2012
2013 buf->len = len;
2014 exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);
2015
2016 /* Go through the reports and update the last timestamp. */
2017 offset = 0;
2018 while (offset < buf->len) {
2019 const struct drm_i915_perf_record_header *header =
2020 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
2021 uint32_t *report = (uint32_t *) (header + 1);
2022
2023 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
2024 last_timestamp = report[1];
2025
2026 offset += header->size;
2027 }
2028
2029 buf->last_timestamp = last_timestamp;
2030 }
2031
2032 unreachable("not reached");
2033 return OA_READ_STATUS_ERROR;
2034 }
2035
2036 /**
2037 * Try to read all the reports until either the delimiting timestamp
2038 * or an error arises.
2039 */
2040 static bool
2041 read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
2042 struct gen_perf_query_object *query,
2043 void *current_batch)
2044 {
2045 uint32_t *start;
2046 uint32_t *last;
2047 uint32_t *end;
2048 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2049
2050       /* We need the MI_REPORT_PERF_COUNT to land before we can start
2051        * accumulating. */
2052 assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
2053 !perf_cfg->vtbl.bo_busy(query->oa.bo));
2054
2055 /* Map the BO once here and let accumulate_oa_reports() unmap
2056 * it. */
2057 if (query->oa.map == NULL)
2058 query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);
2059
2060 start = last = query->oa.map;
2061 end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2062
2063 if (start[0] != query->oa.begin_report_id) {
2064 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
2065 return true;
2066 }
2067 if (end[0] != (query->oa.begin_report_id + 1)) {
2068 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
2069 return true;
2070 }
2071
2072 /* Read the reports until the end timestamp. */
2073 switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
2074 case OA_READ_STATUS_ERROR:
2075 /* Fallthrough and let accumulate_oa_reports() deal with the
2076 * error. */
2077 case OA_READ_STATUS_FINISHED:
2078 return true;
2079 case OA_READ_STATUS_UNFINISHED:
2080 return false;
2081 }
2082
2083 unreachable("invalid read status");
2084 return false;
2085 }
2086
2087 void
2088 gen_perf_wait_query(struct gen_perf_context *perf_ctx,
2089 struct gen_perf_query_object *query,
2090 void *current_batch)
2091 {
2092 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2093 struct brw_bo *bo = NULL;
2094
2095 switch (query->queryinfo->kind) {
2096 case GEN_PERF_QUERY_TYPE_OA:
2097 case GEN_PERF_QUERY_TYPE_RAW:
2098 bo = query->oa.bo;
2099 break;
2100
2101 case GEN_PERF_QUERY_TYPE_PIPELINE:
2102 bo = query->pipeline_stats.bo;
2103 break;
2104
2105 default:
2106 unreachable("Unknown query type");
2107 break;
2108 }
2109
2110 if (bo == NULL)
2111 return;
2112
2113 /* If the current batch references our results bo then we need to
2114 * flush first...
2115 */
2116 if (perf_cfg->vtbl.batch_references(current_batch, bo))
2117 perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
2118
2119 perf_cfg->vtbl.bo_wait_rendering(bo);
2120
2121 /* Due to a race condition between the OA unit signaling report
2122 * availability and the report actually being written into memory,
2123 * we need to wait for all the reports to come in before we can
2124 * read them.
2125 */
2126 if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
2127 query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
2128 while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
2129 ;
2130 }
2131 }
2132
2133 bool
2134 gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
2135 struct gen_perf_query_object *query,
2136 void *current_batch)
2137 {
2138 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2139
2140 switch (query->queryinfo->kind) {
2141 case GEN_PERF_QUERY_TYPE_OA:
2142 case GEN_PERF_QUERY_TYPE_RAW:
2143 return (query->oa.results_accumulated ||
2144 (query->oa.bo &&
2145 !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
2146 !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
2147 read_oa_samples_for_query(perf_ctx, query, current_batch)));
2148 case GEN_PERF_QUERY_TYPE_PIPELINE:
2149 return (query->pipeline_stats.bo &&
2150 !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
2151 !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));
2152
2153 default:
2154 unreachable("Unknown query type");
2155 break;
2156 }
2157
2158 return false;
2159 }
2160
2161 /**
2162     * Remove a query from the global list of unaccumulated queries once
2163     * we've successfully accumulated the OA reports associated with the
2164     * query in accumulate_oa_reports(), or when discarding unwanted query
2165     * results.
2166 */
2167 static void
2168 drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
2169 struct gen_perf_query_object *query)
2170 {
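   /* Unordered removal: the matching entry is overwritten with the last
    * element of the array and the element count is decremented.
    */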
2171 for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
2172 if (perf_ctx->unaccumulated[i] == query) {
2173 int last_elt = --perf_ctx->unaccumulated_elements;
2174
2175 if (i == last_elt)
2176 perf_ctx->unaccumulated[i] = NULL;
2177 else {
2178 perf_ctx->unaccumulated[i] =
2179 perf_ctx->unaccumulated[last_elt];
2180 }
2181
2182 break;
2183 }
2184 }
2185
2186 /* Drop our samples_head reference so that associated periodic
2187 * sample data buffers can potentially be reaped if they aren't
2188 * referenced by any other queries...
2189 */
2190
2191 struct oa_sample_buf *buf =
2192 exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
2193
2194 assert(buf->refcount > 0);
2195 buf->refcount--;
2196
2197 query->oa.samples_head = NULL;
2198
2199 reap_old_sample_buffers(perf_ctx);
2200 }
2201
2202  /* In general, if we see anything spurious while accumulating results
2203   * we don't try to continue accumulating the current query hoping for
2204   * the best; we scrap anything outstanding and then hope for the best
2205   * with new queries.
2206 */
2207 static void
2208 discard_all_queries(struct gen_perf_context *perf_ctx)
2209 {
2210 while (perf_ctx->unaccumulated_elements) {
2211 struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];
2212
2213 query->oa.results_accumulated = true;
2214 drop_from_unaccumulated_query_list(perf_ctx, query);
2215
2216 dec_n_users(perf_ctx);
2217 }
2218 }
2219
2220  /* Checks the bit (in dword 0) indicating whether the context ID (dword 2)
2221   * of an OA report is valid. */
2221 static bool
2222 oa_report_ctx_id_valid(const struct gen_device_info *devinfo,
2223 const uint32_t *report)
2224 {
2225 assert(devinfo->gen >= 8);
2226 if (devinfo->gen == 8)
2227 return (report[0] & (1 << 25)) != 0;
2228 return (report[0] & (1 << 16)) != 0;
2229 }
2230
2231 /**
2232 * Accumulate raw OA counter values based on deltas between pairs of
2233 * OA reports.
2234 *
2235 * Accumulation starts from the first report captured via
2236   * MI_REPORT_PERF_COUNT (MI_RPC) by gen_perf_begin_query() until the
2237   * last MI_RPC report requested by gen_perf_end_query(). Between these
2238   * two reports there may also be some number of periodically sampled OA
2239 * reports collected via the i915 perf interface - depending on the
2240 * duration of the query.
2241 *
2242 * These periodic snapshots help to ensure we handle counter overflow
2243 * correctly by being frequent enough to ensure we don't miss multiple
2244 * overflows of a counter between snapshots. For Gen8+ the i915 perf
2245 * snapshots provide the extra context-switch reports that let us
2246 * subtract out the progress of counters associated with other
2247 * contexts running on the system.
2248 */
2249 static void
2250 accumulate_oa_reports(struct gen_perf_context *perf_ctx,
2251 struct gen_perf_query_object *query)
2252 {
2253 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2254 uint32_t *start;
2255 uint32_t *last;
2256 uint32_t *end;
2257 struct exec_node *first_samples_node;
2258 bool last_report_ctx_match = true;
2259 int out_duration = 0;
2260
2261 assert(query->oa.map != NULL);
2262
2263 start = last = query->oa.map;
2264 end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2265
2266 if (start[0] != query->oa.begin_report_id) {
2267 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
2268 goto error;
2269 }
2270 if (end[0] != (query->oa.begin_report_id + 1)) {
2271 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
2272 goto error;
2273 }
2274
2275    /* On Gen12+ OA reports are sourced from per-context counters, so we don't
2276 * ever have to look at the global OA buffer. Yey \o/
2277 */
2278 if (perf_ctx->devinfo->gen >= 12) {
2279 last = start;
2280 goto end;
2281 }
2282
2283 /* See if we have any periodic reports to accumulate too... */
2284
2285 /* N.B. The oa.samples_head was set when the query began and
2286 * pointed to the tail of the perf_ctx->sample_buffers list at
2287 * the time the query started. Since the buffer existed before the
2288 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
2289 * that no data in this particular node's buffer can possibly be
2290 * associated with the query - so skip ahead one...
2291 */
2292 first_samples_node = query->oa.samples_head->next;
2293
2294 foreach_list_typed_from(struct oa_sample_buf, buf, link,
2295 &perf_ctx->sample_buffers,
2296 first_samples_node)
2297 {
2298 int offset = 0;
2299
2300 while (offset < buf->len) {
2301 const struct drm_i915_perf_record_header *header =
2302 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
2303
2304 assert(header->size != 0);
2305 assert(header->size <= buf->len);
2306
2307 offset += header->size;
2308
2309 switch (header->type) {
2310 case DRM_I915_PERF_RECORD_SAMPLE: {
2311 uint32_t *report = (uint32_t *)(header + 1);
2312 bool report_ctx_match = true;
2313 bool add = true;
2314
2315 /* Ignore reports that come before the start marker.
2316 * (Note: takes care to allow overflow of 32bit timestamps)
2317 */
2318 if (gen_device_info_timebase_scale(devinfo,
2319 report[1] - start[1]) > 5000000000) {
2320 continue;
2321 }
2322
2323 /* Ignore reports that come after the end marker.
2324 * (Note: takes care to allow overflow of 32bit timestamps)
2325 */
2326 if (gen_device_info_timebase_scale(devinfo,
2327 report[1] - end[1]) <= 5000000000) {
2328 goto end;
2329 }
2330
2331             /* For Gen8+, since the counters continue while other
2332              * contexts are running, we need to discount any unrelated
2333              * deltas. The hardware automatically generates a report
2334              * on context switch which gives us a new reference point
2335              * to continue adding deltas from.
2336              *
2337              * For Haswell we can rely on the HW to stop the progress
2338              * of OA counters while any other context is active.
2339 */
2340 if (devinfo->gen >= 8) {
2341 /* Consider that the current report matches our context only if
2342 * the report says the report ID is valid.
2343 */
2344 report_ctx_match = oa_report_ctx_id_valid(devinfo, report) &&
2345 report[2] == start[2];
2346 if (report_ctx_match)
2347 out_duration = 0;
2348 else
2349 out_duration++;
2350
2351 /* Only add the delta between <last, report> if the last report
2352 * was clearly identified as our context, or if we have at most
2353 * 1 report without a matching ID.
2354 *
2355 * The OA unit will sometimes label reports with an invalid
2356 * context ID when i915 rewrites the execlist submit register
2357 * with the same context as the one currently running. This
2358              * happens when i915 wants to notify the HW of a ringbuffer tail
2359 * register update. We have to consider this report as part of
2360 * our context as the 3d pipeline behind the OACS unit is still
2361 * processing the operations started at the previous execlist
2362 * submission.
2363 */
2364 add = last_report_ctx_match && out_duration < 2;
2365 }
2366
2367 if (add) {
2368 gen_perf_query_result_accumulate(&query->oa.result,
2369 query->queryinfo,
2370 last, report);
2371 } else {
2372 /* We're not adding the delta because we've identified it's not
2373 * for the context we filter for. We can consider that the
2374 * query was split.
2375 */
2376 query->oa.result.query_disjoint = true;
2377 }
2378
2379 last = report;
2380 last_report_ctx_match = report_ctx_match;
2381
2382 break;
2383 }
2384
2385 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
2386 DBG("i915 perf: OA error: all reports lost\n");
2387 goto error;
2388 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
2389 DBG("i915 perf: OA report lost\n");
2390 break;
2391 }
2392 }
2393 }
2394
2395 end:
2396
2397 gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
2398 last, end);
2399
2400 query->oa.results_accumulated = true;
2401 drop_from_unaccumulated_query_list(perf_ctx, query);
2402 dec_n_users(perf_ctx);
2403
2404 return;
2405
2406 error:
2407
2408 discard_all_queries(perf_ctx);
2409 }
2410
2411 void
2412 gen_perf_delete_query(struct gen_perf_context *perf_ctx,
2413 struct gen_perf_query_object *query)
2414 {
2415 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2416
2417 /* We can assume that the frontend waits for a query to complete
2418 * before ever calling into here, so we don't have to worry about
2419 * deleting an in-flight query object.
2420 */
2421 switch (query->queryinfo->kind) {
2422 case GEN_PERF_QUERY_TYPE_OA:
2423 case GEN_PERF_QUERY_TYPE_RAW:
2424 if (query->oa.bo) {
2425 if (!query->oa.results_accumulated) {
2426 drop_from_unaccumulated_query_list(perf_ctx, query);
2427 dec_n_users(perf_ctx);
2428 }
2429
2430 perf_cfg->vtbl.bo_unreference(query->oa.bo);
2431 query->oa.bo = NULL;
2432 }
2433
2434 query->oa.results_accumulated = false;
2435 break;
2436
2437 case GEN_PERF_QUERY_TYPE_PIPELINE:
2438 if (query->pipeline_stats.bo) {
2439 perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
2440 query->pipeline_stats.bo = NULL;
2441 }
2442 break;
2443
2444 default:
2445 unreachable("Unknown query type");
2446 break;
2447 }
2448
2449    /* Deleting the last query instance is a good indication that the
2450     * INTEL_performance_query extension is no longer in use, so free our
2451     * cache of sample buffers and close any current i915-perf stream.
2452 */
2453 if (--perf_ctx->n_query_instances == 0) {
2454 free_sample_bufs(perf_ctx);
2455 gen_perf_close(perf_ctx, query->queryinfo);
2456 }
2457
2458 free(query);
2459 }
2460
2461 #define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
2462
2463 static void
2464 read_gt_frequency(struct gen_perf_context *perf_ctx,
2465 struct gen_perf_query_object *obj)
2466 {
2467 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2468 uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
2469 end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
2470
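   /* The RPSTAT registers report the current GT frequency: in units of
    * 50MHz on gen7/8 (RPSTAT1) and 50/3 MHz on gen9-11 (RPSTAT0), hence
    * the multipliers below before the final conversion to Hz.
    */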
2471 switch (devinfo->gen) {
2472 case 7:
2473 case 8:
2474 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
2475 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
2476 break;
2477 case 9:
2478 case 10:
2479 case 11:
2480 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
2481 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
2482 break;
2483 default:
2484 unreachable("unexpected gen");
2485 }
2486
2487 /* Put the numbers into Hz. */
2488 obj->oa.gt_frequency[0] *= 1000000ULL;
2489 obj->oa.gt_frequency[1] *= 1000000ULL;
2490 }
2491
2492 static int
2493 get_oa_counter_data(struct gen_perf_context *perf_ctx,
2494 struct gen_perf_query_object *query,
2495 size_t data_size,
2496 uint8_t *data)
2497 {
2498 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2499 const struct gen_perf_query_info *queryinfo = query->queryinfo;
2500 int n_counters = queryinfo->n_counters;
2501 int written = 0;
2502
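   /* Write each counter value at its declared offset in the output buffer;
    * 'written' ends up just past the last counter actually written.
    */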
2503 for (int i = 0; i < n_counters; i++) {
2504 const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
2505 uint64_t *out_uint64;
2506 float *out_float;
2507 size_t counter_size = gen_perf_query_counter_get_size(counter);
2508
2509 if (counter_size) {
2510 switch (counter->data_type) {
2511 case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
2512 out_uint64 = (uint64_t *)(data + counter->offset);
2513 *out_uint64 =
2514 counter->oa_counter_read_uint64(perf_cfg, queryinfo,
2515 query->oa.result.accumulator);
2516 break;
2517 case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
2518 out_float = (float *)(data + counter->offset);
2519 *out_float =
2520 counter->oa_counter_read_float(perf_cfg, queryinfo,
2521 query->oa.result.accumulator);
2522 break;
2523 default:
2524 /* So far we aren't using uint32, double or bool32... */
2525 unreachable("unexpected counter data type");
2526 }
2527 written = counter->offset + counter_size;
2528 }
2529 }
2530
2531 return written;
2532 }
2533
2534 static int
2535 get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
2536 struct gen_perf_query_object *query,
2537 size_t data_size,
2538 uint8_t *data)
2539
2540 {
2541 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2542 const struct gen_perf_query_info *queryinfo = query->queryinfo;
2543 int n_counters = queryinfo->n_counters;
2544 uint8_t *p = data;
2545
2546 uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
2547 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
2548
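   /* Each pipeline statistic is the delta between the end and start
    * snapshots, optionally scaled by the counter's numerator/denominator
    * ratio.
    */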
2549 for (int i = 0; i < n_counters; i++) {
2550 const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
2551 uint64_t value = end[i] - start[i];
2552
2553 if (counter->pipeline_stat.numerator !=
2554 counter->pipeline_stat.denominator) {
2555 value *= counter->pipeline_stat.numerator;
2556 value /= counter->pipeline_stat.denominator;
2557 }
2558
2559 *((uint64_t *)p) = value;
2560 p += 8;
2561 }
2562
2563 perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);
2564
2565 return p - data;
2566 }
2567
2568 void
2569 gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
2570 struct gen_perf_query_object *query,
2571 int data_size,
2572 unsigned *data,
2573 unsigned *bytes_written)
2574 {
2575 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2576 int written = 0;
2577
2578 switch (query->queryinfo->kind) {
2579 case GEN_PERF_QUERY_TYPE_OA:
2580 case GEN_PERF_QUERY_TYPE_RAW:
2581 if (!query->oa.results_accumulated) {
2582 read_gt_frequency(perf_ctx, query);
2583 uint32_t *begin_report = query->oa.map;
2584 uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2585 gen_perf_query_result_read_frequencies(&query->oa.result,
2586 perf_ctx->devinfo,
2587 begin_report,
2588 end_report);
2589 accumulate_oa_reports(perf_ctx, query);
2590 assert(query->oa.results_accumulated);
2591
2592 perf_cfg->vtbl.bo_unmap(query->oa.bo);
2593 query->oa.map = NULL;
2594 }
2595 if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
2596 written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
2597 } else {
2598 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2599
2600 written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
2601 devinfo, &query->oa.result,
2602 query->oa.gt_frequency[0],
2603 query->oa.gt_frequency[1]);
2604 }
2605 break;
2606
2607 case GEN_PERF_QUERY_TYPE_PIPELINE:
2608 written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
2609 break;
2610
2611 default:
2612 unreachable("Unknown query type");
2613 break;
2614 }
2615
2616 if (bytes_written)
2617 *bytes_written = written;
2618 }
2619
2620 void
2621 gen_perf_dump_query_count(struct gen_perf_context *perf_ctx)
2622 {
2623 DBG("Queries: (Open queries = %d, OA users = %d)\n",
2624 perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
2625 }
2626
2627 void
2628 gen_perf_dump_query(struct gen_perf_context *ctx,
2629 struct gen_perf_query_object *obj,
2630 void *current_batch)
2631 {
2632 switch (obj->queryinfo->kind) {
2633 case GEN_PERF_QUERY_TYPE_OA:
2634 case GEN_PERF_QUERY_TYPE_RAW:
2635 DBG("BO: %-4s OA data: %-10s %-15s\n",
2636 obj->oa.bo ? "yes," : "no,",
2637 gen_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
2638 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
2639 break;
2640 case GEN_PERF_QUERY_TYPE_PIPELINE:
2641 DBG("BO: %-4s\n",
2642 obj->pipeline_stats.bo ? "yes" : "no");
2643 break;
2644 default:
2645 unreachable("Unknown query type");
2646 break;
2647 }
2648 }