intel/perf: report query split for mdapi
[mesa.git] / src / intel / perf / gen_perf.c
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <dirent.h>
25
26 #include <sys/types.h>
27 #include <sys/stat.h>
28 #include <fcntl.h>
29 #include <unistd.h>
30 #include <errno.h>
31
32 #include <drm-uapi/i915_drm.h>
33
34 #include "common/gen_gem.h"
35 #include "gen_perf.h"
36 #include "gen_perf_regs.h"
37 #include "perf/gen_perf_mdapi.h"
38 #include "perf/gen_perf_metrics.h"
39
40 #include "dev/gen_debug.h"
41 #include "dev/gen_device_info.h"
42 #include "util/bitscan.h"
43 #include "util/mesa-sha1.h"
44 #include "util/u_math.h"
45
46 #define FILE_DEBUG_FLAG DEBUG_PERFMON
47 #define MI_RPC_BO_SIZE 4096
48 #define MI_FREQ_START_OFFSET_BYTES (3072)
49 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
50 #define MI_FREQ_END_OFFSET_BYTES (3076)
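
/* The 4KiB MI_RPC buffer is assumed to be laid out as follows: the Begin OA
 * report at offset 0, the End OA report in the second half of the buffer
 * (MI_RPC_BO_END_OFFSET_BYTES), and the begin/end RPSTAT frequency snapshots
 * at MI_FREQ_START/END_OFFSET_BYTES.
 */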
51
52 #define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
53
54 #define GEN7_RPSTAT1 0xA01C
55 #define GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT 7
56 #define GEN7_RPSTAT1_CURR_GT_FREQ_MASK INTEL_MASK(13, 7)
57 #define GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT 0
58 #define GEN7_RPSTAT1_PREV_GT_FREQ_MASK INTEL_MASK(6, 0)
59
60 #define GEN9_RPSTAT0 0xA01C
61 #define GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT 23
62 #define GEN9_RPSTAT0_CURR_GT_FREQ_MASK INTEL_MASK(31, 23)
63 #define GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT 0
64 #define GEN9_RPSTAT0_PREV_GT_FREQ_MASK INTEL_MASK(8, 0)
65
66 #define GEN6_SO_PRIM_STORAGE_NEEDED 0x2280
67 #define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)
68 #define GEN6_SO_NUM_PRIMS_WRITTEN 0x2288
69 #define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
70
71 #define MAP_READ (1 << 0)
72 #define MAP_WRITE (1 << 1)
73
74 #define OA_REPORT_INVALID_CTX_ID (0xffffffff)
75
76 /**
77 * Periodic OA samples are read() into these buffer structures via the
78 * i915 perf kernel interface and appended to the
79 * perf_ctx->sample_buffers linked list. When we process the
80 * results of an OA metrics query we need to consider all the periodic
81 * samples between the Begin and End MI_REPORT_PERF_COUNT command
82 * markers.
83 *
84 * 'Periodic' is a simplification as there are other automatic reports
85 * written by the hardware also buffered here.
86 *
87 * Considering three queries, A, B and C:
88 *
89 * Time ---->
90 * ________________A_________________
91 * | |
92 * | ________B_________ _____C___________
93 * | | | | | |
94 *
95 * And an illustration of sample buffers read over this time frame:
96 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
97 *
98 * These nodes may hold samples for query A:
99 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
100 *
101 * These nodes may hold samples for query B:
102 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
103 *
104 * These nodes may hold samples for query C:
105 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
106 *
107 * The illustration assumes we have an even distribution of periodic
108 * samples so all nodes have the same size plotted against time:
109 *
110 * Note, to simplify code, the list is never empty.
111 *
112 * With overlapping queries we can see that periodic OA reports may
113 * relate to multiple queries and care needs to be taken to keep
114 * track of sample buffers until there are no queries that might
115 * depend on their contents.
116 *
117 * We use a node ref counting system where a reference ensures that a
118 * node and all following nodes can't be freed/recycled until the
119 * reference drops to zero.
120 *
121 * E.g. with a ref of one here:
122 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
123 *
124 * These nodes could be freed or recycled ("reaped"):
125 * [ 0 ][ 0 ]
126 *
127 * These must be preserved until the leading ref drops to zero:
128 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
129 *
130 * When a query starts we take a reference on the current tail of
131 * the list, knowing that no already-buffered samples can possibly
132 * relate to the newly-started query. A pointer to this node is
133 * also saved in the query object's ->oa.samples_head.
134 *
135 * E.g. starting query A while there are two nodes in .sample_buffers:
136 * ________________A________
137 * |
138 *
139 * [ 0 ][ 1 ]
140 * ^_______ Add a reference and store pointer to node in
141 * A->oa.samples_head
142 *
143 * Moving forward to when the B query starts with no new buffer nodes:
144 * (for reference, i915 perf reads() are only done when queries finish)
145 * ________________A_______
146 * | ________B___
147 * | |
148 *
149 * [ 0 ][ 2 ]
150 * ^_______ Add a reference and store pointer to
151 * node in B->oa.samples_head
152 *
153 * Once a query is finished, i.e. once the OA query has become 'Ready',
154 * the End OA report has landed and we have processed all the
155 * intermediate periodic samples, then we drop the
156 * ->oa.samples_head reference we took at the start.
157 *
158 * So when the B query has finished we have:
159 * ________________A________
160 * | ______B___________
161 * | | |
162 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
163 * ^_______ Drop B->oa.samples_head reference
164 *
165 * We still can't free these due to the A->oa.samples_head ref:
166 * [ 1 ][ 0 ][ 0 ][ 0 ]
167 *
168 * When the A query finishes: (note there's a new ref for C's samples_head)
169 * ________________A_________________
170 * | |
171 * | _____C_________
172 * | | |
173 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
174 * ^_______ Drop A->oa.samples_head reference
175 *
176 * And we can now reap these nodes up to the C->oa.samples_head:
177 * [ X ][ X ][ X ][ X ]
178 * keeping -> [ 1 ][ 0 ][ 0 ]
179 *
180 * We reap old sample buffers each time we finish processing an OA
181 * query by iterating the sample_buffers list from the head until we
182 * find a referenced node and stop.
183 *
184 * Reaped buffers move to a perfquery.free_sample_buffers list and
185 * when we come to read() we first look to recycle a buffer from the
186 * free_sample_buffers list before allocating a new buffer.
187 */
188 struct oa_sample_buf {
189 struct exec_node link;
190 int refcount;
191 int len;
192 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
193 uint32_t last_timestamp;
194 };
195
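/* A minimal illustrative sketch (not used by the code in this file) of the
 * reference pattern described above: when a query begins, it pins the current
 * tail buffer (and therefore every buffer appended after it) by bumping the
 * tail's refcount, and remembers that node as its samples_head.
 */
static inline void
take_samples_head_reference(struct exec_list *sample_buffers,
                            struct exec_node **samples_head)
{
   struct exec_node *tail = exec_list_get_tail(sample_buffers);
   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, tail, link);

   /* Pin this node and everything buffered after it. */
   buf->refcount++;
   *samples_head = tail;
}
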
196 /**
197 * gen representation of a performance query object.
198 *
199 * NB: We want to keep this structure relatively lean considering that
200 * applications may expect to allocate enough objects to be able to
201 * query around all draw calls in a frame.
202 */
203 struct gen_perf_query_object
204 {
205 const struct gen_perf_query_info *queryinfo;
206
207 /* See query->kind to know which state below is in use... */
208 union {
209 struct {
210
211 /**
212 * BO containing OA counter snapshots at query Begin/End time.
213 */
214 void *bo;
215
216 /**
217 * Address of the mapped @bo
218 */
219 void *map;
220
221 /**
222 * The MI_REPORT_PERF_COUNT command lets us specify a unique
223 * ID that will be reflected in the resulting OA report
224 * that's written by the GPU. This is the ID we're expecting
225 * in the begin report, and the end report should be
226 * @begin_report_id + 1.
227 */
228 int begin_report_id;
229
230 /**
231 * Reference the head of the perf_ctx->sample_buffers
232 * list at the time that the query started (so we only need
233 * to look at nodes after this point when looking for samples
234 * related to this query)
235 *
236 * (See struct oa_sample_buf description for more details)
237 */
238 struct exec_node *samples_head;
239
240 /**
241 * false while in the unaccumulated_elements list, and set to
242 * true when the final, end MI_RPC snapshot has been
243 * accumulated.
244 */
245 bool results_accumulated;
246
247 /**
248 * Frequency of the GT at begin and end of the query.
249 */
250 uint64_t gt_frequency[2];
251
252 /**
253 * Accumulated OA results between begin and end of the query.
254 */
255 struct gen_perf_query_result result;
256 } oa;
257
258 struct {
259 /**
260 * BO containing starting and ending snapshots for the
261 * statistics counters.
262 */
263 void *bo;
264 } pipeline_stats;
265 };
266 };
267
268 struct gen_perf_context {
269 struct gen_perf_config *perf;
270
271 void * ctx; /* driver context (eg, brw_context) */
272 void * bufmgr;
273 const struct gen_device_info *devinfo;
274
275 uint32_t hw_ctx;
276 int drm_fd;
277
278 /* The i915 perf stream we open to setup + enable the OA counters */
279 int oa_stream_fd;
280
281 /* An i915 perf stream fd gives exclusive access to the OA unit that will
282 * report counter snapshots for a specific counter set/profile in a
283 * specific layout/format so we can only start OA queries that are
284 * compatible with the currently open fd...
285 */
286 int current_oa_metrics_set_id;
287 int current_oa_format;
288
289 /* List of buffers containing OA reports */
290 struct exec_list sample_buffers;
291
292 /* Cached list of empty sample buffers */
293 struct exec_list free_sample_buffers;
294
295 int n_active_oa_queries;
296 int n_active_pipeline_stats_queries;
297
298 /* The number of queries depending on running OA counters which
299 * extends beyond gen_perf_end_query() since we need to wait until
300 * the last MI_RPC command has been parsed by the GPU.
301 *
302 * Accurate accounting is important here as emitting an
303 * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
304 * effectively hang the gpu.
305 */
306 int n_oa_users;
307
308 /* To help catch a spurious problem with the hardware or perf
309 * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
310 * with a unique ID that we can explicitly check for...
311 */
312 int next_query_start_report_id;
313
314 /**
315 * An array of queries whose results haven't yet been assembled
316 * based on the data in buffer objects.
317 *
318 * These may be active, or have already ended. However, the
319 * results have not been requested.
320 */
321 struct gen_perf_query_object **unaccumulated;
322 int unaccumulated_elements;
323 int unaccumulated_array_size;
324
325 /* The total number of query objects so we can relinquish
326 * our exclusive access to perf if the application deletes
327 * all of its objects. (NB: We only disable perf while
328 * there are no active queries)
329 */
330 int n_query_instances;
331 };
332
333 const struct gen_perf_query_info*
334 gen_perf_query_info(const struct gen_perf_query_object *query)
335 {
336 return query->queryinfo;
337 }
338
339 struct gen_perf_context *
340 gen_perf_new_context(void *parent)
341 {
342 struct gen_perf_context *ctx = rzalloc(parent, struct gen_perf_context);
343 if (!ctx)
344 fprintf(stderr, "%s: failed to alloc context\n", __func__);
345 return ctx;
346 }
347
348 struct gen_perf_config *
349 gen_perf_config(struct gen_perf_context *ctx)
350 {
351 return ctx->perf;
352 }
353
354 struct gen_perf_query_object *
355 gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
356 {
357 const struct gen_perf_query_info *query =
358 &perf_ctx->perf->queries[query_index];
359 struct gen_perf_query_object *obj =
360 calloc(1, sizeof(struct gen_perf_query_object));
361
362 if (!obj)
363 return NULL;
364
365 obj->queryinfo = query;
366
367 perf_ctx->n_query_instances++;
368 return obj;
369 }
370
371 int
372 gen_perf_active_queries(struct gen_perf_context *perf_ctx,
373 const struct gen_perf_query_info *query)
374 {
375 assert(perf_ctx->n_active_oa_queries == 0 || perf_ctx->n_active_pipeline_stats_queries == 0);
376
377 switch (query->kind) {
378 case GEN_PERF_QUERY_TYPE_OA:
379 case GEN_PERF_QUERY_TYPE_RAW:
380 return perf_ctx->n_active_oa_queries;
381 break;
382
383 case GEN_PERF_QUERY_TYPE_PIPELINE:
384 return perf_ctx->n_active_pipeline_stats_queries;
385 break;
386
387 default:
388 unreachable("Unknown query type");
389 break;
390 }
391 }
392
393 static inline uint64_t to_user_pointer(void *ptr)
394 {
395 return (uintptr_t) ptr;
396 }
397
398 static bool
399 get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
400 {
401 struct stat sb;
402 int min, maj;
403 DIR *drmdir;
404 struct dirent *drm_entry;
405 int len;
406
407 perf->sysfs_dev_dir[0] = '\0';
408
409 if (fstat(fd, &sb)) {
410 DBG("Failed to stat DRM fd\n");
411 return false;
412 }
413
414 maj = major(sb.st_rdev);
415 min = minor(sb.st_rdev);
416
417 if (!S_ISCHR(sb.st_mode)) {
418 DBG("DRM fd is not a character device as expected\n");
419 return false;
420 }
421
422 len = snprintf(perf->sysfs_dev_dir,
423 sizeof(perf->sysfs_dev_dir),
424 "/sys/dev/char/%d:%d/device/drm", maj, min);
425 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
426 DBG("Failed to concatenate sysfs path to drm device\n");
427 return false;
428 }
429
430 drmdir = opendir(perf->sysfs_dev_dir);
431 if (!drmdir) {
432 DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
433 return false;
434 }
435
436 while ((drm_entry = readdir(drmdir))) {
437 if ((drm_entry->d_type == DT_DIR ||
438 drm_entry->d_type == DT_LNK) &&
439 strncmp(drm_entry->d_name, "card", 4) == 0)
440 {
441 len = snprintf(perf->sysfs_dev_dir,
442 sizeof(perf->sysfs_dev_dir),
443 "/sys/dev/char/%d:%d/device/drm/%s",
444 maj, min, drm_entry->d_name);
445 closedir(drmdir);
446 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
447 return false;
448 else
449 return true;
450 }
451 }
452
453 closedir(drmdir);
454
455 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
456 maj, min);
457
458 return false;
459 }
460
461 static bool
462 read_file_uint64(const char *file, uint64_t *val)
463 {
464 char buf[32];
465 int fd, n;
466
467 fd = open(file, 0);
468 if (fd < 0)
469 return false;
470 while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
471 errno == EINTR);
472 close(fd);
473 if (n < 0)
474 return false;
475
476 buf[n] = '\0';
477 *val = strtoull(buf, NULL, 0);
478
479 return true;
480 }
481
482 static bool
483 read_sysfs_drm_device_file_uint64(struct gen_perf_config *perf,
484 const char *file,
485 uint64_t *value)
486 {
487 char buf[512];
488 int len;
489
490 len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
491 if (len < 0 || len >= sizeof(buf)) {
492 DBG("Failed to concatenate sys filename to read u64 from\n");
493 return false;
494 }
495
496 return read_file_uint64(buf, value);
497 }
498
499 static inline struct gen_perf_query_info *
500 append_query_info(struct gen_perf_config *perf, int max_counters)
501 {
502 struct gen_perf_query_info *query;
503
504 perf->queries = reralloc(perf, perf->queries,
505 struct gen_perf_query_info,
506 ++perf->n_queries);
507 query = &perf->queries[perf->n_queries - 1];
508 memset(query, 0, sizeof(*query));
509
510 if (max_counters > 0) {
511 query->max_counters = max_counters;
512 query->counters =
513 rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
514 }
515
516 return query;
517 }
518
519 static void
520 register_oa_config(struct gen_perf_config *perf,
521 const struct gen_perf_query_info *query,
522 uint64_t config_id)
523 {
524 struct gen_perf_query_info *registered_query = append_query_info(perf, 0);
525
526 *registered_query = *query;
527 registered_query->oa_metrics_set_id = config_id;
528 DBG("metric set registered: id = %" PRIu64", guid = %s\n",
529 registered_query->oa_metrics_set_id, query->guid);
530 }
531
532 static void
533 enumerate_sysfs_metrics(struct gen_perf_config *perf)
534 {
535 DIR *metricsdir = NULL;
536 struct dirent *metric_entry;
537 char buf[256];
538 int len;
539
540 len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
541 if (len < 0 || len >= sizeof(buf)) {
542 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
543 return;
544 }
545
546 metricsdir = opendir(buf);
547 if (!metricsdir) {
548 DBG("Failed to open %s: %m\n", buf);
549 return;
550 }
551
552 while ((metric_entry = readdir(metricsdir))) {
553 struct hash_entry *entry;
554
555 if ((metric_entry->d_type != DT_DIR &&
556 metric_entry->d_type != DT_LNK) ||
557 metric_entry->d_name[0] == '.')
558 continue;
559
560 DBG("metric set: %s\n", metric_entry->d_name);
561 entry = _mesa_hash_table_search(perf->oa_metrics_table,
562 metric_entry->d_name);
563 if (entry) {
564 uint64_t id;
565 if (!gen_perf_load_metric_id(perf, metric_entry->d_name, &id)) {
566 DBG("Failed to read metric set id from %s: %m", buf);
567 continue;
568 }
569
570 register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
571 } else
572 DBG("metric set not known by mesa (skipping)\n");
573 }
574
575 closedir(metricsdir);
576 }
577
578 static bool
579 kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
580 {
581 uint64_t invalid_config_id = UINT64_MAX;
582
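   /* Probe with an ID that cannot exist: a kernel implementing
    * DRM_IOCTL_I915_PERF_REMOVE_CONFIG rejects it with ENOENT, while an older
    * kernel fails the unknown ioctl with a different errno.
    */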
583 return gen_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
584 &invalid_config_id) < 0 && errno == ENOENT;
585 }
586
587 static int
588 i915_query_items(struct gen_perf_config *perf, int fd,
589 struct drm_i915_query_item *items, uint32_t n_items)
590 {
591 struct drm_i915_query q = {
592 .num_items = n_items,
593 .items_ptr = to_user_pointer(items),
594 };
595 return gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
596 }
597
598 static bool
599 i915_query_perf_config_supported(struct gen_perf_config *perf, int fd)
600 {
601 struct drm_i915_query_item item = {
602 .query_id = DRM_I915_QUERY_PERF_CONFIG,
603 .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
604 };
605
606 return i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0;
607 }
608
609 static bool
610 i915_query_perf_config_data(struct gen_perf_config *perf,
611 int fd, const char *guid,
612 struct drm_i915_perf_oa_config *config)
613 {
614 struct {
615 struct drm_i915_query_perf_config query;
616 struct drm_i915_perf_oa_config config;
617 } item_data;
618 struct drm_i915_query_item item = {
619 .query_id = DRM_I915_QUERY_PERF_CONFIG,
620 .flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
621 .data_ptr = to_user_pointer(&item_data),
622 .length = sizeof(item_data),
623 };
624
625 memset(&item_data, 0, sizeof(item_data));
626 memcpy(item_data.query.uuid, guid, sizeof(item_data.query.uuid));
627 memcpy(&item_data.config, config, sizeof(item_data.config));
628
629 if (!(i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0))
630 return false;
631
632 memcpy(config, &item_data.config, sizeof(item_data.config));
633
634 return true;
635 }
636
637 bool
638 gen_perf_load_metric_id(struct gen_perf_config *perf_cfg,
639 const char *guid,
640 uint64_t *metric_id)
641 {
642 char config_path[280];
643
644 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
645 perf_cfg->sysfs_dev_dir, guid);
646
647 /* Don't recreate already loaded configs. */
648 return read_file_uint64(config_path, metric_id);
649 }
650
651 static uint64_t
652 i915_add_config(struct gen_perf_config *perf, int fd,
653 const struct gen_perf_registers *config,
654 const char *guid)
655 {
656 struct drm_i915_perf_oa_config i915_config = { 0, };
657
658 memcpy(i915_config.uuid, guid, sizeof(i915_config.uuid));
659
660 i915_config.n_mux_regs = config->n_mux_regs;
661 i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);
662
663 i915_config.n_boolean_regs = config->n_b_counter_regs;
664 i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);
665
666 i915_config.n_flex_regs = config->n_flex_regs;
667 i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);
668
669 int ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &i915_config);
670 return ret > 0 ? ret : 0;
671 }
672
673 static void
674 init_oa_configs(struct gen_perf_config *perf, int fd)
675 {
676 hash_table_foreach(perf->oa_metrics_table, entry) {
677 const struct gen_perf_query_info *query = entry->data;
678 uint64_t config_id;
679
680 if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
681 DBG("metric set: %s (already loaded)\n", query->guid);
682 register_oa_config(perf, query, config_id);
683 continue;
684 }
685
686 uint64_t ret = i915_add_config(perf, fd, &query->config, query->guid);
687 if (ret == 0) {
688 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
689 query->name, query->guid, strerror(errno));
690 continue;
691 }
692
693 register_oa_config(perf, query, ret);
694 DBG("metric set: %s (added)\n", query->guid);
695 }
696 }
697
698 static void
699 compute_topology_builtins(struct gen_perf_config *perf,
700 const struct gen_device_info *devinfo)
701 {
702 perf->sys_vars.slice_mask = devinfo->slice_masks;
703 perf->sys_vars.n_eu_slices = devinfo->num_slices;
704
705 for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
706 perf->sys_vars.n_eu_sub_slices +=
707 __builtin_popcount(devinfo->subslice_masks[i]);
708 }
709
710 for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
711 perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);
712
713 perf->sys_vars.eu_threads_count = devinfo->num_thread_per_eu;
714
715 /* The subslice mask builtin contains bits for all slices. Prior to Gen11
716 * it had groups of 3 bits for each slice; on Gen11 it's 8 bits for each
717 * slice.
718 *
719 * Ideally equations would be updated to have a slice/subslice query
720 * function/operator.
721 */
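   /* For example (illustrative numbers): a 2-slice part with 3 subslices
    * enabled per slice ends up with subslice_mask = 0x3f when packed with
    * 3 bits per slice (gen < 11), but 0x707 when packed with 8 bits per
    * slice (gen == 11).
    */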
722 perf->sys_vars.subslice_mask = 0;
723
724 int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;
725
726 for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
727 for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
728 if (gen_device_info_subslice_available(devinfo, s, ss))
729 perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
730 }
731 }
732 }
733
734 static bool
735 init_oa_sys_vars(struct gen_perf_config *perf, const struct gen_device_info *devinfo)
736 {
737 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
738
739 if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
740 return false;
741
742 if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
743 return false;
744
745 memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
746 perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
747 perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
748 perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
749 perf->sys_vars.revision = devinfo->revision;
750 compute_topology_builtins(perf, devinfo);
751
752 return true;
753 }
754
755 typedef void (*perf_register_oa_queries_t)(struct gen_perf_config *);
756
757 static perf_register_oa_queries_t
758 get_register_queries_function(const struct gen_device_info *devinfo)
759 {
760 if (devinfo->is_haswell)
761 return gen_oa_register_queries_hsw;
762 if (devinfo->is_cherryview)
763 return gen_oa_register_queries_chv;
764 if (devinfo->is_broadwell)
765 return gen_oa_register_queries_bdw;
766 if (devinfo->is_broxton)
767 return gen_oa_register_queries_bxt;
768 if (devinfo->is_skylake) {
769 if (devinfo->gt == 2)
770 return gen_oa_register_queries_sklgt2;
771 if (devinfo->gt == 3)
772 return gen_oa_register_queries_sklgt3;
773 if (devinfo->gt == 4)
774 return gen_oa_register_queries_sklgt4;
775 }
776 if (devinfo->is_kabylake) {
777 if (devinfo->gt == 2)
778 return gen_oa_register_queries_kblgt2;
779 if (devinfo->gt == 3)
780 return gen_oa_register_queries_kblgt3;
781 }
782 if (devinfo->is_geminilake)
783 return gen_oa_register_queries_glk;
784 if (devinfo->is_coffeelake) {
785 if (devinfo->gt == 2)
786 return gen_oa_register_queries_cflgt2;
787 if (devinfo->gt == 3)
788 return gen_oa_register_queries_cflgt3;
789 }
790 if (devinfo->is_cannonlake)
791 return gen_oa_register_queries_cnl;
792 if (devinfo->gen == 11) {
793 if (devinfo->is_elkhartlake)
794 return gen_oa_register_queries_lkf;
795 return gen_oa_register_queries_icl;
796 }
797 if (devinfo->gen == 12)
798 return gen_oa_register_queries_tgl;
799
800 return NULL;
801 }
802
803 static inline void
804 add_stat_reg(struct gen_perf_query_info *query, uint32_t reg,
805 uint32_t numerator, uint32_t denominator,
806 const char *name, const char *description)
807 {
808 struct gen_perf_query_counter *counter;
809
810 assert(query->n_counters < query->max_counters);
811
812 counter = &query->counters[query->n_counters];
813 counter->name = name;
814 counter->desc = description;
815 counter->type = GEN_PERF_COUNTER_TYPE_RAW;
816 counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
817 counter->offset = sizeof(uint64_t) * query->n_counters;
818 counter->pipeline_stat.reg = reg;
819 counter->pipeline_stat.numerator = numerator;
820 counter->pipeline_stat.denominator = denominator;
821
822 query->n_counters++;
823 }
824
825 static inline void
826 add_basic_stat_reg(struct gen_perf_query_info *query,
827 uint32_t reg, const char *name)
828 {
829 add_stat_reg(query, reg, 1, 1, name, name);
830 }
831
832 static void
833 load_pipeline_statistic_metrics(struct gen_perf_config *perf_cfg,
834 const struct gen_device_info *devinfo)
835 {
836 struct gen_perf_query_info *query =
837 append_query_info(perf_cfg, MAX_STAT_COUNTERS);
838
839 query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
840 query->name = "Pipeline Statistics Registers";
841
842 add_basic_stat_reg(query, IA_VERTICES_COUNT,
843 "N vertices submitted");
844 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
845 "N primitives submitted");
846 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
847 "N vertex shader invocations");
848
849 if (devinfo->gen == 6) {
850 add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
851 "SO_PRIM_STORAGE_NEEDED",
852 "N geometry shader stream-out primitives (total)");
853 add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
854 "SO_NUM_PRIMS_WRITTEN",
855 "N geometry shader stream-out primitives (written)");
856 } else {
857 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
858 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
859 "N stream-out (stream 0) primitives (total)");
860 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
861 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
862 "N stream-out (stream 1) primitives (total)");
863 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
864 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
865 "N stream-out (stream 2) primitives (total)");
866 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
867 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
868 "N stream-out (stream 3) primitives (total)");
869 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
870 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
871 "N stream-out (stream 0) primitives (written)");
872 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
873 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
874 "N stream-out (stream 1) primitives (written)");
875 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
876 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
877 "N stream-out (stream 2) primitives (written)");
878 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
879 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
880 "N stream-out (stream 3) primitives (written)");
881 }
882
883 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
884 "N TCS shader invocations");
885 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
886 "N TES shader invocations");
887
888 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
889 "N geometry shader invocations");
890 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
891 "N geometry shader primitives emitted");
892
893 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
894 "N primitives entering clipping");
895 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
896 "N primitives leaving clipping");
897
898 if (devinfo->is_haswell || devinfo->gen == 8) {
899 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
900 "N fragment shader invocations",
901 "N fragment shader invocations");
902 } else {
903 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
904 "N fragment shader invocations");
905 }
906
907 add_basic_stat_reg(query, PS_DEPTH_COUNT,
908 "N z-pass fragments");
909
910 if (devinfo->gen >= 7) {
911 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
912 "N compute shader invocations");
913 }
914
915 query->data_size = sizeof(uint64_t) * query->n_counters;
916 }
917
918 static bool
919 load_oa_metrics(struct gen_perf_config *perf, int fd,
920 const struct gen_device_info *devinfo)
921 {
922 perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
923 bool i915_perf_oa_available = false;
924 struct stat sb;
925
926 perf->i915_query_supported = i915_query_perf_config_supported(perf, fd);
927
928 /* The existence of this sysctl parameter implies the kernel supports
929 * the i915 perf interface.
930 */
931 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
932
933 /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
934 * metrics unless running as root.
935 */
936 if (devinfo->is_haswell)
937 i915_perf_oa_available = true;
938 else {
939 uint64_t paranoid = 1;
940
941 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
942
943 if (paranoid == 0 || geteuid() == 0)
944 i915_perf_oa_available = true;
945 }
946 }
947
948 if (!i915_perf_oa_available ||
949 !oa_register ||
950 !get_sysfs_dev_dir(perf, fd) ||
951 !init_oa_sys_vars(perf, devinfo))
952 return false;
953
954 perf->oa_metrics_table =
955 _mesa_hash_table_create(perf, _mesa_key_hash_string,
956 _mesa_key_string_equal);
957
958 /* Index all the metric sets mesa knows about before looking to see what
959 * the kernel is advertising.
960 */
961 oa_register(perf);
962
963 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
964 kernel_has_dynamic_config_support(perf, fd))
965 init_oa_configs(perf, fd);
966 else
967 enumerate_sysfs_metrics(perf);
968
969 return true;
970 }
971
972 struct gen_perf_registers *
973 gen_perf_load_configuration(struct gen_perf_config *perf_cfg, int fd, const char *guid)
974 {
975 if (!perf_cfg->i915_query_supported)
976 return NULL;
977
978 struct drm_i915_perf_oa_config i915_config = { 0, };
979 if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config))
980 return NULL;
981
982 struct gen_perf_registers *config = rzalloc(NULL, struct gen_perf_registers);
983 config->n_flex_regs = i915_config.n_flex_regs;
984 config->flex_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_flex_regs);
985 config->n_mux_regs = i915_config.n_mux_regs;
986 config->mux_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_mux_regs);
987 config->n_b_counter_regs = i915_config.n_boolean_regs;
988 config->b_counter_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_b_counter_regs);
989
990 /*
991 * struct gen_perf_query_register_prog maps exactly to the tuple of
992 * (register offset, register value) returned by the i915.
993 */
994 i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);
995 i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);
996 i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);
997 if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config)) {
998 ralloc_free(config);
999 return NULL;
1000 }
1001
1002 return config;
1003 }
1004
1005 uint64_t
1006 gen_perf_store_configuration(struct gen_perf_config *perf_cfg, int fd,
1007 const struct gen_perf_registers *config,
1008 const char *guid)
1009 {
1010 if (guid)
1011 return i915_add_config(perf_cfg, fd, config, guid);
1012
1013 struct mesa_sha1 sha1_ctx;
1014 _mesa_sha1_init(&sha1_ctx);
1015
1016 if (config->flex_regs) {
1017 _mesa_sha1_update(&sha1_ctx, config->flex_regs,
1018 sizeof(config->flex_regs[0]) *
1019 config->n_flex_regs);
1020 }
1021 if (config->mux_regs) {
1022 _mesa_sha1_update(&sha1_ctx, config->mux_regs,
1023 sizeof(config->mux_regs[0]) *
1024 config->n_mux_regs);
1025 }
1026 if (config->b_counter_regs) {
1027 _mesa_sha1_update(&sha1_ctx, config->b_counter_regs,
1028 sizeof(config->b_counter_regs[0]) *
1029 config->n_b_counter_regs);
1030 }
1031
1032 uint8_t hash[20];
1033 _mesa_sha1_final(&sha1_ctx, hash);
1034
1035 char formatted_hash[41];
1036 _mesa_sha1_format(formatted_hash, hash);
1037
1038 char generated_guid[37];
1039 snprintf(generated_guid, sizeof(generated_guid),
1040 "%.8s-%.4s-%.4s-%.4s-%.12s",
1041 &formatted_hash[0], &formatted_hash[8],
1042 &formatted_hash[8 + 4], &formatted_hash[8 + 4 + 4],
1043 &formatted_hash[8 + 4 + 4 + 4]);
1044
1045 /* Check if already present. */
1046 uint64_t id;
1047 if (gen_perf_load_metric_id(perf_cfg, generated_guid, &id))
1048 return id;
1049
1050 return i915_add_config(perf_cfg, fd, config, generated_guid);
1051 }
1052
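/* A minimal usage sketch (illustrative only, not called from this file):
 * read back a metric set the kernel already advertises and re-register it
 * under a GUID derived from its register programming, by passing guid=NULL
 * to gen_perf_store_configuration().
 */
static inline uint64_t
reupload_configuration_example(struct gen_perf_config *perf_cfg, int fd,
                               const char *guid)
{
   struct gen_perf_registers *config =
      gen_perf_load_configuration(perf_cfg, fd, guid);
   uint64_t config_id = 0;

   if (config) {
      config_id = gen_perf_store_configuration(perf_cfg, fd, config, NULL);
      ralloc_free(config);
   }

   return config_id;
}
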
1053 /* Accumulate 32bits OA counters */
1054 static inline void
1055 accumulate_uint32(const uint32_t *report0,
1056 const uint32_t *report1,
1057 uint64_t *accumulator)
1058 {
1059 *accumulator += (uint32_t)(*report1 - *report0);
1060 }
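
/* E.g. (illustrative values) *report0 = 0xfffffff0 and *report1 = 0x10: the
 * unsigned 32-bit subtraction above wraps naturally and accumulates 0x20.
 */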
1061
1062 /* Accumulate 40bits OA counters */
1063 static inline void
1064 accumulate_uint40(int a_index,
1065 const uint32_t *report0,
1066 const uint32_t *report1,
1067 uint64_t *accumulator)
1068 {
1069 const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
1070 const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
1071 uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
1072 uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
1073 uint64_t value0 = report0[a_index + 4] | high0;
1074 uint64_t value1 = report1[a_index + 4] | high1;
1075 uint64_t delta;
1076
1077 if (value0 > value1)
1078 delta = (1ULL << 40) + value1 - value0;
1079 else
1080 delta = value1 - value0;
1081
1082 *accumulator += delta;
1083 }
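
/* Worked example of the wrap handling above (illustrative values): if a
 * 40-bit counter reads 0xfffffffff0 in the Begin report and 0x10 in the End
 * report, then value0 > value1 and the accumulated delta is
 * (1ULL << 40) + 0x10 - 0xfffffffff0 = 0x20.
 */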
1084
1085 static void
1086 gen8_read_report_clock_ratios(const uint32_t *report,
1087 uint64_t *slice_freq_hz,
1088 uint64_t *unslice_freq_hz)
1089 {
1090 /* The RPT_ID field of the OA reports contains a snapshot of the bits
1091 * coming from the RP_FREQ_NORMAL register. Those bits are divided up
1092 * this way:
1093 *
1094 * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
1095 * RPT_ID[10:9]: RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
1096 * RPT_ID[8:0]: RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
1097 *
1098 * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
1099 * Multiple of 33.33MHz 2xclk (16.67 MHz 1xclk)
1100 *
1101 * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
1102 * Multiple of 33.33MHz 2xclk (16.67 MHz 1xclk)
1103 */
1104
1105 uint32_t unslice_freq = report[0] & 0x1ff;
1106 uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
1107 uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
1108 uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);
1109
1110 *slice_freq_hz = slice_freq * 16666667ULL;
1111 *unslice_freq_hz = unslice_freq * 16666667ULL;
1112 }
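
/* For example (illustrative values): a squashed slice ratio of 36 and an
 * unslice ratio of 27 decode to roughly 600 MHz and 450 MHz respectively,
 * since each ratio step corresponds to ~16.67 MHz (16666667ULL above).
 */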
1113
1114 void
1115 gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
1116 const struct gen_device_info *devinfo,
1117 const uint32_t *start,
1118 const uint32_t *end)
1119 {
1120 /* Slice/Unslice frequency is only available in the OA reports when the
1121 * "Disable OA reports due to clock ratio change" field in
1122 * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
1123 * global register (see drivers/gpu/drm/i915/i915_perf.c)
1124 *
1125 * Documentation says this should be available on Gen9+ but experimentation
1126 * shows that Gen8 reports similar values, so we enable it there too.
1127 */
1128 if (devinfo->gen < 8)
1129 return;
1130
1131 gen8_read_report_clock_ratios(start,
1132 &result->slice_frequency[0],
1133 &result->unslice_frequency[0]);
1134 gen8_read_report_clock_ratios(end,
1135 &result->slice_frequency[1],
1136 &result->unslice_frequency[1]);
1137 }
1138
1139 void
1140 gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
1141 const struct gen_perf_query_info *query,
1142 const uint32_t *start,
1143 const uint32_t *end)
1144 {
1145 int i, idx = 0;
1146
1147 if (result->hw_id == OA_REPORT_INVALID_CTX_ID &&
1148 start[2] != OA_REPORT_INVALID_CTX_ID)
1149 result->hw_id = start[2];
1150 if (result->reports_accumulated == 0)
1151 result->begin_timestamp = start[1];
1152 result->reports_accumulated++;
1153
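   /* Report layouts implied by the offsets used below (in 32-bit dwords):
    *
    * A32u40_A4u32_B8_C8: dword 1 = timestamp, dword 3 = GPU clock,
    * dwords 4-35 = low 32 bits of the 40-bit A0-A31 counters (their high
    * bytes start at dword 40), dwords 36-39 = 32-bit A32-A35 counters,
    * dwords 48-63 = B0-B7 and C0-C7 counters.
    *
    * A45_B8_C8: dword 1 = timestamp, dwords 3-63 = A0-A44, B0-B7 and C0-C7.
    */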
1154 switch (query->oa_format) {
1155 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
1156 accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
1157 accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */
1158
1159 /* 32x 40bit A counters... */
1160 for (i = 0; i < 32; i++)
1161 accumulate_uint40(i, start, end, result->accumulator + idx++);
1162
1163 /* 4x 32bit A counters... */
1164 for (i = 0; i < 4; i++)
1165 accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);
1166
1167 /* 8x 32bit B counters + 8x 32bit C counters... */
1168 for (i = 0; i < 16; i++)
1169 accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
1170 break;
1171
1172 case I915_OA_FORMAT_A45_B8_C8:
1173 accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */
1174
1175 for (i = 0; i < 61; i++)
1176 accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
1177 break;
1178
1179 default:
1180 unreachable("Can't accumulate OA counters in unknown format");
1181 }
1182
1183 }
1184
1185 void
1186 gen_perf_query_result_clear(struct gen_perf_query_result *result)
1187 {
1188 memset(result, 0, sizeof(*result));
1189 result->hw_id = OA_REPORT_INVALID_CTX_ID; /* invalid */
1190 }
1191
1192 static void
1193 register_mdapi_statistic_query(struct gen_perf_config *perf_cfg,
1194 const struct gen_device_info *devinfo)
1195 {
1196 if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
1197 return;
1198
1199 struct gen_perf_query_info *query =
1200 append_query_info(perf_cfg, MAX_STAT_COUNTERS);
1201
1202 query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
1203 query->name = "Intel_Raw_Pipeline_Statistics_Query";
1204
1205 /* The order has to match mdapi_pipeline_metrics. */
1206 add_basic_stat_reg(query, IA_VERTICES_COUNT,
1207 "N vertices submitted");
1208 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1209 "N primitives submitted");
1210 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1211 "N vertex shader invocations");
1212 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1213 "N geometry shader invocations");
1214 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1215 "N geometry shader primitives emitted");
1216 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1217 "N primitives entering clipping");
1218 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1219 "N primitives leaving clipping");
1220 if (devinfo->is_haswell || devinfo->gen == 8) {
1221 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1222 "N fragment shader invocations",
1223 "N fragment shader invocations");
1224 } else {
1225 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1226 "N fragment shader invocations");
1227 }
1228 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1229 "N TCS shader invocations");
1230 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1231 "N TES shader invocations");
1232 if (devinfo->gen >= 7) {
1233 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1234 "N compute shader invocations");
1235 }
1236
1237 if (devinfo->gen >= 10) {
1238 /* Reuse existing CS invocation register until we can expose this new
1239 * one.
1240 */
1241 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1242 "Reserved1");
1243 }
1244
1245 query->data_size = sizeof(uint64_t) * query->n_counters;
1246 }
1247
1248 static void
1249 fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
1250 const char *name,
1251 uint32_t data_offset,
1252 uint32_t data_size,
1253 enum gen_perf_counter_data_type data_type)
1254 {
1255 struct gen_perf_query_counter *counter = &query->counters[query->n_counters];
1256
1257 assert(query->n_counters < query->max_counters);
1258
1259 counter->name = name;
1260 counter->desc = "Raw counter value";
1261 counter->type = GEN_PERF_COUNTER_TYPE_RAW;
1262 counter->data_type = data_type;
1263 counter->offset = data_offset;
1264
1265 query->n_counters++;
1266
1267 assert(counter->offset + gen_perf_query_counter_get_size(counter) <= query->data_size);
1268 }
1269
1270 #define MDAPI_QUERY_ADD_COUNTER(query, struct_name, field_name, type_name) \
1271 fill_mdapi_perf_query_counter(query, #field_name, \
1272 (uint8_t *) &struct_name.field_name - \
1273 (uint8_t *) &struct_name, \
1274 sizeof(struct_name.field_name), \
1275 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
1276 #define MDAPI_QUERY_ADD_ARRAY_COUNTER(ctx, query, struct_name, field_name, idx, type_name) \
1277 fill_mdapi_perf_query_counter(query, \
1278 ralloc_asprintf(ctx, "%s%i", #field_name, idx), \
1279 (uint8_t *) &struct_name.field_name[idx] - \
1280 (uint8_t *) &struct_name, \
1281 sizeof(struct_name.field_name[0]), \
1282 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
1283
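/* For instance, MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64)
 * registers a counter named "TotalTime" whose offset and size are taken from
 * the TotalTime field of the local metric_data structure, with a
 * GEN_PERF_COUNTER_DATA_TYPE_UINT64 data type.
 */
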
1284 static void
1285 register_mdapi_oa_query(const struct gen_device_info *devinfo,
1286 struct gen_perf_config *perf)
1287 {
1288 struct gen_perf_query_info *query = NULL;
1289
1290 /* MDAPI requires different structures for pretty much every generation
1291 * (right now we have definitions for gen 7 to 11).
1292 */
1293 if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
1294 return;
1295
1296 switch (devinfo->gen) {
1297 case 7: {
1298 query = append_query_info(perf, 1 + 45 + 16 + 7);
1299 query->oa_format = I915_OA_FORMAT_A45_B8_C8;
1300
1301 struct gen7_mdapi_metrics metric_data;
1302 query->data_size = sizeof(metric_data);
1303
1304 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1305 for (int i = 0; i < ARRAY_SIZE(metric_data.ACounters); i++) {
1306 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1307 metric_data, ACounters, i, UINT64);
1308 }
1309 for (int i = 0; i < ARRAY_SIZE(metric_data.NOACounters); i++) {
1310 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1311 metric_data, NOACounters, i, UINT64);
1312 }
1313 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1314 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1315 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1316 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1317 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1318 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1319 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1320 break;
1321 }
1322 case 8: {
1323 query = append_query_info(perf, 2 + 36 + 16 + 16);
1324 query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
1325
1326 struct gen8_mdapi_metrics metric_data;
1327 query->data_size = sizeof(metric_data);
1328
1329 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1330 MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
1331 for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
1332 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1333 metric_data, OaCntr, i, UINT64);
1334 }
1335 for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
1336 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1337 metric_data, NoaCntr, i, UINT64);
1338 }
1339 MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
1340 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
1341 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
1342 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
1343 MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
1344 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
1345 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
1346 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
1347 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
1348 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1349 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1350 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1351 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1352 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1353 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1354 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1355 break;
1356 }
1357 case 9:
1358 case 10:
1359 case 11: {
1360 query = append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
1361 query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
1362
1363 struct gen9_mdapi_metrics metric_data;
1364 query->data_size = sizeof(metric_data);
1365
1366 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1367 MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
1368 for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
1369 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1370 metric_data, OaCntr, i, UINT64);
1371 }
1372 for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
1373 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1374 metric_data, NoaCntr, i, UINT64);
1375 }
1376 MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
1377 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
1378 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
1379 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
1380 MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
1381 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
1382 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
1383 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
1384 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
1385 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1386 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1387 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1388 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1389 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1390 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1391 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1392 for (int i = 0; i < ARRAY_SIZE(metric_data.UserCntr); i++) {
1393 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1394 metric_data, UserCntr, i, UINT64);
1395 }
1396 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UserCntrCfgId, UINT32);
1397 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved4, UINT32);
1398 break;
1399 }
1400 default:
1401 unreachable("Unsupported gen");
1402 break;
1403 }
1404
1405 query->kind = GEN_PERF_QUERY_TYPE_RAW;
1406 query->name = "Intel_Raw_Hardware_Counters_Set_0_Query";
1407 query->guid = GEN_PERF_QUERY_GUID_MDAPI;
1408
1409 {
1410 /* Accumulation buffer offsets copied from an actual query... */
1411 const struct gen_perf_query_info *copy_query =
1412 &perf->queries[0];
1413
1414 query->gpu_time_offset = copy_query->gpu_time_offset;
1415 query->gpu_clock_offset = copy_query->gpu_clock_offset;
1416 query->a_offset = copy_query->a_offset;
1417 query->b_offset = copy_query->b_offset;
1418 query->c_offset = copy_query->c_offset;
1419 }
1420 }
1421
1422 static uint64_t
1423 get_metric_id(struct gen_perf_config *perf,
1424 const struct gen_perf_query_info *query)
1425 {
1426 /* These queries are known to never change; their config ID has been
1427 * loaded upon the first query creation. No need to look them up again.
1428 */
1429 if (query->kind == GEN_PERF_QUERY_TYPE_OA)
1430 return query->oa_metrics_set_id;
1431
1432 assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);
1433
1434 /* Raw queries can be reprogrammed by an external application/library.
1435 * When a raw query is used for the first time, its ID is set to a value !=
1436 * 0. When it stops being used, the ID returns to 0. No need to reload the
1437 * ID when it's already loaded.
1438 */
1439 if (query->oa_metrics_set_id != 0) {
1440 DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
1441 query->name, query->guid, query->oa_metrics_set_id);
1442 return query->oa_metrics_set_id;
1443 }
1444
1445 struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
1446 if (!gen_perf_load_metric_id(perf, query->guid,
1447 &raw_query->oa_metrics_set_id)) {
1448 DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
1449 raw_query->oa_metrics_set_id = 1ULL;
1450 } else {
1451 DBG("Raw query '%s'guid=%s loaded ID: %"PRIu64"\n",
1452 query->name, query->guid, query->oa_metrics_set_id);
1453 }
1454 return query->oa_metrics_set_id;
1455 }
1456
1457 static struct oa_sample_buf *
1458 get_free_sample_buf(struct gen_perf_context *perf_ctx)
1459 {
1460 struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
1461 struct oa_sample_buf *buf;
1462
1463 if (node)
1464 buf = exec_node_data(struct oa_sample_buf, node, link);
1465 else {
1466 buf = ralloc_size(perf_ctx->perf, sizeof(*buf));
1467
1468 exec_node_init(&buf->link);
1469 buf->refcount = 0;
1470 }
1471 buf->len = 0;
1472
1473 return buf;
1474 }
1475
1476 static void
1477 reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
1478 {
1479 struct exec_node *tail_node =
1480 exec_list_get_tail(&perf_ctx->sample_buffers);
1481 struct oa_sample_buf *tail_buf =
1482 exec_node_data(struct oa_sample_buf, tail_node, link);
1483
1484 /* Remove all old, unreferenced sample buffers walking forward from
1485 * the head of the list, except always leave at least one node in
1486 * the list so we always have a node to reference when we Begin
1487 * a new query.
1488 */
1489 foreach_list_typed_safe(struct oa_sample_buf, buf, link,
1490 &perf_ctx->sample_buffers)
1491 {
1492 if (buf->refcount == 0 && buf != tail_buf) {
1493 exec_node_remove(&buf->link);
1494 exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
1495 } else
1496 return;
1497 }
1498 }
1499
1500 static void
1501 free_sample_bufs(struct gen_perf_context *perf_ctx)
1502 {
1503 foreach_list_typed_safe(struct oa_sample_buf, buf, link,
1504 &perf_ctx->free_sample_buffers)
1505 ralloc_free(buf);
1506
1507 exec_list_make_empty(&perf_ctx->free_sample_buffers);
1508 }
1509
1510 /******************************************************************************/
1511
1512 /**
1513 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
1514 * pipeline statistics for the performance query object.
1515 */
1516 static void
1517 snapshot_statistics_registers(struct gen_perf_context *ctx,
1518 struct gen_perf_query_object *obj,
1519 uint32_t offset_in_bytes)
1520 {
1521 struct gen_perf_config *perf = ctx->perf;
1522 const struct gen_perf_query_info *query = obj->queryinfo;
1523 const int n_counters = query->n_counters;
1524
1525 for (int i = 0; i < n_counters; i++) {
1526 const struct gen_perf_query_counter *counter = &query->counters[i];
1527
1528 assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);
1529
1530 perf->vtbl.store_register_mem(ctx->ctx, obj->pipeline_stats.bo,
1531 counter->pipeline_stat.reg, 8,
1532 offset_in_bytes + i * sizeof(uint64_t));
1533 }
1534 }
1535
1536 static void
1537 snapshot_freq_register(struct gen_perf_context *ctx,
1538 struct gen_perf_query_object *query,
1539 uint32_t bo_offset)
1540 {
1541 struct gen_perf_config *perf = ctx->perf;
1542 const struct gen_device_info *devinfo = ctx->devinfo;
1543
1544 if (devinfo->gen == 8 && !devinfo->is_cherryview)
1545 perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo, GEN7_RPSTAT1, 4, bo_offset);
1546 else if (devinfo->gen >= 9)
1547 perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo, GEN9_RPSTAT0, 4, bo_offset);
1548 }
1549
1550 static void
1551 gen_perf_close(struct gen_perf_context *perfquery,
1552 const struct gen_perf_query_info *query)
1553 {
1554 if (perfquery->oa_stream_fd != -1) {
1555 close(perfquery->oa_stream_fd);
1556 perfquery->oa_stream_fd = -1;
1557 }
1558 if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
1559 struct gen_perf_query_info *raw_query =
1560 (struct gen_perf_query_info *) query;
1561 raw_query->oa_metrics_set_id = 0;
1562 }
1563 }
1564
1565 static bool
1566 gen_perf_open(struct gen_perf_context *perf_ctx,
1567 int metrics_set_id,
1568 int report_format,
1569 int period_exponent,
1570 int drm_fd,
1571 uint32_t ctx_id)
1572 {
1573 uint64_t properties[] = {
1574 /* Single context sampling */
1575 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
1576
1577 /* Include OA reports in samples */
1578 DRM_I915_PERF_PROP_SAMPLE_OA, true,
1579
1580 /* OA unit configuration */
1581 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
1582 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
1583 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
1584 };
1585 struct drm_i915_perf_open_param param = {
1586 .flags = I915_PERF_FLAG_FD_CLOEXEC |
1587 I915_PERF_FLAG_FD_NONBLOCK |
1588 I915_PERF_FLAG_DISABLED,
1589 .num_properties = ARRAY_SIZE(properties) / 2,
1590 .properties_ptr = (uintptr_t) properties,
1591 };
1592 int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
1593 if (fd == -1) {
1594 DBG("Error opening gen perf OA stream: %m\n");
1595 return false;
1596 }
1597
1598 perf_ctx->oa_stream_fd = fd;
1599
1600 perf_ctx->current_oa_metrics_set_id = metrics_set_id;
1601 perf_ctx->current_oa_format = report_format;
1602
1603 return true;
1604 }
1605
1606 static bool
1607 inc_n_users(struct gen_perf_context *perf_ctx)
1608 {
1609 if (perf_ctx->n_oa_users == 0 &&
1610 gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
1611 {
1612 return false;
1613 }
1614 ++perf_ctx->n_oa_users;
1615
1616 return true;
1617 }
1618
1619 static void
1620 dec_n_users(struct gen_perf_context *perf_ctx)
1621 {
1622 /* Disabling the i915 perf stream will effectively disable the OA
1623 * counters. Note it's important to be sure there are no outstanding
1624 * MI_RPC commands at this point since they could stall the CS
1625 * indefinitely once OACONTROL is disabled.
1626 */
1627 --perf_ctx->n_oa_users;
1628 if (perf_ctx->n_oa_users == 0 &&
1629 gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
1630 {
1631 DBG("WARNING: Error disabling gen perf stream: %m\n");
1632 }
1633 }
1634
1635 void
1636 gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
1637 const struct gen_device_info *devinfo,
1638 int drm_fd)
1639 {
1640 load_pipeline_statistic_metrics(perf_cfg, devinfo);
1641 register_mdapi_statistic_query(perf_cfg, devinfo);
1642 if (load_oa_metrics(perf_cfg, drm_fd, devinfo))
1643 register_mdapi_oa_query(devinfo, perf_cfg);
1644 }
1645
1646 void
1647 gen_perf_init_context(struct gen_perf_context *perf_ctx,
1648 struct gen_perf_config *perf_cfg,
1649 void * ctx, /* driver context (eg, brw_context) */
1650 void * bufmgr, /* eg brw_bufmgr */
1651 const struct gen_device_info *devinfo,
1652 uint32_t hw_ctx,
1653 int drm_fd)
1654 {
1655 perf_ctx->perf = perf_cfg;
1656 perf_ctx->ctx = ctx;
1657 perf_ctx->bufmgr = bufmgr;
1658 perf_ctx->drm_fd = drm_fd;
1659 perf_ctx->hw_ctx = hw_ctx;
1660 perf_ctx->devinfo = devinfo;
1661
1662 perf_ctx->unaccumulated =
1663 ralloc_array(ctx, struct gen_perf_query_object *, 2);
1664 perf_ctx->unaccumulated_elements = 0;
1665 perf_ctx->unaccumulated_array_size = 2;
1666
1667 exec_list_make_empty(&perf_ctx->sample_buffers);
1668 exec_list_make_empty(&perf_ctx->free_sample_buffers);
1669
1670 /* It's convenient to guarantee that this linked list of sample
1671 * buffers is never empty, so we add an empty head buffer; that way,
1672 * when we Begin an OA query, we can always take a reference on a
1673 * buffer in this list.
1674 */
1675 struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
1676 exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);
1677
1678 perf_ctx->oa_stream_fd = -1;
1679 perf_ctx->next_query_start_report_id = 1000;
1680 }
1681
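/* Typical driver usage of the API above (an illustrative sketch: error
 * handling is omitted, the driver-side objects are placeholders, and
 * gen_perf_end_query() is the counterpart declared in gen_perf.h):
 *
 *    struct gen_perf_context *perf_ctx = gen_perf_new_context(parent);
 *    gen_perf_init_metrics(perf_cfg, devinfo, drm_fd);
 *    gen_perf_init_context(perf_ctx, perf_cfg, driver_ctx, bufmgr, devinfo,
 *                          hw_ctx, drm_fd);
 *
 *    struct gen_perf_query_object *q = gen_perf_new_query(perf_ctx, 0);
 *    if (gen_perf_begin_query(perf_ctx, q)) {
 *       ... emit the work to measure ...
 *       gen_perf_end_query(perf_ctx, q);
 *    }
 */
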
1682 /**
1683 * Add a query to the global list of "unaccumulated queries."
1684 *
1685 * Queries are tracked here until all the associated OA reports have
1686 * been accumulated via accumulate_oa_reports() after the end
1687 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
1688 */
1689 static void
1690 add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
1691 struct gen_perf_query_object *obj)
1692 {
1693 if (perf_ctx->unaccumulated_elements >=
1694 perf_ctx->unaccumulated_array_size)
1695 {
1696 perf_ctx->unaccumulated_array_size *= 1.5;
1697 perf_ctx->unaccumulated =
1698 reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
1699 struct gen_perf_query_object *,
1700 perf_ctx->unaccumulated_array_size);
1701 }
1702
1703 perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
1704 }
1705
1706 bool
1707 gen_perf_begin_query(struct gen_perf_context *perf_ctx,
1708 struct gen_perf_query_object *query)
1709 {
1710 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1711 const struct gen_perf_query_info *queryinfo = query->queryinfo;
1712
1713 /* XXX: We have to consider that the command parser unit that parses batch
1714 * buffer commands and is used to capture begin/end counter snapshots isn't
1715 * implicitly synchronized with what's currently running across other GPU
1716 * units (such as the EUs running shaders) that the performance counters are
1717 * associated with.
1718 *
1719 * The intention of performance queries is to measure the work associated
1720 * with commands between the begin/end delimiters and so for that to be the
1721 * case we need to explicitly synchronize the parsing of commands to capture
1722 * Begin/End counter snapshots with what's running across other parts of the
1723 * GPU.
1724 *
1725 * When the command parser reaches a Begin marker it effectively needs to
1726 * drain everything currently running on the GPU until the hardware is idle
1727 * before capturing the first snapshot of counters - otherwise the results
1728 * would also be measuring the effects of earlier commands.
1729 *
1730 * When the command parser reaches an End marker it needs to stall until
1731 * everything currently running on the GPU has finished before capturing the
1732 * end snapshot - otherwise the results won't be a complete representation
1733 * of the work.
1734 *
1735 * To achieve this, we stall the pipeline at the pixel scoreboard (preventing
1736 * any additional work from being processed by the pipeline until all pixels
1737 * of the previous draw have been completed).
1738 *
1739 * N.B. The final results are based on deltas of counters between (inside)
1740 * Begin/End markers so even though the total wall clock time of the
1741 * workload is stretched by larger pipeline bubbles the bubbles themselves
1742 * are generally invisible to the query results. Whether that's a good or a
1743 * bad thing depends on the use case. For a lower real-time impact while
1744 * capturing metrics, periodic sampling may be a better choice than
1745 * INTEL_performance_query.
1746 *
1747 *
1748 * This is our Begin synchronization point to drain current work on the
1749 * GPU before we capture our first counter snapshot...
1750 */
1751 perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
1752
1753 switch (queryinfo->kind) {
1754 case GEN_PERF_QUERY_TYPE_OA:
1755 case GEN_PERF_QUERY_TYPE_RAW: {
1756
1757 /* Opening an i915 perf stream implies exclusive access to the OA unit
1758 * which will generate counter reports for a specific counter set with a
1759 * specific layout/format so we can't begin any OA based queries that
1760 * require a different counter set or format unless we get an opportunity
1761 * to close the stream and open a new one...
1762 */
1763 uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);
1764
1765 if (perf_ctx->oa_stream_fd != -1 &&
1766 perf_ctx->current_oa_metrics_set_id != metric_id) {
1767
1768 if (perf_ctx->n_oa_users != 0) {
1769 DBG("WARNING: Begin failed, already using perf config=%"PRIu64"/%"PRIu64"\n",
1770 perf_ctx->current_oa_metrics_set_id, metric_id);
1771 return false;
1772 } else
1773 gen_perf_close(perf_ctx, queryinfo);
1774 }
1775
1776 /* If the OA counters aren't already on, enable them. */
1777 if (perf_ctx->oa_stream_fd == -1) {
1778 const struct gen_device_info *devinfo = perf_ctx->devinfo;
1779
1780 /* The period_exponent gives a sampling period as follows:
1781 * sample_period = timestamp_period * 2^(period_exponent + 1)
1782 *
1783 * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1784 * ~83ns (GEN8/9).
1785 *
1786 * The counter overflow period is derived from the EuActive counter
1787 * which reads a counter that increments by the number of clock
1788 * cycles multiplied by the number of EUs. It can be calculated as:
1789 *
1790 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1791 *
1792 * (E.g. 40 EUs @ 1GHz = ~53ms)
1793 *
1794 * We select a sampling period shorter than that overflow period to
1795 * ensure we cannot see more than 1 counter overflow between samples,
1796 * otherwise we could lose information.
1797 */
1798
1799 int a_counter_in_bits = 32;
1800 if (devinfo->gen >= 8)
1801 a_counter_in_bits = 40;
1802
1803 uint64_t overflow_period = pow(2, a_counter_in_bits) / (perf_cfg->sys_vars.n_eus *
1804 /* drop 1GHz freq to have units in nanoseconds */
1805 2);
1806
1807 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1808 overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);
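         /* Worked example with the numbers above: 40 bit A counters (gen8+)
          * and n_eus = 40 give 2^40 / (40 * 2) = ~13.7e9 ns, i.e. ~13.7s,
          * while the 32 bit counters on HSW give 2^32 / (40 * 2) = ~53.7e6 ns,
          * i.e. the ~53ms quoted in the comment above.
          */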
1809
1810 int period_exponent = 0;
1811 uint64_t prev_sample_period, next_sample_period;
1812 for (int e = 0; e < 30; e++) {
1813 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1814 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1815
1816 /* Take the largest sampling period that is still lower than the
1817 * overflow period.
1818 */
1819 if (prev_sample_period < overflow_period &&
1820 next_sample_period > overflow_period)
1821 period_exponent = e + 1;
1822 }
1823
1824 if (period_exponent == 0) {
1825 DBG("WARNING: unable to find a sampling exponent\n");
1826 return false;
1827 }
1828
1829 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1830 prev_sample_period / 1000000ul);
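         /* Illustrative example following the ~53ms figure above: on HSW,
          * with 80ns timestamp ticks, the loop settles on period_exponent = 19,
          * i.e. a sampling period of 2^19 * 80ns = ~41.9ms, the largest period
          * still below the ~53.7ms overflow period.
          */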
1831
1832 if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
1833 period_exponent, perf_ctx->drm_fd,
1834 perf_ctx->hw_ctx))
1835 return false;
1836 } else {
1837 assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
1838 perf_ctx->current_oa_format == queryinfo->oa_format);
1839 }
1840
1841 if (!inc_n_users(perf_ctx)) {
1842 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1843 return false;
1844 }
1845
1846 if (query->oa.bo) {
1847 perf_cfg->vtbl.bo_unreference(query->oa.bo);
1848 query->oa.bo = NULL;
1849 }
1850
1851 query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
1852 "perf. query OA MI_RPC bo",
1853 MI_RPC_BO_SIZE);
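      /* Layout of the 4KB MI_RPC BO (see the offsets defined at the top of
       * this file): the begin OA report lands at offset 0, the end report at
       * MI_RPC_BO_END_OFFSET_BYTES (2048), and the begin/end GT frequency
       * snapshots at MI_FREQ_START/END_OFFSET_BYTES (3072/3076). The end
       * MI_RPC report uses begin_report_id + 1.
       */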
1854 #ifdef DEBUG
1855 /* Pre-filling the BO helps debug whether writes landed. */
1856 void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
1857 memset(map, 0x80, MI_RPC_BO_SIZE);
1858 perf_cfg->vtbl.bo_unmap(query->oa.bo);
1859 #endif
1860
1861 query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
1862 perf_ctx->next_query_start_report_id += 2;
1863
1864 /* Take a starting OA counter snapshot. */
1865 perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
1866 query->oa.begin_report_id);
1867 snapshot_freq_register(perf_ctx, query, MI_FREQ_START_OFFSET_BYTES);
1868
1869 ++perf_ctx->n_active_oa_queries;
1870
1871 /* No already-buffered samples can possibly be associated with this query
1872 * so create a marker within the list of sample buffers enabling us to
1873 * easily ignore earlier samples when processing this query after
1874 * completion.
1875 */
1876 assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
1877 query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);
1878
1879 struct oa_sample_buf *buf =
1880 exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
1881
1882 /* This reference will ensure that future/following sample
1883 * buffers (that may relate to this query) can't be freed until
1884 * this reference count drops to zero.
1885 */
1886 buf->refcount++;
1887
1888 gen_perf_query_result_clear(&query->oa.result);
1889 query->oa.results_accumulated = false;
1890
1891 add_to_unaccumulated_query_list(perf_ctx, query);
1892 break;
1893 }
1894
1895 case GEN_PERF_QUERY_TYPE_PIPELINE:
1896 if (query->pipeline_stats.bo) {
1897 perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
1898 query->pipeline_stats.bo = NULL;
1899 }
1900
1901 query->pipeline_stats.bo =
1902 perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
1903 "perf. query pipeline stats bo",
1904 STATS_BO_SIZE);
1905
1906 /* Take starting snapshots. */
1907 snapshot_statistics_registers(perf_ctx, query, 0);
1908
1909 ++perf_ctx->n_active_pipeline_stats_queries;
1910 break;
1911
1912 default:
1913 unreachable("Unknown query type");
1914 break;
1915 }
1916
1917 return true;
1918 }
1919
1920 void
1921 gen_perf_end_query(struct gen_perf_context *perf_ctx,
1922 struct gen_perf_query_object *query)
1923 {
1924 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1925
1926 /* Ensure that the work associated with the queried commands will have
1927 * finished before taking our query end counter readings.
1928 *
1929 * For more details see comment in brw_begin_perf_query for
1930 * corresponding flush.
1931 */
1932 perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
1933
1934 switch (query->queryinfo->kind) {
1935 case GEN_PERF_QUERY_TYPE_OA:
1936 case GEN_PERF_QUERY_TYPE_RAW:
1937
1938 /* NB: It's possible that the query will have already been marked
1939 * as 'accumulated' if an error was seen while reading samples
1940 * from perf. In this case we mustn't try to emit a closing
1941 * MI_RPC command in case the OA unit has already been disabled.
1942 */
1943 if (!query->oa.results_accumulated) {
1944 /* Take an ending OA counter snapshot. */
1945 snapshot_freq_register(perf_ctx, query, MI_FREQ_END_OFFSET_BYTES);
1946 perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
1947 MI_RPC_BO_END_OFFSET_BYTES,
1948 query->oa.begin_report_id + 1);
1949 }
1950
1951 --perf_ctx->n_active_oa_queries;
1952
1953 /* NB: even though the query has now ended, it can't be accumulated
1954 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1955 * to query->oa.bo
1956 */
1957 break;
1958
1959 case GEN_PERF_QUERY_TYPE_PIPELINE:
1960 snapshot_statistics_registers(perf_ctx, query,
1961 STATS_BO_END_OFFSET_BYTES);
1962 --perf_ctx->n_active_pipeline_stats_queries;
1963 break;
1964
1965 default:
1966 unreachable("Unknown query type");
1967 break;
1968 }
1969 }
1970
1971 enum OaReadStatus {
1972 OA_READ_STATUS_ERROR,
1973 OA_READ_STATUS_UNFINISHED,
1974 OA_READ_STATUS_FINISHED,
1975 };
1976
1977 static enum OaReadStatus
1978 read_oa_samples_until(struct gen_perf_context *perf_ctx,
1979 uint32_t start_timestamp,
1980 uint32_t end_timestamp)
1981 {
1982 struct exec_node *tail_node =
1983 exec_list_get_tail(&perf_ctx->sample_buffers);
1984 struct oa_sample_buf *tail_buf =
1985 exec_node_data(struct oa_sample_buf, tail_node, link);
1986 uint32_t last_timestamp =
1987 tail_buf->len == 0 ? start_timestamp : tail_buf->last_timestamp;
1988
1989 while (1) {
1990 struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
1991 uint32_t offset;
1992 int len;
1993
1994 while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
1995 sizeof(buf->buf))) < 0 && errno == EINTR)
1996 ;
1997
1998 if (len <= 0) {
1999 exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
2000
2001 if (len < 0) {
2002 if (errno == EAGAIN) {
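            /* Unsigned 32 bit subtraction keeps these comparisons correct
             * across timestamp wraparound: the INT32_MAX test checks that
             * last_timestamp is logically ahead of start_timestamp, and the
             * second comparison checks whether we have read at least up to
             * the end timestamp.
             */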
2003 return ((last_timestamp - start_timestamp) < INT32_MAX &&
2004 (last_timestamp - start_timestamp) >=
2005 (end_timestamp - start_timestamp)) ?
2006 OA_READ_STATUS_FINISHED :
2007 OA_READ_STATUS_UNFINISHED;
2008 } else {
2009 DBG("Error reading i915 perf samples: %m\n");
2010 }
2011 } else
2012 DBG("Spurious EOF reading i915 perf samples\n");
2013
2014 return OA_READ_STATUS_ERROR;
2015 }
2016
2017 buf->len = len;
2018 exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);
2019
2020 /* Go through the reports and update the last timestamp. */
2021 offset = 0;
2022 while (offset < buf->len) {
2023 const struct drm_i915_perf_record_header *header =
2024 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
2025 uint32_t *report = (uint32_t *) (header + 1);
2026
2027 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
2028 last_timestamp = report[1];
2029
2030 offset += header->size;
2031 }
2032
2033 buf->last_timestamp = last_timestamp;
2034 }
2035
2036 unreachable("not reached");
2037 return OA_READ_STATUS_ERROR;
2038 }
2039
2040 /**
2041 * Try to read all the reports until either the delimiting timestamp
2042 * or an error arises.
2043 */
2044 static bool
2045 read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
2046 struct gen_perf_query_object *query,
2047 void *current_batch)
2048 {
2049 uint32_t *start;
2050 uint32_t *last;
2051 uint32_t *end;
2052 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2053
2054 /* We need the MI_REPORT_PERF_COUNT to land before we can start
2055 * accumulating. */
2056 assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
2057 !perf_cfg->vtbl.bo_busy(query->oa.bo));
2058
2059 /* Map the BO once here; it is unmapped in gen_perf_get_query_data()
2060 * once the results have been accumulated. */
2061 if (query->oa.map == NULL)
2062 query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);
2063
2064 start = last = query->oa.map;
2065 end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2066
2067 if (start[0] != query->oa.begin_report_id) {
2068 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
2069 return true;
2070 }
2071 if (end[0] != (query->oa.begin_report_id + 1)) {
2072 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
2073 return true;
2074 }
2075
2076 /* Read the reports until the end timestamp. */
2077 switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
2078 case OA_READ_STATUS_ERROR:
2079 /* Fallthrough and let accumulate_oa_reports() deal with the
2080 * error. */
2081 case OA_READ_STATUS_FINISHED:
2082 return true;
2083 case OA_READ_STATUS_UNFINISHED:
2084 return false;
2085 }
2086
2087 unreachable("invalid read status");
2088 return false;
2089 }
2090
2091 void
2092 gen_perf_wait_query(struct gen_perf_context *perf_ctx,
2093 struct gen_perf_query_object *query,
2094 void *current_batch)
2095 {
2096 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2097 struct brw_bo *bo = NULL;
2098
2099 switch (query->queryinfo->kind) {
2100 case GEN_PERF_QUERY_TYPE_OA:
2101 case GEN_PERF_QUERY_TYPE_RAW:
2102 bo = query->oa.bo;
2103 break;
2104
2105 case GEN_PERF_QUERY_TYPE_PIPELINE:
2106 bo = query->pipeline_stats.bo;
2107 break;
2108
2109 default:
2110 unreachable("Unknown query type");
2111 break;
2112 }
2113
2114 if (bo == NULL)
2115 return;
2116
2117 /* If the current batch references our results bo then we need to
2118 * flush first...
2119 */
2120 if (perf_cfg->vtbl.batch_references(current_batch, bo))
2121 perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
2122
2123 perf_cfg->vtbl.bo_wait_rendering(bo);
2124
2125 /* Due to a race condition between the OA unit signaling report
2126 * availability and the report actually being written into memory,
2127 * we need to wait for all the reports to come in before we can
2128 * read them.
2129 */
2130 if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
2131 query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
2132 while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
2133 ;
2134 }
2135 }
2136
2137 bool
2138 gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
2139 struct gen_perf_query_object *query,
2140 void *current_batch)
2141 {
2142 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2143
2144 switch (query->queryinfo->kind) {
2145 case GEN_PERF_QUERY_TYPE_OA:
2146 case GEN_PERF_QUERY_TYPE_RAW:
2147 return (query->oa.results_accumulated ||
2148 (query->oa.bo &&
2149 !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
2150 !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
2151 read_oa_samples_for_query(perf_ctx, query, current_batch)));
2152 case GEN_PERF_QUERY_TYPE_PIPELINE:
2153 return (query->pipeline_stats.bo &&
2154 !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
2155 !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));
2156
2157 default:
2158 unreachable("Unknown query type");
2159 break;
2160 }
2161
2162 return false;
2163 }
2164
2165 /**
2166 * Remove a query from the global list of unaccumulated queries once
2167 * the OA reports associated with the query have been accumulated via
2168 * accumulate_oa_reports(), or when discarding unwanted query
2169 * results.
2170 */
2171 static void
2172 drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
2173 struct gen_perf_query_object *query)
2174 {
2175 for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
2176 if (perf_ctx->unaccumulated[i] == query) {
2177 int last_elt = --perf_ctx->unaccumulated_elements;
2178
2179 if (i == last_elt)
2180 perf_ctx->unaccumulated[i] = NULL;
2181 else {
2182 perf_ctx->unaccumulated[i] =
2183 perf_ctx->unaccumulated[last_elt];
2184 }
2185
2186 break;
2187 }
2188 }
2189
2190 /* Drop our samples_head reference so that associated periodic
2191 * sample data buffers can potentially be reaped if they aren't
2192 * referenced by any other queries...
2193 */
2194
2195 struct oa_sample_buf *buf =
2196 exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
2197
2198 assert(buf->refcount > 0);
2199 buf->refcount--;
2200
2201 query->oa.samples_head = NULL;
2202
2203 reap_old_sample_buffers(perf_ctx);
2204 }
2205
2206 /* In general, if we see anything spurious while accumulating results,
2207 * we don't try to continue accumulating the current query; instead we
2208 * scrap anything outstanding and then hope for the best with new
2209 * queries.
2210 */
2211 static void
2212 discard_all_queries(struct gen_perf_context *perf_ctx)
2213 {
2214 while (perf_ctx->unaccumulated_elements) {
2215 struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];
2216
2217 query->oa.results_accumulated = true;
2218 drop_from_unaccumulated_query_list(perf_ctx, query);
2219
2220 dec_n_users(perf_ctx);
2221 }
2222 }
2223
2224 /* Looks for the validity bit of context ID (dword 2) of an OA report. */
2225 static bool
2226 oa_report_ctx_id_valid(const struct gen_device_info *devinfo,
2227 const uint32_t *report)
2228 {
2229 assert(devinfo->gen >= 8);
2230 if (devinfo->gen == 8)
2231 return (report[0] & (1 << 25)) != 0;
2232 return (report[0] & (1 << 16)) != 0;
2233 }
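/* For reference, the OA report formats used here share a common header
 * layout: dword 0 carries the report reason/flags (including the context ID
 * valid bit tested above), dword 1 the 32 bit GPU timestamp and dword 2 the
 * context ID, which is why accumulate_oa_reports() below reads report[1] and
 * report[2].
 */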
2234
2235 /**
2236 * Accumulate raw OA counter values based on deltas between pairs of
2237 * OA reports.
2238 *
2239 * Accumulation starts from the first report captured via
2240 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
2241 * last MI_RPC report requested by brw_end_perf_query(). Between these
2242 * two reports there may also be some number of periodically sampled OA
2243 * reports collected via the i915 perf interface - depending on the
2244 * duration of the query.
2245 *
2246 * These periodic snapshots help to ensure we handle counter overflow
2247 * correctly by being frequent enough to ensure we don't miss multiple
2248 * overflows of a counter between snapshots. For Gen8+ the i915 perf
2249 * snapshots provide the extra context-switch reports that let us
2250 * subtract out the progress of counters associated with other
2251 * contexts running on the system.
2252 */
2253 static void
2254 accumulate_oa_reports(struct gen_perf_context *perf_ctx,
2255 struct gen_perf_query_object *query)
2256 {
2257 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2258 uint32_t *start;
2259 uint32_t *last;
2260 uint32_t *end;
2261 struct exec_node *first_samples_node;
2262 bool last_report_ctx_match = true;
2263 int out_duration = 0;
2264
2265 assert(query->oa.map != NULL);
2266
2267 start = last = query->oa.map;
2268 end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2269
2270 if (start[0] != query->oa.begin_report_id) {
2271 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
2272 goto error;
2273 }
2274 if (end[0] != (query->oa.begin_report_id + 1)) {
2275 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
2276 goto error;
2277 }
2278
2279 /* On Gen12+ OA reports are sourced from per context counters, so we don't
2280 * ever have to look at the global OA buffer. Yey \o/
2281 */
2282 if (perf_ctx->devinfo->gen >= 12) {
2283 last = start;
2284 goto end;
2285 }
2286
2287 /* See if we have any periodic reports to accumulate too... */
2288
2289 /* N.B. The oa.samples_head was set when the query began and
2290 * pointed to the tail of the perf_ctx->sample_buffers list at
2291 * the time the query started. Since the buffer existed before the
2292 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
2293 * that no data in this particular node's buffer can possibly be
2294 * associated with the query - so skip ahead one...
2295 */
2296 first_samples_node = query->oa.samples_head->next;
2297
2298 foreach_list_typed_from(struct oa_sample_buf, buf, link,
2299 &perf_ctx->sample_buffers,
2300 first_samples_node)
2301 {
2302 int offset = 0;
2303
2304 while (offset < buf->len) {
2305 const struct drm_i915_perf_record_header *header =
2306 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
2307
2308 assert(header->size != 0);
2309 assert(header->size <= buf->len);
2310
2311 offset += header->size;
2312
2313 switch (header->type) {
2314 case DRM_I915_PERF_RECORD_SAMPLE: {
2315 uint32_t *report = (uint32_t *)(header + 1);
2316 bool report_ctx_match = true;
2317 bool add = true;
2318
2319 /* Ignore reports that come before the start marker.
2320 * (Note: takes care to allow overflow of 32bit timestamps)
2321 */
2322 if (gen_device_info_timebase_scale(devinfo,
2323 report[1] - start[1]) > 5000000000) {
2324 continue;
2325 }
2326
2327 /* Ignore reports that come after the end marker.
2328 * (Note: takes care to allow overflow of 32bit timestamps)
2329 */
2330 if (gen_device_info_timebase_scale(devinfo,
2331 report[1] - end[1]) <= 5000000000) {
2332 goto end;
2333 }
2334
2335 /* For Gen8+ since the counters continue while other
2336 * contexts are running we need to discount any unrelated
2337 * deltas. The hardware automatically generates a report
2338 * on context switch which gives us a new reference point
2339 * to continue adding deltas from.
2340 *
2341 * For Haswell we can rely on the HW to stop the progress
2342 * of OA counters while any other context is active.
2343 */
2344 if (devinfo->gen >= 8) {
2345 /* Consider that the current report matches our context only if
2346 * the report says the report ID is valid.
2347 */
2348 report_ctx_match = oa_report_ctx_id_valid(devinfo, report) &&
2349 report[2] == start[2];
2350 if (report_ctx_match)
2351 out_duration = 0;
2352 else
2353 out_duration++;
2354
2355 /* Only add the delta between <last, report> if the last report
2356 * was clearly identified as our context, or if we have at most
2357 * 1 report without a matching ID.
2358 *
2359 * The OA unit will sometimes label reports with an invalid
2360 * context ID when i915 rewrites the execlist submit register
2361 * with the same context as the one currently running. This
2362 * happens when i915 wants to notify the HW of a ringbuffer tail
2363 * register update. We have to consider this report as part of
2364 * our context as the 3d pipeline behind the OACS unit is still
2365 * processing the operations started at the previous execlist
2366 * submission.
2367 */
2368 add = last_report_ctx_match && out_duration < 2;
2369 }
2370
2371 if (add) {
2372 gen_perf_query_result_accumulate(&query->oa.result,
2373 query->queryinfo,
2374 last, report);
2375 } else {
2376 /* We're not adding the delta because we've identified it's not
2377 * for the context we filter for. We can consider that the
2378 * query was split.
2379 */
2380 query->oa.result.query_disjoint = true;
2381 }
2382
2383 last = report;
2384 last_report_ctx_match = report_ctx_match;
2385
2386 break;
2387 }
2388
2389 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
2390 DBG("i915 perf: OA error: all reports lost\n");
2391 goto error;
2392 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
2393 DBG("i915 perf: OA report lost\n");
2394 break;
2395 }
2396 }
2397 }
2398
2399 end:
2400
2401 gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
2402 last, end);
2403
2404 query->oa.results_accumulated = true;
2405 drop_from_unaccumulated_query_list(perf_ctx, query);
2406 dec_n_users(perf_ctx);
2407
2408 return;
2409
2410 error:
2411
2412 discard_all_queries(perf_ctx);
2413 }
2414
2415 void
2416 gen_perf_delete_query(struct gen_perf_context *perf_ctx,
2417 struct gen_perf_query_object *query)
2418 {
2419 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2420
2421 /* We can assume that the frontend waits for a query to complete
2422 * before ever calling into here, so we don't have to worry about
2423 * deleting an in-flight query object.
2424 */
2425 switch (query->queryinfo->kind) {
2426 case GEN_PERF_QUERY_TYPE_OA:
2427 case GEN_PERF_QUERY_TYPE_RAW:
2428 if (query->oa.bo) {
2429 if (!query->oa.results_accumulated) {
2430 drop_from_unaccumulated_query_list(perf_ctx, query);
2431 dec_n_users(perf_ctx);
2432 }
2433
2434 perf_cfg->vtbl.bo_unreference(query->oa.bo);
2435 query->oa.bo = NULL;
2436 }
2437
2438 query->oa.results_accumulated = false;
2439 break;
2440
2441 case GEN_PERF_QUERY_TYPE_PIPELINE:
2442 if (query->pipeline_stats.bo) {
2443 perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
2444 query->pipeline_stats.bo = NULL;
2445 }
2446 break;
2447
2448 default:
2449 unreachable("Unknown query type");
2450 break;
2451 }
2452
2453 /* As an indication that the INTEL_performance_query extension is no
2454 * longer in use, it's a good time to free our cache of sample
2455 * buffers and close any current i915-perf stream.
2456 */
2457 if (--perf_ctx->n_query_instances == 0) {
2458 free_sample_bufs(perf_ctx);
2459 gen_perf_close(perf_ctx, query->queryinfo);
2460 }
2461
2462 free(query);
2463 }
2464
2465 #define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
2466
2467 static void
2468 read_gt_frequency(struct gen_perf_context *perf_ctx,
2469 struct gen_perf_query_object *obj)
2470 {
2471 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2472 uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
2473 end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
2474
2475 switch (devinfo->gen) {
2476 case 7:
2477 case 8:
2478 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
2479 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
2480 break;
2481 case 9:
2482 case 10:
2483 case 11:
2484 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
2485 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
2486 break;
2487 default:
2488 unreachable("unexpected gen");
2489 }
2490
2491 /* Put the numbers into Hz. */
2492 obj->oa.gt_frequency[0] *= 1000000ULL;
2493 obj->oa.gt_frequency[1] *= 1000000ULL;
2494 }
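/* Illustrative decode (hypothetical register value): on gen9+ a raw
 * frequency field of 72 gives 72 * 50 / 3 = 1200, i.e. 1.2GHz once scaled
 * into Hz above; gen7/8 encode the frequency directly in 50MHz units.
 */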
2495
2496 static int
2497 get_oa_counter_data(struct gen_perf_context *perf_ctx,
2498 struct gen_perf_query_object *query,
2499 size_t data_size,
2500 uint8_t *data)
2501 {
2502 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2503 const struct gen_perf_query_info *queryinfo = query->queryinfo;
2504 int n_counters = queryinfo->n_counters;
2505 int written = 0;
2506
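   /* Counters are written into the caller's buffer at their declared
    * offsets; `written` ends up just past the last counter that produced
    * data, which the caller reports back as bytes_written.
    */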
2507 for (int i = 0; i < n_counters; i++) {
2508 const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
2509 uint64_t *out_uint64;
2510 float *out_float;
2511 size_t counter_size = gen_perf_query_counter_get_size(counter);
2512
2513 if (counter_size) {
2514 switch (counter->data_type) {
2515 case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
2516 out_uint64 = (uint64_t *)(data + counter->offset);
2517 *out_uint64 =
2518 counter->oa_counter_read_uint64(perf_cfg, queryinfo,
2519 query->oa.result.accumulator);
2520 break;
2521 case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
2522 out_float = (float *)(data + counter->offset);
2523 *out_float =
2524 counter->oa_counter_read_float(perf_cfg, queryinfo,
2525 query->oa.result.accumulator);
2526 break;
2527 default:
2528 /* So far we aren't using uint32, double or bool32... */
2529 unreachable("unexpected counter data type");
2530 }
2531 written = counter->offset + counter_size;
2532 }
2533 }
2534
2535 return written;
2536 }
2537
2538 static int
2539 get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
2540 struct gen_perf_query_object *query,
2541 size_t data_size,
2542 uint8_t *data)
2543
2544 {
2545 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2546 const struct gen_perf_query_info *queryinfo = query->queryinfo;
2547 int n_counters = queryinfo->n_counters;
2548 uint8_t *p = data;
2549
2550 uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
2551 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
2552
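   /* Each pipeline statistic is the delta between its end and begin
    * snapshots, optionally rescaled by the counter's fixed
    * numerator/denominator ratio (e.g. a hypothetical 1/4 ratio would turn
    * a raw delta of 1000 into 250) before being stored as a uint64.
    */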
2553 for (int i = 0; i < n_counters; i++) {
2554 const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
2555 uint64_t value = end[i] - start[i];
2556
2557 if (counter->pipeline_stat.numerator !=
2558 counter->pipeline_stat.denominator) {
2559 value *= counter->pipeline_stat.numerator;
2560 value /= counter->pipeline_stat.denominator;
2561 }
2562
2563 *((uint64_t *)p) = value;
2564 p += 8;
2565 }
2566
2567 perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);
2568
2569 return p - data;
2570 }
2571
2572 void
2573 gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
2574 struct gen_perf_query_object *query,
2575 int data_size,
2576 unsigned *data,
2577 unsigned *bytes_written)
2578 {
2579 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2580 int written = 0;
2581
2582 switch (query->queryinfo->kind) {
2583 case GEN_PERF_QUERY_TYPE_OA:
2584 case GEN_PERF_QUERY_TYPE_RAW:
2585 if (!query->oa.results_accumulated) {
2586 read_gt_frequency(perf_ctx, query);
2587 uint32_t *begin_report = query->oa.map;
2588 uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2589 gen_perf_query_result_read_frequencies(&query->oa.result,
2590 perf_ctx->devinfo,
2591 begin_report,
2592 end_report);
2593 accumulate_oa_reports(perf_ctx, query);
2594 assert(query->oa.results_accumulated);
2595
2596 perf_cfg->vtbl.bo_unmap(query->oa.bo);
2597 query->oa.map = NULL;
2598 }
2599 if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
2600 written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
2601 } else {
2602 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2603
2604 written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
2605 devinfo, &query->oa.result,
2606 query->oa.gt_frequency[0],
2607 query->oa.gt_frequency[1]);
2608 }
2609 break;
2610
2611 case GEN_PERF_QUERY_TYPE_PIPELINE:
2612 written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
2613 break;
2614
2615 default:
2616 unreachable("Unknown query type");
2617 break;
2618 }
2619
2620 if (bytes_written)
2621 *bytes_written = written;
2622 }
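/* A minimal usage sketch (not part of this file's API contract) of how a
 * driver might drive the query lifecycle implemented above; `driver_ctx`,
 * `bufmgr`, `batch` and `query` stand in for objects the caller already
 * owns, and allocation of the gen_perf_query_object itself is left to the
 * driver:
 *
 *    gen_perf_init_metrics(perf_cfg, devinfo, drm_fd);
 *    gen_perf_init_context(perf_ctx, perf_cfg, driver_ctx, bufmgr,
 *                          devinfo, hw_ctx, drm_fd);
 *
 *    gen_perf_begin_query(perf_ctx, query);
 *    ... emit the GPU commands to be measured ...
 *    gen_perf_end_query(perf_ctx, query);
 *
 *    gen_perf_wait_query(perf_ctx, query, batch);
 *    // or poll gen_perf_is_query_ready(perf_ctx, query, batch)
 *    gen_perf_get_query_data(perf_ctx, query, data_size,
 *                            data, &bytes_written);
 *    gen_perf_delete_query(perf_ctx, query);
 */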
2623
2624 void
2625 gen_perf_dump_query_count(struct gen_perf_context *perf_ctx)
2626 {
2627 DBG("Queries: (Open queries = %d, OA users = %d)\n",
2628 perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
2629 }
2630
2631 void
2632 gen_perf_dump_query(struct gen_perf_context *ctx,
2633 struct gen_perf_query_object *obj,
2634 void *current_batch)
2635 {
2636 switch (obj->queryinfo->kind) {
2637 case GEN_PERF_QUERY_TYPE_OA:
2638 case GEN_PERF_QUERY_TYPE_RAW:
2639 DBG("BO: %-4s OA data: %-10s %-15s\n",
2640 obj->oa.bo ? "yes," : "no,",
2641 gen_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
2642 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
2643 break;
2644 case GEN_PERF_QUERY_TYPE_PIPELINE:
2645 DBG("BO: %-4s\n",
2646 obj->pipeline_stats.bo ? "yes" : "no");
2647 break;
2648 default:
2649 unreachable("Unknown query type");
2650 break;
2651 }
2652 }