intel/perf: adapt to platforms like Solaris without d_type in struct dirent
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <dirent.h>
25
26 #include <sys/types.h>
27 #include <sys/stat.h>
28 #include <fcntl.h>
29 #include <unistd.h>
30 #include <errno.h>
31
32 #ifndef HAVE_DIRENT_D_TYPE
33 #include <limits.h> // PATH_MAX
34 #endif
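/* HAVE_DIRENT_D_TYPE is assumed to be provided by the build system, which
 * would typically probe for the d_type member with a compiler check (for
 * example meson's cc.has_member('struct dirent', 'd_type',
 * prefix: '#include <dirent.h>')). Platforms such as Solaris that lack
 * d_type fall back to the lstat()-based path in is_dir_or_link() below. */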
35
36 #include <drm-uapi/i915_drm.h>
37
38 #include "common/gen_gem.h"
39 #include "gen_perf.h"
40 #include "gen_perf_regs.h"
41 #include "perf/gen_perf_mdapi.h"
42 #include "perf/gen_perf_metrics.h"
43
44 #include "dev/gen_debug.h"
45 #include "dev/gen_device_info.h"
46 #include "util/bitscan.h"
47 #include "util/mesa-sha1.h"
48 #include "util/u_math.h"
49
50 #define FILE_DEBUG_FLAG DEBUG_PERFMON
51 #define MI_RPC_BO_SIZE 4096
52 #define MI_FREQ_START_OFFSET_BYTES (3072)
53 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
54 #define MI_FREQ_END_OFFSET_BYTES (3076)
55
56 #define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
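/* e.g. INTEL_MASK(13, 7) == 0x3f80, a contiguous mask covering bits 13..7 */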
57
58 #define GEN7_RPSTAT1 0xA01C
59 #define GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT 7
60 #define GEN7_RPSTAT1_CURR_GT_FREQ_MASK INTEL_MASK(13, 7)
61 #define GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT 0
62 #define GEN7_RPSTAT1_PREV_GT_FREQ_MASK INTEL_MASK(6, 0)
63
64 #define GEN9_RPSTAT0 0xA01C
65 #define GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT 23
66 #define GEN9_RPSTAT0_CURR_GT_FREQ_MASK INTEL_MASK(31, 23)
67 #define GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT 0
68 #define GEN9_RPSTAT0_PREV_GT_FREQ_MASK INTEL_MASK(8, 0)
69
70 #define GEN6_SO_PRIM_STORAGE_NEEDED 0x2280
71 #define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)
72 #define GEN6_SO_NUM_PRIMS_WRITTEN 0x2288
73 #define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
74
75 #define MAP_READ (1 << 0)
76 #define MAP_WRITE (1 << 1)
77
78 #define OA_REPORT_INVALID_CTX_ID (0xffffffff)
79
80 /**
81 * Periodic OA samples are read() into these buffer structures via the
82 * i915 perf kernel interface and appended to the
83 * perf_ctx->sample_buffers linked list. When we process the
84 * results of an OA metrics query we need to consider all the periodic
85 * samples between the Begin and End MI_REPORT_PERF_COUNT command
86 * markers.
87 *
88  * 'Periodic' is a simplification, as other automatic reports written
89  * by the hardware are also buffered here.
90 *
91 * Considering three queries, A, B and C:
92 *
93 * Time ---->
94 * ________________A_________________
95 * | |
96 * | ________B_________ _____C___________
97 * | | | | | |
98 *
99 * And an illustration of sample buffers read over this time frame:
100 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
101 *
102 * These nodes may hold samples for query A:
103 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
104 *
105 * These nodes may hold samples for query B:
106 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
107 *
108 * These nodes may hold samples for query C:
109 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
110 *
111 * The illustration assumes we have an even distribution of periodic
112 * samples so all nodes have the same size plotted against time:
113 *
114 * Note, to simplify code, the list is never empty.
115 *
116 * With overlapping queries we can see that periodic OA reports may
117  * relate to multiple queries and care needs to be taken to keep
118 * track of sample buffers until there are no queries that might
119 * depend on their contents.
120 *
121 * We use a node ref counting system where a reference ensures that a
122 * node and all following nodes can't be freed/recycled until the
123 * reference drops to zero.
124 *
125 * E.g. with a ref of one here:
126 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
127 *
128 * These nodes could be freed or recycled ("reaped"):
129 * [ 0 ][ 0 ]
130 *
131 * These must be preserved until the leading ref drops to zero:
132 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
133 *
134 * When a query starts we take a reference on the current tail of
135 * the list, knowing that no already-buffered samples can possibly
136 * relate to the newly-started query. A pointer to this node is
137 * also saved in the query object's ->oa.samples_head.
138 *
139 * E.g. starting query A while there are two nodes in .sample_buffers:
140 * ________________A________
141 * |
142 *
143 * [ 0 ][ 1 ]
144 * ^_______ Add a reference and store pointer to node in
145 * A->oa.samples_head
146 *
147 * Moving forward to when the B query starts with no new buffer nodes:
148 * (for reference, i915 perf reads() are only done when queries finish)
149 * ________________A_______
150 * | ________B___
151 * | |
152 *
153 * [ 0 ][ 2 ]
154 * ^_______ Add a reference and store pointer to
155 * node in B->oa.samples_head
156 *
157 * Once a query is finished, after an OA query has become 'Ready',
158  * once the End OA report has landed and after we have processed
159 * all the intermediate periodic samples then we drop the
160 * ->oa.samples_head reference we took at the start.
161 *
162 * So when the B query has finished we have:
163 * ________________A________
164 * | ______B___________
165 * | | |
166 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
167 * ^_______ Drop B->oa.samples_head reference
168 *
169 * We still can't free these due to the A->oa.samples_head ref:
170 * [ 1 ][ 0 ][ 0 ][ 0 ]
171 *
172 * When the A query finishes: (note there's a new ref for C's samples_head)
173 * ________________A_________________
174 * | |
175 * | _____C_________
176 * | | |
177 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
178 * ^_______ Drop A->oa.samples_head reference
179 *
180 * And we can now reap these nodes up to the C->oa.samples_head:
181 * [ X ][ X ][ X ][ X ]
182 * keeping -> [ 1 ][ 0 ][ 0 ]
183 *
184 * We reap old sample buffers each time we finish processing an OA
185 * query by iterating the sample_buffers list from the head until we
186 * find a referenced node and stop.
187 *
188 * Reaped buffers move to a perfquery.free_sample_buffers list and
189 * when we come to read() we first look to recycle a buffer from the
190 * free_sample_buffers list before allocating a new buffer.
191 */
192 struct oa_sample_buf {
193 struct exec_node link;
194 int refcount;
195 int len;
196 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
197 uint32_t last_timestamp;
198 };
199
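/* Illustrative sketch (not verbatim driver code) of the ref counting
 * described above, using the helpers defined later in this file:
 *
 *    // Begin: pin the current tail so no later sample buffer can be reaped
 *    struct exec_node *tail = exec_list_get_tail(&perf_ctx->sample_buffers);
 *    exec_node_data(struct oa_sample_buf, tail, link)->refcount++;
 *    query->oa.samples_head = tail;
 *
 *    // Once the query's reports have all been accumulated: unpin and reap
 *    exec_node_data(struct oa_sample_buf,
 *                   query->oa.samples_head, link)->refcount--;
 *    query->oa.samples_head = NULL;
 *    reap_old_sample_buffers(perf_ctx);
 */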
200 /**
201 * gen representation of a performance query object.
202 *
203 * NB: We want to keep this structure relatively lean considering that
204 * applications may expect to allocate enough objects to be able to
205 * query around all draw calls in a frame.
206 */
207 struct gen_perf_query_object
208 {
209 const struct gen_perf_query_info *queryinfo;
210
211 /* See query->kind to know which state below is in use... */
212 union {
213 struct {
214
215 /**
216 * BO containing OA counter snapshots at query Begin/End time.
217 */
218 void *bo;
219
220 /**
221          * Address of the mapping of @bo
222 */
223 void *map;
224
225 /**
226 * The MI_REPORT_PERF_COUNT command lets us specify a unique
227 * ID that will be reflected in the resulting OA report
228 * that's written by the GPU. This is the ID we're expecting
229 * in the begin report and the the end report should be
230 * @begin_report_id + 1.
231 */
232 int begin_report_id;
233
234 /**
235 * Reference the head of the brw->perfquery.sample_buffers
236 * list at the time that the query started (so we only need
237 * to look at nodes after this point when looking for samples
238 * related to this query)
239 *
240 * (See struct brw_oa_sample_buf description for more details)
241 */
242 struct exec_node *samples_head;
243
244 /**
245 * false while in the unaccumulated_elements list, and set to
246 * true when the final, end MI_RPC snapshot has been
247 * accumulated.
248 */
249 bool results_accumulated;
250
251 /**
252 * Frequency of the GT at begin and end of the query.
253 */
254 uint64_t gt_frequency[2];
255
256 /**
257 * Accumulated OA results between begin and end of the query.
258 */
259 struct gen_perf_query_result result;
260 } oa;
261
262 struct {
263 /**
264 * BO containing starting and ending snapshots for the
265 * statistics counters.
266 */
267 void *bo;
268 } pipeline_stats;
269 };
270 };
271
272 struct gen_perf_context {
273 struct gen_perf_config *perf;
274
275 void * ctx; /* driver context (eg, brw_context) */
276 void * bufmgr;
277 const struct gen_device_info *devinfo;
278
279 uint32_t hw_ctx;
280 int drm_fd;
281
282 /* The i915 perf stream we open to setup + enable the OA counters */
283 int oa_stream_fd;
284
285 /* An i915 perf stream fd gives exclusive access to the OA unit that will
286 * report counter snapshots for a specific counter set/profile in a
287 * specific layout/format so we can only start OA queries that are
288 * compatible with the currently open fd...
289 */
290 int current_oa_metrics_set_id;
291 int current_oa_format;
292
293 /* List of buffers containing OA reports */
294 struct exec_list sample_buffers;
295
296 /* Cached list of empty sample buffers */
297 struct exec_list free_sample_buffers;
298
299 int n_active_oa_queries;
300 int n_active_pipeline_stats_queries;
301
302 /* The number of queries depending on running OA counters which
303 * extends beyond brw_end_perf_query() since we need to wait until
304     * the last MI_RPC command has been parsed by the GPU.
305 *
306 * Accurate accounting is important here as emitting an
307 * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
308 * effectively hang the gpu.
309 */
310 int n_oa_users;
311
312    /* To help catch a spurious problem with the hardware or perf
313 * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
314 * with a unique ID that we can explicitly check for...
315 */
316 int next_query_start_report_id;
317
318 /**
319 * An array of queries whose results haven't yet been assembled
320 * based on the data in buffer objects.
321 *
322 * These may be active, or have already ended. However, the
323 * results have not been requested.
324 */
325 struct gen_perf_query_object **unaccumulated;
326 int unaccumulated_elements;
327 int unaccumulated_array_size;
328
329 /* The total number of query objects so we can relinquish
330 * our exclusive access to perf if the application deletes
331 * all of its objects. (NB: We only disable perf while
332 * there are no active queries)
333 */
334 int n_query_instances;
335 };
336
337 const struct gen_perf_query_info*
338 gen_perf_query_info(const struct gen_perf_query_object *query)
339 {
340 return query->queryinfo;
341 }
342
343 struct gen_perf_context *
344 gen_perf_new_context(void *parent)
345 {
346 struct gen_perf_context *ctx = rzalloc(parent, struct gen_perf_context);
347    if (!ctx)
348 fprintf(stderr, "%s: failed to alloc context\n", __func__);
349 return ctx;
350 }
351
352 struct gen_perf_config *
353 gen_perf_config(struct gen_perf_context *ctx)
354 {
355 return ctx->perf;
356 }
357
358 struct gen_perf_query_object *
359 gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
360 {
361 const struct gen_perf_query_info *query =
362 &perf_ctx->perf->queries[query_index];
363 struct gen_perf_query_object *obj =
364 calloc(1, sizeof(struct gen_perf_query_object));
365
366 if (!obj)
367 return NULL;
368
369 obj->queryinfo = query;
370
371 perf_ctx->n_query_instances++;
372 return obj;
373 }
374
375 int
376 gen_perf_active_queries(struct gen_perf_context *perf_ctx,
377 const struct gen_perf_query_info *query)
378 {
379 assert(perf_ctx->n_active_oa_queries == 0 || perf_ctx->n_active_pipeline_stats_queries == 0);
380
381 switch (query->kind) {
382 case GEN_PERF_QUERY_TYPE_OA:
383 case GEN_PERF_QUERY_TYPE_RAW:
384 return perf_ctx->n_active_oa_queries;
385 break;
386
387 case GEN_PERF_QUERY_TYPE_PIPELINE:
388 return perf_ctx->n_active_pipeline_stats_queries;
389 break;
390
391 default:
392 unreachable("Unknown query type");
393 break;
394 }
395 }
396
397 static inline uint64_t to_user_pointer(void *ptr)
398 {
399 return (uintptr_t) ptr;
400 }
401
402 static bool
403 is_dir_or_link(const struct dirent *entry, const char *parent_dir)
404 {
405 #ifdef HAVE_DIRENT_D_TYPE
406 return entry->d_type == DT_DIR || entry->d_type == DT_LNK;
407 #else
408 struct stat st;
409 char path[PATH_MAX + 1];
410 snprintf(path, sizeof(path), "%s/%s", parent_dir, entry->d_name);
411    if (lstat(path, &st) != 0) return false;
412    return S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode);
413 #endif
414 }
415
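/* Resolve the sysfs directory of the DRM device backing @fd. For a typical
 * render node such as /dev/dri/renderD128 (character device 226:128) this
 * ends up as something like /sys/dev/char/226:128/device/drm/card0 (the
 * exact card name is illustrative and varies per system). */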
416 static bool
417 get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
418 {
419 struct stat sb;
420 int min, maj;
421 DIR *drmdir;
422 struct dirent *drm_entry;
423 int len;
424
425 perf->sysfs_dev_dir[0] = '\0';
426
427 if (fstat(fd, &sb)) {
428 DBG("Failed to stat DRM fd\n");
429 return false;
430 }
431
432 maj = major(sb.st_rdev);
433 min = minor(sb.st_rdev);
434
435 if (!S_ISCHR(sb.st_mode)) {
436 DBG("DRM fd is not a character device as expected\n");
437 return false;
438 }
439
440 len = snprintf(perf->sysfs_dev_dir,
441 sizeof(perf->sysfs_dev_dir),
442 "/sys/dev/char/%d:%d/device/drm", maj, min);
443 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
444 DBG("Failed to concatenate sysfs path to drm device\n");
445 return false;
446 }
447
448 drmdir = opendir(perf->sysfs_dev_dir);
449 if (!drmdir) {
450 DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
451 return false;
452 }
453
454 while ((drm_entry = readdir(drmdir))) {
455 if (is_dir_or_link(drm_entry, perf->sysfs_dev_dir) &&
456 strncmp(drm_entry->d_name, "card", 4) == 0)
457 {
458 len = snprintf(perf->sysfs_dev_dir,
459 sizeof(perf->sysfs_dev_dir),
460 "/sys/dev/char/%d:%d/device/drm/%s",
461 maj, min, drm_entry->d_name);
462 closedir(drmdir);
463 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
464 return false;
465 else
466 return true;
467 }
468 }
469
470 closedir(drmdir);
471
472 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
473 maj, min);
474
475 return false;
476 }
477
478 static bool
479 read_file_uint64(const char *file, uint64_t *val)
480 {
481 char buf[32];
482 int fd, n;
483
484 fd = open(file, 0);
485 if (fd < 0)
486 return false;
487 while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
488 errno == EINTR);
489 close(fd);
490 if (n < 0)
491 return false;
492
493 buf[n] = '\0';
494 *val = strtoull(buf, NULL, 0);
495
496 return true;
497 }
498
499 static bool
500 read_sysfs_drm_device_file_uint64(struct gen_perf_config *perf,
501 const char *file,
502 uint64_t *value)
503 {
504 char buf[512];
505 int len;
506
507 len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
508 if (len < 0 || len >= sizeof(buf)) {
509 DBG("Failed to concatenate sys filename to read u64 from\n");
510 return false;
511 }
512
513 return read_file_uint64(buf, value);
514 }
515
516 static inline struct gen_perf_query_info *
517 append_query_info(struct gen_perf_config *perf, int max_counters)
518 {
519 struct gen_perf_query_info *query;
520
521 perf->queries = reralloc(perf, perf->queries,
522 struct gen_perf_query_info,
523 ++perf->n_queries);
524 query = &perf->queries[perf->n_queries - 1];
525 memset(query, 0, sizeof(*query));
526
527 if (max_counters > 0) {
528 query->max_counters = max_counters;
529 query->counters =
530 rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
531 }
532
533 return query;
534 }
535
536 static void
537 register_oa_config(struct gen_perf_config *perf,
538 const struct gen_perf_query_info *query,
539 uint64_t config_id)
540 {
541 struct gen_perf_query_info *registered_query = append_query_info(perf, 0);
542
543 *registered_query = *query;
544 registered_query->oa_metrics_set_id = config_id;
545 DBG("metric set registered: id = %" PRIu64", guid = %s\n",
546 registered_query->oa_metrics_set_id, query->guid);
547 }
548
549 static void
550 enumerate_sysfs_metrics(struct gen_perf_config *perf)
551 {
552 DIR *metricsdir = NULL;
553 struct dirent *metric_entry;
554 char buf[256];
555 int len;
556
557 len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
558 if (len < 0 || len >= sizeof(buf)) {
559 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
560 return;
561 }
562
563 metricsdir = opendir(buf);
564 if (!metricsdir) {
565 DBG("Failed to open %s: %m\n", buf);
566 return;
567 }
568
569 while ((metric_entry = readdir(metricsdir))) {
570 struct hash_entry *entry;
571 if (!is_dir_or_link(metric_entry, buf) ||
572 metric_entry->d_name[0] == '.')
573 continue;
574
575 DBG("metric set: %s\n", metric_entry->d_name);
576 entry = _mesa_hash_table_search(perf->oa_metrics_table,
577 metric_entry->d_name);
578 if (entry) {
579 uint64_t id;
580 if (!gen_perf_load_metric_id(perf, metric_entry->d_name, &id)) {
581             DBG("Failed to read metric set id from %s: %m\n", buf);
582 continue;
583 }
584
585 register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
586 } else
587 DBG("metric set not known by mesa (skipping)\n");
588 }
589
590 closedir(metricsdir);
591 }
592
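/* Probe for i915 perf dynamic config support by asking the kernel to remove
 * a config id that cannot exist: kernels implementing
 * DRM_IOCTL_I915_PERF_REMOVE_CONFIG fail with ENOENT, while older kernels
 * fail with a different errno, so nothing is ever actually removed. */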
593 static bool
594 kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
595 {
596 uint64_t invalid_config_id = UINT64_MAX;
597
598 return gen_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
599 &invalid_config_id) < 0 && errno == ENOENT;
600 }
601
602 static int
603 i915_query_items(struct gen_perf_config *perf, int fd,
604 struct drm_i915_query_item *items, uint32_t n_items)
605 {
606 struct drm_i915_query q = {
607 .num_items = n_items,
608 .items_ptr = to_user_pointer(items),
609 };
610 return gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
611 }
612
613 static bool
614 i915_query_perf_config_supported(struct gen_perf_config *perf, int fd)
615 {
616 struct drm_i915_query_item item = {
617 .query_id = DRM_I915_QUERY_PERF_CONFIG,
618 .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
619 };
620
621 return i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0;
622 }
623
624 static bool
625 i915_query_perf_config_data(struct gen_perf_config *perf,
626 int fd, const char *guid,
627 struct drm_i915_perf_oa_config *config)
628 {
629 struct {
630 struct drm_i915_query_perf_config query;
631 struct drm_i915_perf_oa_config config;
632 } item_data;
633 struct drm_i915_query_item item = {
634 .query_id = DRM_I915_QUERY_PERF_CONFIG,
635 .flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
636 .data_ptr = to_user_pointer(&item_data),
637 .length = sizeof(item_data),
638 };
639
640 memset(&item_data, 0, sizeof(item_data));
641 memcpy(item_data.query.uuid, guid, sizeof(item_data.query.uuid));
642 memcpy(&item_data.config, config, sizeof(item_data.config));
643
644 if (!(i915_query_items(perf, fd, &item, 1) == 0 && item.length > 0))
645 return false;
646
647 memcpy(config, &item_data.config, sizeof(item_data.config));
648
649 return true;
650 }
651
652 bool
653 gen_perf_load_metric_id(struct gen_perf_config *perf_cfg,
654 const char *guid,
655 uint64_t *metric_id)
656 {
657 char config_path[280];
658
659 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
660 perf_cfg->sysfs_dev_dir, guid);
661
662 /* Don't recreate already loaded configs. */
663 return read_file_uint64(config_path, metric_id);
664 }
665
666 static uint64_t
667 i915_add_config(struct gen_perf_config *perf, int fd,
668 const struct gen_perf_registers *config,
669 const char *guid)
670 {
671 struct drm_i915_perf_oa_config i915_config = { 0, };
672
673 memcpy(i915_config.uuid, guid, sizeof(i915_config.uuid));
674
675 i915_config.n_mux_regs = config->n_mux_regs;
676 i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);
677
678 i915_config.n_boolean_regs = config->n_b_counter_regs;
679 i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);
680
681 i915_config.n_flex_regs = config->n_flex_regs;
682 i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);
683
684 int ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &i915_config);
685 return ret > 0 ? ret : 0;
686 }
687
688 static void
689 init_oa_configs(struct gen_perf_config *perf, int fd)
690 {
691 hash_table_foreach(perf->oa_metrics_table, entry) {
692 const struct gen_perf_query_info *query = entry->data;
693 uint64_t config_id;
694
695 if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
696 DBG("metric set: %s (already loaded)\n", query->guid);
697 register_oa_config(perf, query, config_id);
698 continue;
699 }
700
701 int ret = i915_add_config(perf, fd, &query->config, query->guid);
702 if (ret < 0) {
703 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
704 query->name, query->guid, strerror(errno));
705 continue;
706 }
707
708 register_oa_config(perf, query, ret);
709 DBG("metric set: %s (added)\n", query->guid);
710 }
711 }
712
713 static void
714 compute_topology_builtins(struct gen_perf_config *perf,
715 const struct gen_device_info *devinfo)
716 {
717 perf->sys_vars.slice_mask = devinfo->slice_masks;
718 perf->sys_vars.n_eu_slices = devinfo->num_slices;
719
720 for (int i = 0; i < sizeof(devinfo->subslice_masks[i]); i++) {
721 perf->sys_vars.n_eu_sub_slices +=
722 __builtin_popcount(devinfo->subslice_masks[i]);
723 }
724
725 for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
726 perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);
727
728 perf->sys_vars.eu_threads_count = devinfo->num_thread_per_eu;
729
730 /* The subslice mask builtin contains bits for all slices. Prior to Gen11
731     * it had groups of 3 bits for each slice; on Gen11 it's 8 bits for each
732 * slice.
733 *
734 * Ideally equations would be updated to have a slice/subslice query
735 * function/operator.
736 */
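   /* Worked example: on a hypothetical Gen9 part with two slices, each with
    * subslices 0-2 enabled, this packs to 0b111 | (0b111 << 3) = 0x3f; on
    * Gen11 the second slice's group would start at bit 8 instead. */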
737 perf->sys_vars.subslice_mask = 0;
738
739 int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;
740
741 for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
742 for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
743 if (gen_device_info_subslice_available(devinfo, s, ss))
744 perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
745 }
746 }
747 }
748
749 static bool
750 init_oa_sys_vars(struct gen_perf_config *perf, const struct gen_device_info *devinfo)
751 {
752 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
753
754 if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
755 return false;
756
757 if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
758 return false;
759
760 memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
761 perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
762 perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
763 perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
764 perf->sys_vars.revision = devinfo->revision;
765 compute_topology_builtins(perf, devinfo);
766
767 return true;
768 }
769
770 typedef void (*perf_register_oa_queries_t)(struct gen_perf_config *);
771
772 static perf_register_oa_queries_t
773 get_register_queries_function(const struct gen_device_info *devinfo)
774 {
775 if (devinfo->is_haswell)
776 return gen_oa_register_queries_hsw;
777 if (devinfo->is_cherryview)
778 return gen_oa_register_queries_chv;
779 if (devinfo->is_broadwell)
780 return gen_oa_register_queries_bdw;
781 if (devinfo->is_broxton)
782 return gen_oa_register_queries_bxt;
783 if (devinfo->is_skylake) {
784 if (devinfo->gt == 2)
785 return gen_oa_register_queries_sklgt2;
786 if (devinfo->gt == 3)
787 return gen_oa_register_queries_sklgt3;
788 if (devinfo->gt == 4)
789 return gen_oa_register_queries_sklgt4;
790 }
791 if (devinfo->is_kabylake) {
792 if (devinfo->gt == 2)
793 return gen_oa_register_queries_kblgt2;
794 if (devinfo->gt == 3)
795 return gen_oa_register_queries_kblgt3;
796 }
797 if (devinfo->is_geminilake)
798 return gen_oa_register_queries_glk;
799 if (devinfo->is_coffeelake) {
800 if (devinfo->gt == 2)
801 return gen_oa_register_queries_cflgt2;
802 if (devinfo->gt == 3)
803 return gen_oa_register_queries_cflgt3;
804 }
805 if (devinfo->is_cannonlake)
806 return gen_oa_register_queries_cnl;
807 if (devinfo->gen == 11) {
808 if (devinfo->is_elkhartlake)
809 return gen_oa_register_queries_lkf;
810 return gen_oa_register_queries_icl;
811 }
812 if (devinfo->gen == 12)
813 return gen_oa_register_queries_tgl;
814
815 return NULL;
816 }
817
818 static inline void
819 add_stat_reg(struct gen_perf_query_info *query, uint32_t reg,
820 uint32_t numerator, uint32_t denominator,
821 const char *name, const char *description)
822 {
823 struct gen_perf_query_counter *counter;
824
825 assert(query->n_counters < query->max_counters);
826
827 counter = &query->counters[query->n_counters];
828 counter->name = name;
829 counter->desc = description;
830 counter->type = GEN_PERF_COUNTER_TYPE_RAW;
831 counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
832 counter->offset = sizeof(uint64_t) * query->n_counters;
833 counter->pipeline_stat.reg = reg;
834 counter->pipeline_stat.numerator = numerator;
835 counter->pipeline_stat.denominator = denominator;
836
837 query->n_counters++;
838 }
839
840 static inline void
841 add_basic_stat_reg(struct gen_perf_query_info *query,
842 uint32_t reg, const char *name)
843 {
844 add_stat_reg(query, reg, 1, 1, name, name);
845 }
846
847 static void
848 load_pipeline_statistic_metrics(struct gen_perf_config *perf_cfg,
849 const struct gen_device_info *devinfo)
850 {
851 struct gen_perf_query_info *query =
852 append_query_info(perf_cfg, MAX_STAT_COUNTERS);
853
854 query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
855 query->name = "Pipeline Statistics Registers";
856
857 add_basic_stat_reg(query, IA_VERTICES_COUNT,
858 "N vertices submitted");
859 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
860 "N primitives submitted");
861 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
862 "N vertex shader invocations");
863
864 if (devinfo->gen == 6) {
865 add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
866 "SO_PRIM_STORAGE_NEEDED",
867 "N geometry shader stream-out primitives (total)");
868 add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
869 "SO_NUM_PRIMS_WRITTEN",
870 "N geometry shader stream-out primitives (written)");
871 } else {
872 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
873 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
874 "N stream-out (stream 0) primitives (total)");
875 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
876 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
877 "N stream-out (stream 1) primitives (total)");
878 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
879 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
880 "N stream-out (stream 2) primitives (total)");
881 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
882 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
883 "N stream-out (stream 3) primitives (total)");
884 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
885 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
886 "N stream-out (stream 0) primitives (written)");
887 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
888 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
889 "N stream-out (stream 1) primitives (written)");
890 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
891 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
892 "N stream-out (stream 2) primitives (written)");
893 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
894 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
895 "N stream-out (stream 3) primitives (written)");
896 }
897
898 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
899 "N TCS shader invocations");
900 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
901 "N TES shader invocations");
902
903 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
904 "N geometry shader invocations");
905 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
906 "N geometry shader primitives emitted");
907
908 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
909 "N primitives entering clipping");
910 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
911 "N primitives leaving clipping");
912
913 if (devinfo->is_haswell || devinfo->gen == 8) {
914 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
915 "N fragment shader invocations",
916 "N fragment shader invocations");
917 } else {
918 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
919 "N fragment shader invocations");
920 }
921
922 add_basic_stat_reg(query, PS_DEPTH_COUNT,
923 "N z-pass fragments");
924
925 if (devinfo->gen >= 7) {
926 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
927 "N compute shader invocations");
928 }
929
930 query->data_size = sizeof(uint64_t) * query->n_counters;
931 }
932
933 static bool
934 load_oa_metrics(struct gen_perf_config *perf, int fd,
935 const struct gen_device_info *devinfo)
936 {
937 perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
938 bool i915_perf_oa_available = false;
939 struct stat sb;
940
941 perf->i915_query_supported = i915_query_perf_config_supported(perf, fd);
942
943 /* The existence of this sysctl parameter implies the kernel supports
944 * the i915 perf interface.
945 */
946 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
947
948 /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
949 * metrics unless running as root.
950 */
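      /* (Administrators can usually relax this restriction with
       * "sysctl dev.i915.perf_stream_paranoid=0".) */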
951 if (devinfo->is_haswell)
952 i915_perf_oa_available = true;
953 else {
954 uint64_t paranoid = 1;
955
956 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
957
958 if (paranoid == 0 || geteuid() == 0)
959 i915_perf_oa_available = true;
960 }
961 }
962
963 if (!i915_perf_oa_available ||
964 !oa_register ||
965 !get_sysfs_dev_dir(perf, fd) ||
966 !init_oa_sys_vars(perf, devinfo))
967 return false;
968
969 perf->oa_metrics_table =
970 _mesa_hash_table_create(perf, _mesa_key_hash_string,
971 _mesa_key_string_equal);
972
973 /* Index all the metric sets mesa knows about before looking to see what
974 * the kernel is advertising.
975 */
976 oa_register(perf);
977
978 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
979 kernel_has_dynamic_config_support(perf, fd))
980 init_oa_configs(perf, fd);
981 else
982 enumerate_sysfs_metrics(perf);
983
984 return true;
985 }
986
987 struct gen_perf_registers *
988 gen_perf_load_configuration(struct gen_perf_config *perf_cfg, int fd, const char *guid)
989 {
990 if (!perf_cfg->i915_query_supported)
991 return NULL;
992
993 struct drm_i915_perf_oa_config i915_config = { 0, };
994 if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config))
995 return NULL;
996
997 struct gen_perf_registers *config = rzalloc(NULL, struct gen_perf_registers);
998 config->n_flex_regs = i915_config.n_flex_regs;
999 config->flex_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_flex_regs);
1000 config->n_mux_regs = i915_config.n_mux_regs;
1001 config->mux_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_mux_regs);
1002 config->n_b_counter_regs = i915_config.n_boolean_regs;
1003 config->b_counter_regs = rzalloc_array(config, struct gen_perf_query_register_prog, config->n_b_counter_regs);
1004
1005 /*
1006 * struct gen_perf_query_register_prog maps exactly to the tuple of
1007 * (register offset, register value) returned by the i915.
1008 */
1009 i915_config.flex_regs_ptr = to_user_pointer(config->flex_regs);
1010 i915_config.mux_regs_ptr = to_user_pointer(config->mux_regs);
1011 i915_config.boolean_regs_ptr = to_user_pointer(config->b_counter_regs);
1012 if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config)) {
1013 ralloc_free(config);
1014 return NULL;
1015 }
1016
1017 return config;
1018 }
1019
1020 uint64_t
1021 gen_perf_store_configuration(struct gen_perf_config *perf_cfg, int fd,
1022 const struct gen_perf_registers *config,
1023 const char *guid)
1024 {
1025 if (guid)
1026 return i915_add_config(perf_cfg, fd, config, guid);
1027
1028 struct mesa_sha1 sha1_ctx;
1029 _mesa_sha1_init(&sha1_ctx);
1030
1031 if (config->flex_regs) {
1032 _mesa_sha1_update(&sha1_ctx, config->flex_regs,
1033 sizeof(config->flex_regs[0]) *
1034 config->n_flex_regs);
1035 }
1036 if (config->mux_regs) {
1037 _mesa_sha1_update(&sha1_ctx, config->mux_regs,
1038 sizeof(config->mux_regs[0]) *
1039 config->n_mux_regs);
1040 }
1041 if (config->b_counter_regs) {
1042 _mesa_sha1_update(&sha1_ctx, config->b_counter_regs,
1043 sizeof(config->b_counter_regs[0]) *
1044 config->n_b_counter_regs);
1045 }
1046
1047 uint8_t hash[20];
1048 _mesa_sha1_final(&sha1_ctx, hash);
1049
1050 char formatted_hash[41];
1051 _mesa_sha1_format(formatted_hash, hash);
1052
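   /* Slice the 40-character hex digest into a UUID-shaped string. For example
    * the digest da39a3ee5e6b4b0d3255bfef95601890afd80709 (SHA-1 of empty
    * input) would become "da39a3ee-5e6b-4b0d-3255-bfef95601890". */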
1053 char generated_guid[37];
1054 snprintf(generated_guid, sizeof(generated_guid),
1055 "%.8s-%.4s-%.4s-%.4s-%.12s",
1056 &formatted_hash[0], &formatted_hash[8],
1057 &formatted_hash[8 + 4], &formatted_hash[8 + 4 + 4],
1058 &formatted_hash[8 + 4 + 4 + 4]);
1059
1060 /* Check if already present. */
1061 uint64_t id;
1062 if (gen_perf_load_metric_id(perf_cfg, generated_guid, &id))
1063 return id;
1064
1065 return i915_add_config(perf_cfg, fd, config, generated_guid);
1066 }
1067
1068 /* Accumulate 32bits OA counters */
1069 static inline void
1070 accumulate_uint32(const uint32_t *report0,
1071 const uint32_t *report1,
1072 uint64_t *accumulator)
1073 {
1074 *accumulator += (uint32_t)(*report1 - *report0);
1075 }
1076
1077 /* Accumulate 40bits OA counters */
1078 static inline void
1079 accumulate_uint40(int a_index,
1080 const uint32_t *report0,
1081 const uint32_t *report1,
1082 uint64_t *accumulator)
1083 {
1084 const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
1085 const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
1086 uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
1087 uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
1088 uint64_t value0 = report0[a_index + 4] | high0;
1089 uint64_t value1 = report1[a_index + 4] | high1;
1090 uint64_t delta;
1091
1092 if (value0 > value1)
1093 delta = (1ULL << 40) + value1 - value0;
1094 else
1095 delta = value1 - value0;
1096
1097 *accumulator += delta;
1098 }
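/* Worked example of the wraparound handling above: with
 * value0 = (1ULL << 40) - 256 and value1 = 256, the counter wrapped, so
 * delta = (1ULL << 40) + 256 - value0 = 512 rather than a huge bogus value. */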
1099
1100 static void
1101 gen8_read_report_clock_ratios(const uint32_t *report,
1102 uint64_t *slice_freq_hz,
1103 uint64_t *unslice_freq_hz)
1104 {
1105 /* The lower 16bits of the RPT_ID field of the OA reports contains a
1106 * snapshot of the bits coming from the RP_FREQ_NORMAL register and is
1107 * divided this way :
1108 *
1109 * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
1110 * RPT_ID[10:9]: RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
1111 * RPT_ID[8:0]: RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
1112 *
1113 * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
1114 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
1115 *
1116 * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
1117 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
1118 */
1119
1120 uint32_t unslice_freq = report[0] & 0x1ff;
1121 uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
1122 uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
1123 uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);
1124
1125 *slice_freq_hz = slice_freq * 16666667ULL;
1126 *unslice_freq_hz = unslice_freq * 16666667ULL;
1127 }
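/* Example: an unslice field value of 18 decodes to 18 * 16.67 MHz, i.e.
 * roughly 300 MHz. */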
1128
1129 void
1130 gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
1131 const struct gen_device_info *devinfo,
1132 const uint32_t *start,
1133 const uint32_t *end)
1134 {
1135 /* Slice/Unslice frequency is only available in the OA reports when the
1136 * "Disable OA reports due to clock ratio change" field in
1137 * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
1138 * global register (see drivers/gpu/drm/i915/i915_perf.c)
1139 *
1140 * Documentation says this should be available on Gen9+ but experimentation
1141 * shows that Gen8 reports similar values, so we enable it there too.
1142 */
1143 if (devinfo->gen < 8)
1144 return;
1145
1146 gen8_read_report_clock_ratios(start,
1147 &result->slice_frequency[0],
1148 &result->unslice_frequency[0]);
1149 gen8_read_report_clock_ratios(end,
1150 &result->slice_frequency[1],
1151 &result->unslice_frequency[1]);
1152 }
1153
1154 void
1155 gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
1156 const struct gen_perf_query_info *query,
1157 const uint32_t *start,
1158 const uint32_t *end)
1159 {
1160 int i, idx = 0;
1161
1162 if (result->hw_id == OA_REPORT_INVALID_CTX_ID &&
1163 start[2] != OA_REPORT_INVALID_CTX_ID)
1164 result->hw_id = start[2];
1165 if (result->reports_accumulated == 0)
1166 result->begin_timestamp = start[1];
1167 result->reports_accumulated++;
1168
1169 switch (query->oa_format) {
1170 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
1171 accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
1172 accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */
1173
1174 /* 32x 40bit A counters... */
1175 for (i = 0; i < 32; i++)
1176 accumulate_uint40(i, start, end, result->accumulator + idx++);
1177
1178 /* 4x 32bit A counters... */
1179 for (i = 0; i < 4; i++)
1180 accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);
1181
1182 /* 8x 32bit B counters + 8x 32bit C counters... */
1183 for (i = 0; i < 16; i++)
1184 accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
1185 break;
1186
1187 case I915_OA_FORMAT_A45_B8_C8:
1188 accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */
1189
1190 for (i = 0; i < 61; i++)
1191 accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
1192 break;
1193
1194 default:
1195 unreachable("Can't accumulate OA counters in unknown format");
1196 }
1197
1198 }
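/* For reference, the A32u40_A4u32_B8_C8 layout assumed above, in dwords:
 * 0-3 header (report id, timestamp, context id, gpu ticks), 4-35 the low
 * 32 bits of A0-A31, 36-39 A32-A35, 40-47 the high bytes of A0-A31 (one
 * byte per counter), 48-55 B0-B7, 56-63 C0-C7. */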
1199
1200 void
1201 gen_perf_query_result_clear(struct gen_perf_query_result *result)
1202 {
1203 memset(result, 0, sizeof(*result));
1204 result->hw_id = OA_REPORT_INVALID_CTX_ID; /* invalid */
1205 }
1206
1207 static void
1208 register_mdapi_statistic_query(struct gen_perf_config *perf_cfg,
1209 const struct gen_device_info *devinfo)
1210 {
1211 if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
1212 return;
1213
1214 struct gen_perf_query_info *query =
1215 append_query_info(perf_cfg, MAX_STAT_COUNTERS);
1216
1217 query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
1218 query->name = "Intel_Raw_Pipeline_Statistics_Query";
1219
1220 /* The order has to match mdapi_pipeline_metrics. */
1221 add_basic_stat_reg(query, IA_VERTICES_COUNT,
1222 "N vertices submitted");
1223 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1224 "N primitives submitted");
1225 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1226 "N vertex shader invocations");
1227 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1228 "N geometry shader invocations");
1229 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1230 "N geometry shader primitives emitted");
1231 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1232 "N primitives entering clipping");
1233 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1234 "N primitives leaving clipping");
1235 if (devinfo->is_haswell || devinfo->gen == 8) {
1236 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1237 "N fragment shader invocations",
1238 "N fragment shader invocations");
1239 } else {
1240 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1241 "N fragment shader invocations");
1242 }
1243 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1244 "N TCS shader invocations");
1245 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1246 "N TES shader invocations");
1247 if (devinfo->gen >= 7) {
1248 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1249 "N compute shader invocations");
1250 }
1251
1252 if (devinfo->gen >= 10) {
1253 /* Reuse existing CS invocation register until we can expose this new
1254 * one.
1255 */
1256 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1257 "Reserved1");
1258 }
1259
1260 query->data_size = sizeof(uint64_t) * query->n_counters;
1261 }
1262
1263 static void
1264 fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
1265 const char *name,
1266 uint32_t data_offset,
1267 uint32_t data_size,
1268 enum gen_perf_counter_data_type data_type)
1269 {
1270 struct gen_perf_query_counter *counter = &query->counters[query->n_counters];
1271
1272 assert(query->n_counters <= query->max_counters);
1273
1274 counter->name = name;
1275 counter->desc = "Raw counter value";
1276 counter->type = GEN_PERF_COUNTER_TYPE_RAW;
1277 counter->data_type = data_type;
1278 counter->offset = data_offset;
1279
1280 query->n_counters++;
1281
1282 assert(counter->offset + gen_perf_query_counter_get_size(counter) <= query->data_size);
1283 }
1284
1285 #define MDAPI_QUERY_ADD_COUNTER(query, struct_name, field_name, type_name) \
1286 fill_mdapi_perf_query_counter(query, #field_name, \
1287 (uint8_t *) &struct_name.field_name - \
1288 (uint8_t *) &struct_name, \
1289 sizeof(struct_name.field_name), \
1290 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
1291 #define MDAPI_QUERY_ADD_ARRAY_COUNTER(ctx, query, struct_name, field_name, idx, type_name) \
1292 fill_mdapi_perf_query_counter(query, \
1293 ralloc_asprintf(ctx, "%s%i", #field_name, idx), \
1294 (uint8_t *) &struct_name.field_name[idx] - \
1295 (uint8_t *) &struct_name, \
1296 sizeof(struct_name.field_name[0]), \
1297 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
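/* The pointer arithmetic in these macros is offsetof() spelled out against
 * a local instance, so each counter's offset matches the field's position
 * in the corresponding MDAPI metrics structure. */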
1298
1299 static void
1300 register_mdapi_oa_query(const struct gen_device_info *devinfo,
1301 struct gen_perf_config *perf)
1302 {
1303 struct gen_perf_query_info *query = NULL;
1304
1305 /* MDAPI requires different structures for pretty much every generation
1306 * (right now we have definitions for gen 7 to 11).
1307 */
1308 if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
1309 return;
1310
1311 switch (devinfo->gen) {
1312 case 7: {
1313 query = append_query_info(perf, 1 + 45 + 16 + 7);
1314 query->oa_format = I915_OA_FORMAT_A45_B8_C8;
1315
1316 struct gen7_mdapi_metrics metric_data;
1317 query->data_size = sizeof(metric_data);
1318
1319 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1320 for (int i = 0; i < ARRAY_SIZE(metric_data.ACounters); i++) {
1321 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1322 metric_data, ACounters, i, UINT64);
1323 }
1324 for (int i = 0; i < ARRAY_SIZE(metric_data.NOACounters); i++) {
1325 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1326 metric_data, NOACounters, i, UINT64);
1327 }
1328 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1329 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1330 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1331 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1332 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1333 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1334 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1335 break;
1336 }
1337 case 8: {
1338 query = append_query_info(perf, 2 + 36 + 16 + 16);
1339 query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
1340
1341 struct gen8_mdapi_metrics metric_data;
1342 query->data_size = sizeof(metric_data);
1343
1344 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1345 MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
1346 for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
1347 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1348 metric_data, OaCntr, i, UINT64);
1349 }
1350 for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
1351 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1352 metric_data, NoaCntr, i, UINT64);
1353 }
1354 MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
1355 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
1356 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
1357 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
1358 MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
1359 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
1360 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
1361 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
1362 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
1363 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1364 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1365 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1366 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1367 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1368 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1369 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1370 break;
1371 }
1372 case 9:
1373 case 10:
1374 case 11: {
1375 query = append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
1376 query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
1377
1378 struct gen9_mdapi_metrics metric_data;
1379 query->data_size = sizeof(metric_data);
1380
1381 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
1382 MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
1383 for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
1384 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1385 metric_data, OaCntr, i, UINT64);
1386 }
1387 for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
1388 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1389 metric_data, NoaCntr, i, UINT64);
1390 }
1391 MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
1392 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
1393 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
1394 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
1395 MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
1396 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
1397 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
1398 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
1399 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
1400 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
1401 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
1402 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
1403 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
1404 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
1405 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
1406 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
1407 for (int i = 0; i < ARRAY_SIZE(metric_data.UserCntr); i++) {
1408 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
1409 metric_data, UserCntr, i, UINT64);
1410 }
1411 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UserCntrCfgId, UINT32);
1412 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved4, UINT32);
1413 break;
1414 }
1415 default:
1416 unreachable("Unsupported gen");
1417 break;
1418 }
1419
1420 query->kind = GEN_PERF_QUERY_TYPE_RAW;
1421 query->name = "Intel_Raw_Hardware_Counters_Set_0_Query";
1422 query->guid = GEN_PERF_QUERY_GUID_MDAPI;
1423
1424 {
1425 /* Accumulation buffer offsets copied from an actual query... */
1426 const struct gen_perf_query_info *copy_query =
1427 &perf->queries[0];
1428
1429 query->gpu_time_offset = copy_query->gpu_time_offset;
1430 query->gpu_clock_offset = copy_query->gpu_clock_offset;
1431 query->a_offset = copy_query->a_offset;
1432 query->b_offset = copy_query->b_offset;
1433 query->c_offset = copy_query->c_offset;
1434 }
1435 }
1436
1437 static uint64_t
1438 get_metric_id(struct gen_perf_config *perf,
1439 const struct gen_perf_query_info *query)
1440 {
1441    /* These queries are known to never change; their config ID was
1442     * loaded upon the first query creation. No need to look them up again.
1443 */
1444 if (query->kind == GEN_PERF_QUERY_TYPE_OA)
1445 return query->oa_metrics_set_id;
1446
1447 assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);
1448
1449    /* Raw queries can be reprogrammed by an external application/library.
1450     * When a raw query is used for the first time its ID is set to a value !=
1451     * 0. When it stops being used the ID returns to 0. No need to reload the
1452 * ID when it's already loaded.
1453 */
1454 if (query->oa_metrics_set_id != 0) {
1455 DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
1456 query->name, query->guid, query->oa_metrics_set_id);
1457 return query->oa_metrics_set_id;
1458 }
1459
1460 struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
1461 if (!gen_perf_load_metric_id(perf, query->guid,
1462 &raw_query->oa_metrics_set_id)) {
1463 DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
1464 raw_query->oa_metrics_set_id = 1ULL;
1465 } else {
1466       DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
1467 query->name, query->guid, query->oa_metrics_set_id);
1468 }
1469 return query->oa_metrics_set_id;
1470 }
1471
1472 static struct oa_sample_buf *
1473 get_free_sample_buf(struct gen_perf_context *perf_ctx)
1474 {
1475 struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
1476 struct oa_sample_buf *buf;
1477
1478 if (node)
1479 buf = exec_node_data(struct oa_sample_buf, node, link);
1480 else {
1481 buf = ralloc_size(perf_ctx->perf, sizeof(*buf));
1482
1483 exec_node_init(&buf->link);
1484 buf->refcount = 0;
1485 }
1486 buf->len = 0;
1487
1488 return buf;
1489 }
1490
1491 static void
1492 reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
1493 {
1494 struct exec_node *tail_node =
1495 exec_list_get_tail(&perf_ctx->sample_buffers);
1496 struct oa_sample_buf *tail_buf =
1497 exec_node_data(struct oa_sample_buf, tail_node, link);
1498
1499 /* Remove all old, unreferenced sample buffers walking forward from
1500 * the head of the list, except always leave at least one node in
1501 * the list so we always have a node to reference when we Begin
1502 * a new query.
1503 */
1504 foreach_list_typed_safe(struct oa_sample_buf, buf, link,
1505 &perf_ctx->sample_buffers)
1506 {
1507 if (buf->refcount == 0 && buf != tail_buf) {
1508 exec_node_remove(&buf->link);
1509 exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
1510 } else
1511 return;
1512 }
1513 }
1514
1515 static void
1516 free_sample_bufs(struct gen_perf_context *perf_ctx)
1517 {
1518 foreach_list_typed_safe(struct oa_sample_buf, buf, link,
1519 &perf_ctx->free_sample_buffers)
1520 ralloc_free(buf);
1521
1522 exec_list_make_empty(&perf_ctx->free_sample_buffers);
1523 }
1524
1525 /******************************************************************************/
1526
1527 /**
1528 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
1529 * pipeline statistics for the performance query object.
1530 */
1531 static void
1532 snapshot_statistics_registers(struct gen_perf_context *ctx,
1533 struct gen_perf_query_object *obj,
1534 uint32_t offset_in_bytes)
1535 {
1536 struct gen_perf_config *perf = ctx->perf;
1537 const struct gen_perf_query_info *query = obj->queryinfo;
1538 const int n_counters = query->n_counters;
1539
1540 for (int i = 0; i < n_counters; i++) {
1541 const struct gen_perf_query_counter *counter = &query->counters[i];
1542
1543 assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);
1544
1545 perf->vtbl.store_register_mem(ctx->ctx, obj->pipeline_stats.bo,
1546 counter->pipeline_stat.reg, 8,
1547 offset_in_bytes + i * sizeof(uint64_t));
1548 }
1549 }
1550
1551 static void
1552 snapshot_freq_register(struct gen_perf_context *ctx,
1553 struct gen_perf_query_object *query,
1554 uint32_t bo_offset)
1555 {
1556 struct gen_perf_config *perf = ctx->perf;
1557 const struct gen_device_info *devinfo = ctx->devinfo;
1558
1559 if (devinfo->gen == 8 && !devinfo->is_cherryview)
1560 perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo, GEN7_RPSTAT1, 4, bo_offset);
1561 else if (devinfo->gen >= 9)
1562 perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo, GEN9_RPSTAT0, 4, bo_offset);
1563 }
1564
1565 static void
1566 gen_perf_close(struct gen_perf_context *perfquery,
1567 const struct gen_perf_query_info *query)
1568 {
1569 if (perfquery->oa_stream_fd != -1) {
1570 close(perfquery->oa_stream_fd);
1571 perfquery->oa_stream_fd = -1;
1572 }
1573 if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
1574 struct gen_perf_query_info *raw_query =
1575 (struct gen_perf_query_info *) query;
1576 raw_query->oa_metrics_set_id = 0;
1577 }
1578 }
1579
1580 static bool
1581 gen_perf_open(struct gen_perf_context *perf_ctx,
1582 int metrics_set_id,
1583 int report_format,
1584 int period_exponent,
1585 int drm_fd,
1586 uint32_t ctx_id)
1587 {
1588 uint64_t properties[] = {
1589 /* Single context sampling */
1590 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
1591
1592 /* Include OA reports in samples */
1593 DRM_I915_PERF_PROP_SAMPLE_OA, true,
1594
1595 /* OA unit configuration */
1596 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
1597 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
1598 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
1599 };
1600 struct drm_i915_perf_open_param param = {
1601 .flags = I915_PERF_FLAG_FD_CLOEXEC |
1602 I915_PERF_FLAG_FD_NONBLOCK |
1603 I915_PERF_FLAG_DISABLED,
1604 .num_properties = ARRAY_SIZE(properties) / 2,
1605 .properties_ptr = (uintptr_t) properties,
1606 };
1607 int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
1608 if (fd == -1) {
1609 DBG("Error opening gen perf OA stream: %m\n");
1610 return false;
1611 }
1612
1613 perf_ctx->oa_stream_fd = fd;
1614
1615 perf_ctx->current_oa_metrics_set_id = metrics_set_id;
1616 perf_ctx->current_oa_format = report_format;
1617
1618 return true;
1619 }
1620
1621 static bool
1622 inc_n_users(struct gen_perf_context *perf_ctx)
1623 {
1624 if (perf_ctx->n_oa_users == 0 &&
1625 gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
1626 {
1627 return false;
1628 }
1629 ++perf_ctx->n_oa_users;
1630
1631 return true;
1632 }
1633
1634 static void
1635 dec_n_users(struct gen_perf_context *perf_ctx)
1636 {
1637 /* Disabling the i915 perf stream will effectively disable the OA
1638 * counters. Note it's important to be sure there are no outstanding
1639 * MI_RPC commands at this point since they could stall the CS
1640 * indefinitely once OACONTROL is disabled.
1641 */
1642 --perf_ctx->n_oa_users;
1643 if (perf_ctx->n_oa_users == 0 &&
1644 gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
1645 {
1646 DBG("WARNING: Error disabling gen perf stream: %m\n");
1647 }
1648 }
1649
1650 void
1651 gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
1652 const struct gen_device_info *devinfo,
1653 int drm_fd)
1654 {
1655 load_pipeline_statistic_metrics(perf_cfg, devinfo);
1656 register_mdapi_statistic_query(perf_cfg, devinfo);
1657 if (load_oa_metrics(perf_cfg, drm_fd, devinfo))
1658 register_mdapi_oa_query(devinfo, perf_cfg);
1659 }
1660
1661 void
1662 gen_perf_init_context(struct gen_perf_context *perf_ctx,
1663 struct gen_perf_config *perf_cfg,
1664 void * ctx, /* driver context (eg, brw_context) */
1665 void * bufmgr, /* eg brw_bufmgr */
1666 const struct gen_device_info *devinfo,
1667 uint32_t hw_ctx,
1668 int drm_fd)
1669 {
1670 perf_ctx->perf = perf_cfg;
1671 perf_ctx->ctx = ctx;
1672 perf_ctx->bufmgr = bufmgr;
1673 perf_ctx->drm_fd = drm_fd;
1674 perf_ctx->hw_ctx = hw_ctx;
1675 perf_ctx->devinfo = devinfo;
1676
1677 perf_ctx->unaccumulated =
1678 ralloc_array(ctx, struct gen_perf_query_object *, 2);
1679 perf_ctx->unaccumulated_elements = 0;
1680 perf_ctx->unaccumulated_array_size = 2;
1681
1682 exec_list_make_empty(&perf_ctx->sample_buffers);
1683 exec_list_make_empty(&perf_ctx->free_sample_buffers);
1684
1685 /* It's convenient to guarantee that this linked list of sample
1686     * buffers is never empty, so we add an empty head; that way, when we
1687 * Begin an OA query we can always take a reference on a buffer
1688 * in this list.
1689 */
1690 struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
1691 exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);
1692
1693 perf_ctx->oa_stream_fd = -1;
1694 perf_ctx->next_query_start_report_id = 1000;
1695 }
1696
1697 /**
1698 * Add a query to the global list of "unaccumulated queries."
1699 *
1700 * Queries are tracked here until all the associated OA reports have
1701 * been accumulated via accumulate_oa_reports() after the end
1702 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
1703 */
1704 static void
1705 add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
1706 struct gen_perf_query_object *obj)
1707 {
1708 if (perf_ctx->unaccumulated_elements >=
1709 perf_ctx->unaccumulated_array_size)
1710 {
1711 perf_ctx->unaccumulated_array_size *= 1.5;
1712 perf_ctx->unaccumulated =
1713 reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
1714 struct gen_perf_query_object *,
1715 perf_ctx->unaccumulated_array_size);
1716 }
1717
1718 perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
1719 }
1720
1721 bool
1722 gen_perf_begin_query(struct gen_perf_context *perf_ctx,
1723 struct gen_perf_query_object *query)
1724 {
1725 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1726 const struct gen_perf_query_info *queryinfo = query->queryinfo;
1727
1728 /* XXX: We have to consider that the command parser unit that parses batch
1729 * buffer commands and is used to capture begin/end counter snapshots isn't
1730 * implicitly synchronized with what's currently running across other GPU
1731 * units (such as the EUs running shaders) that the performance counters are
1732 * associated with.
1733 *
1734 * The intention of performance queries is to measure the work associated
1735 * with commands between the begin/end delimiters and so for that to be the
1736 * case we need to explicitly synchronize the parsing of commands to capture
1737 * Begin/End counter snapshots with what's running across other parts of the
1738 * GPU.
1739 *
1740 * When the command parser reaches a Begin marker it effectively needs to
1741 * drain everything currently running on the GPU until the hardware is idle
1742 * before capturing the first snapshot of counters - otherwise the results
1743 * would also be measuring the effects of earlier commands.
1744 *
1745 * When the command parser reaches an End marker it needs to stall until
1746 * everything currently running on the GPU has finished before capturing the
1747 * end snapshot - otherwise the results won't be a complete representation
1748 * of the work.
1749 *
1750     * To achieve this, we stall the pipeline at the pixel scoreboard
1751     * (preventing any additional work from being processed by the pipeline
1752     * until all pixels of the previous draw have completed).
1753 *
1754 * N.B. The final results are based on deltas of counters between (inside)
1755 * Begin/End markers so even though the total wall clock time of the
1756 * workload is stretched by larger pipeline bubbles the bubbles themselves
1757 * are generally invisible to the query results. Whether that's a good or a
1758 * bad thing depends on the use case. For a lower real-time impact while
1759 * capturing metrics then periodic sampling may be a better choice than
1760     * capturing metrics, periodic sampling may be a better choice than
1761 *
1762 *
1763 * This is our Begin synchronization point to drain current work on the
1764 * GPU before we capture our first counter snapshot...
1765 */
1766 perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
1767
1768 switch (queryinfo->kind) {
1769 case GEN_PERF_QUERY_TYPE_OA:
1770 case GEN_PERF_QUERY_TYPE_RAW: {
1771
1772 /* Opening an i915 perf stream implies exclusive access to the OA unit
1773 * which will generate counter reports for a specific counter set with a
1774 * specific layout/format so we can't begin any OA based queries that
1775 * require a different counter set or format unless we get an opportunity
1776 * to close the stream and open a new one...
1777 */
1778 uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);
1779
1780 if (perf_ctx->oa_stream_fd != -1 &&
1781 perf_ctx->current_oa_metrics_set_id != metric_id) {
1782
1783 if (perf_ctx->n_oa_users != 0) {
1784             DBG("WARNING: Begin failed already using perf config=%"PRIu64"/%"PRIu64"\n",
1785                 perf_ctx->current_oa_metrics_set_id, metric_id);
1786 return false;
1787 } else
1788 gen_perf_close(perf_ctx, queryinfo);
1789 }
1790
1791 /* If the OA counters aren't already on, enable them. */
1792 if (perf_ctx->oa_stream_fd == -1) {
1793 const struct gen_device_info *devinfo = perf_ctx->devinfo;
1794
1795 /* The period_exponent gives a sampling period as follows:
1796 * sample_period = timestamp_period * 2^(period_exponent + 1)
1797 *
1798          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1799 * ~83ns (GEN8/9).
1800 *
1801 * The counter overflow period is derived from the EuActive counter
1802 * which reads a counter that increments by the number of clock
1803 * cycles multiplied by the number of EUs. It can be calculated as:
1804 *
1805 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1806 *
1807 * (E.g. 40 EUs @ 1GHz = ~53ms)
1808 *
1809          * We select a sampling period shorter than that overflow period so
1810          * that a counter cannot overflow more than once between samples;
1811          * otherwise we could lose information. (See the sketch after this function.)
1812          */
1813
1814 int a_counter_in_bits = 32;
1815 if (devinfo->gen >= 8)
1816 a_counter_in_bits = 40;
1817
1818 uint64_t overflow_period = pow(2, a_counter_in_bits) / (perf_cfg->sys_vars.n_eus *
1819 /* drop 1GHz freq to have units in nanoseconds */
1820 2);
1821
1822 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1823 overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);
1824
1825 int period_exponent = 0;
1826 uint64_t prev_sample_period, next_sample_period;
1827 for (int e = 0; e < 30; e++) {
1828 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1829 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1830
1831             /* Take the previous sampling period, i.e. the largest one that
1832              * is still lower than the overflow period.
1833              */
1834 if (prev_sample_period < overflow_period &&
1835 next_sample_period > overflow_period)
1836 period_exponent = e + 1;
1837 }
1838
1839 if (period_exponent == 0) {
1840             DBG("WARNING: unable to find a sampling exponent\n");
1841 return false;
1842 }
1843
1844 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1845 prev_sample_period / 1000000ul);
1846
1847 if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
1848 period_exponent, perf_ctx->drm_fd,
1849 perf_ctx->hw_ctx))
1850 return false;
1851 } else {
1852 assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
1853 perf_ctx->current_oa_format == queryinfo->oa_format);
1854 }
1855
1856 if (!inc_n_users(perf_ctx)) {
1857 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1858 return false;
1859 }
1860
1861 if (query->oa.bo) {
1862 perf_cfg->vtbl.bo_unreference(query->oa.bo);
1863 query->oa.bo = NULL;
1864 }
1865
1866 query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
1867 "perf. query OA MI_RPC bo",
1868 MI_RPC_BO_SIZE);
1869 #ifdef DEBUG
1870 /* Pre-filling the BO helps debug whether writes landed. */
1871 void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
1872 memset(map, 0x80, MI_RPC_BO_SIZE);
1873 perf_cfg->vtbl.bo_unmap(query->oa.bo);
1874 #endif
1875
1876 query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
1877 perf_ctx->next_query_start_report_id += 2;
1878
1879 /* Take a starting OA counter snapshot. */
1880 perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
1881 query->oa.begin_report_id);
1882 snapshot_freq_register(perf_ctx, query, MI_FREQ_START_OFFSET_BYTES);
1883
1884 ++perf_ctx->n_active_oa_queries;
1885
1886 /* No already-buffered samples can possibly be associated with this query
1887 * so create a marker within the list of sample buffers enabling us to
1888 * easily ignore earlier samples when processing this query after
1889 * completion.
1890 */
1891 assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
1892 query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);
1893
1894 struct oa_sample_buf *buf =
1895 exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
1896
1897 /* This reference will ensure that future/following sample
1898 * buffers (that may relate to this query) can't be freed until
1899        * this refcount drops to zero.
1900 */
1901 buf->refcount++;
1902
1903 gen_perf_query_result_clear(&query->oa.result);
1904 query->oa.results_accumulated = false;
1905
1906 add_to_unaccumulated_query_list(perf_ctx, query);
1907 break;
1908 }
1909
1910 case GEN_PERF_QUERY_TYPE_PIPELINE:
1911 if (query->pipeline_stats.bo) {
1912 perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
1913 query->pipeline_stats.bo = NULL;
1914 }
1915
1916 query->pipeline_stats.bo =
1917 perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
1918 "perf. query pipeline stats bo",
1919 STATS_BO_SIZE);
1920
1921 /* Take starting snapshots. */
1922 snapshot_statistics_registers(perf_ctx, query, 0);
1923
1924 ++perf_ctx->n_active_pipeline_stats_queries;
1925 break;
1926
1927 default:
1928 unreachable("Unknown query type");
1929 break;
1930 }
1931
1932 return true;
1933 }
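
/* Illustrative sketch (not compiled): the sampling-exponent search from
 * gen_perf_begin_query() above, isolated for clarity. The arguments are the
 * same quantities computed above (devinfo->timestamp_frequency and the
 * A-counter overflow period in nanoseconds); the loop records e + 1 for the
 * iteration whose neighbouring candidate periods straddle the overflow
 * period, exactly as the driver loop does. E.g. with an 80ns timestamp
 * period (HSW) and a ~53.7ms overflow period (32-bit A counters, 40 EUs
 * @ 1GHz) it settles on exponent 19.
 */
#if 0
static int
example_pick_oa_exponent(uint64_t timestamp_frequency_hz,
                         uint64_t overflow_period_ns)
{
   int period_exponent = 0;

   for (int e = 0; e < 30; e++) {
      uint64_t prev_ns = 1000000000ull * (1ull << (e + 1)) / timestamp_frequency_hz;
      uint64_t next_ns = 1000000000ull * (1ull << (e + 2)) / timestamp_frequency_hz;

      /* Keep the candidate whose neighbouring periods straddle the
       * overflow period.
       */
      if (prev_ns < overflow_period_ns && next_ns > overflow_period_ns)
         period_exponent = e + 1;
   }

   return period_exponent;
}
#endif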
1934
1935 void
1936 gen_perf_end_query(struct gen_perf_context *perf_ctx,
1937 struct gen_perf_query_object *query)
1938 {
1939 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1940
1941 /* Ensure that the work associated with the queried commands will have
1942 * finished before taking our query end counter readings.
1943 *
1944 * For more details see comment in brw_begin_perf_query for
1945 * corresponding flush.
1946 */
1947 perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
1948
1949 switch (query->queryinfo->kind) {
1950 case GEN_PERF_QUERY_TYPE_OA:
1951 case GEN_PERF_QUERY_TYPE_RAW:
1952
1953 /* NB: It's possible that the query will have already been marked
1954 * as 'accumulated' if an error was seen while reading samples
1955 * from perf. In this case we mustn't try and emit a closing
1956 * MI_RPC command in case the OA unit has already been disabled
1957 */
1958 if (!query->oa.results_accumulated) {
1959 /* Take an ending OA counter snapshot. */
1960 snapshot_freq_register(perf_ctx, query, MI_FREQ_END_OFFSET_BYTES);
1961 perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
1962 MI_RPC_BO_END_OFFSET_BYTES,
1963 query->oa.begin_report_id + 1);
1964 }
1965
1966 --perf_ctx->n_active_oa_queries;
1967
1968 /* NB: even though the query has now ended, it can't be accumulated
1969 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1970 * to query->oa.bo
1971 */
1972 break;
1973
1974 case GEN_PERF_QUERY_TYPE_PIPELINE:
1975 snapshot_statistics_registers(perf_ctx, query,
1976 STATS_BO_END_OFFSET_BYTES);
1977 --perf_ctx->n_active_pipeline_stats_queries;
1978 break;
1979
1980 default:
1981 unreachable("Unknown query type");
1982 break;
1983 }
1984 }
1985
1986 enum OaReadStatus {
1987 OA_READ_STATUS_ERROR,
1988 OA_READ_STATUS_UNFINISHED,
1989 OA_READ_STATUS_FINISHED,
1990 };
1991
1992 static enum OaReadStatus
1993 read_oa_samples_until(struct gen_perf_context *perf_ctx,
1994 uint32_t start_timestamp,
1995 uint32_t end_timestamp)
1996 {
1997 struct exec_node *tail_node =
1998 exec_list_get_tail(&perf_ctx->sample_buffers);
1999 struct oa_sample_buf *tail_buf =
2000 exec_node_data(struct oa_sample_buf, tail_node, link);
2001 uint32_t last_timestamp =
2002 tail_buf->len == 0 ? start_timestamp : tail_buf->last_timestamp;
2003
2004 while (1) {
2005 struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
2006 uint32_t offset;
2007 int len;
2008
2009 while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
2010 sizeof(buf->buf))) < 0 && errno == EINTR)
2011 ;
2012
2013 if (len <= 0) {
2014 exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
2015
2016 if (len < 0) {
2017 if (errno == EAGAIN) {
2018 return ((last_timestamp - start_timestamp) < INT32_MAX &&
2019 (last_timestamp - start_timestamp) >=
2020 (end_timestamp - start_timestamp)) ?
2021 OA_READ_STATUS_FINISHED :
2022 OA_READ_STATUS_UNFINISHED;
2023 } else {
2024 DBG("Error reading i915 perf samples: %m\n");
2025 }
2026 } else
2027 DBG("Spurious EOF reading i915 perf samples\n");
2028
2029 return OA_READ_STATUS_ERROR;
2030 }
2031
2032 buf->len = len;
2033 exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);
2034
2035 /* Go through the reports and update the last timestamp. */
2036 offset = 0;
2037 while (offset < buf->len) {
2038 const struct drm_i915_perf_record_header *header =
2039 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
2040 uint32_t *report = (uint32_t *) (header + 1);
2041
2042 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
2043 last_timestamp = report[1];
2044
2045 offset += header->size;
2046 }
2047
2048 buf->last_timestamp = last_timestamp;
2049 }
2050
2051 unreachable("not reached");
2052 return OA_READ_STATUS_ERROR;
2053 }
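
/* Illustrative sketch (not compiled): the record-walking pattern used in
 * read_oa_samples_until() above. A read() on the stream fd returns a
 * sequence of variable-size records, each starting with a
 * drm_i915_perf_record_header whose ->size covers the header plus its
 * payload; for DRM_I915_PERF_RECORD_SAMPLE records requested with
 * DRM_I915_PERF_PROP_SAMPLE_OA the payload is a raw OA report whose second
 * dword is the GPU timestamp.
 */
#if 0
static uint32_t
example_last_sample_timestamp(const uint8_t *buf, size_t len,
                              uint32_t fallback)
{
   uint32_t last_timestamp = fallback;
   size_t offset = 0;

   while (offset < len) {
      const struct drm_i915_perf_record_header *header =
         (const struct drm_i915_perf_record_header *) (buf + offset);
      const uint32_t *report = (const uint32_t *) (header + 1);

      if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
         last_timestamp = report[1];

      offset += header->size;
   }

   return last_timestamp;
}
#endif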
2054
2055 /**
2056 * Try to read all the reports until either the delimiting timestamp
2057 * or an error arises.
2058 */
2059 static bool
2060 read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
2061 struct gen_perf_query_object *query,
2062 void *current_batch)
2063 {
2064 uint32_t *start;
2065 uint32_t *last;
2066 uint32_t *end;
2067 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2068
2069 /* We need the MI_REPORT_PERF_COUNT to land before we can start
2070     * accumulating. */
2071 assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
2072 !perf_cfg->vtbl.bo_busy(query->oa.bo));
2073
2074 /* Map the BO once here and let accumulate_oa_reports() unmap
2075 * it. */
2076 if (query->oa.map == NULL)
2077 query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);
2078
2079 start = last = query->oa.map;
2080 end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2081
2082 if (start[0] != query->oa.begin_report_id) {
2083 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
2084 return true;
2085 }
2086 if (end[0] != (query->oa.begin_report_id + 1)) {
2087 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
2088 return true;
2089 }
2090
2091 /* Read the reports until the end timestamp. */
2092 switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
2093 case OA_READ_STATUS_ERROR:
2094 /* Fallthrough and let accumulate_oa_reports() deal with the
2095 * error. */
2096 case OA_READ_STATUS_FINISHED:
2097 return true;
2098 case OA_READ_STATUS_UNFINISHED:
2099 return false;
2100 }
2101
2102 unreachable("invalid read status");
2103 return false;
2104 }
2105
2106 void
2107 gen_perf_wait_query(struct gen_perf_context *perf_ctx,
2108 struct gen_perf_query_object *query,
2109 void *current_batch)
2110 {
2111 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2112 struct brw_bo *bo = NULL;
2113
2114 switch (query->queryinfo->kind) {
2115 case GEN_PERF_QUERY_TYPE_OA:
2116 case GEN_PERF_QUERY_TYPE_RAW:
2117 bo = query->oa.bo;
2118 break;
2119
2120 case GEN_PERF_QUERY_TYPE_PIPELINE:
2121 bo = query->pipeline_stats.bo;
2122 break;
2123
2124 default:
2125 unreachable("Unknown query type");
2126 break;
2127 }
2128
2129 if (bo == NULL)
2130 return;
2131
2132 /* If the current batch references our results bo then we need to
2133 * flush first...
2134 */
2135 if (perf_cfg->vtbl.batch_references(current_batch, bo))
2136 perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
2137
2138 perf_cfg->vtbl.bo_wait_rendering(bo);
2139
2140 /* Due to a race condition between the OA unit signaling report
2141 * availability and the report actually being written into memory,
2142 * we need to wait for all the reports to come in before we can
2143 * read them.
2144 */
2145 if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
2146 query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
2147 while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
2148 ;
2149 }
2150 }
2151
2152 bool
2153 gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
2154 struct gen_perf_query_object *query,
2155 void *current_batch)
2156 {
2157 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2158
2159 switch (query->queryinfo->kind) {
2160 case GEN_PERF_QUERY_TYPE_OA:
2161 case GEN_PERF_QUERY_TYPE_RAW:
2162 return (query->oa.results_accumulated ||
2163 (query->oa.bo &&
2164 !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
2165 !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
2166 read_oa_samples_for_query(perf_ctx, query, current_batch)));
2167 case GEN_PERF_QUERY_TYPE_PIPELINE:
2168 return (query->pipeline_stats.bo &&
2169 !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
2170 !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));
2171
2172 default:
2173 unreachable("Unknown query type");
2174 break;
2175 }
2176
2177 return false;
2178 }
2179
2180 /**
2181 * Remove a query from the global list of unaccumulated queries once
2182  * we have successfully accumulated the OA reports associated with the
2183 * query in accumulate_oa_reports() or when discarding unwanted query
2184 * results.
2185 */
2186 static void
2187 drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
2188 struct gen_perf_query_object *query)
2189 {
2190 for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
2191 if (perf_ctx->unaccumulated[i] == query) {
2192 int last_elt = --perf_ctx->unaccumulated_elements;
2193
2194 if (i == last_elt)
2195 perf_ctx->unaccumulated[i] = NULL;
2196 else {
2197 perf_ctx->unaccumulated[i] =
2198 perf_ctx->unaccumulated[last_elt];
2199 }
2200
2201 break;
2202 }
2203 }
2204
2205 /* Drop our samples_head reference so that associated periodic
2206 * sample data buffers can potentially be reaped if they aren't
2207 * referenced by any other queries...
2208 */
2209
2210 struct oa_sample_buf *buf =
2211 exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
2212
2213 assert(buf->refcount > 0);
2214 buf->refcount--;
2215
2216 query->oa.samples_head = NULL;
2217
2218 reap_old_sample_buffers(perf_ctx);
2219 }
2220
2221 /* In general, if we see anything spurious while accumulating results,
2222  * we don't try to continue accumulating the current query hoping for
2223  * the best; we scrap everything outstanding and then hope for the best
2224  * with new queries.
2225 */
2226 static void
2227 discard_all_queries(struct gen_perf_context *perf_ctx)
2228 {
2229 while (perf_ctx->unaccumulated_elements) {
2230 struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];
2231
2232 query->oa.results_accumulated = true;
2233 drop_from_unaccumulated_query_list(perf_ctx, query);
2234
2235 dec_n_users(perf_ctx);
2236 }
2237 }
2238
2239 /* Looks for the validity bit of context ID (dword 2) of an OA report. */
2240 static bool
2241 oa_report_ctx_id_valid(const struct gen_device_info *devinfo,
2242 const uint32_t *report)
2243 {
2244 assert(devinfo->gen >= 8);
2245 if (devinfo->gen == 8)
2246 return (report[0] & (1 << 25)) != 0;
2247 return (report[0] & (1 << 16)) != 0;
2248 }
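
/* Illustrative sketch (not compiled): the OA report dwords this file relies
 * on when filtering and accumulating, gathered in one place. Dword 0
 * carries the report id/flags (including the context-ID-valid bit tested
 * above), dword 1 the GPU timestamp and dword 2 the context ID; the
 * following dwords hold the counter snapshot that accumulation takes
 * deltas over.
 */
#if 0
struct example_oa_report_prefix {
   uint32_t report_id; /* flags, incl. the gen-dependent ctx-ID-valid bit */
   uint32_t timestamp; /* compared against the MI_RPC begin/end timestamps */
   uint32_t ctx_id;    /* compared against start[2] on Gen8+ */
};
#endif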
2249
2250 /**
2251 * Accumulate raw OA counter values based on deltas between pairs of
2252 * OA reports.
2253 *
2254 * Accumulation starts from the first report captured via
2255 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
2256 * last MI_RPC report requested by brw_end_perf_query(). Between these
2257 * two reports there may also some number of periodically sampled OA
2258  * two reports there may also be some number of periodically sampled OA
2259 * duration of the query.
2260 *
2261  * These periodic snapshots help us handle counter overflow correctly
2262  * by being frequent enough that no counter can overflow more than once
2263  * between snapshots. For Gen8+ the i915 perf snapshots also provide
2264  * the extra context-switch reports that let us subtract out the
2265  * progress of counters associated with other contexts running on the
2266  * system. (A sketch of the per-counter delta accumulation follows this
2267  * function.) */
2268 static void
2269 accumulate_oa_reports(struct gen_perf_context *perf_ctx,
2270 struct gen_perf_query_object *query)
2271 {
2272 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2273 uint32_t *start;
2274 uint32_t *last;
2275 uint32_t *end;
2276 struct exec_node *first_samples_node;
2277 bool last_report_ctx_match = true;
2278 int out_duration = 0;
2279
2280 assert(query->oa.map != NULL);
2281
2282 start = last = query->oa.map;
2283 end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2284
2285 if (start[0] != query->oa.begin_report_id) {
2286 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
2287 goto error;
2288 }
2289 if (end[0] != (query->oa.begin_report_id + 1)) {
2290 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
2291 goto error;
2292 }
2293
2294 /* On Gen12+ OA reports are sourced from per context counters, so we don't
2295 * ever have to look at the global OA buffer. Yey \o/
2296 */
2297 if (perf_ctx->devinfo->gen >= 12) {
2298 last = start;
2299 goto end;
2300 }
2301
2302 /* See if we have any periodic reports to accumulate too... */
2303
2304 /* N.B. The oa.samples_head was set when the query began and
2305 * pointed to the tail of the perf_ctx->sample_buffers list at
2306 * the time the query started. Since the buffer existed before the
2307  * first MI_REPORT_PERF_COUNT command was emitted, we know
2308 * that no data in this particular node's buffer can possibly be
2309 * associated with the query - so skip ahead one...
2310 */
2311 first_samples_node = query->oa.samples_head->next;
2312
2313 foreach_list_typed_from(struct oa_sample_buf, buf, link,
2314 &perf_ctx->sample_buffers,
2315 first_samples_node)
2316 {
2317 int offset = 0;
2318
2319 while (offset < buf->len) {
2320 const struct drm_i915_perf_record_header *header =
2321 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
2322
2323 assert(header->size != 0);
2324 assert(header->size <= buf->len);
2325
2326 offset += header->size;
2327
2328 switch (header->type) {
2329 case DRM_I915_PERF_RECORD_SAMPLE: {
2330 uint32_t *report = (uint32_t *)(header + 1);
2331 bool report_ctx_match = true;
2332 bool add = true;
2333
2334 /* Ignore reports that come before the start marker.
2335 * (Note: takes care to allow overflow of 32bit timestamps)
2336 */
2337 if (gen_device_info_timebase_scale(devinfo,
2338 report[1] - start[1]) > 5000000000) {
2339 continue;
2340 }
2341
2342 /* Ignore reports that come after the end marker.
2343 * (Note: takes care to allow overflow of 32bit timestamps)
2344 */
2345 if (gen_device_info_timebase_scale(devinfo,
2346 report[1] - end[1]) <= 5000000000) {
2347 goto end;
2348 }
2349
2350 /* For Gen8+ since the counters continue while other
2351 * contexts are running we need to discount any unrelated
2352 * deltas. The hardware automatically generates a report
2353 * on context switch which gives us a new reference point
2354              * to continue adding deltas from.
2355 *
2356 * For Haswell we can rely on the HW to stop the progress
2357              * of OA counters while any other context is active.
2358 */
2359 if (devinfo->gen >= 8) {
2360 /* Consider that the current report matches our context only if
2361 * the report says the report ID is valid.
2362 */
2363 report_ctx_match = oa_report_ctx_id_valid(devinfo, report) &&
2364 report[2] == start[2];
2365 if (report_ctx_match)
2366 out_duration = 0;
2367 else
2368 out_duration++;
2369
2370 /* Only add the delta between <last, report> if the last report
2371 * was clearly identified as our context, or if we have at most
2372 * 1 report without a matching ID.
2373 *
2374 * The OA unit will sometimes label reports with an invalid
2375 * context ID when i915 rewrites the execlist submit register
2376 * with the same context as the one currently running. This
2377 * happens when i915 wants to notify the HW of ringbuffer tail
2378 * register update. We have to consider this report as part of
2379 * our context as the 3d pipeline behind the OACS unit is still
2380 * processing the operations started at the previous execlist
2381 * submission.
2382 */
2383 add = last_report_ctx_match && out_duration < 2;
2384 }
2385
2386 if (add) {
2387 gen_perf_query_result_accumulate(&query->oa.result,
2388 query->queryinfo,
2389 last, report);
2390 } else {
2391 /* We're not adding the delta because we've identified it's not
2392 * for the context we filter for. We can consider that the
2393 * query was split.
2394 */
2395 query->oa.result.query_disjoint = true;
2396 }
2397
2398 last = report;
2399 last_report_ctx_match = report_ctx_match;
2400
2401 break;
2402 }
2403
2404 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
2405 DBG("i915 perf: OA error: all reports lost\n");
2406 goto error;
2407 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
2408 DBG("i915 perf: OA report lost\n");
2409 break;
2410 }
2411 }
2412 }
2413
2414 end:
2415
2416 gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
2417 last, end);
2418
2419 query->oa.results_accumulated = true;
2420 drop_from_unaccumulated_query_list(perf_ctx, query);
2421 dec_n_users(perf_ctx);
2422
2423 return;
2424
2425 error:
2426
2427 discard_all_queries(perf_ctx);
2428 }
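
/* Illustrative sketch (not compiled): the per-counter delta accumulation
 * referred to in the comment above, shown for a single 32-bit counter.
 * gen_perf_query_result_accumulate() does the equivalent for every counter
 * between each <last, report> pair (A counters are 40 bits wide on Gen8+,
 * as noted earlier). Unsigned modular subtraction makes a single wraparound
 * between two snapshots come out right, which is why the periodic samples
 * only need to be frequent enough to avoid more than one overflow per
 * counter.
 */
#if 0
static void
example_accumulate_counter32(uint64_t *accumulator,
                             const uint32_t *last_report,
                             const uint32_t *current_report,
                             int counter_dword)
{
   /* Correct even if the 32-bit counter wrapped once between snapshots. */
   uint32_t delta = current_report[counter_dword] - last_report[counter_dword];

   *accumulator += delta;
}
#endif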
2429
2430 void
2431 gen_perf_delete_query(struct gen_perf_context *perf_ctx,
2432 struct gen_perf_query_object *query)
2433 {
2434 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2435
2436 /* We can assume that the frontend waits for a query to complete
2437 * before ever calling into here, so we don't have to worry about
2438 * deleting an in-flight query object.
2439 */
2440 switch (query->queryinfo->kind) {
2441 case GEN_PERF_QUERY_TYPE_OA:
2442 case GEN_PERF_QUERY_TYPE_RAW:
2443 if (query->oa.bo) {
2444 if (!query->oa.results_accumulated) {
2445 drop_from_unaccumulated_query_list(perf_ctx, query);
2446 dec_n_users(perf_ctx);
2447 }
2448
2449 perf_cfg->vtbl.bo_unreference(query->oa.bo);
2450 query->oa.bo = NULL;
2451 }
2452
2453 query->oa.results_accumulated = false;
2454 break;
2455
2456 case GEN_PERF_QUERY_TYPE_PIPELINE:
2457 if (query->pipeline_stats.bo) {
2458 perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
2459 query->pipeline_stats.bo = NULL;
2460 }
2461 break;
2462
2463 default:
2464 unreachable("Unknown query type");
2465 break;
2466 }
2467
2468    /* Once the last query instance is deleted, INTEL_performance_query is
2469     * presumably no longer in use, so it's a good time to free our cache
2470     * of sample buffers and close any current i915-perf stream.
2471 */
2472 if (--perf_ctx->n_query_instances == 0) {
2473 free_sample_bufs(perf_ctx);
2474 gen_perf_close(perf_ctx, query->queryinfo);
2475 }
2476
2477 free(query);
2478 }
2479
2480 #define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
2481
2482 static void
2483 read_gt_frequency(struct gen_perf_context *perf_ctx,
2484 struct gen_perf_query_object *obj)
2485 {
2486 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2487 uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
2488 end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
2489
2490 switch (devinfo->gen) {
2491 case 7:
2492 case 8:
2493 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
2494 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
2495 break;
2496 case 9:
2497 case 10:
2498 case 11:
2499 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
2500 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
2501 break;
2502 default:
2503 unreachable("unexpected gen");
2504 }
2505
2506 /* Put the numbers into Hz. */
2507 obj->oa.gt_frequency[0] *= 1000000ULL;
2508 obj->oa.gt_frequency[1] *= 1000000ULL;
2509 }
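
/* Illustrative sketch (not compiled): decoding a raw GEN9 RPSTAT0 snapshot
 * the way read_gt_frequency() does above. The CURR_GT_FREQ field counts in
 * 50/3 MHz units on Gen9-11 (plain 50 MHz units on Gen7-8), so the decoded
 * value is scaled to MHz and then to Hz.
 */
#if 0
static uint64_t
example_decode_gen9_gt_freq_hz(uint32_t rpstat0)
{
   uint64_t mhz = GET_FIELD(rpstat0, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;

   return mhz * 1000000ULL;
}
#endif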
2510
2511 static int
2512 get_oa_counter_data(struct gen_perf_context *perf_ctx,
2513 struct gen_perf_query_object *query,
2514 size_t data_size,
2515 uint8_t *data)
2516 {
2517 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2518 const struct gen_perf_query_info *queryinfo = query->queryinfo;
2519 int n_counters = queryinfo->n_counters;
2520 int written = 0;
2521
2522 for (int i = 0; i < n_counters; i++) {
2523 const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
2524 uint64_t *out_uint64;
2525 float *out_float;
2526 size_t counter_size = gen_perf_query_counter_get_size(counter);
2527
2528 if (counter_size) {
2529 switch (counter->data_type) {
2530 case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
2531 out_uint64 = (uint64_t *)(data + counter->offset);
2532 *out_uint64 =
2533 counter->oa_counter_read_uint64(perf_cfg, queryinfo,
2534 query->oa.result.accumulator);
2535 break;
2536 case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
2537 out_float = (float *)(data + counter->offset);
2538 *out_float =
2539 counter->oa_counter_read_float(perf_cfg, queryinfo,
2540 query->oa.result.accumulator);
2541 break;
2542 default:
2543 /* So far we aren't using uint32, double or bool32... */
2544 unreachable("unexpected counter data type");
2545 }
2546 written = counter->offset + counter_size;
2547 }
2548 }
2549
2550 return written;
2551 }
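
/* Illustrative sketch (not compiled): how a caller of
 * gen_perf_get_query_data() might read one UINT64 counter back out of the
 * returned blob, using the offset recorded in its gen_perf_query_counter
 * description (mirroring the write side above).
 */
#if 0
static uint64_t
example_read_uint64_counter(const uint8_t *data,
                            const struct gen_perf_query_counter *counter)
{
   assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);

   return *((const uint64_t *) (data + counter->offset));
}
#endif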
2552
2553 static int
2554 get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
2555 struct gen_perf_query_object *query,
2556 size_t data_size,
2557 uint8_t *data)
2558
2559 {
2560 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2561 const struct gen_perf_query_info *queryinfo = query->queryinfo;
2562 int n_counters = queryinfo->n_counters;
2563 uint8_t *p = data;
2564
2565 uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
2566 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
2567
2568 for (int i = 0; i < n_counters; i++) {
2569 const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
2570 uint64_t value = end[i] - start[i];
2571
2572 if (counter->pipeline_stat.numerator !=
2573 counter->pipeline_stat.denominator) {
2574 value *= counter->pipeline_stat.numerator;
2575 value /= counter->pipeline_stat.denominator;
2576 }
2577
2578 *((uint64_t *)p) = value;
2579 p += 8;
2580 }
2581
2582 perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);
2583
2584 return p - data;
2585 }
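
/* Illustrative sketch (not compiled): the numerator/denominator scaling
 * applied above, isolated. A pipeline-statistics counter whose description
 * carried, say, a hypothetical 1/4 ratio would have its raw register delta
 * divided by 4 before being returned to the application.
 */
#if 0
static uint64_t
example_scale_pipeline_stat(uint64_t raw_delta,
                            uint64_t numerator, uint64_t denominator)
{
   if (numerator != denominator) {
      raw_delta *= numerator;
      raw_delta /= denominator;
   }

   return raw_delta;
}
#endif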
2586
2587 void
2588 gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
2589 struct gen_perf_query_object *query,
2590 int data_size,
2591 unsigned *data,
2592 unsigned *bytes_written)
2593 {
2594 struct gen_perf_config *perf_cfg = perf_ctx->perf;
2595 int written = 0;
2596
2597 switch (query->queryinfo->kind) {
2598 case GEN_PERF_QUERY_TYPE_OA:
2599 case GEN_PERF_QUERY_TYPE_RAW:
2600 if (!query->oa.results_accumulated) {
2601 read_gt_frequency(perf_ctx, query);
2602 uint32_t *begin_report = query->oa.map;
2603 uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
2604 gen_perf_query_result_read_frequencies(&query->oa.result,
2605 perf_ctx->devinfo,
2606 begin_report,
2607 end_report);
2608 accumulate_oa_reports(perf_ctx, query);
2609 assert(query->oa.results_accumulated);
2610
2611 perf_cfg->vtbl.bo_unmap(query->oa.bo);
2612 query->oa.map = NULL;
2613 }
2614 if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
2615 written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
2616 } else {
2617 const struct gen_device_info *devinfo = perf_ctx->devinfo;
2618
2619 written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
2620 devinfo, &query->oa.result,
2621 query->oa.gt_frequency[0],
2622 query->oa.gt_frequency[1]);
2623 }
2624 break;
2625
2626 case GEN_PERF_QUERY_TYPE_PIPELINE:
2627 written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
2628 break;
2629
2630 default:
2631 unreachable("Unknown query type");
2632 break;
2633 }
2634
2635 if (bytes_written)
2636 *bytes_written = written;
2637 }
2638
2639 void
2640 gen_perf_dump_query_count(struct gen_perf_context *perf_ctx)
2641 {
2642 DBG("Queries: (Open queries = %d, OA users = %d)\n",
2643 perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
2644 }
2645
2646 void
2647 gen_perf_dump_query(struct gen_perf_context *ctx,
2648 struct gen_perf_query_object *obj,
2649 void *current_batch)
2650 {
2651 switch (obj->queryinfo->kind) {
2652 case GEN_PERF_QUERY_TYPE_OA:
2653 case GEN_PERF_QUERY_TYPE_RAW:
2654 DBG("BO: %-4s OA data: %-10s %-15s\n",
2655 obj->oa.bo ? "yes," : "no,",
2656 gen_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
2657 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
2658 break;
2659 case GEN_PERF_QUERY_TYPE_PIPELINE:
2660 DBG("BO: %-4s\n",
2661 obj->pipeline_stats.bo ? "yes" : "no");
2662 break;
2663 default:
2664 unreachable("Unknown query type");
2665 break;
2666 }
2667 }