/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <dirent.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

#include <drm-uapi/i915_drm.h>

#include "gen_perf.h"
#include "perf/gen_perf_metrics.h"

#include "dev/gen_debug.h"
#include "dev/gen_device_info.h"
#include "util/bitscan.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON

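/* Find the sysfs directory (/sys/dev/char/<maj>:<min>/device/drm/card<N>)
 * corresponding to the given DRM fd and store its path in
 * perf->sysfs_dev_dir.
 */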
static bool
get_sysfs_dev_dir(struct gen_perf *perf, int fd)
{
   struct stat sb;
   int min, maj;
   DIR *drmdir;
   struct dirent *drm_entry;
   int len;

   perf->sysfs_dev_dir[0] = '\0';

   if (fstat(fd, &sb)) {
      DBG("Failed to stat DRM fd\n");
      return false;
   }

   maj = major(sb.st_rdev);
   min = minor(sb.st_rdev);

   if (!S_ISCHR(sb.st_mode)) {
      DBG("DRM fd is not a character device as expected\n");
      return false;
   }

   len = snprintf(perf->sysfs_dev_dir,
                  sizeof(perf->sysfs_dev_dir),
                  "/sys/dev/char/%d:%d/device/drm", maj, min);
   if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
      DBG("Failed to concatenate sysfs path to drm device\n");
      return false;
   }

   drmdir = opendir(perf->sysfs_dev_dir);
   if (!drmdir) {
      DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
      return false;
   }

   while ((drm_entry = readdir(drmdir))) {
      if ((drm_entry->d_type == DT_DIR ||
           drm_entry->d_type == DT_LNK) &&
          strncmp(drm_entry->d_name, "card", 4) == 0)
      {
         len = snprintf(perf->sysfs_dev_dir,
                        sizeof(perf->sysfs_dev_dir),
                        "/sys/dev/char/%d:%d/device/drm/%s",
                        maj, min, drm_entry->d_name);
         closedir(drmdir);
         if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
            return false;
         else
            return true;
      }
   }

   closedir(drmdir);

   DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
       maj, min);

   return false;
}

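/* Read the contents of a small sysfs/procfs file and parse it as a
 * uint64_t.
 */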
static bool
read_file_uint64(const char *file, uint64_t *val)
{
   char buf[32];
   int fd, n;

   fd = open(file, O_RDONLY);
   if (fd < 0)
      return false;
   while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
          errno == EINTR);
   close(fd);
   if (n < 0)
      return false;

   buf[n] = '\0';
   *val = strtoull(buf, NULL, 0);

   return true;
}

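/* Read a uint64_t from a file relative to the cached sysfs DRM device
 * directory (e.g. gt_min_freq_mhz).
 */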
static bool
read_sysfs_drm_device_file_uint64(struct gen_perf *perf,
                                  const char *file,
                                  uint64_t *value)
{
   char buf[512];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate sys filename to read u64 from\n");
      return false;
   }

   return read_file_uint64(buf, value);
}

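/* Append a copy of the given query description to the perf queries,
 * bound to the metric set id the kernel reported for its configuration.
 */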
static void
register_oa_config(struct gen_perf *perf,
                   const struct gen_perf_query_info *query,
                   uint64_t config_id)
{
   struct gen_perf_query_info *registered_query =
      gen_perf_query_append_query_info(perf, 0);

   *registered_query = *query;
   registered_query->oa_metrics_set_id = config_id;
   DBG("metric set registered: id = %" PRIu64", guid = %s\n",
       registered_query->oa_metrics_set_id, query->guid);
}

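/* Walk the sysfs metrics/ directory and register every metric set that
 * is both advertised by the kernel and known to Mesa.
 */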
static void
enumerate_sysfs_metrics(struct gen_perf *perf)
{
   DIR *metricsdir = NULL;
   struct dirent *metric_entry;
   char buf[256];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate path to sysfs metrics/ directory\n");
      return;
   }

   metricsdir = opendir(buf);
   if (!metricsdir) {
      DBG("Failed to open %s: %m\n", buf);
      return;
   }

   while ((metric_entry = readdir(metricsdir))) {
      struct hash_entry *entry;

      if ((metric_entry->d_type != DT_DIR &&
           metric_entry->d_type != DT_LNK) ||
          metric_entry->d_name[0] == '.')
         continue;

      DBG("metric set: %s\n", metric_entry->d_name);
      entry = _mesa_hash_table_search(perf->oa_metrics_table,
                                      metric_entry->d_name);
      if (entry) {
         uint64_t id;

         len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
                        perf->sysfs_dev_dir, metric_entry->d_name);
         if (len < 0 || len >= sizeof(buf)) {
            DBG("Failed to concatenate path to sysfs metric id file\n");
            continue;
         }

         if (!read_file_uint64(buf, &id)) {
            DBG("Failed to read metric set id from %s: %m\n", buf);
            continue;
         }

         register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
      } else
         DBG("metric set not known by mesa (skipping)\n");
   }

   closedir(metricsdir);
}

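/* Detect whether the kernel supports dynamically loaded OA configs by
 * attempting to remove the immutable test config (id 1): a kernel that
 * implements DRM_IOCTL_I915_PERF_REMOVE_CONFIG rejects this with ENOENT.
 */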
static bool
kernel_has_dynamic_config_support(struct gen_perf *perf, int fd)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      struct gen_perf_query_info *query = entry->data;
      char config_path[280];
      uint64_t config_id;

      snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
               perf->sysfs_dev_dir, query->guid);

      /* Look for the test config, which we know we can't replace. */
      if (read_file_uint64(config_path, &config_id) && config_id == 1) {
         return perf->ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
                            &config_id) < 0 && errno == ENOENT;
      }
   }

   return false;
}

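/* Look up the kernel metric set id for an already-loaded OA config from
 * its GUID.
 */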
bool
gen_perf_load_metric_id(struct gen_perf *perf, const char *guid,
                        uint64_t *metric_id)
{
   char config_path[280];

   snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
            perf->sysfs_dev_dir, guid);

   /* Don't recreate already loaded configs. */
   return read_file_uint64(config_path, metric_id);
}

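/* Upload every metric set Mesa knows about to the kernel through
 * DRM_IOCTL_I915_PERF_ADD_CONFIG (skipping configs that are already
 * loaded) and register the resulting config ids.
 */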
static void
init_oa_configs(struct gen_perf *perf, int fd)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      const struct gen_perf_query_info *query = entry->data;
      struct drm_i915_perf_oa_config config;
      uint64_t config_id;
      int ret;

      if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
         DBG("metric set: %s (already loaded)\n", query->guid);
         register_oa_config(perf, query, config_id);
         continue;
      }

      memset(&config, 0, sizeof(config));

      memcpy(config.uuid, query->guid, sizeof(config.uuid));

      config.n_mux_regs = query->n_mux_regs;
      config.mux_regs_ptr = (uintptr_t) query->mux_regs;

      config.n_boolean_regs = query->n_b_counter_regs;
      config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;

      config.n_flex_regs = query->n_flex_regs;
      config.flex_regs_ptr = (uintptr_t) query->flex_regs;

      ret = perf->ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
      if (ret < 0) {
         DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
             query->name, query->guid, strerror(errno));
         continue;
      }

      register_oa_config(perf, query, ret);
      DBG("metric set: %s (added)\n", query->guid);
   }
}

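/* Derive the slice/subslice/EU counts and masks consumed by the metric
 * equations from the device topology.
 */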
static void
compute_topology_builtins(struct gen_perf *perf,
                          const struct gen_device_info *devinfo)
{
   perf->sys_vars.slice_mask = devinfo->slice_masks;
   perf->sys_vars.n_eu_slices = devinfo->num_slices;

   for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
      perf->sys_vars.n_eu_sub_slices +=
         __builtin_popcount(devinfo->subslice_masks[i]);
   }

   for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
      perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);

   perf->sys_vars.eu_threads_count =
      perf->sys_vars.n_eus * devinfo->num_thread_per_eu;

   /* The subslice mask builtin contains bits for all slices. Prior to Gen11
    * it had groups of 3 bits for each slice; on Gen11 it has 8 bits for each
    * slice.
    *
    * Ideally equations would be updated to have a slice/subslice query
    * function/operator.
    */
   perf->sys_vars.subslice_mask = 0;

   int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;

   for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
      for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
         if (gen_device_info_subslice_available(devinfo, s, ss))
            perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
      }
   }
}

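/* Populate the system variables (GT frequencies, timestamp frequency,
 * revision and topology) referenced by the metric equations.
 */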
static bool
init_oa_sys_vars(struct gen_perf *perf, const struct gen_device_info *devinfo)
{
   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
      return false;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
      return false;

   memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
   perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
   perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
   perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
   perf->sys_vars.revision = devinfo->revision;
   compute_topology_builtins(perf, devinfo);

   return true;
}

typedef void (*perf_register_oa_queries_t)(struct gen_perf *);

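/* Return the generated, per-platform function that registers the OA
 * queries for this device, or NULL if the platform has no OA support.
 */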
static perf_register_oa_queries_t
get_register_queries_function(const struct gen_device_info *devinfo)
{
   if (devinfo->is_haswell)
      return gen_oa_register_queries_hsw;
   if (devinfo->is_cherryview)
      return gen_oa_register_queries_chv;
   if (devinfo->is_broadwell)
      return gen_oa_register_queries_bdw;
   if (devinfo->is_broxton)
      return gen_oa_register_queries_bxt;
   if (devinfo->is_skylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_sklgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_sklgt3;
      if (devinfo->gt == 4)
         return gen_oa_register_queries_sklgt4;
   }
   if (devinfo->is_kabylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_kblgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_kblgt3;
   }
   if (devinfo->is_geminilake)
      return gen_oa_register_queries_glk;
   if (devinfo->is_coffeelake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_cflgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_cflgt3;
   }
   if (devinfo->is_cannonlake)
      return gen_oa_register_queries_cnl;

   return NULL;
}

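/* Check that the i915 perf interface is usable, gather system variables
 * and index all the OA metric sets Mesa knows for this device, then
 * either upload them to the kernel or match them against the sets
 * advertised in sysfs.
 */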
bool
gen_perf_load_oa_metrics(struct gen_perf *perf, int fd,
                         const struct gen_device_info *devinfo)
{
   perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
   bool i915_perf_oa_available = false;
   struct stat sb;

   /* The existence of this sysctl parameter implies the kernel supports
    * the i915 perf interface.
    */
   if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {

      /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
       * metrics unless running as root.
       */
      if (devinfo->is_haswell)
         i915_perf_oa_available = true;
      else {
         uint64_t paranoid = 1;

         read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);

         if (paranoid == 0 || geteuid() == 0)
            i915_perf_oa_available = true;
      }
   }

   if (!i915_perf_oa_available ||
       !oa_register ||
       !get_sysfs_dev_dir(perf, fd) ||
       !init_oa_sys_vars(perf, devinfo))
      return false;

   perf->oa_metrics_table =
      _mesa_hash_table_create(perf, _mesa_key_hash_string,
                              _mesa_key_string_equal);

   /* Index all the metric sets mesa knows about before looking to see what
    * the kernel is advertising.
    */
   oa_register(perf);

   if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
       kernel_has_dynamic_config_support(perf, fd))
      init_oa_configs(perf, fd);
   else
      enumerate_sysfs_metrics(perf);

   return true;
}

/* Accumulate 32bit OA counters */
static inline void
accumulate_uint32(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += (uint32_t)(*report1 - *report0);
}

/* Accumulate 40bit OA counters */
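/*
 * In the A32u40 report format, the low 32 bits of each of the 32 A
 * counters live in dwords 4..35 of the report, while the high 8 bits are
 * packed one byte per counter starting at dword 40.
 */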
static inline void
accumulate_uint40(int a_index,
                  const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
   const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
   uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
   uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
   uint64_t value0 = report0[a_index + 4] | high0;
   uint64_t value1 = report1[a_index + 4] | high1;
   uint64_t delta;

   if (value0 > value1)
      delta = (1ULL << 40) + value1 - value0;
   else
      delta = value1 - value0;

   *accumulator += delta;
}

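/* Decode the slice/unslice clock ratio snapshot embedded in the RPT_ID
 * dword of a Gen8+ OA report into frequencies in Hz.
 */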
static void
gen8_read_report_clock_ratios(const uint32_t *report,
                              uint64_t *slice_freq_hz,
                              uint64_t *unslice_freq_hz)
{
   /* The lower 16 bits of the RPT_ID field of the OA reports contain a
    * snapshot of the bits coming from the RP_FREQ_NORMAL register and are
    * divided this way:
    *
    * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
    * RPT_ID[10:9]:  RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
    * RPT_ID[8:0]:   RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
    *
    * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16.66MHz 1xclk)
    *
    * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16.66MHz 1xclk)
    */

   uint32_t unslice_freq = report[0] & 0x1ff;
   uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
   uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
   uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);

   *slice_freq_hz = slice_freq * 16666667ULL;
   *unslice_freq_hz = unslice_freq * 16666667ULL;
}

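/* Record the slice/unslice frequencies snapshotted at the start and end
 * of the query into the result (Gen8+ only).
 */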
void
gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                       const struct gen_device_info *devinfo,
                                       const uint32_t *start,
                                       const uint32_t *end)
{
   /* Slice/Unslice frequency is only available in the OA reports when the
    * "Disable OA reports due to clock ratio change" field in
    * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
    * global register (see drivers/gpu/drm/i915/i915_perf.c)
    *
    * Documentation says this should be available on Gen9+ but experimentation
    * shows that Gen8 reports similar values, so we enable it there too.
    */
   if (devinfo->gen < 8)
      return;

   gen8_read_report_clock_ratios(start,
                                 &result->slice_frequency[0],
                                 &result->unslice_frequency[0]);
   gen8_read_report_clock_ratios(end,
                                 &result->slice_frequency[1],
                                 &result->unslice_frequency[1]);
}

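/* Accumulate the counter deltas between a start and end OA report into
 * result->accumulator, following the layout of the query's OA format.
 */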
void
gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                 const struct gen_perf_query_info *query,
                                 const uint32_t *start,
                                 const uint32_t *end)
{
   int i, idx = 0;

   result->hw_id = start[2];
   result->reports_accumulated++;

   switch (query->oa_format) {
   case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
      accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */

      /* 32x 40bit A counters... */
      for (i = 0; i < 32; i++)
         accumulate_uint40(i, start, end, result->accumulator + idx++);

      /* 4x 32bit A counters... */
      for (i = 0; i < 4; i++)
         accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);

      /* 8x 32bit B counters + 8x 32bit C counters... */
      for (i = 0; i < 16; i++)
         accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
      break;

   case I915_OA_FORMAT_A45_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */

      for (i = 0; i < 61; i++)
         accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
      break;

   default:
      unreachable("Can't accumulate OA counters in unknown format");
   }
}

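/* Reset an accumulated result, marking its hw_id as invalid. */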
void
gen_perf_query_result_clear(struct gen_perf_query_result *result)
{
   memset(result, 0, sizeof(*result));
   result->hw_id = 0xffffffff; /* invalid */
}