intel/perf: fix EuThreadsCount value in performance equations
src/intel/perf/gen_perf.c
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <dirent.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

#include <drm-uapi/i915_drm.h>

#include "gen_perf.h"
#include "perf/gen_perf_metrics.h"

#include "dev/gen_debug.h"
#include "dev/gen_device_info.h"
#include "util/bitscan.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON
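
/* Find the sysfs directory for the DRM device backing the given file
 * descriptor (/sys/dev/char/<maj>:<min>/device/drm/cardX) and store the
 * path in perf->sysfs_dev_dir.
 */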
static bool
get_sysfs_dev_dir(struct gen_perf *perf, int fd)
{
   struct stat sb;
   int min, maj;
   DIR *drmdir;
   struct dirent *drm_entry;
   int len;

   perf->sysfs_dev_dir[0] = '\0';

   if (fstat(fd, &sb)) {
      DBG("Failed to stat DRM fd\n");
      return false;
   }

   maj = major(sb.st_rdev);
   min = minor(sb.st_rdev);

   if (!S_ISCHR(sb.st_mode)) {
      DBG("DRM fd is not a character device as expected\n");
      return false;
   }

   len = snprintf(perf->sysfs_dev_dir,
                  sizeof(perf->sysfs_dev_dir),
                  "/sys/dev/char/%d:%d/device/drm", maj, min);
   if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
      DBG("Failed to concatenate sysfs path to drm device\n");
      return false;
   }

   drmdir = opendir(perf->sysfs_dev_dir);
   if (!drmdir) {
      DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
      return false;
   }

   while ((drm_entry = readdir(drmdir))) {
      if ((drm_entry->d_type == DT_DIR ||
           drm_entry->d_type == DT_LNK) &&
          strncmp(drm_entry->d_name, "card", 4) == 0)
      {
         len = snprintf(perf->sysfs_dev_dir,
                        sizeof(perf->sysfs_dev_dir),
                        "/sys/dev/char/%d:%d/device/drm/%s",
                        maj, min, drm_entry->d_name);
         closedir(drmdir);
         if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
            return false;
         else
            return true;
      }
   }

   closedir(drmdir);

   DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
       maj, min);

   return false;
}
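
/* Read a single uint64 value out of a file, retrying the read() if it is
 * interrupted by a signal.
 */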
static bool
read_file_uint64(const char *file, uint64_t *val)
{
   char buf[32];
   int fd, n;

   fd = open(file, 0);
   if (fd < 0)
      return false;
   while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
          errno == EINTR);
   close(fd);
   if (n < 0)
      return false;

   buf[n] = '\0';
   *val = strtoull(buf, NULL, 0);

   return true;
}

static bool
read_sysfs_drm_device_file_uint64(struct gen_perf *perf,
                                  const char *file,
                                  uint64_t *value)
{
   char buf[512];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate sys filename to read u64 from\n");
      return false;
   }

   return read_file_uint64(buf, value);
}
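
/* Duplicate an existing query description, associating the copy with the
 * metric set ID the kernel assigned to its configuration.
 */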
static void
register_oa_config(struct gen_perf *perf,
                   const struct gen_perf_query_info *query,
                   uint64_t config_id)
{
   struct gen_perf_query_info *registered_query =
      gen_perf_query_append_query_info(perf, 0);

   *registered_query = *query;
   registered_query->oa_metrics_set_id = config_id;
   DBG("metric set registered: id = %" PRIu64", guid = %s\n",
       registered_query->oa_metrics_set_id, query->guid);
}
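
/* Walk the metrics/ directory the kernel exposes under the device's sysfs
 * node and register every metric set that mesa also knows about.
 */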
static void
enumerate_sysfs_metrics(struct gen_perf *perf)
{
   DIR *metricsdir = NULL;
   struct dirent *metric_entry;
   char buf[256];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate path to sysfs metrics/ directory\n");
      return;
   }

   metricsdir = opendir(buf);
   if (!metricsdir) {
      DBG("Failed to open %s: %m\n", buf);
      return;
   }

   while ((metric_entry = readdir(metricsdir))) {
      struct hash_entry *entry;

      if ((metric_entry->d_type != DT_DIR &&
           metric_entry->d_type != DT_LNK) ||
          metric_entry->d_name[0] == '.')
         continue;

      DBG("metric set: %s\n", metric_entry->d_name);
      entry = _mesa_hash_table_search(perf->oa_metrics_table,
                                      metric_entry->d_name);
      if (entry) {
         uint64_t id;

         len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
                        perf->sysfs_dev_dir, metric_entry->d_name);
         if (len < 0 || len >= sizeof(buf)) {
            DBG("Failed to concatenate path to sysfs metric id file\n");
            continue;
         }

         if (!read_file_uint64(buf, &id)) {
            DBG("Failed to read metric set id from %s: %m", buf);
            continue;
         }

         register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
      } else
         DBG("metric set not known by mesa (skipping)\n");
   }

   closedir(metricsdir);
}
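
/* Detect whether the kernel supports loading OA configurations at runtime.
 * Trying to remove the immutable test config (id 1) only fails with ENOENT
 * when the add/remove-config ioctls are actually implemented.
 */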
static bool
kernel_has_dynamic_config_support(struct gen_perf *perf, int fd)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      struct gen_perf_query_info *query = entry->data;
      char config_path[280];
      uint64_t config_id;

      snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
               perf->sysfs_dev_dir, query->guid);

      /* Look for the test config, which we know we can't replace. */
      if (read_file_uint64(config_path, &config_id) && config_id == 1) {
         return perf->ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
                            &config_id) < 0 && errno == ENOENT;
      }
   }

   return false;
}
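
/* Look up the metric set ID the kernel assigned to the configuration with
 * the given GUID, if it is already loaded.
 */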
bool
gen_perf_load_metric_id(struct gen_perf *perf, const char *guid,
                        uint64_t *metric_id)
{
   char config_path[280];

   snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
            perf->sysfs_dev_dir, guid);

   /* Don't recreate already loaded configs. */
   return read_file_uint64(config_path, metric_id);
}
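
/* Upload all the OA configurations mesa knows about to the kernel, skipping
 * the ones already loaded, and register the resulting metric set IDs.
 */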
static void
init_oa_configs(struct gen_perf *perf, int fd)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      const struct gen_perf_query_info *query = entry->data;
      struct drm_i915_perf_oa_config config;
      uint64_t config_id;
      int ret;

      if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
         DBG("metric set: %s (already loaded)\n", query->guid);
         register_oa_config(perf, query, config_id);
         continue;
      }

      memset(&config, 0, sizeof(config));

      memcpy(config.uuid, query->guid, sizeof(config.uuid));

      config.n_mux_regs = query->n_mux_regs;
      config.mux_regs_ptr = (uintptr_t) query->mux_regs;

      config.n_boolean_regs = query->n_b_counter_regs;
      config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;

      config.n_flex_regs = query->n_flex_regs;
      config.flex_regs_ptr = (uintptr_t) query->flex_regs;

      ret = perf->ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
      if (ret < 0) {
         DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
             query->name, query->guid, strerror(errno));
         continue;
      }

      register_oa_config(perf, query, ret);
      DBG("metric set: %s (added)\n", query->guid);
   }
}
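
/* Fill out the topology related system values (slice/subslice/EU counts and
 * masks) consumed by the performance equations.
 */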
static void
compute_topology_builtins(struct gen_perf *perf,
                          const struct gen_device_info *devinfo)
{
   perf->sys_vars.slice_mask = devinfo->slice_masks;
   perf->sys_vars.n_eu_slices = devinfo->num_slices;

   for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
      perf->sys_vars.n_eu_sub_slices +=
         __builtin_popcount(devinfo->subslice_masks[i]);
   }

   for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
      perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);

   perf->sys_vars.eu_threads_count = devinfo->num_thread_per_eu;

   /* The subslice mask builtin contains bits for all slices. Prior to Gen11
    * it had groups of 3bits for each slice, on Gen11 it's 8bits for each
    * slice.
    *
    * Ideally equations would be updated to have a slice/subslice query
    * function/operator.
    */
   perf->sys_vars.subslice_mask = 0;

   int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;

   for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
      for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
         if (gen_device_info_subslice_available(devinfo, s, ss))
            perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
      }
   }
}
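
/* Initialize the system values (GT frequencies, timestamp frequency, GPU
 * topology, ...) referenced by the OA counter equations.
 */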
static bool
init_oa_sys_vars(struct gen_perf *perf, const struct gen_device_info *devinfo)
{
   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
      return false;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
      return false;

   memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
   perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
   perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
   perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
   perf->sys_vars.revision = devinfo->revision;
   compute_topology_builtins(perf, devinfo);

   return true;
}

typedef void (*perf_register_oa_queries_t)(struct gen_perf *);
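
/* Select the generated, per-platform function that registers the OA queries
 * available on this device.
 */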
static perf_register_oa_queries_t
get_register_queries_function(const struct gen_device_info *devinfo)
{
   if (devinfo->is_haswell)
      return gen_oa_register_queries_hsw;
   if (devinfo->is_cherryview)
      return gen_oa_register_queries_chv;
   if (devinfo->is_broadwell)
      return gen_oa_register_queries_bdw;
   if (devinfo->is_broxton)
      return gen_oa_register_queries_bxt;
   if (devinfo->is_skylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_sklgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_sklgt3;
      if (devinfo->gt == 4)
         return gen_oa_register_queries_sklgt4;
   }
   if (devinfo->is_kabylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_kblgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_kblgt3;
   }
   if (devinfo->is_geminilake)
      return gen_oa_register_queries_glk;
   if (devinfo->is_coffeelake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_cflgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_cflgt3;
   }
   if (devinfo->is_cannonlake)
      return gen_oa_register_queries_cnl;
   if (devinfo->gen == 11)
      return gen_oa_register_queries_icl;

   return NULL;
}
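
/* Verify that the kernel exposes the i915 perf interface and that OA metrics
 * are accessible to this process, then build the table of metric sets
 * supported on this device.
 */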
bool
gen_perf_load_oa_metrics(struct gen_perf *perf, int fd,
                         const struct gen_device_info *devinfo)
{
   perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
   bool i915_perf_oa_available = false;
   struct stat sb;

   /* The existence of this sysctl parameter implies the kernel supports
    * the i915 perf interface.
    */
   if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {

      /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
       * metrics unless running as root.
       */
      if (devinfo->is_haswell)
         i915_perf_oa_available = true;
      else {
         uint64_t paranoid = 1;

         read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);

         if (paranoid == 0 || geteuid() == 0)
            i915_perf_oa_available = true;
      }
   }

   if (!i915_perf_oa_available ||
       !oa_register ||
       !get_sysfs_dev_dir(perf, fd) ||
       !init_oa_sys_vars(perf, devinfo))
      return false;

   perf->oa_metrics_table =
      _mesa_hash_table_create(perf, _mesa_key_hash_string,
                              _mesa_key_string_equal);

   /* Index all the metric sets mesa knows about before looking to see what
    * the kernel is advertising.
    */
   oa_register(perf);

   if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
       kernel_has_dynamic_config_support(perf, fd))
      init_oa_configs(perf, fd);
   else
      enumerate_sysfs_metrics(perf);

   return true;
}

/* Accumulate 32bits OA counters */
static inline void
accumulate_uint32(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += (uint32_t)(*report1 - *report0);
}
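
/* In the A32u40_A4u32_B8_C8 report format the low 32 bits of each 40bit A
 * counter live in dwords 4..35 of the report, while the high 8 bits are
 * packed as bytes starting at dword 40; the index arithmetic below relies on
 * that layout.
 */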
/* Accumulate 40bits OA counters */
static inline void
accumulate_uint40(int a_index,
                  const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
   const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
   uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
   uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
   uint64_t value0 = report0[a_index + 4] | high0;
   uint64_t value1 = report1[a_index + 4] | high1;
   uint64_t delta;

   if (value0 > value1)
      delta = (1ULL << 40) + value1 - value0;
   else
      delta = value1 - value0;

   *accumulator += delta;
}

static void
gen8_read_report_clock_ratios(const uint32_t *report,
                              uint64_t *slice_freq_hz,
                              uint64_t *unslice_freq_hz)
{
   /* The lower 16bits of the RPT_ID field of the OA reports contains a
    * snapshot of the bits coming from the RP_FREQ_NORMAL register and is
    * divided this way:
    *
    * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
    * RPT_ID[10:9]:  RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
    * RPT_ID[8:0]:   RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
    *
    * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    *
    * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    */

   uint32_t unslice_freq = report[0] & 0x1ff;
   uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
   uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
   uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);

   *slice_freq_hz = slice_freq * 16666667ULL;
   *unslice_freq_hz = unslice_freq * 16666667ULL;
}

void
gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                       const struct gen_device_info *devinfo,
                                       const uint32_t *start,
                                       const uint32_t *end)
{
   /* Slice/Unslice frequency is only available in the OA reports when the
    * "Disable OA reports due to clock ratio change" field in
    * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
    * global register (see drivers/gpu/drm/i915/i915_perf.c)
    *
    * Documentation says this should be available on Gen9+ but experimentation
    * shows that Gen8 reports similar values, so we enable it there too.
    */
   if (devinfo->gen < 8)
      return;

   gen8_read_report_clock_ratios(start,
                                 &result->slice_frequency[0],
                                 &result->unslice_frequency[0]);
   gen8_read_report_clock_ratios(end,
                                 &result->slice_frequency[1],
                                 &result->unslice_frequency[1]);
}
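
/* Accumulate the counter deltas between a pair of OA reports into the result,
 * according to the report format used by the query.
 */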
void
gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                 const struct gen_perf_query_info *query,
                                 const uint32_t *start,
                                 const uint32_t *end)
{
   int i, idx = 0;

   result->hw_id = start[2];
   result->reports_accumulated++;

   switch (query->oa_format) {
   case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
      accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */

      /* 32x 40bit A counters... */
      for (i = 0; i < 32; i++)
         accumulate_uint40(i, start, end, result->accumulator + idx++);

      /* 4x 32bit A counters... */
      for (i = 0; i < 4; i++)
         accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);

      /* 8x 32bit B counters + 8x 32bit C counters... */
      for (i = 0; i < 16; i++)
         accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
      break;

   case I915_OA_FORMAT_A45_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */

      for (i = 0; i < 61; i++)
         accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
      break;

   default:
      unreachable("Can't accumulate OA counters in unknown format");
   }
}

void
gen_perf_query_result_clear(struct gen_perf_query_result *result)
{
   memset(result, 0, sizeof(*result));
   result->hw_id = 0xffffffff; /* invalid */
}