intel/perf: move delete_query to gen_perf
[mesa.git] src/intel/perf/gen_perf.c
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <dirent.h>
25
26 #include <sys/types.h>
27 #include <sys/stat.h>
28 #include <fcntl.h>
29 #include <unistd.h>
30 #include <errno.h>
31
32 #include <drm-uapi/i915_drm.h>
33
34 #include "common/gen_gem.h"
35 #include "gen_perf.h"
36 #include "perf/gen_perf_mdapi.h"
37 #include "perf/gen_perf_metrics.h"
38
39 #include "dev/gen_debug.h"
40 #include "dev/gen_device_info.h"
41 #include "util/bitscan.h"
42 #include "util/u_math.h"
43
44 #define FILE_DEBUG_FLAG DEBUG_PERFMON
45 #define MI_RPC_BO_SIZE 4096
46 #define MI_FREQ_START_OFFSET_BYTES (3072)
47 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
48 #define MI_FREQ_END_OFFSET_BYTES (3076)
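/* Rough sketch of how these offsets carve up the MI_RPC scratch BO (see
 * gen_perf_begin_query()/gen_perf_end_query() below): the begin
 * MI_REPORT_PERF_COUNT snapshot lands at offset 0, the end snapshot at
 * MI_RPC_BO_END_OFFSET_BYTES (2048), and the begin/end frequency register
 * snapshots at offsets 3072/3076 respectively.
 */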
49
50 #define MAP_READ (1 << 0)
51 #define MAP_WRITE (1 << 1)
52
53 static bool
54 get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
55 {
56 struct stat sb;
57 int min, maj;
58 DIR *drmdir;
59 struct dirent *drm_entry;
60 int len;
61
62 perf->sysfs_dev_dir[0] = '\0';
63
64 if (fstat(fd, &sb)) {
65 DBG("Failed to stat DRM fd\n");
66 return false;
67 }
68
69 maj = major(sb.st_rdev);
70 min = minor(sb.st_rdev);
71
72 if (!S_ISCHR(sb.st_mode)) {
73 DBG("DRM fd is not a character device as expected\n");
74 return false;
75 }
76
77 len = snprintf(perf->sysfs_dev_dir,
78 sizeof(perf->sysfs_dev_dir),
79 "/sys/dev/char/%d:%d/device/drm", maj, min);
80 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
81 DBG("Failed to concatenate sysfs path to drm device\n");
82 return false;
83 }
84
85 drmdir = opendir(perf->sysfs_dev_dir);
86 if (!drmdir) {
87 DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
88 return false;
89 }
90
91 while ((drm_entry = readdir(drmdir))) {
92 if ((drm_entry->d_type == DT_DIR ||
93 drm_entry->d_type == DT_LNK) &&
94 strncmp(drm_entry->d_name, "card", 4) == 0)
95 {
96 len = snprintf(perf->sysfs_dev_dir,
97 sizeof(perf->sysfs_dev_dir),
98 "/sys/dev/char/%d:%d/device/drm/%s",
99 maj, min, drm_entry->d_name);
100 closedir(drmdir);
101 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
102 return false;
103 else
104 return true;
105 }
106 }
107
108 closedir(drmdir);
109
110 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
111 maj, min);
112
113 return false;
114 }
115
116 static bool
117 read_file_uint64(const char *file, uint64_t *val)
118 {
119 char buf[32];
120 int fd, n;
121
122 fd = open(file, 0);
123 if (fd < 0)
124 return false;
125 while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
126 errno == EINTR);
127 close(fd);
128 if (n < 0)
129 return false;
130
131 buf[n] = '\0';
132 *val = strtoull(buf, NULL, 0);
133
134 return true;
135 }
136
137 static bool
138 read_sysfs_drm_device_file_uint64(struct gen_perf_config *perf,
139 const char *file,
140 uint64_t *value)
141 {
142 char buf[512];
143 int len;
144
145 len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
146 if (len < 0 || len >= sizeof(buf)) {
147 DBG("Failed to concatenate sys filename to read u64 from\n");
148 return false;
149 }
150
151 return read_file_uint64(buf, value);
152 }
153
154 static void
155 register_oa_config(struct gen_perf_config *perf,
156 const struct gen_perf_query_info *query,
157 uint64_t config_id)
158 {
159 struct gen_perf_query_info *registered_query =
160 gen_perf_query_append_query_info(perf, 0);
161
162 *registered_query = *query;
163 registered_query->oa_metrics_set_id = config_id;
164 DBG("metric set registered: id = %" PRIu64", guid = %s\n",
165 registered_query->oa_metrics_set_id, query->guid);
166 }
167
168 static void
169 enumerate_sysfs_metrics(struct gen_perf_config *perf)
170 {
171 DIR *metricsdir = NULL;
172 struct dirent *metric_entry;
173 char buf[256];
174 int len;
175
176 len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
177 if (len < 0 || len >= sizeof(buf)) {
178 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
179 return;
180 }
181
182 metricsdir = opendir(buf);
183 if (!metricsdir) {
184 DBG("Failed to open %s: %m\n", buf);
185 return;
186 }
187
188 while ((metric_entry = readdir(metricsdir))) {
189 struct hash_entry *entry;
190
191 if ((metric_entry->d_type != DT_DIR &&
192 metric_entry->d_type != DT_LNK) ||
193 metric_entry->d_name[0] == '.')
194 continue;
195
196 DBG("metric set: %s\n", metric_entry->d_name);
197 entry = _mesa_hash_table_search(perf->oa_metrics_table,
198 metric_entry->d_name);
199 if (entry) {
200 uint64_t id;
201
202 len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
203 perf->sysfs_dev_dir, metric_entry->d_name);
204 if (len < 0 || len >= sizeof(buf)) {
205 DBG("Failed to concatenate path to sysfs metric id file\n");
206 continue;
207 }
208
209 if (!read_file_uint64(buf, &id)) {
210 DBG("Failed to read metric set id from %s: %m", buf);
211 continue;
212 }
213
214 register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
215 } else
216 DBG("metric set not known by mesa (skipping)\n");
217 }
218
219 closedir(metricsdir);
220 }
221
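/* Probe for i915 dynamic OA config support by asking the kernel to remove a
 * deliberately invalid config id: a kernel that implements
 * DRM_IOCTL_I915_PERF_REMOVE_CONFIG rejects it with ENOENT, while an older
 * kernel without the ioctl fails in a different way.
 */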
222 static bool
223 kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
224 {
225 uint64_t invalid_config_id = UINT64_MAX;
226
227 return gen_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
228 &invalid_config_id) < 0 && errno == ENOENT;
229 }
230
231 bool
232 gen_perf_load_metric_id(struct gen_perf_config *perf, const char *guid,
233 uint64_t *metric_id)
234 {
235 char config_path[280];
236
237 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
238 perf->sysfs_dev_dir, guid);
239
240 /* Don't recreate already loaded configs. */
241 return read_file_uint64(config_path, metric_id);
242 }
243
244 static void
245 init_oa_configs(struct gen_perf_config *perf, int fd)
246 {
247 hash_table_foreach(perf->oa_metrics_table, entry) {
248 const struct gen_perf_query_info *query = entry->data;
249 struct drm_i915_perf_oa_config config;
250 uint64_t config_id;
251 int ret;
252
253 if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
254 DBG("metric set: %s (already loaded)\n", query->guid);
255 register_oa_config(perf, query, config_id);
256 continue;
257 }
258
259 memset(&config, 0, sizeof(config));
260
261 memcpy(config.uuid, query->guid, sizeof(config.uuid));
262
263 config.n_mux_regs = query->n_mux_regs;
264 config.mux_regs_ptr = (uintptr_t) query->mux_regs;
265
266 config.n_boolean_regs = query->n_b_counter_regs;
267 config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;
268
269 config.n_flex_regs = query->n_flex_regs;
270 config.flex_regs_ptr = (uintptr_t) query->flex_regs;
271
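/* On success DRM_IOCTL_I915_PERF_ADD_CONFIG returns the id of the newly
 * created metric set; that id is what gets registered below.
 */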
272 ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
273 if (ret < 0) {
274 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
275 query->name, query->guid, strerror(errno));
276 continue;
277 }
278
279 register_oa_config(perf, query, ret);
280 DBG("metric set: %s (added)\n", query->guid);
281 }
282 }
283
284 static void
285 compute_topology_builtins(struct gen_perf_config *perf,
286 const struct gen_device_info *devinfo)
287 {
288 perf->sys_vars.slice_mask = devinfo->slice_masks;
289 perf->sys_vars.n_eu_slices = devinfo->num_slices;
290
291 for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
292 perf->sys_vars.n_eu_sub_slices +=
293 __builtin_popcount(devinfo->subslice_masks[i]);
294 }
295
296 for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
297 perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);
298
299 perf->sys_vars.eu_threads_count = devinfo->num_thread_per_eu;
300
301 /* The subslice mask builtin contains bits for all slices. Prior to Gen11
302 * it had groups of 3 bits per slice, on Gen11 it's 8 bits per
303 * slice.
304 *
305 * Ideally equations would be updated to have a slice/subslice query
306 * function/operator.
307 */
308 perf->sys_vars.subslice_mask = 0;
309
310 int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;
311
312 for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
313 for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
314 if (gen_device_info_subslice_available(devinfo, s, ss))
315 perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
316 }
317 }
318 }
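/* Illustrative example of the packing above (made-up topology): on a Gen9
 * part with slices 0 and 1 each exposing subslices 0-2, bits_per_subslice is
 * 3, so subslice_mask becomes 0b111111 (bits 0-2 for slice 0, bits 3-5 for
 * slice 1).
 */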
319
320 static bool
321 init_oa_sys_vars(struct gen_perf_config *perf, const struct gen_device_info *devinfo)
322 {
323 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
324
325 if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
326 return false;
327
328 if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
329 return false;
330
331 memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
332 perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
333 perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
334 perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
335 perf->sys_vars.revision = devinfo->revision;
336 compute_topology_builtins(perf, devinfo);
337
338 return true;
339 }
340
341 typedef void (*perf_register_oa_queries_t)(struct gen_perf_config *);
342
343 static perf_register_oa_queries_t
344 get_register_queries_function(const struct gen_device_info *devinfo)
345 {
346 if (devinfo->is_haswell)
347 return gen_oa_register_queries_hsw;
348 if (devinfo->is_cherryview)
349 return gen_oa_register_queries_chv;
350 if (devinfo->is_broadwell)
351 return gen_oa_register_queries_bdw;
352 if (devinfo->is_broxton)
353 return gen_oa_register_queries_bxt;
354 if (devinfo->is_skylake) {
355 if (devinfo->gt == 2)
356 return gen_oa_register_queries_sklgt2;
357 if (devinfo->gt == 3)
358 return gen_oa_register_queries_sklgt3;
359 if (devinfo->gt == 4)
360 return gen_oa_register_queries_sklgt4;
361 }
362 if (devinfo->is_kabylake) {
363 if (devinfo->gt == 2)
364 return gen_oa_register_queries_kblgt2;
365 if (devinfo->gt == 3)
366 return gen_oa_register_queries_kblgt3;
367 }
368 if (devinfo->is_geminilake)
369 return gen_oa_register_queries_glk;
370 if (devinfo->is_coffeelake) {
371 if (devinfo->gt == 2)
372 return gen_oa_register_queries_cflgt2;
373 if (devinfo->gt == 3)
374 return gen_oa_register_queries_cflgt3;
375 }
376 if (devinfo->is_cannonlake)
377 return gen_oa_register_queries_cnl;
378 if (devinfo->gen == 11)
379 return gen_oa_register_queries_icl;
380
381 return NULL;
382 }
383
384 bool
385 gen_perf_load_oa_metrics(struct gen_perf_config *perf, int fd,
386 const struct gen_device_info *devinfo)
387 {
388 perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
389 bool i915_perf_oa_available = false;
390 struct stat sb;
391
392 /* The existence of this sysctl parameter implies the kernel supports
393 * the i915 perf interface.
394 */
395 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
396
397 /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
398 * metrics unless running as root.
399 */
400 if (devinfo->is_haswell)
401 i915_perf_oa_available = true;
402 else {
403 uint64_t paranoid = 1;
404
405 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
406
407 if (paranoid == 0 || geteuid() == 0)
408 i915_perf_oa_available = true;
409 }
410 }
411
412 if (!i915_perf_oa_available ||
413 !oa_register ||
414 !get_sysfs_dev_dir(perf, fd) ||
415 !init_oa_sys_vars(perf, devinfo))
416 return false;
417
418 perf->oa_metrics_table =
419 _mesa_hash_table_create(perf, _mesa_key_hash_string,
420 _mesa_key_string_equal);
421
422 /* Index all the metric sets mesa knows about before looking to see what
423 * the kernel is advertising.
424 */
425 oa_register(perf);
426
427 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
428 kernel_has_dynamic_config_support(perf, fd))
429 init_oa_configs(perf, fd);
430 else
431 enumerate_sysfs_metrics(perf);
432
433 return true;
434 }
435
436 /* Accumulate 32bits OA counters */
437 static inline void
438 accumulate_uint32(const uint32_t *report0,
439 const uint32_t *report1,
440 uint64_t *accumulator)
441 {
442 *accumulator += (uint32_t)(*report1 - *report0);
443 }
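/* The unsigned 32-bit subtraction above yields the correct delta even when
 * the counter wraps once between the two reports.
 */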
444
445 /* Accumulate 40bits OA counters */
446 static inline void
447 accumulate_uint40(int a_index,
448 const uint32_t *report0,
449 const uint32_t *report1,
450 uint64_t *accumulator)
451 {
452 const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
453 const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
454 uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
455 uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
456 uint64_t value0 = report0[a_index + 4] | high0;
457 uint64_t value1 = report1[a_index + 4] | high1;
458 uint64_t delta;
459
460 if (value0 > value1)
461 delta = (1ULL << 40) + value1 - value0;
462 else
463 delta = value1 - value0;
464
465 *accumulator += delta;
466 }
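/* Worked example of the wrap-around branch above (illustrative values): with
 * value0 = 0xffffffff00 and value1 = 0x100, value0 > value1, so
 * delta = (1ULL << 40) + 0x100 - 0xffffffff00 = 0x200, i.e. the counter
 * advanced by 512 ticks across the 40-bit wrap.
 */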
467
468 static void
469 gen8_read_report_clock_ratios(const uint32_t *report,
470 uint64_t *slice_freq_hz,
471 uint64_t *unslice_freq_hz)
472 {
473 /* The lower 16 bits of the RPT_ID field of the OA reports contain a
474 * snapshot of the bits coming from the RP_FREQ_NORMAL register and are
475 * divided this way:
476 *
477 * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
478 * RPT_ID[10:9]: RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
479 * RPT_ID[8:0]: RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
480 *
481 * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
482 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
483 *
484 * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
485 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
486 */
487
488 uint32_t unslice_freq = report[0] & 0x1ff;
489 uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
490 uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
491 uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);
492
493 *slice_freq_hz = slice_freq * 16666667ULL;
494 *unslice_freq_hz = unslice_freq * 16666667ULL;
495 }
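/* Illustrative decode (made-up report values): an unslice ratio of 30 and a
 * squashed slice ratio of 54 give unslice_freq_hz ~= 500MHz and
 * slice_freq_hz ~= 900MHz (ratio * 16666667).
 */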
496
497 void
498 gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
499 const struct gen_device_info *devinfo,
500 const uint32_t *start,
501 const uint32_t *end)
502 {
503 /* Slice/Unslice frequency is only available in the OA reports when the
504 * "Disable OA reports due to clock ratio change" field in
505 * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
506 * global register (see drivers/gpu/drm/i915/i915_perf.c)
507 *
508 * Documentation says this should be available on Gen9+ but experimentation
509 * shows that Gen8 reports similar values, so we enable it there too.
510 */
511 if (devinfo->gen < 8)
512 return;
513
514 gen8_read_report_clock_ratios(start,
515 &result->slice_frequency[0],
516 &result->unslice_frequency[0]);
517 gen8_read_report_clock_ratios(end,
518 &result->slice_frequency[1],
519 &result->unslice_frequency[1]);
520 }
521
522 void
523 gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
524 const struct gen_perf_query_info *query,
525 const uint32_t *start,
526 const uint32_t *end)
527 {
528 int i, idx = 0;
529
530 result->hw_id = start[2];
531 result->reports_accumulated++;
532
533 switch (query->oa_format) {
534 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
535 accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
536 accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */
537
538 /* 32x 40bit A counters... */
539 for (i = 0; i < 32; i++)
540 accumulate_uint40(i, start, end, result->accumulator + idx++);
541
542 /* 4x 32bit A counters... */
543 for (i = 0; i < 4; i++)
544 accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);
545
546 /* 8x 32bit B counters + 8x 32bit C counters... */
547 for (i = 0; i < 16; i++)
548 accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
549 break;
550
551 case I915_OA_FORMAT_A45_B8_C8:
552 accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */
553
554 for (i = 0; i < 61; i++)
555 accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
556 break;
557
558 default:
559 unreachable("Can't accumulate OA counters in unknown format");
560 }
561
562 }
563
564 void
565 gen_perf_query_result_clear(struct gen_perf_query_result *result)
566 {
567 memset(result, 0, sizeof(*result));
568 result->hw_id = 0xffffffff; /* invalid */
569 }
570
571 static void
572 fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
573 const char *name,
574 uint32_t data_offset,
575 uint32_t data_size,
576 enum gen_perf_counter_data_type data_type)
577 {
578 struct gen_perf_query_counter *counter = &query->counters[query->n_counters];
579
580 assert(query->n_counters <= query->max_counters);
581
582 counter->name = name;
583 counter->desc = "Raw counter value";
584 counter->type = GEN_PERF_COUNTER_TYPE_RAW;
585 counter->data_type = data_type;
586 counter->offset = data_offset;
587
588 query->n_counters++;
589
590 assert(counter->offset + gen_perf_query_counter_get_size(counter) <= query->data_size);
591 }
592
593 #define MDAPI_QUERY_ADD_COUNTER(query, struct_name, field_name, type_name) \
594 fill_mdapi_perf_query_counter(query, #field_name, \
595 (uint8_t *) &struct_name.field_name - \
596 (uint8_t *) &struct_name, \
597 sizeof(struct_name.field_name), \
598 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
599 #define MDAPI_QUERY_ADD_ARRAY_COUNTER(ctx, query, struct_name, field_name, idx, type_name) \
600 fill_mdapi_perf_query_counter(query, \
601 ralloc_asprintf(ctx, "%s%i", #field_name, idx), \
602 (uint8_t *) &struct_name.field_name[idx] - \
603 (uint8_t *) &struct_name, \
604 sizeof(struct_name.field_name[0]), \
605 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
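/* For reference, a call like
 *
 *   MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
 *
 * expands to roughly
 *
 *   fill_mdapi_perf_query_counter(query, "TotalTime",
 *                                 <byte offset of TotalTime in metric_data>,
 *                                 sizeof(metric_data.TotalTime),
 *                                 GEN_PERF_COUNTER_DATA_TYPE_UINT64);
 *
 * so each counter records its byte offset and size within the MDAPI metrics
 * struct that defines the query's data layout.
 */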
606
607 void
608 gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
609 struct gen_perf_config *perf)
610 {
611 struct gen_perf_query_info *query = NULL;
612
613 /* MDAPI requires different structures for pretty much every generation
614 * (right now we have definitions for gen 7 to 11).
615 */
616 if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
617 return;
618
619 switch (devinfo->gen) {
620 case 7: {
621 query = gen_perf_query_append_query_info(perf, 1 + 45 + 16 + 7);
622 query->oa_format = I915_OA_FORMAT_A45_B8_C8;
623
624 struct gen7_mdapi_metrics metric_data;
625 query->data_size = sizeof(metric_data);
626
627 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
628 for (int i = 0; i < ARRAY_SIZE(metric_data.ACounters); i++) {
629 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
630 metric_data, ACounters, i, UINT64);
631 }
632 for (int i = 0; i < ARRAY_SIZE(metric_data.NOACounters); i++) {
633 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
634 metric_data, NOACounters, i, UINT64);
635 }
636 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
637 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
638 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
639 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
640 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
641 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
642 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
643 break;
644 }
645 case 8: {
646 query = gen_perf_query_append_query_info(perf, 2 + 36 + 16 + 16);
647 query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
648
649 struct gen8_mdapi_metrics metric_data;
650 query->data_size = sizeof(metric_data);
651
652 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
653 MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
654 for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
655 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
656 metric_data, OaCntr, i, UINT64);
657 }
658 for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
659 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
660 metric_data, NoaCntr, i, UINT64);
661 }
662 MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
663 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
664 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
665 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
666 MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
667 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
668 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
669 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
670 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
671 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
672 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
673 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
674 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
675 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
676 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
677 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
678 break;
679 }
680 case 9:
681 case 10:
682 case 11: {
683 query = gen_perf_query_append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
684 query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;
685
686 struct gen9_mdapi_metrics metric_data;
687 query->data_size = sizeof(metric_data);
688
689 MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
690 MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
691 for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
692 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
693 metric_data, OaCntr, i, UINT64);
694 }
695 for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
696 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
697 metric_data, NoaCntr, i, UINT64);
698 }
699 MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
700 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
701 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
702 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
703 MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
704 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
705 MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
706 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
707 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
708 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
709 MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
710 MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
711 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
712 MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
713 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
714 MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
715 for (int i = 0; i < ARRAY_SIZE(metric_data.UserCntr); i++) {
716 MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
717 metric_data, UserCntr, i, UINT64);
718 }
719 MDAPI_QUERY_ADD_COUNTER(query, metric_data, UserCntrCfgId, UINT32);
720 MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved4, UINT32);
721 break;
722 }
723 default:
724 unreachable("Unsupported gen");
725 break;
726 }
727
728 query->kind = GEN_PERF_QUERY_TYPE_RAW;
729 query->name = "Intel_Raw_Hardware_Counters_Set_0_Query";
730 query->guid = GEN_PERF_QUERY_GUID_MDAPI;
731
732 {
733 /* Accumulation buffer offsets copied from an actual query... */
734 const struct gen_perf_query_info *copy_query =
735 &perf->queries[0];
736
737 query->gpu_time_offset = copy_query->gpu_time_offset;
738 query->gpu_clock_offset = copy_query->gpu_clock_offset;
739 query->a_offset = copy_query->a_offset;
740 query->b_offset = copy_query->b_offset;
741 query->c_offset = copy_query->c_offset;
742 }
743 }
744
745 void
746 gen_perf_query_register_mdapi_statistic_query(const struct gen_device_info *devinfo,
747 struct gen_perf_config *perf)
748 {
749 if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
750 return;
751
752 struct gen_perf_query_info *query =
753 gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);
754
755 query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
756 query->name = "Intel_Raw_Pipeline_Statistics_Query";
757
758 /* The order has to match mdapi_pipeline_metrics. */
759 gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
760 "N vertices submitted");
761 gen_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
762 "N primitives submitted");
763 gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
764 "N vertex shader invocations");
765 gen_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
766 "N geometry shader invocations");
767 gen_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
768 "N geometry shader primitives emitted");
769 gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
770 "N primitives entering clipping");
771 gen_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
772 "N primitives leaving clipping");
773 if (devinfo->is_haswell || devinfo->gen == 8) {
774 gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
775 "N fragment shader invocations",
776 "N fragment shader invocations");
777 } else {
778 gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
779 "N fragment shader invocations");
780 }
781 gen_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
782 "N TCS shader invocations");
783 gen_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
784 "N TES shader invocations");
785 if (devinfo->gen >= 7) {
786 gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
787 "N compute shader invocations");
788 }
789
790 if (devinfo->gen >= 10) {
791 /* Reuse existing CS invocation register until we can expose this new
792 * one.
793 */
794 gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
795 "Reserved1");
796 }
797
798 query->data_size = sizeof(uint64_t) * query->n_counters;
799 }
800
801 uint64_t
802 gen_perf_query_get_metric_id(struct gen_perf_config *perf,
803 const struct gen_perf_query_info *query)
804 {
805 /* These queries are known never to change; their config ID was loaded
806 * when the query was first created. No need to look them up again.
807 */
808 if (query->kind == GEN_PERF_QUERY_TYPE_OA)
809 return query->oa_metrics_set_id;
810
811 assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);
812
813 /* Raw queries can be reprogrammed by an external application/library.
814 * When a raw query is used for the first time its ID is set to a value !=
815 * 0. When it stops being used the ID returns to 0. No need to reload the
816 * ID when it's already loaded.
817 */
818 if (query->oa_metrics_set_id != 0) {
819 DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
820 query->name, query->guid, query->oa_metrics_set_id);
821 return query->oa_metrics_set_id;
822 }
823
824 struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
825 if (!gen_perf_load_metric_id(perf, query->guid,
826 &raw_query->oa_metrics_set_id)) {
827 DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
828 raw_query->oa_metrics_set_id = 1ULL;
829 } else {
830 DBG("Raw query '%s'guid=%s loaded ID: %"PRIu64"\n",
831 query->name, query->guid, query->oa_metrics_set_id);
832 }
833 return query->oa_metrics_set_id;
834 }
835
836 struct oa_sample_buf *
837 gen_perf_get_free_sample_buf(struct gen_perf_context *perf_ctx)
838 {
839 struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
840 struct oa_sample_buf *buf;
841
842 if (node)
843 buf = exec_node_data(struct oa_sample_buf, node, link);
844 else {
845 buf = ralloc_size(perf_ctx->perf, sizeof(*buf));
846
847 exec_node_init(&buf->link);
848 buf->refcount = 0;
849 buf->len = 0;
850 }
851
852 return buf;
853 }
854
855 void
856 gen_perf_reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
857 {
858 struct exec_node *tail_node =
859 exec_list_get_tail(&perf_ctx->sample_buffers);
860 struct oa_sample_buf *tail_buf =
861 exec_node_data(struct oa_sample_buf, tail_node, link);
862
863 /* Remove all old, unreferenced sample buffers walking forward from
864 * the head of the list, except always leave at least one node in
865 * the list so we always have a node to reference when we Begin
866 * a new query.
867 */
868 foreach_list_typed_safe(struct oa_sample_buf, buf, link,
869 &perf_ctx->sample_buffers)
870 {
871 if (buf->refcount == 0 && buf != tail_buf) {
872 exec_node_remove(&buf->link);
873 exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
874 } else
875 return;
876 }
877 }
878
879 void
880 gen_perf_free_sample_bufs(struct gen_perf_context *perf_ctx)
881 {
882 foreach_list_typed_safe(struct oa_sample_buf, buf, link,
883 &perf_ctx->free_sample_buffers)
884 ralloc_free(buf);
885
886 exec_list_make_empty(&perf_ctx->free_sample_buffers);
887 }
888
889 /******************************************************************************/
890
891 /**
892 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
893 * pipeline statistics for the performance query object.
894 */
895 void
896 gen_perf_snapshot_statistics_registers(void *context,
897 struct gen_perf_config *perf,
898 struct gen_perf_query_object *obj,
899 uint32_t offset_in_bytes)
900 {
901 const struct gen_perf_query_info *query = obj->queryinfo;
902 const int n_counters = query->n_counters;
903
904 for (int i = 0; i < n_counters; i++) {
905 const struct gen_perf_query_counter *counter = &query->counters[i];
906
907 assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);
908
909 perf->vtbl.store_register_mem64(context, obj->pipeline_stats.bo,
910 counter->pipeline_stat.reg,
911 offset_in_bytes + i * sizeof(uint64_t));
912 }
913 }
914
915 void
916 gen_perf_close(struct gen_perf_context *perfquery,
917 const struct gen_perf_query_info *query)
918 {
919 if (perfquery->oa_stream_fd != -1) {
920 close(perfquery->oa_stream_fd);
921 perfquery->oa_stream_fd = -1;
922 }
923 if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
924 struct gen_perf_query_info *raw_query =
925 (struct gen_perf_query_info *) query;
926 raw_query->oa_metrics_set_id = 0;
927 }
928 }
929
930 bool
931 gen_perf_open(struct gen_perf_context *perf_ctx,
932 int metrics_set_id,
933 int report_format,
934 int period_exponent,
935 int drm_fd,
936 uint32_t ctx_id)
937 {
938 uint64_t properties[] = {
939 /* Single context sampling */
940 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
941
942 /* Include OA reports in samples */
943 DRM_I915_PERF_PROP_SAMPLE_OA, true,
944
945 /* OA unit configuration */
946 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
947 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
948 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
949 };
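/* properties[] above is a flat list of (key, value) pairs, hence
 * num_properties = ARRAY_SIZE(properties) / 2 below.
 */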
950 struct drm_i915_perf_open_param param = {
951 .flags = I915_PERF_FLAG_FD_CLOEXEC |
952 I915_PERF_FLAG_FD_NONBLOCK |
953 I915_PERF_FLAG_DISABLED,
954 .num_properties = ARRAY_SIZE(properties) / 2,
955 .properties_ptr = (uintptr_t) properties,
956 };
957 int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
958 if (fd == -1) {
959 DBG("Error opening gen perf OA stream: %m\n");
960 return false;
961 }
962
963 perf_ctx->oa_stream_fd = fd;
964
965 perf_ctx->current_oa_metrics_set_id = metrics_set_id;
966 perf_ctx->current_oa_format = report_format;
967
968 return true;
969 }
970
971 bool
972 gen_perf_inc_n_users(struct gen_perf_context *perf_ctx)
973 {
974 if (perf_ctx->n_oa_users == 0 &&
975 gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
976 {
977 return false;
978 }
979 ++perf_ctx->n_oa_users;
980
981 return true;
982 }
983
984 void
985 gen_perf_dec_n_users(struct gen_perf_context *perf_ctx)
986 {
987 /* Disabling the i915 perf stream will effectively disable the OA
988 * counters. Note it's important to be sure there are no outstanding
989 * MI_RPC commands at this point since they could stall the CS
990 * indefinitely once OACONTROL is disabled.
991 */
992 --perf_ctx->n_oa_users;
993 if (perf_ctx->n_oa_users == 0 &&
994 gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
995 {
996 DBG("WARNING: Error disabling gen perf stream: %m\n");
997 }
998 }
999
1000 void
1001 gen_perf_init_context(struct gen_perf_context *perf_ctx,
1002 struct gen_perf_config *perf_cfg,
1003 void * ctx, /* driver context (eg, brw_context) */
1004 void * bufmgr, /* eg brw_bufmgr */
1005 const struct gen_device_info *devinfo,
1006 uint32_t hw_ctx,
1007 int drm_fd)
1008 {
1009 perf_ctx->perf = perf_cfg;
1010 perf_ctx->ctx = ctx;
1011 perf_ctx->bufmgr = bufmgr;
1012 perf_ctx->drm_fd = drm_fd;
1013 perf_ctx->hw_ctx = hw_ctx;
1014 perf_ctx->devinfo = devinfo;
1015
1016 perf_ctx->unaccumulated =
1017 ralloc_array(ctx, struct gen_perf_query_object *, 2);
1018 perf_ctx->unaccumulated_elements = 0;
1019 perf_ctx->unaccumulated_array_size = 2;
1020
1021 exec_list_make_empty(&perf_ctx->sample_buffers);
1022 exec_list_make_empty(&perf_ctx->free_sample_buffers);
1023
1024 /* It's convenient to guarantee that this linked list of sample
1025 * buffers is never empty, so we add an empty head; then when we
1026 * Begin an OA query we can always take a reference on a buffer
1027 * in this list.
1028 */
1029 struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(perf_ctx);
1030 exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);
1031
1032 perf_ctx->oa_stream_fd = -1;
1033 perf_ctx->next_query_start_report_id = 1000;
1034 }
1035
1036 /**
1037 * Add a query to the global list of "unaccumulated queries."
1038 *
1039 * Queries are tracked here until all the associated OA reports have
1040 * been accumulated via accumulate_oa_reports() after the end
1041 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
1042 */
1043 static void
1044 add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
1045 struct gen_perf_query_object *obj)
1046 {
1047 if (perf_ctx->unaccumulated_elements >=
1048 perf_ctx->unaccumulated_array_size)
1049 {
1050 perf_ctx->unaccumulated_array_size *= 1.5;
1051 perf_ctx->unaccumulated =
1052 reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
1053 struct gen_perf_query_object *,
1054 perf_ctx->unaccumulated_array_size);
1055 }
1056
1057 perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
1058 }
1059
1060 bool
1061 gen_perf_begin_query(struct gen_perf_context *perf_ctx,
1062 struct gen_perf_query_object *query)
1063 {
1064 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1065 const struct gen_perf_query_info *queryinfo = query->queryinfo;
1066
1067 /* XXX: We have to consider that the command parser unit that parses batch
1068 * buffer commands and is used to capture begin/end counter snapshots isn't
1069 * implicitly synchronized with what's currently running across other GPU
1070 * units (such as the EUs running shaders) that the performance counters are
1071 * associated with.
1072 *
1073 * The intention of performance queries is to measure the work associated
1074 * with commands between the begin/end delimiters and so for that to be the
1075 * case we need to explicitly synchronize the parsing of commands to capture
1076 * Begin/End counter snapshots with what's running across other parts of the
1077 * GPU.
1078 *
1079 * When the command parser reaches a Begin marker it effectively needs to
1080 * drain everything currently running on the GPU until the hardware is idle
1081 * before capturing the first snapshot of counters - otherwise the results
1082 * would also be measuring the effects of earlier commands.
1083 *
1084 * When the command parser reaches an End marker it needs to stall until
1085 * everything currently running on the GPU has finished before capturing the
1086 * end snapshot - otherwise the results won't be a complete representation
1087 * of the work.
1088 *
1089 * Theoretically there could be opportunities to minimize how much of the
1090 * GPU pipeline is drained, or that we stall for, when we know what specific
1091 * units the performance counters being queried relate to but we don't
1092 * currently attempt to be clever here.
1093 *
1094 * Note: with our current simple approach, for back-to-back queries
1095 * we will redundantly emit duplicate commands to synchronize the command
1096 * streamer with the rest of the GPU pipeline, but we assume that in HW the
1097 * second synchronization is effectively a NOOP.
1098 *
1099 * N.B. The final results are based on deltas of counters between (inside)
1100 * Begin/End markers so even though the total wall clock time of the
1101 * workload is stretched by larger pipeline bubbles the bubbles themselves
1102 * are generally invisible to the query results. Whether that's a good or a
1103 * bad thing depends on the use case. For a lower real-time impact while
1104 * capturing metrics, periodic sampling may be a better choice than
1105 * INTEL_performance_query.
1106 *
1107 *
1108 * This is our Begin synchronization point to drain current work on the
1109 * GPU before we capture our first counter snapshot...
1110 */
1111 perf_cfg->vtbl.emit_mi_flush(perf_ctx->ctx);
1112
1113 switch (queryinfo->kind) {
1114 case GEN_PERF_QUERY_TYPE_OA:
1115 case GEN_PERF_QUERY_TYPE_RAW: {
1116
1117 /* Opening an i915 perf stream implies exclusive access to the OA unit
1118 * which will generate counter reports for a specific counter set with a
1119 * specific layout/format so we can't begin any OA based queries that
1120 * require a different counter set or format unless we get an opportunity
1121 * to close the stream and open a new one...
1122 */
1123 uint64_t metric_id = gen_perf_query_get_metric_id(perf_ctx->perf, queryinfo);
1124
1125 if (perf_ctx->oa_stream_fd != -1 &&
1126 perf_ctx->current_oa_metrics_set_id != metric_id) {
1127
1128 if (perf_ctx->n_oa_users != 0) {
1129 DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
1130 perf_ctx->current_oa_metrics_set_id, metric_id);
1131 return false;
1132 } else
1133 gen_perf_close(perf_ctx, queryinfo);
1134 }
1135
1136 /* If the OA counters aren't already on, enable them. */
1137 if (perf_ctx->oa_stream_fd == -1) {
1138 const struct gen_device_info *devinfo = perf_ctx->devinfo;
1139
1140 /* The period_exponent gives a sampling period as follows:
1141 * sample_period = timestamp_period * 2^(period_exponent + 1)
1142 *
1143 * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1144 * ~83ns (GEN8/9).
1145 *
1146 * The counter overflow period is derived from the EuActive counter
1147 * which reads a counter that increments by the number of clock
1148 * cycles multiplied by the number of EUs. It can be calculated as:
1149 *
1150 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1151 *
1152 * (E.g. 40 EUs @ 1GHz = ~53ms)
1153 *
1154 * We select a sampling period shorter than that overflow period to
1155 * ensure we cannot see more than one counter overflow; otherwise we
1156 * could lose information.
1157 */
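/* Continuing the example above with illustrative numbers: 32-bit A counters,
 * 40 EUs and the 1GHz assumption give overflow_period = 2^32 / (40 * 2)
 * ~= 53.7e6 ns (~53ms). With an 80ns timestamp period (HSW), the loop below
 * then settles on period_exponent = 19: 80ns * 2^19 ~= 42ms is the largest
 * sampling period still under the overflow period (80ns * 2^20 ~= 84ms would
 * exceed it).
 */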
1158
1159 int a_counter_in_bits = 32;
1160 if (devinfo->gen >= 8)
1161 a_counter_in_bits = 40;
1162
1163 uint64_t overflow_period = pow(2, a_counter_in_bits) / (perf_cfg->sys_vars.n_eus *
1164 /* drop 1GHz freq to have units in nanoseconds */
1165 2);
1166
1167 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1168 overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);
1169
1170 int period_exponent = 0;
1171 uint64_t prev_sample_period, next_sample_period;
1172 for (int e = 0; e < 30; e++) {
1173 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1174 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1175
1176 /* Take the previous sampling period, lower than the overflow
1177 * period.
1178 */
1179 if (prev_sample_period < overflow_period &&
1180 next_sample_period > overflow_period)
1181 period_exponent = e + 1;
1182 }
1183
1184 if (period_exponent == 0) {
1185 DBG("WARNING: enable to find a sampling exponent\n");
1186 return false;
1187 }
1188
1189 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1190 prev_sample_period / 1000000ul);
1191
1192 if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
1193 period_exponent, perf_ctx->drm_fd,
1194 perf_ctx->hw_ctx))
1195 return false;
1196 } else {
1197 assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
1198 perf_ctx->current_oa_format == queryinfo->oa_format);
1199 }
1200
1201 if (!gen_perf_inc_n_users(perf_ctx)) {
1202 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1203 return false;
1204 }
1205
1206 if (query->oa.bo) {
1207 perf_cfg->vtbl.bo_unreference(query->oa.bo);
1208 query->oa.bo = NULL;
1209 }
1210
1211 query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
1212 "perf. query OA MI_RPC bo",
1213 MI_RPC_BO_SIZE);
1214 #ifdef DEBUG
1215 /* Pre-filling the BO helps debug whether writes landed. */
1216 void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
1217 memset(map, 0x80, MI_RPC_BO_SIZE);
1218 perf_cfg->vtbl.bo_unmap(query->oa.bo);
1219 #endif
1220
1221 query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
1222 perf_ctx->next_query_start_report_id += 2;
1223
1224 /* We flush the batchbuffer here to minimize the chances that MI_RPC
1225 * delimiting commands end up in different batchbuffers. If that's the
1226 * case, the measurement will include the time it takes for the kernel
1227 * scheduler to load a new request into the hardware. This is manifested in
1228 * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
1229 */
1230 perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
1231
1232 /* Take a starting OA counter snapshot. */
1233 perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
1234 query->oa.begin_report_id);
1235 perf_cfg->vtbl.capture_frequency_stat_register(perf_ctx->ctx, query->oa.bo,
1236 MI_FREQ_START_OFFSET_BYTES);
1237
1238 ++perf_ctx->n_active_oa_queries;
1239
1240 /* No already-buffered samples can possibly be associated with this query
1241 * so create a marker within the list of sample buffers enabling us to
1242 * easily ignore earlier samples when processing this query after
1243 * completion.
1244 */
1245 assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
1246 query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);
1247
1248 struct oa_sample_buf *buf =
1249 exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
1250
1251 /* This reference will ensure that future/following sample
1252 * buffers (that may relate to this query) can't be freed until
1253 * this drops to zero.
1254 */
1255 buf->refcount++;
1256
1257 gen_perf_query_result_clear(&query->oa.result);
1258 query->oa.results_accumulated = false;
1259
1260 add_to_unaccumulated_query_list(perf_ctx, query);
1261 break;
1262 }
1263
1264 case GEN_PERF_QUERY_TYPE_PIPELINE:
1265 if (query->pipeline_stats.bo) {
1266 perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
1267 query->pipeline_stats.bo = NULL;
1268 }
1269
1270 query->pipeline_stats.bo =
1271 perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
1272 "perf. query pipeline stats bo",
1273 STATS_BO_SIZE);
1274
1275 /* Take starting snapshots. */
1276 gen_perf_snapshot_statistics_registers(perf_ctx->ctx , perf_cfg, query, 0);
1277
1278 ++perf_ctx->n_active_pipeline_stats_queries;
1279 break;
1280
1281 default:
1282 unreachable("Unknown query type");
1283 break;
1284 }
1285
1286 return true;
1287 }
1288
1289 void
1290 gen_perf_end_query(struct gen_perf_context *perf_ctx,
1291 struct gen_perf_query_object *query)
1292 {
1293 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1294
1295 /* Ensure that the work associated with the queried commands will have
1296 * finished before taking our query end counter readings.
1297 *
1298 * For more details see the comment in gen_perf_begin_query() for the
1299 * corresponding flush.
1300 */
1301 perf_cfg->vtbl.emit_mi_flush(perf_ctx->ctx);
1302
1303 switch (query->queryinfo->kind) {
1304 case GEN_PERF_QUERY_TYPE_OA:
1305 case GEN_PERF_QUERY_TYPE_RAW:
1306
1307 /* NB: It's possible that the query will have already been marked
1308 * as 'accumulated' if an error was seen while reading samples
1309 * from perf. In this case we mustn't try to emit a closing
1310 * MI_RPC command in case the OA unit has already been disabled.
1311 */
1312 if (!query->oa.results_accumulated) {
1313 /* Take an ending OA counter snapshot. */
1314 perf_cfg->vtbl.capture_frequency_stat_register(perf_ctx->ctx, query->oa.bo,
1315 MI_FREQ_END_OFFSET_BYTES);
1316 perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
1317 MI_RPC_BO_END_OFFSET_BYTES,
1318 query->oa.begin_report_id + 1);
1319 }
1320
1321 --perf_ctx->n_active_oa_queries;
1322
1323 /* NB: even though the query has now ended, it can't be accumulated
1324 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1325 * to query->oa.bo
1326 */
1327 break;
1328
1329 case GEN_PERF_QUERY_TYPE_PIPELINE:
1330 gen_perf_snapshot_statistics_registers(perf_ctx->ctx, perf_cfg, query,
1331 STATS_BO_END_OFFSET_BYTES);
1332 --perf_ctx->n_active_pipeline_stats_queries;
1333 break;
1334
1335 default:
1336 unreachable("Unknown query type");
1337 break;
1338 }
1339 }
1340
1341 enum OaReadStatus {
1342 OA_READ_STATUS_ERROR,
1343 OA_READ_STATUS_UNFINISHED,
1344 OA_READ_STATUS_FINISHED,
1345 };
1346
1347 static enum OaReadStatus
1348 read_oa_samples_until(struct gen_perf_context *perf_ctx,
1349 uint32_t start_timestamp,
1350 uint32_t end_timestamp)
1351 {
1352 struct exec_node *tail_node =
1353 exec_list_get_tail(&perf_ctx->sample_buffers);
1354 struct oa_sample_buf *tail_buf =
1355 exec_node_data(struct oa_sample_buf, tail_node, link);
1356 uint32_t last_timestamp = tail_buf->last_timestamp;
1357
1358 while (1) {
1359 struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(perf_ctx);
1360 uint32_t offset;
1361 int len;
1362
1363 while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
1364 sizeof(buf->buf))) < 0 && errno == EINTR)
1365 ;
1366
1367 if (len <= 0) {
1368 exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
1369
1370 if (len < 0) {
1371 if (errno == EAGAIN)
1372 return ((last_timestamp - start_timestamp) >=
1373 (end_timestamp - start_timestamp)) ?
1374 OA_READ_STATUS_FINISHED :
1375 OA_READ_STATUS_UNFINISHED;
1376 else {
1377 DBG("Error reading i915 perf samples: %m\n");
1378 }
1379 } else
1380 DBG("Spurious EOF reading i915 perf samples\n");
1381
1382 return OA_READ_STATUS_ERROR;
1383 }
1384
1385 buf->len = len;
1386 exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);
1387
1388 /* Go through the reports and update the last timestamp. */
1389 offset = 0;
1390 while (offset < buf->len) {
1391 const struct drm_i915_perf_record_header *header =
1392 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
1393 uint32_t *report = (uint32_t *) (header + 1);
1394
1395 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
1396 last_timestamp = report[1];
1397
1398 offset += header->size;
1399 }
1400
1401 buf->last_timestamp = last_timestamp;
1402 }
1403
1404 unreachable("not reached");
1405 return OA_READ_STATUS_ERROR;
1406 }
1407
1408 /**
1409 * Try to read all the reports until either the delimiting timestamp
1410 * or an error arises.
1411 */
1412 static bool
1413 read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
1414 struct gen_perf_query_object *query,
1415 void *current_batch)
1416 {
1417 uint32_t *start;
1418 uint32_t *last;
1419 uint32_t *end;
1420 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1421
1422 /* We need the MI_REPORT_PERF_COUNT to land before we can start
1423 * accumulating. */
1424 assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
1425 !perf_cfg->vtbl.bo_busy(query->oa.bo));
1426
1427 /* Map the BO once here and let accumulate_oa_reports() unmap
1428 * it. */
1429 if (query->oa.map == NULL)
1430 query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);
1431
1432 start = last = query->oa.map;
1433 end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
1434
1435 if (start[0] != query->oa.begin_report_id) {
1436 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
1437 return true;
1438 }
1439 if (end[0] != (query->oa.begin_report_id + 1)) {
1440 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
1441 return true;
1442 }
1443
1444 /* Read the reports until the end timestamp. */
1445 switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
1446 case OA_READ_STATUS_ERROR:
1447 /* Fallthrough and let accumulate_oa_reports() deal with the
1448 * error. */
1449 case OA_READ_STATUS_FINISHED:
1450 return true;
1451 case OA_READ_STATUS_UNFINISHED:
1452 return false;
1453 }
1454
1455 unreachable("invalid read status");
1456 return false;
1457 }
1458
1459 void
1460 gen_perf_wait_query(struct gen_perf_context *perf_ctx,
1461 struct gen_perf_query_object *query,
1462 void *current_batch)
1463 {
1464 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1465 struct brw_bo *bo = NULL;
1466
1467 switch (query->queryinfo->kind) {
1468 case GEN_PERF_QUERY_TYPE_OA:
1469 case GEN_PERF_QUERY_TYPE_RAW:
1470 bo = query->oa.bo;
1471 break;
1472
1473 case GEN_PERF_QUERY_TYPE_PIPELINE:
1474 bo = query->pipeline_stats.bo;
1475 break;
1476
1477 default:
1478 unreachable("Unknown query type");
1479 break;
1480 }
1481
1482 if (bo == NULL)
1483 return;
1484
1485 /* If the current batch references our results bo then we need to
1486 * flush first...
1487 */
1488 if (perf_cfg->vtbl.batch_references(current_batch, bo))
1489 perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
1490
1491 perf_cfg->vtbl.bo_wait_rendering(bo);
1492
1493 /* Due to a race condition between the OA unit signaling report
1494 * availability and the report actually being written into memory,
1495 * we need to wait for all the reports to come in before we can
1496 * read them.
1497 */
1498 if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
1499 query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
1500 while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
1501 ;
1502 }
1503 }
1504
1505 bool
1506 gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
1507 struct gen_perf_query_object *query,
1508 void *current_batch)
1509 {
1510 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1511
1512 switch (query->queryinfo->kind) {
1513 case GEN_PERF_QUERY_TYPE_OA:
1514 case GEN_PERF_QUERY_TYPE_RAW:
1515 return (query->oa.results_accumulated ||
1516 (query->oa.bo &&
1517 !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
1518 !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
1519 read_oa_samples_for_query(perf_ctx, query, current_batch)));
1520 case GEN_PERF_QUERY_TYPE_PIPELINE:
1521 return (query->pipeline_stats.bo &&
1522 !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
1523 !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));
1524
1525 default:
1526 unreachable("Unknown query type");
1527 break;
1528 }
1529
1530 return false;
1531 }
1532
1533 /**
1534 * Remove a query from the global list of unaccumulated queries once
1535 * the OA reports associated with the query have been successfully
1536 * accumulated in accumulate_oa_reports(), or when discarding unwanted
1537 * query results.
1538 */
1539 static void
1540 drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
1541 struct gen_perf_query_object *query)
1542 {
1543 for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
1544 if (perf_ctx->unaccumulated[i] == query) {
1545 int last_elt = --perf_ctx->unaccumulated_elements;
1546
1547 if (i == last_elt)
1548 perf_ctx->unaccumulated[i] = NULL;
1549 else {
1550 perf_ctx->unaccumulated[i] =
1551 perf_ctx->unaccumulated[last_elt];
1552 }
1553
1554 break;
1555 }
1556 }
1557
1558 /* Drop our samples_head reference so that associated periodic
1559 * sample data buffers can potentially be reaped if they aren't
1560 * referenced by any other queries...
1561 */
1562
1563 struct oa_sample_buf *buf =
1564 exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
1565
1566 assert(buf->refcount > 0);
1567 buf->refcount--;
1568
1569 query->oa.samples_head = NULL;
1570
1571 gen_perf_reap_old_sample_buffers(perf_ctx);
1572 }
1573
1574 void
1575 gen_perf_delete_query(struct gen_perf_context *perf_ctx,
1576 struct gen_perf_query_object *query)
1577 {
1578 struct gen_perf_config *perf_cfg = perf_ctx->perf;
1579
1580 /* We can assume that the frontend waits for a query to complete
1581 * before ever calling into here, so we don't have to worry about
1582 * deleting an in-flight query object.
1583 */
1584 switch (query->queryinfo->kind) {
1585 case GEN_PERF_QUERY_TYPE_OA:
1586 case GEN_PERF_QUERY_TYPE_RAW:
1587 if (query->oa.bo) {
1588 if (!query->oa.results_accumulated) {
1589 drop_from_unaccumulated_query_list(perf_ctx, query);
1590 gen_perf_dec_n_users(perf_ctx);
1591 }
1592
1593 perf_cfg->vtbl.bo_unreference(query->oa.bo);
1594 query->oa.bo = NULL;
1595 }
1596
1597 query->oa.results_accumulated = false;
1598 break;
1599
1600 case GEN_PERF_QUERY_TYPE_PIPELINE:
1601 if (query->pipeline_stats.bo) {
1602 perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
1603 query->pipeline_stats.bo = NULL;
1604 }
1605 break;
1606
1607 default:
1608 unreachable("Unknown query type");
1609 break;
1610 }
1611
1612 /* As an indication that the INTEL_performance_query extension is no
1613 * longer in use, it's a good time to free our cache of sample
1614 * buffers and close any current i915-perf stream.
1615 */
1616 if (--perf_ctx->n_query_instances == 0) {
1617 gen_perf_free_sample_bufs(perf_ctx);
1618 gen_perf_close(perf_ctx, query->queryinfo);
1619 }
1620
1621 free(query);
1622 }