/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <dirent.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <drm-uapi/i915_drm.h>

#include "gen_perf.h"
#include "common/gen_gem.h"
#include "perf/gen_perf_mdapi.h"
#include "perf/gen_perf_metrics.h"

#include "dev/gen_debug.h"
#include "dev/gen_device_info.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define MI_RPC_BO_SIZE               4096
#define MI_FREQ_START_OFFSET_BYTES   (3072)
#define MI_RPC_BO_END_OFFSET_BYTES   (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_END_OFFSET_BYTES     (3076)
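/* Sketch of the MI_RPC scratch buffer layout implied by the offsets above
 * (reconstructed for illustration from how begin/end queries use them, not
 * part of the original source):
 *
 *   byte 0    : begin MI_REPORT_PERF_COUNT snapshot
 *   byte 2048 : end MI_REPORT_PERF_COUNT snapshot (MI_RPC_BO_END_OFFSET_BYTES)
 *   byte 3072 : begin RPSTAT frequency snapshot (MI_FREQ_START_OFFSET_BYTES)
 *   byte 3076 : end RPSTAT frequency snapshot (MI_FREQ_END_OFFSET_BYTES)
 */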
#define INTEL_MASK(high, low) (((1u<<((high)-(low)+1))-1)<<(low))
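/* Example: INTEL_MASK(13, 7) evaluates to 0x3f80, i.e. bits 7..13 set,
 * matching the GEN7_RPSTAT1 current frequency field below. */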
#define GEN7_RPSTAT1                           0xA01C
#define GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT        7
#define GEN7_RPSTAT1_CURR_GT_FREQ_MASK         INTEL_MASK(13, 7)
#define GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT        0
#define GEN7_RPSTAT1_PREV_GT_FREQ_MASK         INTEL_MASK(6, 0)

#define GEN9_RPSTAT0                           0xA01C
#define GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT        23
#define GEN9_RPSTAT0_CURR_GT_FREQ_MASK         INTEL_MASK(31, 23)
#define GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT        0
#define GEN9_RPSTAT0_PREV_GT_FREQ_MASK         INTEL_MASK(8, 0)
#define GEN6_SO_PRIM_STORAGE_NEEDED     0x2280
#define GEN7_SO_PRIM_STORAGE_NEEDED(n)  (0x5240 + (n) * 8)
#define GEN6_SO_NUM_PRIMS_WRITTEN       0x2288
#define GEN7_SO_NUM_PRIMS_WRITTEN(n)    (0x5200 + (n) * 8)
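/* Example: GEN7_SO_NUM_PRIMS_WRITTEN(2) == 0x5210, i.e. stream 2's
 * counter, since each stream's 64-bit register is 8 bytes apart. */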
#define MAP_READ  (1 << 0)
#define MAP_WRITE (1 << 1)
static bool
get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
{
   struct stat sb;
   int min, maj;
   DIR *drmdir;
   struct dirent *drm_entry;
   int len;

   perf->sysfs_dev_dir[0] = '\0';

   if (fstat(fd, &sb)) {
      DBG("Failed to stat DRM fd\n");
      return false;
   }

   maj = major(sb.st_rdev);
   min = minor(sb.st_rdev);

   if (!S_ISCHR(sb.st_mode)) {
      DBG("DRM fd is not a character device as expected\n");
      return false;
   }

   len = snprintf(perf->sysfs_dev_dir,
                  sizeof(perf->sysfs_dev_dir),
                  "/sys/dev/char/%d:%d/device/drm", maj, min);
   if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
      DBG("Failed to concatenate sysfs path to drm device\n");
      return false;
   }

   drmdir = opendir(perf->sysfs_dev_dir);
   if (!drmdir) {
      DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
      return false;
   }

   while ((drm_entry = readdir(drmdir))) {
      if ((drm_entry->d_type == DT_DIR ||
           drm_entry->d_type == DT_LNK) &&
          strncmp(drm_entry->d_name, "card", 4) == 0)
      {
         len = snprintf(perf->sysfs_dev_dir,
                        sizeof(perf->sysfs_dev_dir),
                        "/sys/dev/char/%d:%d/device/drm/%s",
                        maj, min, drm_entry->d_name);
         closedir(drmdir);
         if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
            return false;
         else
            return true;
      }
   }

   closedir(drmdir);

   DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
       maj, min);

   return false;
}
static bool
read_file_uint64(const char *file, uint64_t *val)
{
   char buf[32];
   int fd, n;

   fd = open(file, 0);
   if (fd < 0)
      return false;
   while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
          errno == EINTR);
   close(fd);
   if (n < 0)
      return false;

   buf[n] = '\0';
   *val = strtoull(buf, NULL, 0);

   return true;
}
static bool
read_sysfs_drm_device_file_uint64(struct gen_perf_config *perf,
                                  const char *file,
                                  uint64_t *value)
{
   char buf[512];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate sys filename to read u64 from\n");
      return false;
   }

   return read_file_uint64(buf, value);
}
static inline struct gen_perf_query_info *
append_query_info(struct gen_perf_config *perf, int max_counters)
{
   struct gen_perf_query_info *query;

   perf->queries = reralloc(perf, perf->queries,
                            struct gen_perf_query_info,
                            ++perf->n_queries);
   query = &perf->queries[perf->n_queries - 1];
   memset(query, 0, sizeof(*query));

   if (max_counters > 0) {
      query->max_counters = max_counters;
      query->counters =
         rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
   }

   return query;
}
static void
register_oa_config(struct gen_perf_config *perf,
                   const struct gen_perf_query_info *query,
                   uint64_t config_id)
{
   struct gen_perf_query_info *registered_query = append_query_info(perf, 0);

   *registered_query = *query;
   registered_query->oa_metrics_set_id = config_id;
   DBG("metric set registered: id = %" PRIu64", guid = %s\n",
       registered_query->oa_metrics_set_id, query->guid);
}
static void
enumerate_sysfs_metrics(struct gen_perf_config *perf)
{
   DIR *metricsdir = NULL;
   struct dirent *metric_entry;
   char buf[256];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate path to sysfs metrics/ directory\n");
      return;
   }

   metricsdir = opendir(buf);
   if (!metricsdir) {
      DBG("Failed to open %s: %m\n", buf);
      return;
   }

   while ((metric_entry = readdir(metricsdir))) {
      struct hash_entry *entry;

      if ((metric_entry->d_type != DT_DIR &&
           metric_entry->d_type != DT_LNK) ||
          metric_entry->d_name[0] == '.')
         continue;

      DBG("metric set: %s\n", metric_entry->d_name);
      entry = _mesa_hash_table_search(perf->oa_metrics_table,
                                      metric_entry->d_name);
      if (entry) {
         uint64_t id;

         len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
                        perf->sysfs_dev_dir, metric_entry->d_name);
         if (len < 0 || len >= sizeof(buf)) {
            DBG("Failed to concatenate path to sysfs metric id file\n");
            continue;
         }

         if (!read_file_uint64(buf, &id)) {
            DBG("Failed to read metric set id from %s: %m", buf);
            continue;
         }

         register_oa_config(perf, (const struct gen_perf_query_info *)entry->data, id);
      } else
         DBG("metric set not known by mesa (skipping)\n");
   }

   closedir(metricsdir);
}
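/* A note on the probe below (my reading of the code, not from the original
 * source): asking the kernel to remove a config ID that cannot exist only
 * fails with ENOENT when DRM_IOCTL_I915_PERF_REMOVE_CONFIG is actually
 * implemented; kernels without dynamic config support fail the ioctl with
 * a different errno. This detects the feature without side effects. */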
static bool
kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
{
   uint64_t invalid_config_id = UINT64_MAX;

   return gen_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
                    &invalid_config_id) < 0 && errno == ENOENT;
}
bool
gen_perf_load_metric_id(struct gen_perf_config *perf, const char *guid,
                        uint64_t *metric_id)
{
   char config_path[280];

   snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
            perf->sysfs_dev_dir, guid);

   /* Don't recreate already loaded configs. */
   return read_file_uint64(config_path, metric_id);
}
static void
init_oa_configs(struct gen_perf_config *perf, int fd)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      const struct gen_perf_query_info *query = entry->data;
      struct drm_i915_perf_oa_config config;
      uint64_t config_id;
      int ret;

      if (gen_perf_load_metric_id(perf, query->guid, &config_id)) {
         DBG("metric set: %s (already loaded)\n", query->guid);
         register_oa_config(perf, query, config_id);
         continue;
      }

      memset(&config, 0, sizeof(config));

      memcpy(config.uuid, query->guid, sizeof(config.uuid));

      config.n_mux_regs = query->n_mux_regs;
      config.mux_regs_ptr = (uintptr_t) query->mux_regs;

      config.n_boolean_regs = query->n_b_counter_regs;
      config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;

      config.n_flex_regs = query->n_flex_regs;
      config.flex_regs_ptr = (uintptr_t) query->flex_regs;

      ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
      if (ret < 0) {
         DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
             query->name, query->guid, strerror(errno));
         continue;
      }

      register_oa_config(perf, query, ret);
      DBG("metric set: %s (added)\n", query->guid);
   }
}
static void
compute_topology_builtins(struct gen_perf_config *perf,
                          const struct gen_device_info *devinfo)
{
   perf->sys_vars.slice_mask = devinfo->slice_masks;
   perf->sys_vars.n_eu_slices = devinfo->num_slices;

   for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
      perf->sys_vars.n_eu_sub_slices +=
         __builtin_popcount(devinfo->subslice_masks[i]);
   }

   for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
      perf->sys_vars.n_eus += __builtin_popcount(devinfo->eu_masks[i]);

   perf->sys_vars.eu_threads_count = devinfo->num_thread_per_eu;

   /* The subslice mask builtin contains bits for all slices. Prior to Gen11
    * it had groups of 3bits for each slice, on Gen11 it's 8bits for each
    * slice.
    *
    * Ideally equations would be updated to have a slice/subslice query
    * function/operator.
    */
   perf->sys_vars.subslice_mask = 0;

   int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;

   for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
      for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
         if (gen_device_info_subslice_available(devinfo, s, ss))
            perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
      }
   }
}
static bool
init_oa_sys_vars(struct gen_perf_config *perf, const struct gen_device_info *devinfo)
{
   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
      return false;

   if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
      return false;

   memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
   perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
   perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
   perf->sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
   perf->sys_vars.revision = devinfo->revision;
   compute_topology_builtins(perf, devinfo);

   return true;
}
typedef void (*perf_register_oa_queries_t)(struct gen_perf_config *);

static perf_register_oa_queries_t
get_register_queries_function(const struct gen_device_info *devinfo)
{
   if (devinfo->is_haswell)
      return gen_oa_register_queries_hsw;
   if (devinfo->is_cherryview)
      return gen_oa_register_queries_chv;
   if (devinfo->is_broadwell)
      return gen_oa_register_queries_bdw;
   if (devinfo->is_broxton)
      return gen_oa_register_queries_bxt;
   if (devinfo->is_skylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_sklgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_sklgt3;
      if (devinfo->gt == 4)
         return gen_oa_register_queries_sklgt4;
   }
   if (devinfo->is_kabylake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_kblgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_kblgt3;
   }
   if (devinfo->is_geminilake)
      return gen_oa_register_queries_glk;
   if (devinfo->is_coffeelake) {
      if (devinfo->gt == 2)
         return gen_oa_register_queries_cflgt2;
      if (devinfo->gt == 3)
         return gen_oa_register_queries_cflgt3;
   }
   if (devinfo->is_cannonlake)
      return gen_oa_register_queries_cnl;
   if (devinfo->gen == 11)
      return gen_oa_register_queries_icl;

   return NULL;
}
static inline void
add_stat_reg(struct gen_perf_query_info *query, uint32_t reg,
             uint32_t numerator, uint32_t denominator,
             const char *name, const char *description)
{
   struct gen_perf_query_counter *counter;

   assert(query->n_counters < query->max_counters);

   counter = &query->counters[query->n_counters];
   counter->name = name;
   counter->desc = description;
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
   counter->offset = sizeof(uint64_t) * query->n_counters;
   counter->pipeline_stat.reg = reg;
   counter->pipeline_stat.numerator = numerator;
   counter->pipeline_stat.denominator = denominator;

   query->n_counters++;
}

static inline void
add_basic_stat_reg(struct gen_perf_query_info *query,
                   uint32_t reg, const char *name)
{
   add_stat_reg(query, reg, 1, 1, name, name);
}
static void
load_pipeline_statistic_metrics(struct gen_perf_config *perf_cfg,
                                const struct gen_device_info *devinfo)
{
   struct gen_perf_query_info *query =
      append_query_info(perf_cfg, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   add_basic_stat_reg(query, IA_VERTICES_COUNT,
                      "N vertices submitted");
   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                      "N primitives submitted");
   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                      "N vertex shader invocations");

   if (devinfo->gen == 6) {
      add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                   "SO_PRIM_STORAGE_NEEDED",
                   "N geometry shader stream-out primitives (total)");
      add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                   "SO_NUM_PRIMS_WRITTEN",
                   "N geometry shader stream-out primitives (written)");
   } else {
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                   "N stream-out (stream 0) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                   "N stream-out (stream 1) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                   "N stream-out (stream 2) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                   "N stream-out (stream 3) primitives (total)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                   "N stream-out (stream 0) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                   "N stream-out (stream 1) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                   "N stream-out (stream 2) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                   "N stream-out (stream 3) primitives (written)");
   }

   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                      "N TCS shader invocations");
   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                      "N TES shader invocations");

   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                      "N geometry shader invocations");
   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                      "N geometry shader primitives emitted");

   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                      "N primitives entering clipping");
   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                      "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8) {
      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                   "N fragment shader invocations",
                   "N fragment shader invocations");
   } else {
      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                         "N fragment shader invocations");
   }

   add_basic_stat_reg(query, PS_DEPTH_COUNT,
                      "N z-pass fragments");

   if (devinfo->gen >= 7) {
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}
static bool
load_oa_metrics(struct gen_perf_config *perf, int fd,
                const struct gen_device_info *devinfo)
{
   perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
   bool i915_perf_oa_available = false;
   struct stat sb;

   /* The existence of this sysctl parameter implies the kernel supports
    * the i915 perf interface.
    */
   if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {

      /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
       * metrics unless running as root.
       */
      if (devinfo->is_haswell)
         i915_perf_oa_available = true;
      else {
         uint64_t paranoid = 1;

         read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);

         if (paranoid == 0 || geteuid() == 0)
            i915_perf_oa_available = true;
      }
   }

   if (!i915_perf_oa_available ||
       !oa_register ||
       !get_sysfs_dev_dir(perf, fd) ||
       !init_oa_sys_vars(perf, devinfo))
      return false;

   perf->oa_metrics_table =
      _mesa_hash_table_create(perf, _mesa_key_hash_string,
                              _mesa_key_string_equal);

   /* Index all the metric sets mesa knows about before looking to see what
    * the kernel is advertising.
    */
   oa_register(perf);

   if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
       kernel_has_dynamic_config_support(perf, fd))
      init_oa_configs(perf, fd);
   else
      enumerate_sysfs_metrics(perf);

   return true;
}
/* Accumulate 32bits OA counters */
static inline void
accumulate_uint32(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += (uint32_t)(*report1 - *report0);
}
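/* The (uint32_t) cast in accumulate_uint32() makes a single counter
 * wraparound harmless: e.g. with *report0 == 0xfffffff0 and
 * *report1 == 0x00000010 the subtraction wraps to 0x20, so the
 * accumulator still advances by 32 as expected. */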
/* Accumulate 40bits OA counters */
static inline void
accumulate_uint40(int a_index,
                  const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
   const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
   uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
   uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
   uint64_t value0 = report0[a_index + 4] | high0;
   uint64_t value1 = report1[a_index + 4] | high1;
   uint64_t delta;

   if (value0 > value1)
      delta = (1ULL << 40) + value1 - value0;
   else
      delta = value1 - value0;

   *accumulator += delta;
}
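/* Report layout assumed by the pointer arithmetic in accumulate_uint40()
 * (as implied by the A32u40_A4u32_B8_C8 handling below): dwords 4..35 of a
 * report hold the low 32 bits of A0..A31 and the 32 bytes starting at
 * dword 40 hold one high byte per A counter, giving 40 bits in total. A
 * value1 smaller than value0 is treated as exactly one 40-bit wraparound. */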
static void
gen8_read_report_clock_ratios(const uint32_t *report,
                              uint64_t *slice_freq_hz,
                              uint64_t *unslice_freq_hz)
{
   /* The lower 16bits of the RPT_ID field of the OA reports contains a
    * snapshot of the bits coming from the RP_FREQ_NORMAL register and is
    * made up as follows :
    *
    *   RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
    *   RPT_ID[10:9]:  RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
    *   RPT_ID[8:0]:   RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
    *
    * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    *
    * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    */

   uint32_t unslice_freq = report[0] & 0x1ff;
   uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
   uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
   uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);

   *slice_freq_hz = slice_freq * 16666667ULL;
   *unslice_freq_hz = unslice_freq * 16666667ULL;
}
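/* Example of the decode above: a squashed slice ratio of 60 maps to
 * 60 * 16666667 Hz ≈ 1.0 GHz. */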
void
gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                       const struct gen_device_info *devinfo,
                                       const uint32_t *start,
                                       const uint32_t *end)
{
   /* Slice/Unslice frequency is only available in the OA reports when the
    * "Disable OA reports due to clock ratio change" field in
    * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
    * global register (see drivers/gpu/drm/i915/i915_perf.c)
    *
    * Documentation says this should be available on Gen9+ but experimentation
    * shows that Gen8 reports similar values, so we enable it there too.
    */
   if (devinfo->gen < 8)
      return;

   gen8_read_report_clock_ratios(start,
                                 &result->slice_frequency[0],
                                 &result->unslice_frequency[0]);
   gen8_read_report_clock_ratios(end,
                                 &result->slice_frequency[1],
                                 &result->unslice_frequency[1]);
}
void
gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                 const struct gen_perf_query_info *query,
                                 const uint32_t *start,
                                 const uint32_t *end)
{
   int i, idx = 0;

   result->hw_id = start[2];
   result->reports_accumulated++;

   switch (query->oa_format) {
   case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator + idx++); /* timestamp */
      accumulate_uint32(start + 3, end + 3, result->accumulator + idx++); /* clock */

      /* 32x 40bit A counters... */
      for (i = 0; i < 32; i++)
         accumulate_uint40(i, start, end, result->accumulator + idx++);

      /* 4x 32bit A counters... */
      for (i = 0; i < 4; i++)
         accumulate_uint32(start + 36 + i, end + 36 + i, result->accumulator + idx++);

      /* 8x 32bit B counters + 8x 32bit C counters... */
      for (i = 0; i < 16; i++)
         accumulate_uint32(start + 48 + i, end + 48 + i, result->accumulator + idx++);
      break;

   case I915_OA_FORMAT_A45_B8_C8:
      accumulate_uint32(start + 1, end + 1, result->accumulator); /* timestamp */

      for (i = 0; i < 61; i++)
         accumulate_uint32(start + 3 + i, end + 3 + i, result->accumulator + 1 + i);
      break;

   default:
      unreachable("Can't accumulate OA counters in unknown format");
   }
}
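/* Resulting accumulator layout for A32u40_A4u32_B8_C8, as built up above:
 * [0] timestamp, [1] clock, [2..33] A0-A31, [34..37] A32-A35, then
 * [38..53] B0-B7 followed by C0-C7. The A45_B8_C8 format instead stores
 * the timestamp at [0] followed by 61 plain 32-bit counter deltas. */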
void
gen_perf_query_result_clear(struct gen_perf_query_result *result)
{
   memset(result, 0, sizeof(*result));
   result->hw_id = 0xffffffff; /* invalid */
}
void
gen_perf_query_register_mdapi_statistic_query(struct gen_perf_config *perf_cfg,
                                              const struct gen_device_info *devinfo)
{
   if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
      return;

   struct gen_perf_query_info *query =
      append_query_info(perf_cfg, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Intel_Raw_Pipeline_Statistics_Query";

   /* The order has to match mdapi_pipeline_metrics. */
   add_basic_stat_reg(query, IA_VERTICES_COUNT,
                      "N vertices submitted");
   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                      "N primitives submitted");
   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                      "N vertex shader invocations");
   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                      "N geometry shader invocations");
   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                      "N geometry shader primitives emitted");
   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                      "N primitives entering clipping");
   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                      "N primitives leaving clipping");
   if (devinfo->is_haswell || devinfo->gen == 8) {
      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                   "N fragment shader invocations",
                   "N fragment shader invocations");
   } else {
      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                         "N fragment shader invocations");
   }
   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                      "N TCS shader invocations");
   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                      "N TES shader invocations");
   if (devinfo->gen >= 7) {
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "N compute shader invocations");
   }

   if (devinfo->gen >= 10) {
      /* Reuse existing CS invocation register until we can expose this new
       * one.
       */
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "Reserved1");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}
static void
fill_mdapi_perf_query_counter(struct gen_perf_query_info *query,
                              const char *name,
                              uint32_t data_offset,
                              uint32_t data_size,
                              enum gen_perf_counter_data_type data_type)
{
   struct gen_perf_query_counter *counter = &query->counters[query->n_counters];

   assert(query->n_counters <= query->max_counters);

   counter->name = name;
   counter->desc = "Raw counter value";
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = data_type;
   counter->offset = data_offset;

   query->n_counters++;

   assert(counter->offset + gen_perf_query_counter_get_size(counter) <= query->data_size);
}
#define MDAPI_QUERY_ADD_COUNTER(query, struct_name, field_name, type_name) \
   fill_mdapi_perf_query_counter(query, #field_name,                    \
                                 (uint8_t *) &struct_name.field_name -  \
                                 (uint8_t *) &struct_name,              \
                                 sizeof(struct_name.field_name),        \
                                 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
#define MDAPI_QUERY_ADD_ARRAY_COUNTER(ctx, query, struct_name, field_name, idx, type_name) \
   fill_mdapi_perf_query_counter(query,                                 \
                                 ralloc_asprintf(ctx, "%s%i", #field_name, idx), \
                                 (uint8_t *) &struct_name.field_name[idx] - \
                                 (uint8_t *) &struct_name,              \
                                 sizeof(struct_name.field_name[0]),     \
                                 GEN_PERF_COUNTER_DATA_TYPE_##type_name)
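/* For instance, MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64)
 * expands to:
 *
 *    fill_mdapi_perf_query_counter(query, "TotalTime",
 *                                  (uint8_t *) &metric_data.TotalTime -
 *                                  (uint8_t *) &metric_data,
 *                                  sizeof(metric_data.TotalTime),
 *                                  GEN_PERF_COUNTER_DATA_TYPE_UINT64);
 *
 * i.e. the pointer arithmetic computes the field's byte offset within the
 * generation-specific MDAPI struct, equivalent to offsetof(). */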
static void
register_mdapi_oa_query(const struct gen_device_info *devinfo,
                        struct gen_perf_config *perf)
{
   struct gen_perf_query_info *query = NULL;

   /* MDAPI requires different structures for pretty much every generation
    * (right now we have definitions for gen 7 to 11).
    */
   if (!(devinfo->gen >= 7 && devinfo->gen <= 11))
      return;

   switch (devinfo->gen) {
   case 7: {
      query = append_query_info(perf, 1 + 45 + 16 + 7);
      query->oa_format = I915_OA_FORMAT_A45_B8_C8;

      struct gen7_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.ACounters); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, ACounters, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NOACounters); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NOACounters, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      break;
   }
   case 8: {
      query = append_query_info(perf, 2 + 36 + 16 + 16);
      query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;

      struct gen8_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, OaCntr, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NoaCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      break;
   }
   case 9:
   case 10:
   case 11: {
      query = append_query_info(perf, 2 + 36 + 16 + 16 + 16 + 2);
      query->oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8;

      struct gen9_mdapi_metrics metric_data;
      query->data_size = sizeof(metric_data);

      MDAPI_QUERY_ADD_COUNTER(query, metric_data, TotalTime, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, GPUTicks, UINT64);
      for (int i = 0; i < ARRAY_SIZE(metric_data.OaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, OaCntr, i, UINT64);
      }
      for (int i = 0; i < ARRAY_SIZE(metric_data.NoaCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, NoaCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, BeginTimestamp, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved3, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, OverrunOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerUser, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, MarkerDriver, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UnsliceFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter1, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, PerfCounter2, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, SplitOccured, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequencyChanged, BOOL32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, CoreFrequency, UINT64);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, ReportsCount, UINT32);
      for (int i = 0; i < ARRAY_SIZE(metric_data.UserCntr); i++) {
         MDAPI_QUERY_ADD_ARRAY_COUNTER(perf->queries, query,
                                       metric_data, UserCntr, i, UINT64);
      }
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, UserCntrCfgId, UINT32);
      MDAPI_QUERY_ADD_COUNTER(query, metric_data, Reserved4, UINT32);
      break;
   }
   default:
      unreachable("Unsupported gen");
      break;
   }

   query->kind = GEN_PERF_QUERY_TYPE_RAW;
   query->name = "Intel_Raw_Hardware_Counters_Set_0_Query";
   query->guid = GEN_PERF_QUERY_GUID_MDAPI;

   {
      /* Accumulation buffer offsets copied from an actual query... */
      const struct gen_perf_query_info *copy_query =
         &perf->queries[0];

      query->gpu_time_offset = copy_query->gpu_time_offset;
      query->gpu_clock_offset = copy_query->gpu_clock_offset;
      query->a_offset = copy_query->a_offset;
      query->b_offset = copy_query->b_offset;
      query->c_offset = copy_query->c_offset;
   }
}
*perf
,
956 const struct gen_perf_query_info
*query
)
958 /* These queries are know not to ever change, their config ID has been
959 * loaded upon the first query creation. No need to look them up again.
961 if (query
->kind
== GEN_PERF_QUERY_TYPE_OA
)
962 return query
->oa_metrics_set_id
;
964 assert(query
->kind
== GEN_PERF_QUERY_TYPE_RAW
);
966 /* Raw queries can be reprogrammed up by an external application/library.
967 * When a raw query is used for the first time it's id is set to a value !=
968 * 0. When it stops being used the id returns to 0. No need to reload the
969 * ID when it's already loaded.
971 if (query
->oa_metrics_set_id
!= 0) {
972 DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64
"\n",
973 query
->name
, query
->guid
, query
->oa_metrics_set_id
);
974 return query
->oa_metrics_set_id
;
977 struct gen_perf_query_info
*raw_query
= (struct gen_perf_query_info
*)query
;
978 if (!gen_perf_load_metric_id(perf
, query
->guid
,
979 &raw_query
->oa_metrics_set_id
)) {
980 DBG("Unable to read query guid=%s ID, falling back to test config\n", query
->guid
);
981 raw_query
->oa_metrics_set_id
= 1ULL;
983 DBG("Raw query '%s'guid=%s loaded ID: %"PRIu64
"\n",
984 query
->name
, query
->guid
, query
->oa_metrics_set_id
);
986 return query
->oa_metrics_set_id
;
struct oa_sample_buf *
gen_perf_get_free_sample_buf(struct gen_perf_context *perf_ctx)
{
   struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
   struct oa_sample_buf *buf;

   if (node)
      buf = exec_node_data(struct oa_sample_buf, node, link);
   else {
      buf = ralloc_size(perf_ctx->perf, sizeof(*buf));

      exec_node_init(&buf->link);
      buf->refcount = 0;
      buf->len = 0;
   }

   return buf;
}
void
gen_perf_reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
      } else
         return;
   }
}
void
gen_perf_free_sample_bufs(struct gen_perf_context *perf_ctx)
{
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&perf_ctx->free_sample_buffers);
}
/******************************************************************************/

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
void
gen_perf_snapshot_statistics_registers(void *context,
                                       struct gen_perf_config *perf,
                                       struct gen_perf_query_object *obj,
                                       uint32_t offset_in_bytes)
{
   const struct gen_perf_query_info *query = obj->queryinfo;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);

      perf->vtbl.store_register_mem64(context, obj->pipeline_stats.bo,
                                      counter->pipeline_stat.reg,
                                      offset_in_bytes + i * sizeof(uint64_t));
   }
}
void
gen_perf_close(struct gen_perf_context *perfquery,
               const struct gen_perf_query_info *query)
{
   if (perfquery->oa_stream_fd != -1) {
      close(perfquery->oa_stream_fd);
      perfquery->oa_stream_fd = -1;
   }
   if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      struct gen_perf_query_info *raw_query =
         (struct gen_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}
bool
gen_perf_open(struct gen_perf_context *perf_ctx,
              uint64_t metrics_set_id,
              int report_format,
              int period_exponent,
              int drm_fd,
              uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = ARRAY_SIZE(properties) / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening gen perf OA stream: %m\n");
      return false;
   }

   perf_ctx->oa_stream_fd = fd;

   perf_ctx->current_oa_metrics_set_id = metrics_set_id;
   perf_ctx->current_oa_format = report_format;

   return true;
}
bool
gen_perf_inc_n_users(struct gen_perf_context *perf_ctx)
{
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++perf_ctx->n_oa_users;

   return true;
}
void
gen_perf_dec_n_users(struct gen_perf_context *perf_ctx)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters. Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --perf_ctx->n_oa_users;
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling gen perf stream: %m\n");
   }
}
void
gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
                      const struct gen_device_info *devinfo,
                      int drm_fd)
{
   load_pipeline_statistic_metrics(perf_cfg, devinfo);
   gen_perf_query_register_mdapi_statistic_query(perf_cfg, devinfo);
   if (load_oa_metrics(perf_cfg, drm_fd, devinfo))
      register_mdapi_oa_query(devinfo, perf_cfg);
}
void
gen_perf_init_context(struct gen_perf_context *perf_ctx,
                      struct gen_perf_config *perf_cfg,
                      void * ctx,  /* driver context (eg, brw_context) */
                      void * bufmgr,  /* eg brw_bufmgr */
                      const struct gen_device_info *devinfo,
                      uint32_t hw_ctx,
                      int drm_fd)
{
   perf_ctx->perf = perf_cfg;
   perf_ctx->ctx = ctx;
   perf_ctx->bufmgr = bufmgr;
   perf_ctx->drm_fd = drm_fd;
   perf_ctx->hw_ctx = hw_ctx;
   perf_ctx->devinfo = devinfo;

   perf_ctx->unaccumulated =
      ralloc_array(ctx, struct gen_perf_query_object *, 2);
   perf_ctx->unaccumulated_elements = 0;
   perf_ctx->unaccumulated_array_size = 2;

   exec_list_make_empty(&perf_ctx->sample_buffers);
   exec_list_make_empty(&perf_ctx->free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(perf_ctx);
   exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);

   perf_ctx->oa_stream_fd = -1;
   perf_ctx->next_query_start_report_id = 1000;
}
/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                struct gen_perf_query_object *obj)
{
   if (perf_ctx->unaccumulated_elements >=
       perf_ctx->unaccumulated_array_size)
   {
      perf_ctx->unaccumulated_array_size *= 1.5;
      perf_ctx->unaccumulated =
         reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
                  struct gen_perf_query_object *,
                  perf_ctx->unaccumulated_array_size);
   }

   perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
}
bool
gen_perf_begin_query(struct gen_perf_context *perf_ctx,
                     struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * Theoretically there could be opportunities to minimize how much of the
    * GPU pipeline is drained, or that we stall for, when we know what specific
    * units the performance counters being queried relate to, but we don't
    * currently attempt to be clever here.
    *
    * Note: with our current simple approach, for back-to-back queries we will
    * redundantly emit duplicate commands to synchronize the command streamer
    * with the rest of the GPU pipeline, but we assume that in HW the second
    * synchronization is effectively a NOOP.
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics then periodic sampling may be a better choice than
    * INTEL_performance_query.
    */

   /* This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   perf_cfg->vtbl.emit_mi_flush(perf_ctx->ctx);

   switch (queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = gen_perf_query_get_metric_id(perf_ctx->perf, queryinfo);

      if (perf_ctx->oa_stream_fd != -1 &&
          perf_ctx->current_oa_metrics_set_id != metric_id) {

         if (perf_ctx->n_oa_users != 0) {
            DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
                perf_ctx->current_oa_metrics_set_id, metric_id);
            return false;
         } else
            gen_perf_close(perf_ctx, queryinfo);
      }

      /* If the OA counters aren't already on, enable them. */
      if (perf_ctx->oa_stream_fd == -1) {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         /* The period_exponent gives a sampling period as follows:
          *   sample_period = timestamp_period * 2^(period_exponent + 1)
          *
          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
          * ~83ns (GEN8/9).
          *
          * The counter overflow period is derived from the EuActive counter
          * which reads a counter that increments by the number of clock
          * cycles multiplied by the number of EUs. It can be calculated as:
          *
          * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
          *
          * (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We select a sampling period below that overflow period to
          * ensure we cannot see more than 1 counter overflow, otherwise we
          * could lose information.
          */
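         /* Checking the parenthetical above (illustrative arithmetic, with
          * the 1GHz frequency folded into nanosecond units as in the code
          * below): a 32-bit A counter with 40 EUs overflows after
          * 2^32 / (40 * 2) ns ≈ 53.7ms, while a 40-bit counter stretches
          * that by a factor of 2^8 to roughly 13.7s.
          */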
         int a_counter_in_bits = 32;
         if (devinfo->gen >= 8)
            a_counter_in_bits = 40;

         uint64_t overflow_period = pow(2, a_counter_in_bits) /
            (perf_cfg->sys_vars.n_eus *
             /* drop 1GHz freq to have units in nanoseconds */
             2);

         DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
             overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);

         int period_exponent = 0;
         uint64_t prev_sample_period, next_sample_period;
         for (int e = 0; e < 30; e++) {
            prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
            next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;

            /* Take the previous sampling period, lower than the overflow
             * period.
             */
            if (prev_sample_period < overflow_period &&
                next_sample_period > overflow_period)
               period_exponent = e + 1;
         }

         if (period_exponent == 0) {
            DBG("WARNING: unable to find a sampling exponent\n");
            return false;
         }

         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
             prev_sample_period / 1000000ul);

         if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
                            period_exponent, perf_ctx->drm_fd,
                            perf_ctx->hw_ctx))
            return false;
      } else {
         assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
                perf_ctx->current_oa_format == queryinfo->oa_format);
      }

      if (!gen_perf_inc_n_users(perf_ctx)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (query->oa.bo) {
         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                             "perf. query OA MI_RPC bo",
                                             MI_RPC_BO_SIZE);

      /* Pre-filling the BO helps debug whether writes landed. */
      void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
      memset(map, 0x80, MI_RPC_BO_SIZE);
      perf_cfg->vtbl.bo_unmap(query->oa.bo);

      query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
      perf_ctx->next_query_start_report_id += 2;

      /* We flush the batchbuffer here to minimize the chances that MI_RPC
       * delimiting commands end up in different batchbuffers. If that's the
       * case, the measurement will include the time it takes for the kernel
       * scheduler to load a new request into the hardware. This is manifested
       * in tools like frameretrace by spikes in the "GPU Core Clocks"
       * counter.
       */
      perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);

      /* Take a starting OA counter snapshot. */
      perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
                                               query->oa.begin_report_id);
      perf_cfg->vtbl.capture_frequency_stat_register(perf_ctx->ctx, query->oa.bo,
                                                     MI_FREQ_START_OFFSET_BYTES);

      ++perf_ctx->n_active_oa_queries;

      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
      query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);

      struct oa_sample_buf *buf =
         exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      gen_perf_query_result_clear(&query->oa.result);
      query->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(perf_ctx, query);
      break;
   }

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }

      query->pipeline_stats.bo =
         perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                 "perf. query pipeline stats bo",
                                 STATS_BO_SIZE);

      /* Take starting snapshots. */
      gen_perf_snapshot_statistics_registers(perf_ctx->ctx, perf_cfg, query, 0);

      ++perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   return true;
}
void
gen_perf_end_query(struct gen_perf_context *perf_ctx,
                   struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in brw_begin_perf_query for
    * corresponding flush.
    */
   perf_cfg->vtbl.emit_mi_flush(perf_ctx->ctx);

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled.
       */
      if (!query->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         perf_cfg->vtbl.capture_frequency_stat_register(perf_ctx->ctx, query->oa.bo,
                                                        MI_FREQ_END_OFFSET_BYTES);
         perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
                                                  MI_RPC_BO_END_OFFSET_BYTES,
                                                  query->oa.begin_report_id + 1);
      }

      --perf_ctx->n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written to
       * query->oa.bo
       */
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      gen_perf_snapshot_statistics_registers(perf_ctx->ctx, perf_cfg, query,
                                             STATS_BO_END_OFFSET_BYTES);
      --perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};

static enum OaReadStatus
read_oa_samples_until(struct gen_perf_context *perf_ctx,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);
   uint32_t last_timestamp = tail_buf->last_timestamp;

   while (1) {
      struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(perf_ctx);
      uint32_t offset;
      int len;

      while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);

         if (len < 0) {
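            /* EAGAIN means no more data for now. The unsigned deltas below
             * are wraparound-safe (a note on the logic, not in the original
             * source): measuring both the last-seen and the end timestamp
             * relative to start_timestamp keeps the comparison valid even
             * if the 32-bit OA timestamp wraps mid-query. */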
            if (errno == EAGAIN)
               return ((last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
            else {
               DBG("Error reading i915 perf samples: %m\n");
            }
         } else
            DBG("Spurious EOF reading i915 perf samples\n");

         return OA_READ_STATUS_ERROR;
      }

      buf->len = len;
      exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);

      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}
/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query,
                          void *current_batch)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulate. */
   assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
          !perf_cfg->vtbl.bo_busy(query->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it. */
   if (query->oa.map == NULL)
      query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error. */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}
void
gen_perf_wait_query(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   struct brw_bo *bo = NULL;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = query->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = query->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (perf_cfg->vtbl.batch_references(current_batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);

   perf_cfg->vtbl.bo_wait_rendering(bo);

   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
       query->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
      while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
         ;
   }
}
bool
gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (query->oa.results_accumulated ||
              (query->oa.bo &&
               !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
               !perf_cfg->vtbl.bo_busy(query->oa.bo) &&
               read_oa_samples_for_query(perf_ctx, query, current_batch)));
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (query->pipeline_stats.bo &&
              !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
              !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}
/**
 * Remove a query from the global list of unaccumulated queries, either
 * after successfully accumulating the OA reports associated with the
 * query in accumulate_oa_reports() or when discarding unwanted query
 * results.
 */
static void
drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                   struct gen_perf_query_object *query)
{
   for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
      if (perf_ctx->unaccumulated[i] == query) {
         int last_elt = --perf_ctx->unaccumulated_elements;

         if (i == last_elt)
            perf_ctx->unaccumulated[i] = NULL;
         else {
            perf_ctx->unaccumulated[i] =
               perf_ctx->unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */

   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   query->oa.samples_head = NULL;

   gen_perf_reap_old_sample_buffers(perf_ctx);
}
/* In general, if we see anything spurious while accumulating results we
 * don't try to continue accumulating the current query; we scrap anything
 * outstanding and then hope for the best with new queries.
 */
static void
discard_all_queries(struct gen_perf_context *perf_ctx)
{
   while (perf_ctx->unaccumulated_elements) {
      struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];

      query->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(perf_ctx, query);

      gen_perf_dec_n_users(perf_ctx);
   }
}
/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots. For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
static void
accumulate_oa_reports(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool in_ctx = true;
   int out_duration = 0;

   assert(query->oa.map != NULL);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the perf_ctx->sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = query->oa.samples_head->next;

   foreach_list_typed_from(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool add = true;
,
1835 report
[1] - start
[1]) > 5000000000) {
1839 /* Ignore reports that come after the end marker.
1840 * (Note: takes care to allow overflow of 32bit timestamps)
1842 if (gen_device_info_timebase_scale(devinfo
,
1843 report
[1] - end
[1]) <= 5000000000) {
1847 /* For Gen8+ since the counters continue while other
1848 * contexts are running we need to discount any unrelated
1849 * deltas. The hardware automatically generates a report
1850 * on context switch which gives us a new reference point
1851 * to continuing adding deltas from.
1853 * For Haswell we can rely on the HW to stop the progress
1854 * of OA counters while any other context is acctive.
1856 if (devinfo
->gen
>= 8) {
1857 if (in_ctx
&& report
[2] != query
->oa
.result
.hw_id
) {
1858 DBG("i915 perf: Switch AWAY (observed by ID change)\n");
1861 } else if (in_ctx
== false && report
[2] == query
->oa
.result
.hw_id
) {
1862 DBG("i915 perf: Switch TO\n");
1865 /* From experimentation in IGT, we found that the OA unit
1866 * might label some report as "idle" (using an invalid
1867 * context ID), right after a report for a given context.
1868 * Deltas generated by those reports actually belong to the
1869 * previous context, even though they're not labelled as
1872 * We didn't *really* Switch AWAY in the case that we e.g.
1873 * saw a single periodic report while idle...
1875 if (out_duration
>= 1)
1877 } else if (in_ctx
) {
1878 assert(report
[2] == query
->oa
.result
.hw_id
);
1879 DBG("i915 perf: Continuation IN\n");
1881 assert(report
[2] != query
->oa
.result
.hw_id
);
1882 DBG("i915 perf: Continuation OUT\n");
1889 gen_perf_query_result_accumulate(&query
->oa
.result
, query
->queryinfo
,
1898 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST
:
1899 DBG("i915 perf: OA error: all reports lost\n");
1901 case DRM_I915_PERF_RECORD_OA_REPORT_LOST
:
1902 DBG("i915 perf: OA report lost\n");
1910 gen_perf_query_result_accumulate(&query
->oa
.result
, query
->queryinfo
,
1913 query
->oa
.results_accumulated
= true;
1914 drop_from_unaccumulated_query_list(perf_ctx
, query
);
1915 gen_perf_dec_n_users(perf_ctx
);
1921 discard_all_queries(perf_ctx
);
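/* A worked sketch of the 32bit timestamp handling in
 * accumulate_oa_reports() above (illustrative values): the subtraction
 * happens in uint32_t so it wraps, and only then is scaled to
 * nanoseconds:
 *
 *    // report taken 1 tick *before* the start report:
 *    uint32_t delta_ticks = 0x00000000 - 0x00000001;   // = 0xffffffff
 *    uint64_t delta_ns =
 *       gen_device_info_timebase_scale(devinfo, delta_ticks);
 *
 * The wrapped delta scales to far more than 5000000000ns (5s), so the
 * report is treated as coming before the start marker, while a report
 * genuinely inside the query produces a small delta - assuming
 * periodic sampling keeps real gaps well under 5 seconds.
 */
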
void
gen_perf_delete_query(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (query->oa.bo) {
         if (!query->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(perf_ctx, query);
            gen_perf_dec_n_users(perf_ctx);
         }

         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.results_accumulated = false;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--perf_ctx->n_query_instances == 0) {
      gen_perf_free_sample_bufs(perf_ctx);
      gen_perf_close(perf_ctx, query->queryinfo);
   }

   free(query);
}

#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)

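/* e.g. GET_FIELD(reg, GEN9_RPSTAT0_CURR_GT_FREQ) expands to
 *
 *    ((reg) & GEN9_RPSTAT0_CURR_GT_FREQ_MASK) >> GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT
 *
 * i.e. mask out the field's bits, then shift them down to bit 0.
 */
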
static void
read_gt_frequency(struct gen_perf_context *perf_ctx,
                  struct gen_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
      end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));

   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}

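/* Sanity-check arithmetic for the decoding above: Gen7/8 report the GT
 * frequency in 50MHz units and Gen9+ in 50/3MHz (16.666MHz) units, so
 * e.g. a raw field value of 42 on Gen9 decodes as 42 * 50 / 3 = 700,
 * i.e. 700MHz, which the final scaling turns into 700000000Hz.
 */
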
static int
get_oa_counter_data(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);

      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf_cfg, queryinfo,
                                               query->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf_cfg, queryinfo,
                                              query->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter_size;
      }
   }

   return written;
}

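/* Note the output layout: each counter is written at its
 * queryinfo-defined 'offset' into 'data' rather than packed back to
 * back, so 'written' ends up as the end offset of the last counter
 * emitted. A caller reading back a (hypothetical) uint64 counter at
 * offset 8 would do something like:
 *
 *    uint64_t v;
 *    memcpy(&v, data + 8, sizeof(v));
 */
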
static int
get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        size_t data_size,
                        uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   uint8_t *p = data;

   uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);

   return p - data;
}

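/* Sketch of the scaling above, with made-up numbers: a counter whose
 * hardware value ticks 4 times per API-visible event could carry
 * numerator = 1, denominator = 4, so a raw delta of 400 is reported as
 * 400 * 1 / 4 = 100. Counters with numerator == denominator are passed
 * through untouched.
 */
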
void
gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        int data_size,
                        unsigned *data,
                        unsigned *bytes_written)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   int written = 0;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!query->oa.results_accumulated) {
         read_gt_frequency(perf_ctx, query);
         uint32_t *begin_report = query->oa.map;
         uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
         gen_perf_query_result_read_frequencies(&query->oa.result,
                                                perf_ctx->devinfo,
                                                begin_report,
                                                end_report);
         accumulate_oa_reports(perf_ctx, query);
         assert(query->oa.results_accumulated);

         perf_cfg->vtbl.bo_unmap(query->oa.bo);
         query->oa.map = NULL;
      }
      if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &query->oa.result,
                                                     query->oa.gt_frequency[0],
                                                     query->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}

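/* A rough usage sketch from a frontend's point of view (the buffer and
 * its size here are hypothetical): once the query's results are known
 * to be ready,
 *
 *    uint64_t results[64];   // hypothetical storage
 *    unsigned written = 0;
 *    gen_perf_get_query_data(perf_ctx, query, sizeof(results),
 *                            (unsigned *)results, &written);
 *    // the first 'written' bytes of 'results' are now valid.
 */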