/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef GEN_PERF_H
#define GEN_PERF_H

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/sysmacros.h>

#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "util/ralloc.h"
struct gen_device_info;
struct gen_perf_config;
struct gen_perf_query_info;
enum gen_perf_counter_type {
   GEN_PERF_COUNTER_TYPE_EVENT,
   GEN_PERF_COUNTER_TYPE_DURATION_NORM,
   GEN_PERF_COUNTER_TYPE_DURATION_RAW,
   GEN_PERF_COUNTER_TYPE_THROUGHPUT,
   GEN_PERF_COUNTER_TYPE_RAW,
   GEN_PERF_COUNTER_TYPE_TIMESTAMP,
};
enum gen_perf_counter_data_type {
   GEN_PERF_COUNTER_DATA_TYPE_BOOL32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT64,
   GEN_PERF_COUNTER_DATA_TYPE_FLOAT,
   GEN_PERF_COUNTER_DATA_TYPE_DOUBLE,
};
struct gen_pipeline_stat {
   uint32_t reg;
   uint32_t numerator;
   uint32_t denominator;
};
/*
 * The largest OA formats we can use include:
 * 1 timestamp, 45 A counters, 8 B counters and 8 C counters, or
 * 1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters.
 */
#define MAX_OA_REPORT_COUNTERS 62
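/* i.e. the worst case above is 1 + 45 + 8 + 8 = 62 accumulator slots. */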
#define IA_VERTICES_COUNT   0x2310
#define IA_PRIMITIVES_COUNT 0x2318
#define VS_INVOCATION_COUNT 0x2320
#define HS_INVOCATION_COUNT 0x2300
#define DS_INVOCATION_COUNT 0x2308
#define GS_INVOCATION_COUNT 0x2328
#define GS_PRIMITIVES_COUNT 0x2330
#define CL_INVOCATION_COUNT 0x2338
#define CL_PRIMITIVES_COUNT 0x2340
#define PS_INVOCATION_COUNT 0x2348
#define CS_INVOCATION_COUNT 0x2290
#define PS_DEPTH_COUNT      0x2350
/*
 * We currently allocate only one page for pipeline statistics queries. Here
 * we derive the maximum number of counters for that amount.
 */
#define STATS_BO_SIZE             4096
#define STATS_BO_END_OFFSET_BYTES (STATS_BO_SIZE / 2)
#define MAX_STAT_COUNTERS         (STATS_BO_END_OFFSET_BYTES / 8)
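/* With the values above: 4096 / 2 = 2048 bytes per begin/end half, and
 * 2048 / 8 = 256 64-bit statistics counters at most.
 */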
#define I915_PERF_OA_SAMPLE_SIZE (8 +   /* drm_i915_perf_record_header */ \
                                  256)  /* OA counter report */
struct gen_perf_query_result {
   /**
    * Storage for the final accumulated OA counters.
    */
   uint64_t accumulator[MAX_OA_REPORT_COUNTERS];

   /**
    * Hw ID used by the context on which the query was running.
    */
   uint32_t hw_id;

   /**
    * Number of reports accumulated to produce the results.
    */
   uint32_t reports_accumulated;

   /**
    * Frequency in the slices of the GT at the begin and end of the query.
    */
   uint64_t slice_frequency[2];

   /**
    * Frequency in the unslice of the GT at the begin and end of the query.
    */
   uint64_t unslice_frequency[2];
};
struct gen_perf_query_counter {
   const char *name;
   const char *desc;
   enum gen_perf_counter_type type;
   enum gen_perf_counter_data_type data_type;
   size_t offset;

   uint64_t (*oa_counter_read_uint64)(struct gen_perf_config *perf,
                                      const struct gen_perf_query_info *query,
                                      const uint64_t *accumulator);
   float (*oa_counter_read_float)(struct gen_perf_config *perf,
                                  const struct gen_perf_query_info *query,
                                  const uint64_t *accumulator);
   struct gen_pipeline_stat pipeline_stat;
};
/* A single register write: which register to program and with what value. */
struct gen_perf_query_register_prog {
   uint32_t reg;
   uint32_t val;
};
struct gen_perf_query_info {
   enum gen_perf_query_type {
      GEN_PERF_QUERY_TYPE_OA,
      GEN_PERF_QUERY_TYPE_RAW,
      GEN_PERF_QUERY_TYPE_PIPELINE,
   } kind;

   struct gen_perf_query_counter *counters;
   int n_counters;
   int max_counters;

   uint64_t oa_metrics_set_id;

   /* For indexing into the accumulator[] ... */
   int gpu_clock_offset;

   /* Register programming for a given query */
   struct gen_perf_query_register_prog *flex_regs;
   uint32_t n_flex_regs;

   struct gen_perf_query_register_prog *mux_regs;
   uint32_t n_mux_regs;

   struct gen_perf_query_register_prog *b_counter_regs;
   uint32_t n_b_counter_regs;
};
struct gen_perf_config {
   struct gen_perf_query_info *queries;
   int n_queries;

   /* Variables referenced in the XML meta data for OA performance
    * counters, e.g. in the normalization equations.
    *
    * All uint64_t for consistent operand types in generated code.
    */
   uint64_t timestamp_frequency; /** $GpuTimestampFrequency */
   uint64_t n_eus;               /** $EuCoresTotalCount */
   uint64_t n_eu_slices;         /** $EuSlicesTotalCount */
   uint64_t n_eu_sub_slices;     /** $EuSubslicesTotalCount */
   uint64_t eu_threads_count;    /** $EuThreadsCount */
   uint64_t slice_mask;          /** $SliceMask */
   uint64_t subslice_mask;       /** $SubsliceMask */
   uint64_t gt_min_freq;         /** $GpuMinFrequency */
   uint64_t gt_max_freq;         /** $GpuMaxFrequency */
   uint64_t revision;            /** $SkuRevisionId */

   /* OA metric sets, indexed by GUID, as known by Mesa at build time, to
    * cross-reference with the GUIDs of configs advertised by the kernel at
    * runtime.
    */
   struct hash_table *oa_metrics_table;

   /* Location of the device's sysfs entry. */
   char sysfs_dev_dir[256];

   void *(*bo_alloc)(void *bufmgr, const char *name, uint64_t size);
   void (*bo_unreference)(void *bo);
   void *(*bo_map)(void *ctx, void *bo, unsigned flags);
   void (*bo_unmap)(void *bo);
   void (*emit_mi_flush)(void *ctx);
   void (*emit_mi_report_perf_count)(void *ctx,
                                     void *bo,
                                     uint32_t offset_in_bytes,
                                     uint32_t report_id);
   void (*batchbuffer_flush)(void *ctx,
                             const char *file, int line);
   void (*capture_frequency_stat_register)(void *ctx, void *bo,
                                           uint32_t bo_offset);
   void (*store_register_mem64)(void *ctx, void *bo, uint32_t reg, uint32_t offset);
};
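/* A minimal sketch of how a driver might wire up the callbacks above; the
 * my_* helpers and driver_ctx are hypothetical placeholders for the driver's
 * own buffer-manager and command-stream functions:
 *
 *    struct gen_perf_config *perf = gen_perf_new(driver_ctx);
 *
 *    perf->bo_alloc             = my_bo_alloc;
 *    perf->bo_map               = my_bo_map;
 *    perf->bo_unmap             = my_bo_unmap;
 *    perf->bo_unreference       = my_bo_unreference;
 *    perf->emit_mi_flush        = my_emit_mi_flush;
 *    perf->store_register_mem64 = my_store_register_mem64;
 */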
/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * brw->perfquery.sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *                ________________A_________________
 *                | ________B_________ _____C___________
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [ 0 ][ 0 ]
 *
 * These must be preserved until the leading ref drops to zero:
 *           [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *
 * [ 0 ][ 1 ]
 *        ^_______ Add a reference and store pointer to node in
 *                 A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *
 * [ 0 ][ 2 ]
 *        ^_______ Add a reference and store pointer to
 *                 node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *
 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
 *        ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [ 1 ][ 0 ][ 0 ][ 0 ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                | ______B___________ ____C___
 *
 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
 *                        ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [ X ][ X ][ X ][ X ]
 *                   keeping -> [ 1 ][ 0 ][ 0 ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};
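/* A minimal sketch of the begin/end protocol described above (illustrative
 * only; the list helpers come from "compiler/glsl/list.h" and the real
 * entry points are declared at the bottom of this header):
 *
 *    // Query begin: reference the current tail buffer so it and every
 *    // later buffer stays around until this query has been processed.
 *    struct oa_sample_buf *tail_buf =
 *       exec_node_data(struct oa_sample_buf,
 *                      exec_list_get_tail(&perf_ctx->sample_buffers), link);
 *    tail_buf->refcount++;
 *    query->oa.samples_head = &tail_buf->link;
 *
 *    // Query end (after accumulating all intermediate samples): drop the
 *    // reference and let the reaper recycle unreferenced leading buffers.
 *    struct oa_sample_buf *head_buf =
 *       exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
 *    head_buf->refcount--;
 *    query->oa.samples_head = NULL;
 *    gen_perf_reap_old_sample_buffers(perf_ctx);
 */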
/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct gen_perf_query_object
{
   const struct gen_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {
         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of the mapped @bo.
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report, and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the brw->perfquery.sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query).
          *
          * (See struct oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Frequency of the GT at begin and end of the query.
          */
         uint64_t gt_frequency[2];

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct gen_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};
struct gen_perf_context {
   struct gen_perf_config *perf;

   void * ctx;  /* driver context (eg, brw_context) */
   const struct gen_device_info *devinfo;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the GPU.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended. However, the
    * results have not been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;
};
void gen_perf_init_context(struct gen_perf_context *perf_ctx,
                           struct gen_perf_config *perf_cfg,
                           void * ctx,  /* driver context (eg, brw_context) */
                           void * bufmgr,  /* eg brw_bufmgr */
                           const struct gen_device_info *devinfo,
                           uint32_t hw_ctx,
                           int drm_fd);
static inline size_t
gen_perf_query_counter_get_size(const struct gen_perf_query_counter *counter)
{
   switch (counter->data_type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32:
      return sizeof(uint32_t);
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32:
      return sizeof(uint32_t);
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
      return sizeof(uint64_t);
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
      return sizeof(float);
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE:
      return sizeof(double);
   default:
      unreachable("invalid counter data type");
   }
}
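/* For example, the storage needed to pack one snapshot of a query's counters
 * can be computed by summing the per-counter sizes (a usage sketch, not a
 * helper provided by this header):
 *
 *    size_t data_size = 0;
 *    for (int i = 0; i < query->n_counters; i++)
 *       data_size += gen_perf_query_counter_get_size(&query->counters[i]);
 */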
static inline struct gen_perf_query_info *
gen_perf_query_append_query_info(struct gen_perf_config *perf, int max_counters)
{
   struct gen_perf_query_info *query;

   perf->queries = reralloc(perf, perf->queries,
                            struct gen_perf_query_info,
                            ++perf->n_queries);
   query = &perf->queries[perf->n_queries - 1];
   memset(query, 0, sizeof(*query));

   if (max_counters > 0) {
      query->max_counters = max_counters;
      query->counters =
         rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
   }

   return query;
}
static inline void
gen_perf_query_info_add_stat_reg(struct gen_perf_query_info *query,
                                 uint32_t reg,
                                 uint32_t numerator,
                                 uint32_t denominator,
                                 const char *name,
                                 const char *description)
{
   struct gen_perf_query_counter *counter;

   assert(query->n_counters < query->max_counters);

   counter = &query->counters[query->n_counters];
   counter->name = name;
   counter->desc = description;
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
   counter->offset = sizeof(uint64_t) * query->n_counters;
   counter->pipeline_stat.reg = reg;
   counter->pipeline_stat.numerator = numerator;
   counter->pipeline_stat.denominator = denominator;

   query->n_counters++;
}
static inline void
gen_perf_query_info_add_basic_stat_reg(struct gen_perf_query_info *query,
                                       uint32_t reg, const char *name)
{
   gen_perf_query_info_add_stat_reg(query, reg, 1, 1, name, name);
}
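/* A usage sketch (the description strings are only illustrative): a driver
 * building its pipeline statistics query appends a query info and registers
 * counters against the MMIO statistics registers defined above:
 *
 *    struct gen_perf_query_info *query =
 *       gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);
 *
 *    query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
 *    gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
 *                                           "N vertices submitted");
 *    gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
 *                                           "N primitives entering clipping");
 */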
static inline struct gen_perf_config *
gen_perf_new(void *ctx)
{
   struct gen_perf_config *perf = rzalloc(ctx, struct gen_perf_config);
   return perf;
}
bool gen_perf_load_oa_metrics(struct gen_perf_config *perf, int fd,
                              const struct gen_device_info *devinfo);
bool gen_perf_load_metric_id(struct gen_perf_config *perf, const char *guid,
                             uint64_t *metric_id);

void gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                            const struct gen_device_info *devinfo,
                                            const uint32_t *start,
                                            const uint32_t *end);
void gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                      const struct gen_perf_query_info *query,
                                      const uint32_t *start,
                                      const uint32_t *end);
void gen_perf_query_result_clear(struct gen_perf_query_result *result);
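/* Typical flow for reading back one OA counter from an accumulated result (a
 * sketch only; begin_report/end_report stand for the Begin/End
 * MI_REPORT_PERF_COUNT snapshots mapped from the query's OA BO):
 *
 *    struct gen_perf_query_result result;
 *
 *    gen_perf_query_result_clear(&result);
 *    gen_perf_query_result_accumulate(&result, queryinfo,
 *                                     begin_report, end_report);
 *
 *    const struct gen_perf_query_counter *counter = &queryinfo->counters[0];
 *    if (counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64) {
 *       uint64_t value =
 *          counter->oa_counter_read_uint64(perf, queryinfo,
 *                                          result.accumulator);
 *    }
 */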
void gen_perf_query_register_mdapi_statistic_query(const struct gen_device_info *devinfo,
                                                   struct gen_perf_config *perf);
void gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
                                            struct gen_perf_config *perf);
uint64_t gen_perf_query_get_metric_id(struct gen_perf_config *perf,
                                      const struct gen_perf_query_info *query);
struct oa_sample_buf * gen_perf_get_free_sample_buf(struct gen_perf_context *perf);
void gen_perf_reap_old_sample_buffers(struct gen_perf_context *perf_ctx);
void gen_perf_free_sample_bufs(struct gen_perf_context *perf_ctx);

void gen_perf_snapshot_statistics_registers(void *context,
                                            struct gen_perf_config *perf,
                                            struct gen_perf_query_object *obj,
                                            uint32_t offset_in_bytes);

void gen_perf_close(struct gen_perf_context *perfquery,
                    const struct gen_perf_query_info *query);
bool gen_perf_open(struct gen_perf_context *perfquery,
                   int metrics_set_id,
                   int report_format,
                   int period_exponent,
                   int drm_fd,
                   uint32_t ctx_id);
bool gen_perf_inc_n_users(struct gen_perf_context *perfquery);
void gen_perf_dec_n_users(struct gen_perf_context *perfquery);

bool gen_perf_begin_query(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query);

#endif /* GEN_PERF_H */