/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef GEN_PERF_H
#define GEN_PERF_H

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

#include <sys/sysmacros.h>

#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "util/ralloc.h"
struct gen_device_info;
struct gen_perf_config;
struct gen_perf_query_info;
#define GEN7_RPSTAT1                       0xA01C
#define GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT    7
#define GEN7_RPSTAT1_CURR_GT_FREQ_MASK     INTEL_MASK(13, 7)
#define GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT    0
#define GEN7_RPSTAT1_PREV_GT_FREQ_MASK     INTEL_MASK(6, 0)

#define GEN9_RPSTAT0                       0xA01C
#define GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT    23
#define GEN9_RPSTAT0_CURR_GT_FREQ_MASK     INTEL_MASK(31, 23)
#define GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT    0
#define GEN9_RPSTAT0_PREV_GT_FREQ_MASK     INTEL_MASK(8, 0)
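
/* A minimal decoding sketch (added illustration, not an upstream helper):
 * pull the current GT frequency field out of a captured GEN7_RPSTAT1 value
 * using the shift/mask pairs above. Guarded on INTEL_MASK() since, like the
 * mask macros themselves, it relies on the including driver to define it.
 * The * 50 scaling assumes the Gen7/8 convention of expressing the frequency
 * ratio in 50 MHz units (Gen9+ uses 50/3 MHz units instead).
 */
#ifdef INTEL_MASK
static inline uint64_t
gen7_rpstat1_curr_gt_freq_mhz(uint32_t rpstat1)
{
   return ((rpstat1 & GEN7_RPSTAT1_CURR_GT_FREQ_MASK) >>
           GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT) * 50ULL;
}
#endif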
enum gen_perf_counter_type {
   GEN_PERF_COUNTER_TYPE_EVENT,
   GEN_PERF_COUNTER_TYPE_DURATION_NORM,
   GEN_PERF_COUNTER_TYPE_DURATION_RAW,
   GEN_PERF_COUNTER_TYPE_THROUGHPUT,
   GEN_PERF_COUNTER_TYPE_RAW,
   GEN_PERF_COUNTER_TYPE_TIMESTAMP,
};
enum gen_perf_counter_data_type {
   GEN_PERF_COUNTER_DATA_TYPE_BOOL32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT64,
   GEN_PERF_COUNTER_DATA_TYPE_FLOAT,
   GEN_PERF_COUNTER_DATA_TYPE_DOUBLE,
};
struct gen_pipeline_stat {
   uint32_t reg;
   uint32_t numerator;
   uint32_t denominator;
};
/*
 * The largest OA formats we can use include:
 * For Haswell:
 *   1 timestamp, 45 A counters, 8 B counters and 8 C counters.
 * For Gen8+:
 *   1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
 */
#define MAX_OA_REPORT_COUNTERS 62
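
/* The bound above corresponds to the larger (Haswell-style) format:
 * 1 + 45 + 8 + 8 = 62 counters; the Gen8+ format needs at most
 * 1 + 1 + 36 + 8 + 8 = 54.
 */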
#define IA_VERTICES_COUNT    0x2310
#define IA_PRIMITIVES_COUNT  0x2318
#define VS_INVOCATION_COUNT  0x2320
#define HS_INVOCATION_COUNT  0x2300
#define DS_INVOCATION_COUNT  0x2308
#define GS_INVOCATION_COUNT  0x2328
#define GS_PRIMITIVES_COUNT  0x2330
#define CL_INVOCATION_COUNT  0x2338
#define CL_PRIMITIVES_COUNT  0x2340
#define PS_INVOCATION_COUNT  0x2348
#define CS_INVOCATION_COUNT  0x2290
#define PS_DEPTH_COUNT       0x2350
/*
 * We currently allocate only one page for pipeline statistics queries. Here
 * we derive the maximum number of counters for that amount.
 */
#define STATS_BO_SIZE             4096
#define STATS_BO_END_OFFSET_BYTES (STATS_BO_SIZE / 2)
#define MAX_STAT_COUNTERS         (STATS_BO_END_OFFSET_BYTES / 8)
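
/* An illustrative sketch (hypothetical helpers, not upstream API) of the
 * layout those macros imply: begin snapshots live in the first half of the
 * stats BO, end snapshots in the second half, one 64-bit value per counter,
 * hence MAX_STAT_COUNTERS = 2048 / 8 = 256.
 */
static inline uint32_t
stats_counter_begin_offset_sketch(int counter)
{
   return counter * sizeof(uint64_t);
}

static inline uint32_t
stats_counter_end_offset_sketch(int counter)
{
   return STATS_BO_END_OFFSET_BYTES + counter * sizeof(uint64_t);
}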
#define I915_PERF_OA_SAMPLE_SIZE (8 +   /* drm_i915_perf_record_header */ \
                                  256)  /* OA counter report */
struct gen_perf_query_result {
   /**
    * Storage for the final accumulated OA counters.
    */
   uint64_t accumulator[MAX_OA_REPORT_COUNTERS];

   /**
    * Hw ID used by the context on which the query was running.
    */
   uint32_t hw_id;

   /**
    * Number of reports accumulated to produce the results.
    */
   uint32_t reports_accumulated;

   /**
    * Frequency in the slices of the GT at the begin and end of the
    * query.
    */
   uint64_t slice_frequency[2];

   /**
    * Frequency in the unslice of the GT at the begin and end of the
    * query.
    */
   uint64_t unslice_frequency[2];
};
struct gen_perf_query_counter {
   enum gen_perf_counter_type type;
   enum gen_perf_counter_data_type data_type;

   union {
      uint64_t (*oa_counter_read_uint64)(struct gen_perf_config *perf,
                                         const struct gen_perf_query_info *query,
                                         const uint64_t *accumulator);
      float (*oa_counter_read_float)(struct gen_perf_config *perf,
                                     const struct gen_perf_query_info *query,
                                     const uint64_t *accumulator);
      struct gen_pipeline_stat pipeline_stat;
   };
};
struct gen_perf_query_register_prog {
   uint32_t reg;
   uint32_t val;
};
struct gen_perf_query_info {
   enum gen_perf_query_type {
      GEN_PERF_QUERY_TYPE_OA,
      GEN_PERF_QUERY_TYPE_RAW,
      GEN_PERF_QUERY_TYPE_PIPELINE,
   } kind;

   struct gen_perf_query_counter *counters;

   uint64_t oa_metrics_set_id;

   /* For indexing into the accumulator[] ... */
   int gpu_clock_offset;

   /* Register programming for a given query */
   struct gen_perf_query_register_prog *flex_regs;
   uint32_t n_flex_regs;

   struct gen_perf_query_register_prog *mux_regs;
   uint32_t n_mux_regs;

   struct gen_perf_query_register_prog *b_counter_regs;
   uint32_t n_b_counter_regs;
};
struct gen_perf_config {
   struct gen_perf_query_info *queries;
   int n_queries;

   /* Variables referenced in the XML meta data for OA performance
    * counters, e.g. in the normalization equations.
    *
    * All uint64_t for consistent operand types in generated code
    */
   uint64_t timestamp_frequency; /** $GpuTimestampFrequency */
   uint64_t n_eus;               /** $EuCoresTotalCount */
   uint64_t n_eu_slices;         /** $EuSlicesTotalCount */
   uint64_t n_eu_sub_slices;     /** $EuSubslicesTotalCount */
   uint64_t eu_threads_count;    /** $EuThreadsCount */
   uint64_t slice_mask;          /** $SliceMask */
   uint64_t subslice_mask;       /** $SubsliceMask */
   uint64_t gt_min_freq;         /** $GpuMinFrequency */
   uint64_t gt_max_freq;         /** $GpuMaxFrequency */
   uint64_t revision;            /** $SkuRevisionId */

   /* OA metric sets, indexed by GUID, as known by Mesa at build time, to
    * cross-reference with the GUIDs of configs advertised by the kernel at
    * runtime.
    */
   struct hash_table *oa_metrics_table;

   /* Location of the device's sysfs entry. */
   char sysfs_dev_dir[256];
   void *(*bo_alloc)(void *bufmgr, const char *name, uint64_t size);
   void (*bo_unreference)(void *bo);
   void *(*bo_map)(void *ctx, void *bo, unsigned flags);
   void (*bo_unmap)(void *bo);
   bool (*batch_references)(void *batch, void *bo);
   void (*bo_wait_rendering)(void *bo);
   int (*bo_busy)(void *bo);
   void (*emit_mi_flush)(void *ctx);
   void (*emit_mi_report_perf_count)(void *ctx,
                                     void *bo,
                                     uint32_t offset_in_bytes,
                                     uint32_t report_id);
   void (*batchbuffer_flush)(void *ctx,
                             const char *file, int line);
   void (*capture_frequency_stat_register)(void *ctx, void *bo,
                                           uint32_t reg_offset);
   void (*store_register_mem64)(void *ctx, void *bo, uint32_t reg, uint32_t offset);
};
/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * brw->perfquery.sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                 |
 *                | ________B_________ _____C___________
 *                | |                | |                |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [  0  ][  0  ]
 *
 * These must be preserved until the leading ref drops to zero:
 *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [  0  ][  1  ]
 *           ^_______ Add a reference and store pointer to node in
 *                    A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [  0  ][  2  ]
 *           ^_______ Add a reference and store pointer to
 *                    node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [  0  ][  1  ][  0  ][  0  ][  0  ]
 *           ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [  1  ][  0  ][  0  ][  0  ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                 |
 *                |                    ____C___________
 *                |                    |              |
 * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
 *                                ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [  X  ][  X  ][  X  ][  X  ]
 *                   keeping -> [  1  ][  0  ][  0  ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};
/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct gen_perf_query_object
{
   const struct gen_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {

         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of the mapped @bo.
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the brw->perfquery.sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query)
          *
          * (See struct oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Frequency of the GT at begin and end of the query.
          */
         uint64_t gt_frequency[2];

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct gen_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};
struct gen_perf_context {
   struct gen_perf_config *perf;

   void * ctx;  /* driver context (eg, brw_context) */
   void * bufmgr;
   const struct gen_device_info *devinfo;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the gpu.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended. However, the
    * results have not been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;
};
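
/* A minimal sketch (added illustration, not upstream API) of the reaping
 * pass described above struct oa_sample_buf: walk sample_buffers from the
 * head, recycling unreferenced nodes onto free_sample_buffers, stopping at
 * the first referenced node, and always keeping the tail so the list never
 * becomes empty. The real entry point is gen_perf_reap_old_sample_buffers(),
 * declared below; field names follow the structures in this header.
 */
static inline void
gen_perf_reap_sample_buffers_sketch(struct gen_perf_context *perf_ctx)
{
   struct exec_node *tail_node = exec_list_get_tail(&perf_ctx->sample_buffers);

   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers) {
      /* A reference pins this node and every node after it: stop here. */
      if (buf->refcount > 0)
         break;

      /* Keep at least one node so the list is never empty. */
      if (&buf->link == tail_node)
         break;

      exec_node_remove(&buf->link);
      exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
   }
}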
void gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
                           const struct gen_device_info *devinfo,
                           int drm_fd);
void gen_perf_init_context(struct gen_perf_context *perf_ctx,
                           struct gen_perf_config *perf_cfg,
                           void * ctx,    /* driver context (eg, brw_context) */
                           void * bufmgr, /* eg brw_bufmgr */
                           const struct gen_device_info *devinfo,
                           uint32_t hw_ctx,
                           int drm_fd);
static inline size_t
gen_perf_query_counter_get_size(const struct gen_perf_query_counter *counter)
{
   switch (counter->data_type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32:
      return sizeof(uint32_t);
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32:
      return sizeof(uint32_t);
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
      return sizeof(uint64_t);
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
      return sizeof(float);
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE:
      return sizeof(double);
   default:
      unreachable("invalid counter data type");
   }
}
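
/* An illustrative (hypothetical) use of the helper above: packing query
 * counters back-to-back into a client result buffer. Drivers do something
 * similar when assigning counter offsets; this simplified loop ignores
 * alignment.
 */
static inline size_t
gen_perf_layout_counters_sketch(const struct gen_perf_query_counter *counters,
                                int n_counters, size_t *offsets)
{
   size_t data_size = 0;

   for (int i = 0; i < n_counters; i++) {
      offsets[i] = data_size;
      data_size += gen_perf_query_counter_get_size(&counters[i]);
   }

   return data_size; /* total bytes needed for one query result */
}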
static inline struct gen_perf_config *
gen_perf_new(void *ctx)
{
   struct gen_perf_config *perf = rzalloc(ctx, struct gen_perf_config);
   return perf;
}
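
/* Typical setup flow (illustrative only; mem_ctx, driver_ctx, bufmgr,
 * devinfo, hw_ctx and drm_fd stand in for driver-owned values):
 *
 *    struct gen_perf_config *perf_cfg = gen_perf_new(mem_ctx);
 *    gen_perf_init_metrics(perf_cfg, devinfo, drm_fd);
 *    gen_perf_init_context(perf_ctx, perf_cfg, driver_ctx, bufmgr,
 *                          devinfo, hw_ctx, drm_fd);
 */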
bool gen_perf_load_metric_id(struct gen_perf_config *perf, const char *guid,
                             uint64_t *metric_id);
void gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                            const struct gen_device_info *devinfo,
                                            const uint32_t *start,
                                            const uint32_t *end);
void gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                      const struct gen_perf_query_info *query,
                                      const uint32_t *start,
                                      const uint32_t *end);
void gen_perf_query_result_clear(struct gen_perf_query_result *result);
uint64_t gen_perf_query_get_metric_id(struct gen_perf_config *perf,
                                      const struct gen_perf_query_info *query);
struct oa_sample_buf * gen_perf_get_free_sample_buf(struct gen_perf_context *perf);
void gen_perf_reap_old_sample_buffers(struct gen_perf_context *perf_ctx);
void gen_perf_free_sample_bufs(struct gen_perf_context *perf_ctx);
void gen_perf_snapshot_statistics_registers(void *context,
                                            struct gen_perf_config *perf,
                                            struct gen_perf_query_object *obj,
                                            uint32_t offset_in_bytes);

struct gen_perf_query_object *
gen_perf_new_query(struct gen_perf_context *, unsigned query_index);
void gen_perf_close(struct gen_perf_context *perfquery,
                    const struct gen_perf_query_info *query);
bool gen_perf_open(struct gen_perf_context *perfquery,
                   int metrics_set_id,
                   int report_format,
                   int period_exponent,
                   int drm_fd,
                   uint32_t ctx_id);

bool gen_perf_inc_n_users(struct gen_perf_context *perfquery);
void gen_perf_dec_n_users(struct gen_perf_context *perfquery);
bool gen_perf_begin_query(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query);
void gen_perf_end_query(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query);
void gen_perf_wait_query(struct gen_perf_context *perf_ctx,
                         struct gen_perf_query_object *query,
                         void *current_batch);
bool gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
                             struct gen_perf_query_object *query,
                             void *current_batch);
void gen_perf_delete_query(struct gen_perf_context *perf_ctx,
                           struct gen_perf_query_object *query);
void gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
                             struct gen_perf_query_object *query,
                             int data_size,
                             unsigned *data,
                             unsigned *bytes_written);
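
/* Expected lifecycle of a single query through the entry points above
 * (illustrative only; perf_ctx, query_index, batch, data_size and data
 * stand in for driver-owned values):
 *
 *    struct gen_perf_query_object *q =
 *       gen_perf_new_query(perf_ctx, query_index);
 *
 *    if (gen_perf_begin_query(perf_ctx, q)) {
 *       ... emit the GPU work to be measured ...
 *       gen_perf_end_query(perf_ctx, q);
 *
 *       gen_perf_wait_query(perf_ctx, q, batch);
 *       if (gen_perf_is_query_ready(perf_ctx, q, batch)) {
 *          unsigned written;
 *          gen_perf_get_query_data(perf_ctx, q, data_size, data, &written);
 *       }
 *    }
 *    gen_perf_delete_query(perf_ctx, q);
 */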
#endif /* GEN_PERF_H */