intel/perf: expose method to create query
[mesa.git] / src / intel / perf / gen_perf.h
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef GEN_PERF_H
#define GEN_PERF_H

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <sys/sysmacros.h>

#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "util/ralloc.h"

struct gen_device_info;

struct gen_perf_config;
struct gen_perf_query_info;

#define GEN7_RPSTAT1                       0xA01C
#define GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT    7
#define GEN7_RPSTAT1_CURR_GT_FREQ_MASK     INTEL_MASK(13, 7)
#define GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT    0
#define GEN7_RPSTAT1_PREV_GT_FREQ_MASK     INTEL_MASK(6, 0)

#define GEN9_RPSTAT0                       0xA01C
#define GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT    23
#define GEN9_RPSTAT0_CURR_GT_FREQ_MASK     INTEL_MASK(31, 23)
#define GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT    0
#define GEN9_RPSTAT0_PREV_GT_FREQ_MASK     INTEL_MASK(8, 0)

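/* A minimal sketch (an illustrative addition, not part of the original
 * header): decoding the current GT frequency field from a raw GEN9_RPSTAT0
 * read using the shift/mask pairs above. INTEL_MASK(high, low) is assumed
 * to be Mesa's helper that builds a mask covering bits high..low, and the
 * 50/3 MHz unit per step is an assumption about the Gen9 encoding.
 */
static inline uint64_t
gen9_rpstat0_curr_gt_freq_mhz(uint32_t rpstat0)
{
   uint64_t field = (rpstat0 & GEN9_RPSTAT0_CURR_GT_FREQ_MASK) >>
                    GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT;

   return field * 50 / 3; /* assumed: each step is 50/3 MHz on Gen9 */
}
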
enum gen_perf_counter_type {
   GEN_PERF_COUNTER_TYPE_EVENT,
   GEN_PERF_COUNTER_TYPE_DURATION_NORM,
   GEN_PERF_COUNTER_TYPE_DURATION_RAW,
   GEN_PERF_COUNTER_TYPE_THROUGHPUT,
   GEN_PERF_COUNTER_TYPE_RAW,
   GEN_PERF_COUNTER_TYPE_TIMESTAMP,
};

enum gen_perf_counter_data_type {
   GEN_PERF_COUNTER_DATA_TYPE_BOOL32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT64,
   GEN_PERF_COUNTER_DATA_TYPE_FLOAT,
   GEN_PERF_COUNTER_DATA_TYPE_DOUBLE,
};

struct gen_pipeline_stat {
   uint32_t reg;
   uint32_t numerator;
   uint32_t denominator;
};
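
/* Sketch (illustrative addition, not from the original header): pipeline
 * statistics deltas are typically rescaled by the numerator/denominator
 * pair, e.g. when a register counts in coarser units than the API exposes.
 */
static inline uint64_t
gen_pipeline_stat_scale(const struct gen_pipeline_stat *stat,
                        uint64_t raw_delta)
{
   return raw_delta * stat->numerator / stat->denominator;
}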

/*
 * The largest OA formats we can use include:
 * For Haswell:
 *   1 timestamp, 45 A counters, 8 B counters and 8 C counters.
 * For Gen8+
 *   1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
 */
#define MAX_OA_REPORT_COUNTERS 62

#define IA_VERTICES_COUNT          0x2310
#define IA_PRIMITIVES_COUNT        0x2318
#define VS_INVOCATION_COUNT        0x2320
#define HS_INVOCATION_COUNT        0x2300
#define DS_INVOCATION_COUNT        0x2308
#define GS_INVOCATION_COUNT        0x2328
#define GS_PRIMITIVES_COUNT        0x2330
#define CL_INVOCATION_COUNT        0x2338
#define CL_PRIMITIVES_COUNT        0x2340
#define PS_INVOCATION_COUNT        0x2348
#define CS_INVOCATION_COUNT        0x2290
#define PS_DEPTH_COUNT             0x2350

/*
 * We currently allocate only one page for pipeline statistics queries. Here
 * we derive the maximum number of counters for that amount: the first half
 * of the BO holds the begin snapshots and the second half the end snapshots,
 * and each counter snapshot is a 64-bit (8 byte) value, giving
 * 4096 / 2 / 8 = 256 counters.
 */
#define STATS_BO_SIZE               4096
#define STATS_BO_END_OFFSET_BYTES   (STATS_BO_SIZE / 2)
#define MAX_STAT_COUNTERS           (STATS_BO_END_OFFSET_BYTES / 8)

#define I915_PERF_OA_SAMPLE_SIZE (8 +   /* drm_i915_perf_record_header */ \
                                  256)  /* OA counter report */

struct gen_perf_query_result {
   /**
    * Storage for the final accumulated OA counters.
    */
   uint64_t accumulator[MAX_OA_REPORT_COUNTERS];

   /**
    * Hw ID used by the context on which the query was running.
    */
   uint32_t hw_id;

   /**
    * Number of reports accumulated to produce the results.
    */
   uint32_t reports_accumulated;

   /**
    * Frequency of the GT slices at the beginning and end of the query.
    */
   uint64_t slice_frequency[2];

   /**
    * Frequency of the GT unslice at the beginning and end of the query.
    */
   uint64_t unslice_frequency[2];
};

struct gen_perf_query_counter {
   const char *name;
   const char *desc;
   enum gen_perf_counter_type type;
   enum gen_perf_counter_data_type data_type;
   uint64_t raw_max;
   size_t offset;

   union {
      uint64_t (*oa_counter_read_uint64)(struct gen_perf_config *perf,
                                         const struct gen_perf_query_info *query,
                                         const uint64_t *accumulator);
      float (*oa_counter_read_float)(struct gen_perf_config *perf,
                                     const struct gen_perf_query_info *query,
                                     const uint64_t *accumulator);
      struct gen_pipeline_stat pipeline_stat;
   };
};
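
/* Sketch (illustrative addition): reading one OA counter out of an
 * accumulated result by dispatching on data_type; which member of the
 * union above is valid depends on the counter's data_type.
 */
static inline double
gen_perf_query_counter_read(struct gen_perf_config *perf,
                            const struct gen_perf_query_info *query,
                            const struct gen_perf_query_counter *counter,
                            const uint64_t *accumulator)
{
   switch (counter->data_type) {
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
      return (double) counter->oa_counter_read_uint64(perf, query, accumulator);
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
      return counter->oa_counter_read_float(perf, query, accumulator);
   default:
      return 0.0; /* pipeline statistics counters are read elsewhere */
   }
}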

struct gen_perf_query_register_prog {
   uint32_t reg;
   uint32_t val;
};

struct gen_perf_query_info {
   enum gen_perf_query_type {
      GEN_PERF_QUERY_TYPE_OA,
      GEN_PERF_QUERY_TYPE_RAW,
      GEN_PERF_QUERY_TYPE_PIPELINE,
   } kind;
   const char *name;
   const char *guid;
   struct gen_perf_query_counter *counters;
   int n_counters;
   int max_counters;
   size_t data_size;

   /* OA specific */
   uint64_t oa_metrics_set_id;
   int oa_format;

   /* For indexing into the accumulator[] ... */
   int gpu_time_offset;
   int gpu_clock_offset;
   int a_offset;
   int b_offset;
   int c_offset;

   /* Register programming for a given query */
   struct gen_perf_query_register_prog *flex_regs;
   uint32_t n_flex_regs;

   struct gen_perf_query_register_prog *mux_regs;
   uint32_t n_mux_regs;

   struct gen_perf_query_register_prog *b_counter_regs;
   uint32_t n_b_counter_regs;
};

struct gen_perf_config {
   struct gen_perf_query_info *queries;
   int n_queries;

   /* Variables referenced in the XML meta data for OA performance
    * counters, e.g in the normalization equations.
    *
    * All uint64_t for consistent operand types in generated code
    */
   struct {
      uint64_t timestamp_frequency; /** $GpuTimestampFrequency */
      uint64_t n_eus;               /** $EuCoresTotalCount */
      uint64_t n_eu_slices;         /** $EuSlicesTotalCount */
      uint64_t n_eu_sub_slices;     /** $EuSubslicesTotalCount */
      uint64_t eu_threads_count;    /** $EuThreadsCount */
      uint64_t slice_mask;          /** $SliceMask */
      uint64_t subslice_mask;       /** $SubsliceMask */
      uint64_t gt_min_freq;         /** $GpuMinFrequency */
      uint64_t gt_max_freq;         /** $GpuMaxFrequency */
      uint64_t revision;            /** $SkuRevisionId */
   } sys_vars;

   /* OA metric sets, indexed by GUID, as known by Mesa at build time, to
    * cross-reference with the GUIDs of configs advertised by the kernel at
    * runtime
    */
   struct hash_table *oa_metrics_table;

   /* Location of the device's sysfs entry. */
   char sysfs_dev_dir[256];

   struct {
      void *(*bo_alloc)(void *bufmgr, const char *name, uint64_t size);
      void (*bo_unreference)(void *bo);
      void *(*bo_map)(void *ctx, void *bo, unsigned flags);
      void (*bo_unmap)(void *bo);
      bool (*batch_references)(void *batch, void *bo);
      void (*bo_wait_rendering)(void *bo);
      int (*bo_busy)(void *bo);
      void (*emit_mi_flush)(void *ctx);
      void (*emit_mi_report_perf_count)(void *ctx,
                                        void *bo,
                                        uint32_t offset_in_bytes,
                                        uint32_t report_id);
      void (*batchbuffer_flush)(void *ctx,
                                const char *file, int line);
      void (*capture_frequency_stat_register)(void *ctx, void *bo,
                                              uint32_t bo_offset);
      void (*store_register_mem64)(void *ctx, void *bo, uint32_t reg, uint32_t offset);
   } vtbl;
};
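
/* Sketch (illustrative addition): how a driver might wire its buffer
 * manager into the vtbl above. The my_* functions are hypothetical
 * placeholders standing in for driver hooks (e.g. the brw_bo_* helpers in
 * i965); the real assignments live in the driver, not in this header.
 */
static inline void
gen_perf_example_init_vtbl(struct gen_perf_config *perf)
{
   extern void *my_bo_alloc(void *bufmgr, const char *name, uint64_t size);
   extern void my_bo_unreference(void *bo);
   extern void *my_bo_map(void *ctx, void *bo, unsigned flags);

   perf->vtbl.bo_alloc = my_bo_alloc;             /* hypothetical hooks */
   perf->vtbl.bo_unreference = my_bo_unreference;
   perf->vtbl.bo_map = my_bo_map;
}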

/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * brw->perfquery.sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                 |
 *                | ________B_________ _____C___________
 *                | |                | |           |   |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [  0  ][  0  ]
 *
 * These must be preserved until the leading ref drops to zero:
 *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [  0  ][  1  ]
 *           ^_______ Add a reference and store pointer to node in
 *                    A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [  0  ][  2  ]
 *           ^_______ Add a reference and store pointer to
 *                    node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [  0  ][  1  ][  0  ][  0  ][  0  ]
 *           ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [  1  ][  0  ][  0  ][  0  ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                 |
 *                |                    _____C_________
 *                |                    |             |
 * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
 *           ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [  X  ][  X  ][  X  ][  X  ]
 *                  keeping -> [  1  ][  0  ][  0  ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};

/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct gen_perf_query_object
{
   const struct gen_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {

         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of the mapping of @bo
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report, and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the brw->perfquery.sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query)
          *
          * (See struct brw_oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Frequency of the GT at begin and end of the query.
          */
         uint64_t gt_frequency[2];

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct gen_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};
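
/* Sketch (illustrative addition): validating the report IDs written by
 * MI_REPORT_PERF_COUNT, per the begin_report_id comment above. The begin
 * snapshot should carry the ID we programmed and the end snapshot that
 * ID plus one.
 */
static inline bool
gen_perf_query_report_ids_match(const struct gen_perf_query_object *query,
                                uint32_t begin_report_id,
                                uint32_t end_report_id)
{
   return begin_report_id == (uint32_t) query->oa.begin_report_id &&
          end_report_id == (uint32_t) query->oa.begin_report_id + 1;
}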

struct gen_perf_context {
   struct gen_perf_config *perf;

   void * ctx;  /* driver context (eg, brw_context) */
   void * bufmgr;
   const struct gen_device_info *devinfo;

   uint32_t hw_ctx;
   int drm_fd;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the gpu.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended. However, the
    * results have not been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;
};
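
/* Sketch (illustrative addition): taking a reference on the current tail
 * of sample_buffers when a query begins, matching the refcounting scheme
 * described above struct oa_sample_buf. exec_list_get_tail() and
 * exec_node_data() are the list helpers from compiler/glsl/list.h.
 */
static inline void
gen_perf_example_reference_tail(struct gen_perf_context *perf_ctx,
                                struct gen_perf_query_object *query)
{
   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf,
                     exec_list_get_tail(&perf_ctx->sample_buffers), link);

   buf->refcount++;                     /* keep this node and its followers */
   query->oa.samples_head = &buf->link; /* first node relevant to the query */
}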

void gen_perf_init_metrics(struct gen_perf_config *perf_cfg,
                           const struct gen_device_info *devinfo,
                           int drm_fd);
void gen_perf_init_context(struct gen_perf_context *perf_ctx,
                           struct gen_perf_config *perf_cfg,
                           void * ctx,  /* driver context (eg, brw_context) */
                           void * bufmgr,  /* eg brw_bufmgr */
                           const struct gen_device_info *devinfo,
                           uint32_t hw_ctx,
                           int drm_fd);

static inline size_t
gen_perf_query_counter_get_size(const struct gen_perf_query_counter *counter)
{
   switch (counter->data_type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32:
      return sizeof(uint32_t);
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32:
      return sizeof(uint32_t);
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
      return sizeof(uint64_t);
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
      return sizeof(float);
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE:
      return sizeof(double);
   default:
      unreachable("invalid counter data type");
   }
}
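
/* Sketch (illustrative addition): laying out a query's counters back to
 * back, showing how counter->offset and query->data_size relate to
 * gen_perf_query_counter_get_size() above.
 */
static inline void
gen_perf_example_layout_counters(struct gen_perf_query_info *query)
{
   query->data_size = 0;
   for (int i = 0; i < query->n_counters; i++) {
      struct gen_perf_query_counter *counter = &query->counters[i];

      counter->offset = query->data_size;
      query->data_size += gen_perf_query_counter_get_size(counter);
   }
}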

static inline struct gen_perf_config *
gen_perf_new(void *ctx)
{
   struct gen_perf_config *perf = rzalloc(ctx, struct gen_perf_config);
   return perf;
}

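/* Sketch (illustrative addition): typical driver-side setup combining
 * gen_perf_new(), gen_perf_init_metrics() and gen_perf_init_context().
 * The mem_ctx/driver_ctx/bufmgr parameters are hypothetical placeholders
 * for whatever the driver passes in.
 */
static inline void
gen_perf_example_setup(struct gen_perf_context *perf_ctx,
                       void *mem_ctx, void *driver_ctx, void *bufmgr,
                       const struct gen_device_info *devinfo,
                       uint32_t hw_ctx, int drm_fd)
{
   struct gen_perf_config *perf_cfg = gen_perf_new(mem_ctx);

   gen_perf_init_metrics(perf_cfg, devinfo, drm_fd);
   gen_perf_init_context(perf_ctx, perf_cfg, driver_ctx, bufmgr,
                         devinfo, hw_ctx, drm_fd);
}
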
bool gen_perf_load_metric_id(struct gen_perf_config *perf, const char *guid,
                             uint64_t *metric_id);

void gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                            const struct gen_device_info *devinfo,
                                            const uint32_t *start,
                                            const uint32_t *end);
void gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                      const struct gen_perf_query_info *query,
                                      const uint32_t *start,
                                      const uint32_t *end);
void gen_perf_query_result_clear(struct gen_perf_query_result *result);
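
/* Sketch (illustrative addition): accumulating a query's OA snapshots into
 * a gen_perf_query_result with the helpers above. begin_report/end_report
 * are assumed to point at the begin and end MI_REPORT_PERF_COUNT reports
 * in the query's mapped BO.
 */
static inline void
gen_perf_example_accumulate(struct gen_perf_query_result *result,
                            const struct gen_perf_query_info *query,
                            const uint32_t *begin_report,
                            const uint32_t *end_report)
{
   gen_perf_query_result_clear(result);
   gen_perf_query_result_accumulate(result, query, begin_report, end_report);
}
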
uint64_t gen_perf_query_get_metric_id(struct gen_perf_config *perf,
                                      const struct gen_perf_query_info *query);
struct oa_sample_buf * gen_perf_get_free_sample_buf(struct gen_perf_context *perf);
void gen_perf_reap_old_sample_buffers(struct gen_perf_context *perf_ctx);
void gen_perf_free_sample_bufs(struct gen_perf_context *perf_ctx);

void gen_perf_snapshot_statistics_registers(void *context,
                                            struct gen_perf_config *perf,
                                            struct gen_perf_query_object *obj,
                                            uint32_t offset_in_bytes);

struct gen_perf_query_object *
gen_perf_new_query(struct gen_perf_context *, unsigned query_index);

void gen_perf_close(struct gen_perf_context *perfquery,
                    const struct gen_perf_query_info *query);
bool gen_perf_open(struct gen_perf_context *perfquery,
                   int metrics_set_id,
                   int report_format,
                   int period_exponent,
                   int drm_fd,
                   uint32_t ctx_id);

bool gen_perf_inc_n_users(struct gen_perf_context *perfquery);
void gen_perf_dec_n_users(struct gen_perf_context *perfquery);

bool gen_perf_begin_query(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query);
void gen_perf_end_query(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query);
void gen_perf_wait_query(struct gen_perf_context *perf_ctx,
                         struct gen_perf_query_object *query,
                         void *current_batch);
bool gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
                             struct gen_perf_query_object *query,
                             void *current_batch);
void gen_perf_delete_query(struct gen_perf_context *perf_ctx,
                           struct gen_perf_query_object *query);
void gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
                             struct gen_perf_query_object *query,
                             int data_size,
                             unsigned *data,
                             unsigned *bytes_written);
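
/* Sketch (illustrative addition): the expected query lifecycle using the
 * entry points above, including the gen_perf_new_query() hook this patch
 * exposes. current_batch and the caller-provided result buffer are
 * driver-side assumptions.
 */
static inline void
gen_perf_example_run_query(struct gen_perf_context *perf_ctx,
                           unsigned query_index, void *current_batch,
                           unsigned *data, int data_size)
{
   struct gen_perf_query_object *query =
      gen_perf_new_query(perf_ctx, query_index);
   unsigned bytes_written = 0;

   if (gen_perf_begin_query(perf_ctx, query)) {
      /* ... emit the workload to profile ... */

      gen_perf_end_query(perf_ctx, query);

      if (!gen_perf_is_query_ready(perf_ctx, query, current_batch))
         gen_perf_wait_query(perf_ctx, query, current_batch);

      gen_perf_get_query_data(perf_ctx, query, data_size,
                              data, &bytes_written);
   }

   gen_perf_delete_query(perf_ctx, query);
}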

#endif /* GEN_PERF_H */