intel/perf: refactor gen_perf_begin_query into gen_perf
src/intel/perf/gen_perf.h
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef GEN_PERF_H
#define GEN_PERF_H

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <sys/sysmacros.h>

#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "util/ralloc.h"

struct gen_device_info;

struct gen_perf_config;
struct gen_perf_query_info;

enum gen_perf_counter_type {
   GEN_PERF_COUNTER_TYPE_EVENT,
   GEN_PERF_COUNTER_TYPE_DURATION_NORM,
   GEN_PERF_COUNTER_TYPE_DURATION_RAW,
   GEN_PERF_COUNTER_TYPE_THROUGHPUT,
   GEN_PERF_COUNTER_TYPE_RAW,
   GEN_PERF_COUNTER_TYPE_TIMESTAMP,
};

enum gen_perf_counter_data_type {
   GEN_PERF_COUNTER_DATA_TYPE_BOOL32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT64,
   GEN_PERF_COUNTER_DATA_TYPE_FLOAT,
   GEN_PERF_COUNTER_DATA_TYPE_DOUBLE,
};

struct gen_pipeline_stat {
   uint32_t reg;
   uint32_t numerator;
   uint32_t denominator;
};

/*
 * The largest OA formats we can use include:
 * For Haswell:
 *   1 timestamp, 45 A counters, 8 B counters and 8 C counters.
 * For Gen8+:
 *   1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters.
 */
#define MAX_OA_REPORT_COUNTERS 62

#define IA_VERTICES_COUNT   0x2310
#define IA_PRIMITIVES_COUNT 0x2318
#define VS_INVOCATION_COUNT 0x2320
#define HS_INVOCATION_COUNT 0x2300
#define DS_INVOCATION_COUNT 0x2308
#define GS_INVOCATION_COUNT 0x2328
#define GS_PRIMITIVES_COUNT 0x2330
#define CL_INVOCATION_COUNT 0x2338
#define CL_PRIMITIVES_COUNT 0x2340
#define PS_INVOCATION_COUNT 0x2348
#define CS_INVOCATION_COUNT 0x2290
#define PS_DEPTH_COUNT      0x2350

/*
 * We currently allocate only one page for pipeline statistics queries. Here
 * we derive the maximum number of counters for that amount.
 */
#define STATS_BO_SIZE 4096
#define STATS_BO_END_OFFSET_BYTES (STATS_BO_SIZE / 2)
#define MAX_STAT_COUNTERS (STATS_BO_END_OFFSET_BYTES / 8)
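
/* A quick sanity check of the derivation above (a sketch, assuming a C11
 * compiler for _Static_assert): the begin snapshots occupy the first half
 * of the 4096 byte BO, the end snapshots the second half, and each counter
 * snapshot is a uint64_t, so 2048 / 8 = 256 counters fit.
 */
_Static_assert(STATS_BO_END_OFFSET_BYTES == 2048,
               "ending snapshots start at the BO's halfway point");
_Static_assert(MAX_STAT_COUNTERS == 256,
               "one uint64_t snapshot per counter: 2048 / 8 = 256");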

#define I915_PERF_OA_SAMPLE_SIZE (8 +   /* drm_i915_perf_record_header */ \
                                  256)  /* OA counter report */

struct gen_perf_query_result {
   /**
    * Storage for the final accumulated OA counters.
    */
   uint64_t accumulator[MAX_OA_REPORT_COUNTERS];

   /**
    * Hw ID used by the context on which the query was running.
    */
   uint32_t hw_id;

   /**
    * Number of reports accumulated to produce the results.
    */
   uint32_t reports_accumulated;

   /**
    * Frequency of the GT slices at the beginning and end of the query.
    */
   uint64_t slice_frequency[2];

   /**
    * Frequency of the GT unslice at the beginning and end of the query.
    */
   uint64_t unslice_frequency[2];
};

struct gen_perf_query_counter {
   const char *name;
   const char *desc;
   enum gen_perf_counter_type type;
   enum gen_perf_counter_data_type data_type;
   uint64_t raw_max;
   size_t offset;

   union {
      uint64_t (*oa_counter_read_uint64)(struct gen_perf_config *perf,
                                         const struct gen_perf_query_info *query,
                                         const uint64_t *accumulator);
      float (*oa_counter_read_float)(struct gen_perf_config *perf,
                                     const struct gen_perf_query_info *query,
                                     const uint64_t *accumulator);
      struct gen_pipeline_stat pipeline_stat;
   };
};
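
/* A minimal sketch (hypothetical, not part of this header's API) of how a
 * driver might evaluate one counter from the union above, dispatching on
 * data_type: OA counters are computed from the accumulated deltas via the
 * read callbacks, while pipeline statistics counters are snapshotted from
 * registers and don't use a callback.
 */
static inline void
example_read_oa_counter(struct gen_perf_config *perf,
                        const struct gen_perf_query_info *query,
                        const struct gen_perf_query_counter *counter,
                        const uint64_t *accumulator,
                        void *data_out)
{
   switch (counter->data_type) {
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
      *((uint64_t *) data_out) =
         counter->oa_counter_read_uint64(perf, query, accumulator);
      break;
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
      *((float *) data_out) =
         counter->oa_counter_read_float(perf, query, accumulator);
      break;
   default:
      /* Other data types are not produced by the OA counter equations. */
      break;
   }
}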

struct gen_perf_query_register_prog {
   uint32_t reg;
   uint32_t val;
};

struct gen_perf_query_info {
   enum gen_perf_query_type {
      GEN_PERF_QUERY_TYPE_OA,
      GEN_PERF_QUERY_TYPE_RAW,
      GEN_PERF_QUERY_TYPE_PIPELINE,
   } kind;
   const char *name;
   const char *guid;
   struct gen_perf_query_counter *counters;
   int n_counters;
   int max_counters;
   size_t data_size;

   /* OA specific */
   uint64_t oa_metrics_set_id;
   int oa_format;

   /* For indexing into the accumulator[] ... */
   int gpu_time_offset;
   int gpu_clock_offset;
   int a_offset;
   int b_offset;
   int c_offset;

   /* Register programming for a given query */
   struct gen_perf_query_register_prog *flex_regs;
   uint32_t n_flex_regs;

   struct gen_perf_query_register_prog *mux_regs;
   uint32_t n_mux_regs;

   struct gen_perf_query_register_prog *b_counter_regs;
   uint32_t n_b_counter_regs;
};

struct gen_perf_config {
   struct gen_perf_query_info *queries;
   int n_queries;

   /* Variables referenced in the XML metadata for OA performance
    * counters, e.g. in the normalization equations.
    *
    * All uint64_t for consistent operand types in generated code.
    */
   struct {
      uint64_t timestamp_frequency; /** $GpuTimestampFrequency */
      uint64_t n_eus;               /** $EuCoresTotalCount */
      uint64_t n_eu_slices;         /** $EuSlicesTotalCount */
      uint64_t n_eu_sub_slices;     /** $EuSubslicesTotalCount */
      uint64_t eu_threads_count;    /** $EuThreadsCount */
      uint64_t slice_mask;          /** $SliceMask */
      uint64_t subslice_mask;       /** $SubsliceMask */
      uint64_t gt_min_freq;         /** $GpuMinFrequency */
      uint64_t gt_max_freq;         /** $GpuMaxFrequency */
      uint64_t revision;            /** $SkuRevisionId */
   } sys_vars;

   /* OA metric sets, indexed by GUID, as known by Mesa at build time, to
    * cross-reference with the GUIDs of configs advertised by the kernel at
    * runtime.
    */
   struct hash_table *oa_metrics_table;

   /* Location of the device's sysfs entry. */
   char sysfs_dev_dir[256];

   struct {
      void *(*bo_alloc)(void *bufmgr, const char *name, uint64_t size);
      void (*bo_unreference)(void *bo);
      void *(*bo_map)(void *ctx, void *bo, unsigned flags);
      void (*bo_unmap)(void *bo);
      void (*emit_mi_flush)(void *ctx);
      void (*emit_mi_report_perf_count)(void *ctx,
                                        void *bo,
                                        uint32_t offset_in_bytes,
                                        uint32_t report_id);
      void (*batchbuffer_flush)(void *ctx,
                                const char *file, int line);
      void (*capture_frequency_stat_register)(void *ctx, void *bo,
                                              uint32_t bo_offset);
      void (*store_register_mem64)(void *ctx, void *bo,
                                   uint32_t reg, uint32_t offset);
   } vtbl;
};
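
/* A hypothetical sketch of how a driver wires its callbacks into the vtbl
 * above; the my_*() functions are placeholders standing in for a driver's
 * real wrappers (e.g. i965's buffer-object and batchbuffer helpers).
 */
extern void *my_bo_alloc(void *bufmgr, const char *name, uint64_t size);
extern void my_bo_unreference(void *bo);
extern void my_emit_mi_flush(void *ctx);

static inline void
example_init_vtbl(struct gen_perf_config *perf)
{
   perf->vtbl.bo_alloc = my_bo_alloc;
   perf->vtbl.bo_unreference = my_bo_unreference;
   perf->vtbl.emit_mi_flush = my_emit_mi_flush;
   /* ... the remaining callbacks are wired up the same way ... */
}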

/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the perf context's
 * sample_buffers linked list. When we process the results of an OA
 * metrics query we need to consider all the periodic samples between
 * the Begin and End MI_REPORT_PERF_COUNT command markers.
 *
 * 'Periodic' is a simplification, as other automatic reports written by
 * the hardware are also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                |
 *                | ________B_________ _____C___________
 *                | |                | |                |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time:
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [ 0 ][ 0 ]
 *
 * These must be preserved until the leading ref drops to zero:
 *           [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [ 0 ][ 1 ]
 *        ^_______ Add a reference and store pointer to node in
 *                 A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                |         ________B___
 *                |         |
 *
 * [ 0 ][ 2 ]
 *        ^_______ Add a reference and store pointer to
 *                 node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples, we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                |         ______B___________
 *                |         |                |
 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
 *        ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [ 1 ][ 0 ][ 0 ][ 0 ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                |
 *                |                    _____C_________
 *                |                    |             |
 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
 *                       ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [ X ][ X ][ X ][ X ]
 *                   keeping -> [ 1 ][ 0 ][ 0 ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to the free_sample_buffers list and when we
 * come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};
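
/* A minimal sketch (hypothetical, not part of this header's API) of the
 * reference-counting pattern described above: when a query begins, take a
 * reference on the current tail buffer so that it and all later buffers
 * survive until the query's results have been accumulated. The list is
 * never empty, so the tail always exists.
 */
static inline struct exec_node *
example_take_samples_head(struct exec_list *sample_buffers)
{
   struct exec_node *tail = exec_list_get_tail(sample_buffers);
   struct oa_sample_buf *buf = exec_node_data(struct oa_sample_buf,
                                              tail, link);

   buf->refcount++;

   /* Stored in the query's ->oa.samples_head and dropped (refcount--)
    * once the query's reports have been accumulated.
    */
   return tail;
}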

/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct gen_perf_query_object
{
   const struct gen_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {

         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of the mapped @bo.
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report, and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the perf context's sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query).
          *
          * (See struct oa_sample_buf description for more details.)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Frequency of the GT at begin and end of the query.
          */
         uint64_t gt_frequency[2];

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct gen_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};

struct gen_perf_context {
   struct gen_perf_config *perf;

   void * ctx;  /* driver context (e.g. brw_context) */
   void * bufmgr;
   const struct gen_device_info *devinfo;

   uint32_t hw_ctx;
   int drm_fd;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format, so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters, which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the GPU.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended. However, the
    * results have not yet been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries.)
    */
   int n_query_instances;
};

void gen_perf_init_context(struct gen_perf_context *perf_ctx,
                           struct gen_perf_config *perf_cfg,
                           void *ctx,    /* driver context (e.g. brw_context) */
                           void *bufmgr, /* e.g. brw_bufmgr */
                           const struct gen_device_info *devinfo,
                           uint32_t hw_ctx,
                           int drm_fd);

static inline size_t
gen_perf_query_counter_get_size(const struct gen_perf_query_counter *counter)
{
   switch (counter->data_type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32:
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32:
      return sizeof(uint32_t);
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
      return sizeof(uint64_t);
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
      return sizeof(float);
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE:
      return sizeof(double);
   default:
      unreachable("invalid counter data type");
   }
}

static inline struct gen_perf_query_info *
gen_perf_query_append_query_info(struct gen_perf_config *perf, int max_counters)
{
   struct gen_perf_query_info *query;

   perf->queries = reralloc(perf, perf->queries,
                            struct gen_perf_query_info,
                            ++perf->n_queries);
   query = &perf->queries[perf->n_queries - 1];
   memset(query, 0, sizeof(*query));

   if (max_counters > 0) {
      query->max_counters = max_counters;
      query->counters =
         rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
   }

   return query;
}

static inline void
gen_perf_query_info_add_stat_reg(struct gen_perf_query_info *query,
                                 uint32_t reg,
                                 uint32_t numerator,
                                 uint32_t denominator,
                                 const char *name,
                                 const char *description)
{
   struct gen_perf_query_counter *counter;

   assert(query->n_counters < query->max_counters);

   counter = &query->counters[query->n_counters];
   counter->name = name;
   counter->desc = description;
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
   counter->offset = sizeof(uint64_t) * query->n_counters;
   counter->pipeline_stat.reg = reg;
   counter->pipeline_stat.numerator = numerator;
   counter->pipeline_stat.denominator = denominator;

   query->n_counters++;
}

static inline void
gen_perf_query_info_add_basic_stat_reg(struct gen_perf_query_info *query,
                                       uint32_t reg, const char *name)
{
   gen_perf_query_info_add_stat_reg(query, reg, 1, 1, name, name);
}
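
/* A hypothetical sketch showing how the helpers above combine to register
 * a small pipeline statistics query; the query name and counter choices
 * here are illustrative only (real registration lives in the driver and
 * in gen_perf_query_register_mdapi_statistic_query()).
 */
static inline void
example_register_stats_query(struct gen_perf_config *perf)
{
   struct gen_perf_query_info *query =
      gen_perf_query_append_query_info(perf, 2 /* max_counters */);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Example Pipeline Statistics";

   gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                                          "N vertex shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                                          "N fragment shader invocations");

   /* Each counter occupies a uint64_t slot in the results buffer. */
   query->data_size = sizeof(uint64_t) * query->n_counters;
}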

static inline struct gen_perf_config *
gen_perf_new(void *ctx)
{
   struct gen_perf_config *perf = rzalloc(ctx, struct gen_perf_config);
   return perf;
}

bool gen_perf_load_oa_metrics(struct gen_perf_config *perf, int fd,
                              const struct gen_device_info *devinfo);
bool gen_perf_load_metric_id(struct gen_perf_config *perf, const char *guid,
                             uint64_t *metric_id);

void gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                            const struct gen_device_info *devinfo,
                                            const uint32_t *start,
                                            const uint32_t *end);
void gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                      const struct gen_perf_query_info *query,
                                      const uint32_t *start,
                                      const uint32_t *end);
void gen_perf_query_result_clear(struct gen_perf_query_result *result);
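
/* A hypothetical sketch of how the result helpers above fit together once
 * an OA query's begin/end report snapshots are available (report layout
 * details and intermediate periodic samples omitted for brevity).
 */
static inline void
example_accumulate_result(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query,
                          const uint32_t *begin_report,
                          const uint32_t *end_report)
{
   gen_perf_query_result_clear(&query->oa.result);
   gen_perf_query_result_read_frequencies(&query->oa.result,
                                          perf_ctx->devinfo,
                                          begin_report, end_report);
   gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
                                    begin_report, end_report);
}
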
void gen_perf_query_register_mdapi_statistic_query(const struct gen_device_info *devinfo,
                                                   struct gen_perf_config *perf);
void gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
                                            struct gen_perf_config *perf);
uint64_t gen_perf_query_get_metric_id(struct gen_perf_config *perf,
                                      const struct gen_perf_query_info *query);
struct oa_sample_buf * gen_perf_get_free_sample_buf(struct gen_perf_context *perf_ctx);
void gen_perf_reap_old_sample_buffers(struct gen_perf_context *perf_ctx);
void gen_perf_free_sample_bufs(struct gen_perf_context *perf_ctx);

void gen_perf_snapshot_statistics_registers(void *context,
                                            struct gen_perf_config *perf,
                                            struct gen_perf_query_object *obj,
                                            uint32_t offset_in_bytes);

void gen_perf_close(struct gen_perf_context *perfquery,
                    const struct gen_perf_query_info *query);
bool gen_perf_open(struct gen_perf_context *perfquery,
                   int metrics_set_id,
                   int report_format,
                   int period_exponent,
                   int drm_fd,
                   uint32_t ctx_id);

bool gen_perf_inc_n_users(struct gen_perf_context *perfquery);
void gen_perf_dec_n_users(struct gen_perf_context *perfquery);
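
/* A minimal sketch (hypothetical) of the n_oa_users contract described in
 * struct gen_perf_context: take an OA-user reference before emitting any
 * MI_REPORT_PERF_COUNT, since emitting one while the OA unit is disabled
 * would effectively hang the GPU. The reference is dropped with
 * gen_perf_dec_n_users() once the query's last MI_RPC report has landed
 * and been processed.
 */
static inline bool
example_emit_begin_report(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query)
{
   if (!gen_perf_inc_n_users(perf_ctx))
      return false; /* failed to enable the OA unit / i915 perf stream */

   perf_ctx->perf->vtbl.emit_mi_report_perf_count(perf_ctx->ctx,
                                                  query->oa.bo, 0,
                                                  query->oa.begin_report_id);
   return true;
}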

bool gen_perf_begin_query(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query);

#endif /* GEN_PERF_H */