intel/perf: move client reference counts into perf
[mesa.git] / src / intel / perf / gen_perf.h
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef GEN_PERF_H
#define GEN_PERF_H

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <sys/sysmacros.h>

#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "util/ralloc.h"

struct gen_device_info;

struct gen_perf_config;
struct gen_perf_query_info;

enum gen_perf_counter_type {
   GEN_PERF_COUNTER_TYPE_EVENT,
   GEN_PERF_COUNTER_TYPE_DURATION_NORM,
   GEN_PERF_COUNTER_TYPE_DURATION_RAW,
   GEN_PERF_COUNTER_TYPE_THROUGHPUT,
   GEN_PERF_COUNTER_TYPE_RAW,
   GEN_PERF_COUNTER_TYPE_TIMESTAMP,
};

enum gen_perf_counter_data_type {
   GEN_PERF_COUNTER_DATA_TYPE_BOOL32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT64,
   GEN_PERF_COUNTER_DATA_TYPE_FLOAT,
   GEN_PERF_COUNTER_DATA_TYPE_DOUBLE,
};

struct gen_pipeline_stat {
   uint32_t reg;
   uint32_t numerator;
   uint32_t denominator;
};

/*
 * The largest OA formats we can use include:
 * For Haswell:
 *   1 timestamp, 45 A counters, 8 B counters and 8 C counters
 *   (i.e. 1 + 45 + 8 + 8 = 62 values)
 * For Gen8+:
 *   1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
 */
#define MAX_OA_REPORT_COUNTERS 62

#define IA_VERTICES_COUNT          0x2310
#define IA_PRIMITIVES_COUNT        0x2318
#define VS_INVOCATION_COUNT        0x2320
#define HS_INVOCATION_COUNT        0x2300
#define DS_INVOCATION_COUNT        0x2308
#define GS_INVOCATION_COUNT        0x2328
#define GS_PRIMITIVES_COUNT        0x2330
#define CL_INVOCATION_COUNT        0x2338
#define CL_PRIMITIVES_COUNT        0x2340
#define PS_INVOCATION_COUNT        0x2348
#define CS_INVOCATION_COUNT        0x2290
#define PS_DEPTH_COUNT             0x2350

/*
 * We currently allocate only one page for pipeline statistics queries. Here
 * we derive the maximum number of counters for that amount.
 */
#define STATS_BO_SIZE               4096
#define STATS_BO_END_OFFSET_BYTES   (STATS_BO_SIZE / 2)
#define MAX_STAT_COUNTERS           (STATS_BO_END_OFFSET_BYTES / 8)
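
/* Each statistics counter gets a 64-bit begin snapshot in the first half of
 * the BO and a 64-bit end snapshot at the same offset in the second half,
 * so each half holds 2048 / 8 = 256 values. A minimal sketch of that layout
 * arithmetic (the helper names are illustrative, not part of this API):
 */
static inline uint32_t
stats_begin_offset_example(int counter_index)
{
   return counter_index * sizeof(uint64_t);
}

static inline uint32_t
stats_end_offset_example(int counter_index)
{
   return STATS_BO_END_OFFSET_BYTES + counter_index * sizeof(uint64_t);
}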

#define I915_PERF_OA_SAMPLE_SIZE (8 +   /* drm_i915_perf_record_header */ \
                                  256)  /* OA counter report */

struct gen_perf_query_result {
   /**
    * Storage for the final accumulated OA counters.
    */
   uint64_t accumulator[MAX_OA_REPORT_COUNTERS];

   /**
    * Hw ID used by the context on which the query was running.
    */
   uint32_t hw_id;

   /**
    * Number of reports accumulated to produce the results.
    */
   uint32_t reports_accumulated;

   /**
    * Frequency of the GT slices at the beginning and end of the query.
    */
   uint64_t slice_frequency[2];

   /**
    * Frequency of the GT unslice at the beginning and end of the query.
    */
   uint64_t unslice_frequency[2];
};

struct gen_perf_query_counter {
   const char *name;
   const char *desc;
   enum gen_perf_counter_type type;
   enum gen_perf_counter_data_type data_type;
   uint64_t raw_max;
   size_t offset;

   union {
      uint64_t (*oa_counter_read_uint64)(struct gen_perf_config *perf,
                                         const struct gen_perf_query_info *query,
                                         const uint64_t *accumulator);
      float (*oa_counter_read_float)(struct gen_perf_config *perf,
                                     const struct gen_perf_query_info *query,
                                     const uint64_t *accumulator);
      struct gen_pipeline_stat pipeline_stat;
   };
};
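
/* A sketch of how a client might pull one value out of a query's assembled
 * result data using the offset/data_type pair above. The `data` blob is
 * assumed to be query->data_size bytes of assembled results:
 */
static inline uint64_t
read_uint64_counter_example(const struct gen_perf_query_counter *counter,
                            const uint8_t *data)
{
   uint64_t value;

   assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);
   memcpy(&value, data + counter->offset, sizeof(value));

   return value;
}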

struct gen_perf_query_register_prog {
   uint32_t reg;
   uint32_t val;
};

struct gen_perf_query_info {
   enum gen_perf_query_type {
      GEN_PERF_QUERY_TYPE_OA,
      GEN_PERF_QUERY_TYPE_RAW,
      GEN_PERF_QUERY_TYPE_PIPELINE,
   } kind;
   const char *name;
   const char *guid;
   struct gen_perf_query_counter *counters;
   int n_counters;
   int max_counters;
   size_t data_size;

   /* OA specific */
   uint64_t oa_metrics_set_id;
   int oa_format;

   /* For indexing into the accumulator[] ... */
   int gpu_time_offset;
   int gpu_clock_offset;
   int a_offset;
   int b_offset;
   int c_offset;

   /* Register programming for a given query */
   struct gen_perf_query_register_prog *flex_regs;
   uint32_t n_flex_regs;

   struct gen_perf_query_register_prog *mux_regs;
   uint32_t n_mux_regs;

   struct gen_perf_query_register_prog *b_counter_regs;
   uint32_t n_b_counter_regs;
};

struct gen_perf_config {
   struct gen_perf_query_info *queries;
   int n_queries;

   /* Variables referenced in the XML metadata for OA performance
    * counters, e.g. in the normalization equations.
    *
    * All uint64_t for consistent operand types in generated code.
    */
   struct {
      uint64_t timestamp_frequency; /** $GpuTimestampFrequency */
      uint64_t n_eus;               /** $EuCoresTotalCount */
      uint64_t n_eu_slices;         /** $EuSlicesTotalCount */
      uint64_t n_eu_sub_slices;     /** $EuSubslicesTotalCount */
      uint64_t eu_threads_count;    /** $EuThreadsCount */
      uint64_t slice_mask;          /** $SliceMask */
      uint64_t subslice_mask;       /** $SubsliceMask */
      uint64_t gt_min_freq;         /** $GpuMinFrequency */
      uint64_t gt_max_freq;         /** $GpuMaxFrequency */
      uint64_t revision;            /** $SkuRevisionId */
   } sys_vars;

   /* OA metric sets, indexed by GUID, as known by Mesa at build time, to
    * cross-reference with the GUIDs of configs advertised by the kernel at
    * runtime.
    */
   struct hash_table *oa_metrics_table;

   /* Location of the device's sysfs entry. */
   char sysfs_dev_dir[256];

   struct {
      void *(*bo_alloc)(void *bufmgr, const char *name, uint64_t size);
      void (*bo_unreference)(void *bo);
      void (*emit_mi_flush)(void *ctx);
      void (*emit_mi_report_perf_count)(void *ctx,
                                        void *bo,
                                        uint32_t offset_in_bytes,
                                        uint32_t report_id);
      void (*batchbuffer_flush)(void *ctx,
                                const char *file, int line);
      void (*capture_frequency_stat_register)(void *ctx, void *bo,
                                              uint32_t bo_offset);
      void (*store_register_mem64)(void *ctx, void *bo, uint32_t reg,
                                   uint32_t offset);
   } vtbl;
};

/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * brw->perfquery.sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                |
 *                | ________B_________ _____C___________
 *                | |                | |           |   |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [  0  ][  0  ]
 *
 * These must be preserved until the leading ref drops to zero:
 *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [  0  ][  1  ]
 *            ^_______ Add a reference and store pointer to node in
 *                     A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [  0  ][  2  ]
 *            ^_______ Add a reference and store pointer to
 *                     node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [  0  ][  1  ][  0  ][  0  ][  0  ]
 *            ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [  1  ][  0  ][  0  ][  0  ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                |
 *                |                    _____C_________
 *                |                    |             |
 * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
 *                                  ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [  X  ][  X  ][  X  ][  X  ]
 *                  keeping -> [  1  ][  0  ][  0  ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};
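
/* A minimal sketch of the reaping walk described above, using the exec_list
 * helpers from compiler/glsl/list.h (illustrative only; the real
 * gen_perf_reap_old_sample_buffers() additionally keeps the list non-empty):
 */
static inline void
reap_sample_buffers_example(struct exec_list *sample_buffers,
                            struct exec_list *free_sample_buffers)
{
   foreach_list_typed_safe(struct oa_sample_buf, buf, link, sample_buffers) {
      /* Stop at the first referenced node: it and everything after it may
       * still be needed by an in-flight query.
       */
      if (buf->refcount != 0)
         break;

      exec_node_remove(&buf->link);
      exec_list_push_tail(free_sample_buffers, &buf->link);
   }
}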

/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct gen_perf_query_object
{
   const struct gen_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {

         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of the mapping of @bo
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the brw->perfquery.sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query)
          *
          * (See struct oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Frequency of the GT at begin and end of the query.
          */
         uint64_t gt_frequency[2];

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct gen_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};
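
/* A sketch of the begin_report_id check described above, once both MI_RPC
 * snapshots have landed. It assumes the report ID sits in the first dword
 * of an OA report; the 2048-byte offset of the end snapshot within the BO
 * is purely illustrative, as that layout is driver-specific:
 */
static inline bool
oa_reports_match_query_example(const struct gen_perf_query_object *obj)
{
   const uint32_t *begin_report = obj->oa.map;
   const uint32_t *end_report =
      (const uint32_t *)((const uint8_t *)obj->oa.map + 2048);

   return begin_report[0] == (uint32_t)obj->oa.begin_report_id &&
          end_report[0] == (uint32_t)obj->oa.begin_report_id + 1;
}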

struct gen_perf_context {
   struct gen_perf_config *perf;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the gpu.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended. However, the
    * results have not been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;
};

static inline size_t
gen_perf_query_counter_get_size(const struct gen_perf_query_counter *counter)
{
   switch (counter->data_type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32:
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32:
      return sizeof(uint32_t);
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
      return sizeof(uint64_t);
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
      return sizeof(float);
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE:
      return sizeof(double);
   default:
      unreachable("invalid counter data type");
   }
}
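
/* A sketch of how a counter's position in a query's data buffer could be
 * assigned with the size helper above (illustrative; real OA query setup
 * may additionally align offsets to the counter's size):
 */
static inline void
layout_counter_example(struct gen_perf_query_info *query,
                       struct gen_perf_query_counter *counter)
{
   counter->offset = query->data_size;
   query->data_size += gen_perf_query_counter_get_size(counter);
}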

static inline struct gen_perf_query_info *
gen_perf_query_append_query_info(struct gen_perf_config *perf, int max_counters)
{
   struct gen_perf_query_info *query;

   perf->queries = reralloc(perf, perf->queries,
                            struct gen_perf_query_info,
                            ++perf->n_queries);
   query = &perf->queries[perf->n_queries - 1];
   memset(query, 0, sizeof(*query));

   if (max_counters > 0) {
      query->max_counters = max_counters;
      query->counters =
         rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
   }

   return query;
}

static inline void
gen_perf_query_info_add_stat_reg(struct gen_perf_query_info *query,
                                 uint32_t reg,
                                 uint32_t numerator,
                                 uint32_t denominator,
                                 const char *name,
                                 const char *description)
{
   struct gen_perf_query_counter *counter;

   assert(query->n_counters < query->max_counters);

   counter = &query->counters[query->n_counters];
   counter->name = name;
   counter->desc = description;
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
   counter->offset = sizeof(uint64_t) * query->n_counters;
   counter->pipeline_stat.reg = reg;
   counter->pipeline_stat.numerator = numerator;
   counter->pipeline_stat.denominator = denominator;

   query->n_counters++;
}

static inline void
gen_perf_query_info_add_basic_stat_reg(struct gen_perf_query_info *query,
                                       uint32_t reg, const char *name)
{
   gen_perf_query_info_add_stat_reg(query, reg, 1, 1, name, name);
}
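
/* Putting the two helpers together: a sketch of how a driver might register
 * a small pipeline statistics query. The counter selection and the 1/4
 * fragment-invocation divider are illustrative, not prescriptive:
 */
static inline void
register_pipeline_statistics_example(struct gen_perf_config *perf)
{
   struct gen_perf_query_info *query =
      gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
                                          "N vertices submitted");
   gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                                          "N primitives entering clipping");
   /* Some gens count 2x2 fragment blocks here, hence a 1/4 divider. */
   gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                                    "N fragment shader invocations",
                                    "N fragment shader invocations");

   query->data_size = sizeof(uint64_t) * query->n_counters;
}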

static inline struct gen_perf_config *
gen_perf_new(void *ctx)
{
   struct gen_perf_config *perf = rzalloc(ctx, struct gen_perf_config);
   return perf;
}

bool gen_perf_load_oa_metrics(struct gen_perf_config *perf, int fd,
                              const struct gen_device_info *devinfo);
bool gen_perf_load_metric_id(struct gen_perf_config *perf, const char *guid,
                             uint64_t *metric_id);
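
/* Typical setup at screen/context creation time might look like this
 * (a sketch; error handling and the ralloc context are up to the caller):
 */
static inline struct gen_perf_config *
perf_init_example(void *ctx, int drm_fd,
                  const struct gen_device_info *devinfo)
{
   struct gen_perf_config *perf = gen_perf_new(ctx);

   /* Without i915 perf/OA support we can still expose pipeline statistics
    * queries, so a failure here is not necessarily fatal.
    */
   if (!gen_perf_load_oa_metrics(perf, drm_fd, devinfo))
      fprintf(stderr, "i915 perf OA metrics unavailable\n");

   return perf;
}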

void gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                            const struct gen_device_info *devinfo,
                                            const uint32_t *start,
                                            const uint32_t *end);
void gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                      const struct gen_perf_query_info *query,
                                      const uint32_t *start,
                                      const uint32_t *end);
void gen_perf_query_result_clear(struct gen_perf_query_result *result);
void gen_perf_query_register_mdapi_statistic_query(const struct gen_device_info *devinfo,
                                                   struct gen_perf_config *perf);
void gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
                                            struct gen_perf_config *perf);
uint64_t gen_perf_query_get_metric_id(struct gen_perf_config *perf,
                                      const struct gen_perf_query_info *query);
struct oa_sample_buf *gen_perf_get_free_sample_buf(struct gen_perf_context *perf_ctx);
void gen_perf_reap_old_sample_buffers(struct gen_perf_context *perf_ctx);
void gen_perf_free_sample_bufs(struct gen_perf_context *perf_ctx);

void gen_perf_snapshot_statistics_registers(void *context,
                                            struct gen_perf_config *perf,
                                            struct gen_perf_query_object *obj,
                                            uint32_t offset_in_bytes);

void gen_perf_close(struct gen_perf_context *perfquery,
                    const struct gen_perf_query_info *query);
bool gen_perf_open(struct gen_perf_context *perfquery,
                   int metrics_set_id,
                   int report_format,
                   int period_exponent,
                   int drm_fd,
                   uint32_t ctx_id);

bool gen_perf_inc_n_users(struct gen_perf_context *perfquery);
void gen_perf_dec_n_users(struct gen_perf_context *perfquery);
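
/* The n_oa_users accounting above is what makes it safe to emit MI_RPC: a
 * begin-query path might guard its snapshot like this (a sketch; the bo and
 * report_id come from the query object being started):
 */
static inline bool
begin_oa_snapshot_example(struct gen_perf_context *perf_ctx,
                          void *ctx, void *bo, uint32_t report_id)
{
   /* Enabling the OA stream can fail; never emit MI_REPORT_PERF_COUNT with
    * the OA unit disabled as that can effectively hang the GPU.
    */
   if (!gen_perf_inc_n_users(perf_ctx))
      return false;

   perf_ctx->perf->vtbl.emit_mi_report_perf_count(ctx, bo, 0, report_id);
   return true;
}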

#endif /* GEN_PERF_H */