intel/perf: move is_query_ready to gen_perf
[mesa.git] / src / intel / perf / gen_perf.h
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef GEN_PERF_H
25 #define GEN_PERF_H
26
27 #include <stdio.h>
28 #include <stdint.h>
29 #include <string.h>
30
31 #include <sys/sysmacros.h>
32
33 #include "util/hash_table.h"
34 #include "compiler/glsl/list.h"
35 #include "util/ralloc.h"
36
37 struct gen_device_info;
38
39 struct gen_perf_config;
40 struct gen_perf_query_info;
41
42 enum gen_perf_counter_type {
43 GEN_PERF_COUNTER_TYPE_EVENT,
44 GEN_PERF_COUNTER_TYPE_DURATION_NORM,
45 GEN_PERF_COUNTER_TYPE_DURATION_RAW,
46 GEN_PERF_COUNTER_TYPE_THROUGHPUT,
47 GEN_PERF_COUNTER_TYPE_RAW,
48 GEN_PERF_COUNTER_TYPE_TIMESTAMP,
49 };
50
51 enum gen_perf_counter_data_type {
52 GEN_PERF_COUNTER_DATA_TYPE_BOOL32,
53 GEN_PERF_COUNTER_DATA_TYPE_UINT32,
54 GEN_PERF_COUNTER_DATA_TYPE_UINT64,
55 GEN_PERF_COUNTER_DATA_TYPE_FLOAT,
56 GEN_PERF_COUNTER_DATA_TYPE_DOUBLE,
57 };
58
59 struct gen_pipeline_stat {
60 uint32_t reg;
61 uint32_t numerator;
62 uint32_t denominator;
63 };
64
65 /*
66 * The largest OA formats we can use include:
67 * For Haswell:
68 * 1 timestamp, 45 A counters, 8 B counters and 8 C counters.
69 * For Gen8+
70 * 1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
71 */
72 #define MAX_OA_REPORT_COUNTERS 62
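
/* A worked check of the bound above (illustrative): the Haswell layout uses
 * 1 + 45 + 8 + 8 = 62 counter slots while Gen8+ uses 1 + 1 + 36 + 8 + 8 = 54,
 * so 62 covers both.
 */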
73
74 #define IA_VERTICES_COUNT 0x2310
75 #define IA_PRIMITIVES_COUNT 0x2318
76 #define VS_INVOCATION_COUNT 0x2320
77 #define HS_INVOCATION_COUNT 0x2300
78 #define DS_INVOCATION_COUNT 0x2308
79 #define GS_INVOCATION_COUNT 0x2328
80 #define GS_PRIMITIVES_COUNT 0x2330
81 #define CL_INVOCATION_COUNT 0x2338
82 #define CL_PRIMITIVES_COUNT 0x2340
83 #define PS_INVOCATION_COUNT 0x2348
84 #define CS_INVOCATION_COUNT 0x2290
85 #define PS_DEPTH_COUNT 0x2350
86
87 /*
88 * We currently allocate only one page for pipeline statistics queries. Here
89 * we derive the maximum number of counters from that amount.
90 */
91 #define STATS_BO_SIZE 4096
92 #define STATS_BO_END_OFFSET_BYTES (STATS_BO_SIZE / 2)
93 #define MAX_STAT_COUNTERS (STATS_BO_END_OFFSET_BYTES / 8)
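
/* Illustrative expansion of the derivation above: the 4096 byte BO is split
 * in half (the END_OFFSET name suggests begin snapshots in the first 2048
 * bytes and end snapshots in the second), and each counter snapshot is a
 * 64-bit (8 byte) value, giving at most 2048 / 8 = 256 counters.
 */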
94
95 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
96 256) /* OA counter report */
97
98 struct gen_perf_query_result {
99 /**
100 * Storage for the final accumulated OA counters.
101 */
102 uint64_t accumulator[MAX_OA_REPORT_COUNTERS];
103
104 /**
105 * Hw ID used by the context on which the query was running.
106 */
107 uint32_t hw_id;
108
109 /**
110 * Number of reports accumulated to produce the results.
111 */
112 uint32_t reports_accumulated;
113
114 /**
115 * Frequency of the GT slices at the beginning and end of the
116 * query.
117 */
118 uint64_t slice_frequency[2];
119
120 /**
121 * Frequency of the GT unslice at the beginning and end of the
122 * query.
123 */
124 uint64_t unslice_frequency[2];
125 };
126
127 struct gen_perf_query_counter {
128 const char *name;
129 const char *desc;
130 enum gen_perf_counter_type type;
131 enum gen_perf_counter_data_type data_type;
132 uint64_t raw_max;
133 size_t offset;
134
135 union {
136 uint64_t (*oa_counter_read_uint64)(struct gen_perf_config *perf,
137 const struct gen_perf_query_info *query,
138 const uint64_t *accumulator);
139 float (*oa_counter_read_float)(struct gen_perf_config *perf,
140 const struct gen_perf_query_info *query,
141 const uint64_t *accumulator);
142 struct gen_pipeline_stat pipeline_stat;
143 };
144 };
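
/* A minimal sketch of how a driver might read one accumulated OA counter
 * through the union above (illustrative; "query", "result", "i" and the
 * output buffer "data" are assumed to be provided by the caller):
 *
 *    const struct gen_perf_query_counter *counter = &query->counters[i];
 *
 *    if (counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64) {
 *       uint64_t v = counter->oa_counter_read_uint64(perf, query,
 *                                                    result->accumulator);
 *       memcpy(data + counter->offset, &v, sizeof(v));
 *    } else if (counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_FLOAT) {
 *       float v = counter->oa_counter_read_float(perf, query,
 *                                                result->accumulator);
 *       memcpy(data + counter->offset, &v, sizeof(v));
 *    }
 */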
145
146 struct gen_perf_query_register_prog {
147 uint32_t reg;
148 uint32_t val;
149 };
150
151 struct gen_perf_query_info {
152 enum gen_perf_query_type {
153 GEN_PERF_QUERY_TYPE_OA,
154 GEN_PERF_QUERY_TYPE_RAW,
155 GEN_PERF_QUERY_TYPE_PIPELINE,
156 } kind;
157 const char *name;
158 const char *guid;
159 struct gen_perf_query_counter *counters;
160 int n_counters;
161 int max_counters;
162 size_t data_size;
163
164 /* OA specific */
165 uint64_t oa_metrics_set_id;
166 int oa_format;
167
168 /* For indexing into the accumulator[] ... */
169 int gpu_time_offset;
170 int gpu_clock_offset;
171 int a_offset;
172 int b_offset;
173 int c_offset;
174
175 /* Register programming for a given query */
176 struct gen_perf_query_register_prog *flex_regs;
177 uint32_t n_flex_regs;
178
179 struct gen_perf_query_register_prog *mux_regs;
180 uint32_t n_mux_regs;
181
182 struct gen_perf_query_register_prog *b_counter_regs;
183 uint32_t n_b_counter_regs;
184 };
185
186 struct gen_perf_config {
187 struct gen_perf_query_info *queries;
188 int n_queries;
189
190 /* Variables referenced in the XML metadata for OA performance
191 * counters, e.g. in the normalization equations.
192 *
193 * All uint64_t for consistent operand types in generated code
194 */
195 struct {
196 uint64_t timestamp_frequency; /** $GpuTimestampFrequency */
197 uint64_t n_eus; /** $EuCoresTotalCount */
198 uint64_t n_eu_slices; /** $EuSlicesTotalCount */
199 uint64_t n_eu_sub_slices; /** $EuSubslicesTotalCount */
200 uint64_t eu_threads_count; /** $EuThreadsCount */
201 uint64_t slice_mask; /** $SliceMask */
202 uint64_t subslice_mask; /** $SubsliceMask */
203 uint64_t gt_min_freq; /** $GpuMinFrequency */
204 uint64_t gt_max_freq; /** $GpuMaxFrequency */
205 uint64_t revision; /** $SkuRevisionId */
206 } sys_vars;
207
208 /* OA metric sets, indexed by GUID, as known to Mesa at build time, to
209 * cross-reference with the GUIDs of configs advertised by the kernel at
210 * runtime
211 */
212 struct hash_table *oa_metrics_table;
213
214 /* Location of the device's sysfs entry. */
215 char sysfs_dev_dir[256];
216
217 struct {
218 void *(*bo_alloc)(void *bufmgr, const char *name, uint64_t size);
219 void (*bo_unreference)(void *bo);
220 void *(*bo_map)(void *ctx, void *bo, unsigned flags);
221 void (*bo_unmap)(void *bo);
222 bool (*batch_references)(void *batch, void *bo);
223 void (*bo_wait_rendering)(void *bo);
224 int (*bo_busy)(void *bo);
225 void (*emit_mi_flush)(void *ctx);
226 void (*emit_mi_report_perf_count)(void *ctx,
227 void *bo,
228 uint32_t offset_in_bytes,
229 uint32_t report_id);
230 void (*batchbuffer_flush)(void *ctx,
231 const char *file, int line);
232 void (*capture_frequency_stat_register)(void *ctx, void *bo,
233 uint32_t bo_offset);
234 void (*store_register_mem64)(void *ctx, void *bo, uint32_t reg, uint32_t offset);
235
236 } vtbl;
237 };
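
/* A minimal sketch of how a driver is expected to populate the vtbl above
 * (illustrative; the my_* callbacks are hypothetical driver functions, not
 * part of this header):
 *
 *    struct gen_perf_config *perf = gen_perf_new(ctx);
 *
 *    perf->vtbl.bo_alloc = my_bo_alloc;
 *    perf->vtbl.bo_unreference = my_bo_unreference;
 *    perf->vtbl.bo_map = my_bo_map;
 *    perf->vtbl.bo_unmap = my_bo_unmap;
 *    perf->vtbl.emit_mi_report_perf_count = my_emit_mi_report_perf_count;
 *    perf->vtbl.batchbuffer_flush = my_batchbuffer_flush;
 *
 * (and so on for the remaining callbacks)
 */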
238
239 /**
240 * Periodic OA samples are read() into these buffer structures via the
241 * i915 perf kernel interface and appended to the
242 * perf context's sample_buffers linked list. When we process the
243 * results of an OA metrics query we need to consider all the periodic
244 * samples between the Begin and End MI_REPORT_PERF_COUNT command
245 * markers.
246 *
247 * 'Periodic' is a simplification as there are other automatic reports
248 * written by the hardware also buffered here.
249 *
250 * Considering three queries, A, B and C:
251 *
252 * Time ---->
253 * ________________A_________________
254 * | |
255 * | ________B_________ _____C___________
256 * | | | | | |
257 *
258 * And an illustration of sample buffers read over this time frame:
259 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
260 *
261 * These nodes may hold samples for query A:
262 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
263 *
264 * These nodes may hold samples for query B:
265 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
266 *
267 * These nodes may hold samples for query C:
268 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
269 *
270 * The illustration assumes we have an even distribution of periodic
271 * samples so all nodes have the same size plotted against time:
272 *
273 * Note, to simplify code, the list is never empty.
274 *
275 * With overlapping queries we can see that periodic OA reports may
276 * relate to multiple queries and care needs to be taken to keep
277 * track of sample buffers until there are no queries that might
278 * depend on their contents.
279 *
280 * We use a node ref counting system where a reference ensures that a
281 * node and all following nodes can't be freed/recycled until the
282 * reference drops to zero.
283 *
284 * E.g. with a ref of one here:
285 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
286 *
287 * These nodes could be freed or recycled ("reaped"):
288 * [ 0 ][ 0 ]
289 *
290 * These must be preserved until the leading ref drops to zero:
291 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
292 *
293 * When a query starts we take a reference on the current tail of
294 * the list, knowing that no already-buffered samples can possibly
295 * relate to the newly-started query. A pointer to this node is
296 * also saved in the query object's ->oa.samples_head.
297 *
298 * E.g. starting query A while there are two nodes in .sample_buffers:
299 * ________________A________
300 * |
301 *
302 * [ 0 ][ 1 ]
303 * ^_______ Add a reference and store pointer to node in
304 * A->oa.samples_head
305 *
306 * Moving forward to when the B query starts with no new buffer nodes:
307 * (for reference, i915 perf reads() are only done when queries finish)
308 * ________________A_______
309 * | ________B___
310 * | |
311 *
312 * [ 0 ][ 2 ]
313 * ^_______ Add a reference and store pointer to
314 * node in B->oa.samples_head
315 *
316 * Once a query has finished (the OA query has become 'Ready', the
317 * End OA report has landed and all the intermediate periodic
318 * samples have been processed), we drop the ->oa.samples_head
319 * reference we took at the start.
320 *
321 * So when the B query has finished we have:
322 * ________________A________
323 * | ______B___________
324 * | | |
325 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
326 * ^_______ Drop B->oa.samples_head reference
327 *
328 * We still can't free these due to the A->oa.samples_head ref:
329 * [ 1 ][ 0 ][ 0 ][ 0 ]
330 *
331 * When the A query finishes: (note there's a new ref for C's samples_head)
332 * ________________A_________________
333 * | |
334 * | _____C_________
335 * | | |
336 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
337 * ^_______ Drop A->oa.samples_head reference
338 *
339 * And we can now reap these nodes up to the C->oa.samples_head:
340 * [ X ][ X ][ X ][ X ]
341 * keeping -> [ 1 ][ 0 ][ 0 ]
342 *
343 * We reap old sample buffers each time we finish processing an OA
344 * query by iterating the sample_buffers list from the head until we
345 * find a referenced node and stop.
346 *
347 * Reaped buffers move to the free_sample_buffers list and
348 * when we come to read() we first look to recycle a buffer from the
349 * free_sample_buffers list before allocating a new buffer.
350 */
351 struct oa_sample_buf {
352 struct exec_node link;
353 int refcount;
354 int len;
355 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
356 uint32_t last_timestamp;
357 };
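
/* A sketch of the reference-taking described above, as it might look when a
 * query begins (illustrative; perf_ctx and query refer to the context and
 * query-object structs declared further down in this header):
 *
 *    struct oa_sample_buf *buf =
 *       exec_node_data(struct oa_sample_buf,
 *                      exec_list_get_tail(&perf_ctx->sample_buffers), link);
 *
 *    query->oa.samples_head = &buf->link;
 *    buf->refcount++;
 *
 * and the matching drop once the query's reports have been accumulated:
 *
 *    buf = exec_node_data(struct oa_sample_buf,
 *                         query->oa.samples_head, link);
 *    buf->refcount--;
 *    query->oa.samples_head = NULL;
 */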
358
359 /**
360 * gen representation of a performance query object.
361 *
362 * NB: We want to keep this structure relatively lean considering that
363 * applications may expect to allocate enough objects to be able to
364 * query around all draw calls in a frame.
365 */
366 struct gen_perf_query_object
367 {
368 const struct gen_perf_query_info *queryinfo;
369
370 /* See queryinfo->kind to know which state below is in use... */
371 union {
372 struct {
373
374 /**
375 * BO containing OA counter snapshots at query Begin/End time.
376 */
377 void *bo;
378
379 /**
380 * Address of the mapping of @bo.
381 */
382 void *map;
383
384 /**
385 * The MI_REPORT_PERF_COUNT command lets us specify a unique
386 * ID that will be reflected in the resulting OA report
387 * that's written by the GPU. This is the ID we're expecting
388 * in the begin report; the end report's ID should be
389 * @begin_report_id + 1.
390 */
391 int begin_report_id;
392
393 /**
394 * Reference the head of the perf context's sample_buffers
395 * list at the time that the query started (so we only need
396 * to look at nodes after this point when looking for samples
397 * related to this query)
398 *
399 * (See the struct oa_sample_buf description for more details.)
400 */
401 struct exec_node *samples_head;
402
403 /**
404 * false while in the unaccumulated_elements list, and set to
405 * true when the final, end MI_RPC snapshot has been
406 * accumulated.
407 */
408 bool results_accumulated;
409
410 /**
411 * Frequency of the GT at begin and end of the query.
412 */
413 uint64_t gt_frequency[2];
414
415 /**
416 * Accumulated OA results between begin and end of the query.
417 */
418 struct gen_perf_query_result result;
419 } oa;
420
421 struct {
422 /**
423 * BO containing starting and ending snapshots for the
424 * statistics counters.
425 */
426 void *bo;
427 } pipeline_stats;
428 };
429 };
430
431 struct gen_perf_context {
432 struct gen_perf_config *perf;
433
434 void * ctx; /* driver context (eg, brw_context) */
435 void * bufmgr;
436 const struct gen_device_info *devinfo;
437
438 uint32_t hw_ctx;
439 int drm_fd;
440
441 /* The i915 perf stream we open to setup + enable the OA counters */
442 int oa_stream_fd;
443
444 /* An i915 perf stream fd gives exclusive access to the OA unit that will
445 * report counter snapshots for a specific counter set/profile in a
446 * specific layout/format so we can only start OA queries that are
447 * compatible with the currently open fd...
448 */
449 int current_oa_metrics_set_id;
450 int current_oa_format;
451
452 /* List of buffers containing OA reports */
453 struct exec_list sample_buffers;
454
455 /* Cached list of empty sample buffers */
456 struct exec_list free_sample_buffers;
457
458 int n_active_oa_queries;
459 int n_active_pipeline_stats_queries;
460
461 /* The number of queries depending on running OA counters, which
462 * extends beyond gen_perf_end_query() since we need to wait until
463 * the last MI_RPC command has been parsed by the GPU.
464 *
465 * Accurate accounting is important here as emitting an
466 * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
467 * effectively hang the gpu.
468 */
469 int n_oa_users;
470
471 /* To help catch a spurious problem with the hardware or perf
472 * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
473 * with a unique ID that we can explicitly check for...
474 */
475 int next_query_start_report_id;
476
477 /**
478 * An array of queries whose results haven't yet been assembled
479 * based on the data in buffer objects.
480 *
481 * These may be active, or have already ended. However, the
482 * results have not been requested.
483 */
484 struct gen_perf_query_object **unaccumulated;
485 int unaccumulated_elements;
486 int unaccumulated_array_size;
487
488 /* The total number of query objects so we can relinquish
489 * our exclusive access to perf if the application deletes
490 * all of its objects. (NB: We only disable perf while
491 * there are no active queries)
492 */
493 int n_query_instances;
494 };
495
496 void gen_perf_init_context(struct gen_perf_context *perf_ctx,
497 struct gen_perf_config *perf_cfg,
498 void * ctx, /* driver context (eg, brw_context) */
499 void * bufmgr, /* eg brw_bufmgr */
500 const struct gen_device_info *devinfo,
501 uint32_t hw_ctx,
502 int drm_fd);
503
504 static inline size_t
505 gen_perf_query_counter_get_size(const struct gen_perf_query_counter *counter)
506 {
507 switch (counter->data_type) {
508 case GEN_PERF_COUNTER_DATA_TYPE_BOOL32:
509 return sizeof(uint32_t);
510 case GEN_PERF_COUNTER_DATA_TYPE_UINT32:
511 return sizeof(uint32_t);
512 case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
513 return sizeof(uint64_t);
514 case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
515 return sizeof(float);
516 case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE:
517 return sizeof(double);
518 default:
519 unreachable("invalid counter data type");
520 }
521 }
522
523 static inline struct gen_perf_query_info *
524 gen_perf_query_append_query_info(struct gen_perf_config *perf, int max_counters)
525 {
526 struct gen_perf_query_info *query;
527
528 perf->queries = reralloc(perf, perf->queries,
529 struct gen_perf_query_info,
530 ++perf->n_queries);
531 query = &perf->queries[perf->n_queries - 1];
532 memset(query, 0, sizeof(*query));
533
534 if (max_counters > 0) {
535 query->max_counters = max_counters;
536 query->counters =
537 rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
538 }
539
540 return query;
541 }
542
543 static inline void
544 gen_perf_query_info_add_stat_reg(struct gen_perf_query_info *query,
545 uint32_t reg,
546 uint32_t numerator,
547 uint32_t denominator,
548 const char *name,
549 const char *description)
550 {
551 struct gen_perf_query_counter *counter;
552
553 assert(query->n_counters < query->max_counters);
554
555 counter = &query->counters[query->n_counters];
556 counter->name = name;
557 counter->desc = description;
558 counter->type = GEN_PERF_COUNTER_TYPE_RAW;
559 counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
560 counter->offset = sizeof(uint64_t) * query->n_counters;
561 counter->pipeline_stat.reg = reg;
562 counter->pipeline_stat.numerator = numerator;
563 counter->pipeline_stat.denominator = denominator;
564
565 query->n_counters++;
566 }
567
568 static inline void
569 gen_perf_query_info_add_basic_stat_reg(struct gen_perf_query_info *query,
570 uint32_t reg, const char *name)
571 {
572 gen_perf_query_info_add_stat_reg(query, reg, 1, 1, name, name);
573 }
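
/* A usage sketch of the two helpers above, building a small pipeline
 * statistics query from the registers defined at the top of this file
 * (illustrative; the query name and the 1/4 ratio are made up for the
 * example):
 *
 *    struct gen_perf_query_info *query =
 *       gen_perf_query_append_query_info(perf, 2);
 *
 *    query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
 *    query->name = "Pipeline Statistics";
 *
 *    gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
 *                                           "N vertices submitted");
 *    gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
 *                                     "N fragment shader invocations",
 *                                     "N fragment shader invocations");
 *
 *    query->data_size = sizeof(uint64_t) * query->n_counters;
 */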
574
575 static inline struct gen_perf_config *
576 gen_perf_new(void *ctx)
577 {
578 struct gen_perf_config *perf = rzalloc(ctx, struct gen_perf_config);
579 return perf;
580 }
581
582 bool gen_perf_load_oa_metrics(struct gen_perf_config *perf, int fd,
583 const struct gen_device_info *devinfo);
584 bool gen_perf_load_metric_id(struct gen_perf_config *perf, const char *guid,
585 uint64_t *metric_id);
586
587 void gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
588 const struct gen_device_info *devinfo,
589 const uint32_t *start,
590 const uint32_t *end);
591 void gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
592 const struct gen_perf_query_info *query,
593 const uint32_t *start,
594 const uint32_t *end);
595 void gen_perf_query_result_clear(struct gen_perf_query_result *result);
596 void gen_perf_query_register_mdapi_statistic_query(const struct gen_device_info *devinfo,
597 struct gen_perf_config *perf);
598 void gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
599 struct gen_perf_config *perf);
600 uint64_t gen_perf_query_get_metric_id(struct gen_perf_config *perf,
601 const struct gen_perf_query_info *query);
602 struct oa_sample_buf * gen_perf_get_free_sample_buf(struct gen_perf_context *perf);
603 void gen_perf_reap_old_sample_buffers(struct gen_perf_context *perf_ctx);
604 void gen_perf_free_sample_bufs(struct gen_perf_context *perf_ctx);
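
/* A minimal sketch of turning a Begin/End pair of OA reports into final
 * values with the gen_perf_query_result helpers declared above
 * (illustrative; start_report and end_report are assumed to point at the
 * two MI_RPC snapshots of a query):
 *
 *    struct gen_perf_query_result result;
 *
 *    gen_perf_query_result_clear(&result);
 *    gen_perf_query_result_accumulate(&result, queryinfo,
 *                                     start_report, end_report);
 *    gen_perf_query_result_read_frequencies(&result, devinfo,
 *                                            start_report, end_report);
 */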
605
606 void gen_perf_snapshot_statistics_registers(void *context,
607 struct gen_perf_config *perf,
608 struct gen_perf_query_object *obj,
609 uint32_t offset_in_bytes);
610
611 void gen_perf_close(struct gen_perf_context *perfquery,
612 const struct gen_perf_query_info *query);
613 bool gen_perf_open(struct gen_perf_context *perfquery,
614 int metrics_set_id,
615 int report_format,
616 int period_exponent,
617 int drm_fd,
618 uint32_t ctx_id);
619
620 bool gen_perf_inc_n_users(struct gen_perf_context *perfquery);
621 void gen_perf_dec_n_users(struct gen_perf_context *perfquery);
622
623 bool gen_perf_begin_query(struct gen_perf_context *perf_ctx,
624 struct gen_perf_query_object *query);
625 void gen_perf_end_query(struct gen_perf_context *perf_ctx,
626 struct gen_perf_query_object *query);
627 void gen_perf_wait_query(struct gen_perf_context *perf_ctx,
628 struct gen_perf_query_object *query,
629 void *current_batch);
630 bool gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
631 struct gen_perf_query_object *query,
632 void *current_batch);
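
/* An end-to-end usage sketch of the query entry points above (illustrative;
 * error handling is omitted and ctx, bufmgr, devinfo, hw_ctx, drm_fd,
 * current_batch and the driver-allocated query object are assumed to come
 * from the caller):
 *
 *    struct gen_perf_config *cfg = gen_perf_new(ctx);
 *    struct gen_perf_context perf_ctx = { 0, };
 *
 *    gen_perf_load_oa_metrics(cfg, drm_fd, devinfo);
 *    gen_perf_init_context(&perf_ctx, cfg, ctx, bufmgr, devinfo,
 *                          hw_ctx, drm_fd);
 *
 *    if (gen_perf_begin_query(&perf_ctx, query)) {
 *       // emit the GPU work to be measured here
 *       gen_perf_end_query(&perf_ctx, query);
 *
 *       if (!gen_perf_is_query_ready(&perf_ctx, query, current_batch))
 *          gen_perf_wait_query(&perf_ctx, query, current_batch);
 *       // query->oa.result (or the pipeline_stats BO) now holds the data
 *    }
 */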
633
634 #endif /* GEN_PERF_H */