intel/perf: move free_sample_bufs into perf
[mesa.git] / src / intel / perf / gen_perf.h
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef GEN_PERF_H
#define GEN_PERF_H

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <sys/sysmacros.h>

#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "util/ralloc.h"

struct gen_device_info;

struct gen_perf_config;
struct gen_perf_query_info;

enum gen_perf_counter_type {
   GEN_PERF_COUNTER_TYPE_EVENT,
   GEN_PERF_COUNTER_TYPE_DURATION_NORM,
   GEN_PERF_COUNTER_TYPE_DURATION_RAW,
   GEN_PERF_COUNTER_TYPE_THROUGHPUT,
   GEN_PERF_COUNTER_TYPE_RAW,
   GEN_PERF_COUNTER_TYPE_TIMESTAMP,
};

enum gen_perf_counter_data_type {
   GEN_PERF_COUNTER_DATA_TYPE_BOOL32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT32,
   GEN_PERF_COUNTER_DATA_TYPE_UINT64,
   GEN_PERF_COUNTER_DATA_TYPE_FLOAT,
   GEN_PERF_COUNTER_DATA_TYPE_DOUBLE,
};

struct gen_pipeline_stat {
   uint32_t reg;
   uint32_t numerator;
   uint32_t denominator;
};

/*
 * The largest OA formats we can use include:
 * For Haswell:
 *   1 timestamp, 45 A counters, 8 B counters and 8 C counters.
 * For Gen8+
 *   1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
 */
#define MAX_OA_REPORT_COUNTERS 62
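/* (A worked check of that bound: Haswell needs 1 + 45 + 8 + 8 = 62 slots,
 * Gen8+ needs 1 + 1 + 36 + 8 + 8 = 54, so 62 covers both.)
 */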

#define IA_VERTICES_COUNT          0x2310
#define IA_PRIMITIVES_COUNT        0x2318
#define VS_INVOCATION_COUNT        0x2320
#define HS_INVOCATION_COUNT        0x2300
#define DS_INVOCATION_COUNT        0x2308
#define GS_INVOCATION_COUNT        0x2328
#define GS_PRIMITIVES_COUNT        0x2330
#define CL_INVOCATION_COUNT        0x2338
#define CL_PRIMITIVES_COUNT        0x2340
#define PS_INVOCATION_COUNT        0x2348
#define CS_INVOCATION_COUNT        0x2290
#define PS_DEPTH_COUNT             0x2350

/*
 * We currently allocate only one page for pipeline statistics queries. Here
 * we derive the maximum number of counters for that amount.
 */
#define STATS_BO_SIZE               4096
#define STATS_BO_END_OFFSET_BYTES   (STATS_BO_SIZE / 2)
#define MAX_STAT_COUNTERS           (STATS_BO_END_OFFSET_BYTES / 8)
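/* (The derivation, spelled out: begin snapshots occupy the first half of the
 * 4096-byte BO and end snapshots the second half, so each snapshot gets
 * 2048 bytes; at 8 bytes (one uint64_t) per counter that allows
 * 2048 / 8 = 256 counters.)
 */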

#define I915_PERF_OA_SAMPLE_SIZE (8 +   /* drm_i915_perf_record_header */ \
                                  256)  /* OA counter report */

struct gen_perf_query_result {
   /**
    * Storage for the final accumulated OA counters.
    */
   uint64_t accumulator[MAX_OA_REPORT_COUNTERS];

   /**
    * Hw ID used by the context on which the query was running.
    */
   uint32_t hw_id;

   /**
    * Number of reports accumulated to produce the results.
    */
   uint32_t reports_accumulated;

   /**
    * Frequency of the GT slices at the beginning and end of the query.
    */
   uint64_t slice_frequency[2];

   /**
    * Frequency of the GT unslice at the beginning and end of the query.
    */
   uint64_t unslice_frequency[2];
};
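
/* Typical flow (an illustrative sketch; the helpers used here are prototyped
 * at the end of this file, and begin_report/end_report stand for the two
 * MI_REPORT_PERF_COUNT snapshots bracketing the query):
 *
 *    struct gen_perf_query_result result;
 *    gen_perf_query_result_clear(&result);
 *    gen_perf_query_result_accumulate(&result, query,
 *                                     begin_report, end_report);
 *    gen_perf_query_result_read_frequencies(&result, devinfo,
 *                                           begin_report, end_report);
 */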

struct gen_perf_query_counter {
   const char *name;
   const char *desc;
   enum gen_perf_counter_type type;
   enum gen_perf_counter_data_type data_type;
   uint64_t raw_max;
   size_t offset;

   union {
      uint64_t (*oa_counter_read_uint64)(struct gen_perf_config *perf,
                                         const struct gen_perf_query_info *query,
                                         const uint64_t *accumulator);
      float (*oa_counter_read_float)(struct gen_perf_config *perf,
                                     const struct gen_perf_query_info *query,
                                     const uint64_t *accumulator);
      struct gen_pipeline_stat pipeline_stat;
   };
};

struct gen_perf_query_register_prog {
   uint32_t reg;
   uint32_t val;
};

struct gen_perf_query_info {
   enum gen_perf_query_type {
      GEN_PERF_QUERY_TYPE_OA,
      GEN_PERF_QUERY_TYPE_RAW,
      GEN_PERF_QUERY_TYPE_PIPELINE,
   } kind;
   const char *name;
   const char *guid;
   struct gen_perf_query_counter *counters;
   int n_counters;
   int max_counters;
   size_t data_size;

   /* OA specific */
   uint64_t oa_metrics_set_id;
   int oa_format;

   /* For indexing into the accumulator[] ... */
   int gpu_time_offset;
   int gpu_clock_offset;
   int a_offset;
   int b_offset;
   int c_offset;

   /* Register programming for a given query */
   struct gen_perf_query_register_prog *flex_regs;
   uint32_t n_flex_regs;

   struct gen_perf_query_register_prog *mux_regs;
   uint32_t n_mux_regs;

   struct gen_perf_query_register_prog *b_counter_regs;
   uint32_t n_b_counter_regs;
};

struct gen_perf_config {
   struct gen_perf_query_info *queries;
   int n_queries;

   /* Variables referenced in the XML meta data for OA performance
    * counters, e.g. in the normalization equations.
    *
    * All uint64_t for consistent operand types in generated code.
    */
   struct {
      uint64_t timestamp_frequency; /** $GpuTimestampFrequency */
      uint64_t n_eus;               /** $EuCoresTotalCount */
      uint64_t n_eu_slices;         /** $EuSlicesTotalCount */
      uint64_t n_eu_sub_slices;     /** $EuSubslicesTotalCount */
      uint64_t eu_threads_count;    /** $EuThreadsCount */
      uint64_t slice_mask;          /** $SliceMask */
      uint64_t subslice_mask;       /** $SubsliceMask */
      uint64_t gt_min_freq;         /** $GpuMinFrequency */
      uint64_t gt_max_freq;         /** $GpuMaxFrequency */
      uint64_t revision;            /** $SkuRevisionId */
   } sys_vars;

   /* OA metric sets, indexed by GUID, as known by Mesa at build time, to
    * cross-reference with the GUIDs of configs advertised by the kernel at
    * runtime.
    */
   struct hash_table *oa_metrics_table;

   /* Location of the device's sysfs entry. */
   char sysfs_dev_dir[256];

   struct {
      void *(*bo_alloc)(void *bufmgr, const char *name, uint64_t size);
      void (*bo_unreference)(void *bo);
      void (*emit_mi_report_perf_count)(void *ctx,
                                        void *bo,
                                        uint32_t offset_in_bytes,
                                        uint32_t report_id);
      void (*batchbuffer_flush)(void *ctx,
                                const char *file, int line);
      void (*capture_frequency_stat_register)(void *ctx, void *bo,
                                              uint32_t bo_offset);
   } vtbl;
};
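
/* How a driver is expected to wire this up (a minimal sketch; the brw_*
 * callback names are hypothetical stand-ins for a driver's own functions):
 *
 *    struct gen_perf_config *perf = gen_perf_new(brw);
 *    perf->vtbl.bo_alloc = brw_perf_bo_alloc;
 *    perf->vtbl.bo_unreference = brw_perf_bo_unreference;
 *    perf->vtbl.emit_mi_report_perf_count = brw_perf_emit_mi_rpc;
 *    perf->vtbl.batchbuffer_flush = brw_perf_batchbuffer_flush;
 *    perf->vtbl.capture_frequency_stat_register = brw_perf_capture_freq;
 *    gen_perf_load_oa_metrics(perf, drm_fd, devinfo);
 */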

/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * brw->perfquery.sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                 |
 *                | ________B_________ _____C___________
 *                | |                | |           |    |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [ 0 ][ 0 ]
 *
 * These must be preserved until the leading ref drops to zero:
 *            [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [ 0 ][ 1 ]
 *        ^_______ Add a reference and store pointer to node in
 *                 A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [ 0 ][ 2 ]
 *        ^_______ Add a reference and store pointer to
 *                 node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
 *        ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [ 1 ][ 0 ][ 0 ][ 0 ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                 |
 *                |                    _____C_________
 *                |                    |             |
 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
 *                  ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [ X ][ X ][ X ][ X ]
 *                   keeping -> [ 1 ][ 0 ][ 0 ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};
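
/* A minimal sketch of the reaping walk described above (an assumed shape for
 * gen_perf_reap_old_sample_buffers, prototyped at the end of this file; the
 * real implementation lives elsewhere and, per the note above, also keeps
 * the list non-empty):
 *
 *    foreach_list_typed_safe(struct oa_sample_buf, buf, link,
 *                            &perf_ctx->sample_buffers) {
 *       if (buf->refcount > 0)
 *          break;   // stop at the first node a query still depends on
 *       exec_node_remove(&buf->link);
 *       exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
 *    }
 */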

struct gen_perf_context {
   struct gen_perf_config *perf;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the gpu.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended. However, the
    * results have not been requested.
    */
   struct brw_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;
};
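
/* The exclusivity noted above implies a compatibility check before starting
 * an OA query against an already-open stream; a sketch of what that check
 * might look like (illustrative only; metric_id is a hypothetical local):
 *
 *    if (perf_ctx->oa_stream_fd != -1 &&
 *        (perf_ctx->current_oa_metrics_set_id != metric_id ||
 *         perf_ctx->current_oa_format != query->oa_format))
 *       return false;   // incompatible with the currently open stream
 */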

static inline size_t
gen_perf_query_counter_get_size(const struct gen_perf_query_counter *counter)
{
   switch (counter->data_type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32:
      return sizeof(uint32_t);
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32:
      return sizeof(uint32_t);
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
      return sizeof(uint64_t);
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
      return sizeof(float);
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE:
      return sizeof(double);
   default:
      unreachable("invalid counter data type");
   }
}
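
/* Illustrative use (a sketch of one way a caller can lay out counter storage
 * by accumulating these sizes; the names are from the structs above):
 *
 *    counter->offset = query->data_size;
 *    query->data_size += gen_perf_query_counter_get_size(counter);
 */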

static inline struct gen_perf_query_info *
gen_perf_query_append_query_info(struct gen_perf_config *perf, int max_counters)
{
   struct gen_perf_query_info *query;

   perf->queries = reralloc(perf, perf->queries,
                            struct gen_perf_query_info,
                            ++perf->n_queries);
   query = &perf->queries[perf->n_queries - 1];
   memset(query, 0, sizeof(*query));

   if (max_counters > 0) {
      query->max_counters = max_counters;
      query->counters =
         rzalloc_array(perf, struct gen_perf_query_counter, max_counters);
   }

   return query;
}

static inline void
gen_perf_query_info_add_stat_reg(struct gen_perf_query_info *query,
                                 uint32_t reg,
                                 uint32_t numerator,
                                 uint32_t denominator,
                                 const char *name,
                                 const char *description)
{
   struct gen_perf_query_counter *counter;

   assert(query->n_counters < query->max_counters);

   counter = &query->counters[query->n_counters];
   counter->name = name;
   counter->desc = description;
   counter->type = GEN_PERF_COUNTER_TYPE_RAW;
   counter->data_type = GEN_PERF_COUNTER_DATA_TYPE_UINT64;
   counter->offset = sizeof(uint64_t) * query->n_counters;
   counter->pipeline_stat.reg = reg;
   counter->pipeline_stat.numerator = numerator;
   counter->pipeline_stat.denominator = denominator;

   query->n_counters++;
}

static inline void
gen_perf_query_info_add_basic_stat_reg(struct gen_perf_query_info *query,
                                       uint32_t reg, const char *name)
{
   gen_perf_query_info_add_stat_reg(query, reg, 1, 1, name, name);
}
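
/* Example (a hypothetical registration; the query name, counter strings and
 * the 1/2 normalization below are illustrative, not taken from any real
 * metric set):
 *
 *    struct gen_perf_query_info *query =
 *       gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);
 *
 *    query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
 *    query->name = "Pipeline Statistics Registers";
 *
 *    gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
 *                                           "N vertices submitted");
 *    // e.g. halve a counter that ticks twice per event of interest:
 *    gen_perf_query_info_add_stat_reg(query, GS_PRIMITIVES_COUNT, 1, 2,
 *                                     "N geometry shader primitives",
 *                                     "N geometry shader primitives emitted");
 *
 *    query->data_size = sizeof(uint64_t) * query->n_counters;
 */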

static inline struct gen_perf_config *
gen_perf_new(void *ctx)
{
   struct gen_perf_config *perf = rzalloc(ctx, struct gen_perf_config);
   return perf;
}

bool gen_perf_load_oa_metrics(struct gen_perf_config *perf, int fd,
                              const struct gen_device_info *devinfo);
bool gen_perf_load_metric_id(struct gen_perf_config *perf, const char *guid,
                             uint64_t *metric_id);

void gen_perf_query_result_read_frequencies(struct gen_perf_query_result *result,
                                            const struct gen_device_info *devinfo,
                                            const uint32_t *start,
                                            const uint32_t *end);
void gen_perf_query_result_accumulate(struct gen_perf_query_result *result,
                                      const struct gen_perf_query_info *query,
                                      const uint32_t *start,
                                      const uint32_t *end);
void gen_perf_query_result_clear(struct gen_perf_query_result *result);
void gen_perf_query_register_mdapi_statistic_query(const struct gen_device_info *devinfo,
                                                   struct gen_perf_config *perf);
void gen_perf_query_register_mdapi_oa_query(const struct gen_device_info *devinfo,
                                            struct gen_perf_config *perf);
uint64_t gen_perf_query_get_metric_id(struct gen_perf_config *perf,
                                      const struct gen_perf_query_info *query);
struct oa_sample_buf *gen_perf_get_free_sample_buf(struct gen_perf_context *perf_ctx);
void gen_perf_reap_old_sample_buffers(struct gen_perf_context *perf_ctx);
void gen_perf_free_sample_bufs(struct gen_perf_context *perf_ctx);

#endif /* GEN_PERF_H */