1 /*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file brw_performance_query.c
26 *
27 * Implementation of the GL_INTEL_performance_query extension.
28 *
29 * Currently there are two possible counter sources exposed here:
30 *
31 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
32 * that we can snapshot at the beginning and end of a query.
33 *
34 * On Gen7.5+ we have Observability Architecture counters which are
35 * covered in a separate document from the rest of the PRMs. It is available at:
36 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
37 * => 2013 Intel Core Processor Family => Observability Performance Counters
38 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
39 * though notably we currently only support OA counters for Haswell+)
40 */
41
42 #include <limits.h>
43 #include <dirent.h>
44
45 /* put before sys/types.h to silence glibc warnings */
46 #ifdef MAJOR_IN_MKDEV
47 #include <sys/mkdev.h>
48 #endif
49 #ifdef MAJOR_IN_SYSMACROS
50 #include <sys/sysmacros.h>
51 #endif
52 #include <sys/types.h>
53 #include <sys/stat.h>
54 #include <fcntl.h>
55 #include <sys/mman.h>
56 #include <sys/ioctl.h>
57
58 #include <xf86drm.h>
59 #include <i915_drm.h>
60
61 #include "main/hash.h"
62 #include "main/macros.h"
63 #include "main/mtypes.h"
64 #include "main/performance_query.h"
65
66 #include "util/bitset.h"
67 #include "util/ralloc.h"
68 #include "util/hash_table.h"
69 #include "util/list.h"
70
71 #include "brw_context.h"
72 #include "brw_defines.h"
73 #include "brw_performance_query.h"
74 #include "brw_oa_metrics.h"
75 #include "intel_batchbuffer.h"
76
77 #define FILE_DEBUG_FLAG DEBUG_PERFMON
78
79 #define OAREPORT_REASON_MASK 0x3f
80 #define OAREPORT_REASON_SHIFT 19
81 #define OAREPORT_REASON_TIMER (1<<0)
82 #define OAREPORT_REASON_TRIGGER1 (1<<1)
83 #define OAREPORT_REASON_TRIGGER2 (1<<2)
84 #define OAREPORT_REASON_CTX_SWITCH (1<<3)
85 #define OAREPORT_REASON_GO_TRANSITION (1<<4)
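
/* For reference, a sketch (not code from this file) of how the reason bits
 * above would be decoded: on Gen8+ they live in the report id dword of an OA
 * report (report[0]):
 *
 *    uint32_t reason = (report[0] >> OAREPORT_REASON_SHIFT) &
 *                      OAREPORT_REASON_MASK;
 *    bool periodic = reason & OAREPORT_REASON_TIMER;
 */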
86
87 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
88 256) /* OA counter report */
89
90 /**
91 * Periodic OA samples are read() into these buffer structures via the
92 * i915 perf kernel interface and appended to the
93 * brw->perfquery.sample_buffers linked list. When we process the
94 * results of an OA metrics query we need to consider all the periodic
95 * samples between the Begin and End MI_REPORT_PERF_COUNT command
96 * markers.
97 *
98 * 'Periodic' is a simplification as there are other automatic reports
99 * written by the hardware also buffered here.
100 *
101 * Considering three queries, A, B and C:
102 *
103 * Time ---->
104 * ________________A_________________
105 * | |
106 * | ________B_________ _____C___________
107 * | | | | | |
108 *
109 * And an illustration of sample buffers read over this time frame:
110 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
111 *
112 * These nodes may hold samples for query A:
113 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
114 *
115 * These nodes may hold samples for query B:
116 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
117 *
118 * These nodes may hold samples for query C:
119 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
120 *
121 * The illustration assumes we have an even distribution of periodic
122 * samples so all nodes have the same size plotted against time:
123 *
124 * Note, to simplify code, the list is never empty.
125 *
126 * With overlapping queries we can see that periodic OA reports may
127 * relate to multiple queries and care needs to be taken to keep
128 * track of sample buffers until there are no queries that might
129 * depend on their contents.
130 *
131 * We use a node ref counting system where a reference ensures that a
132 * node and all following nodes can't be freed/recycled until the
133 * reference drops to zero.
134 *
135 * E.g. with a ref of one here:
136 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
137 *
138 * These nodes could be freed or recycled ("reaped"):
139 * [ 0 ][ 0 ]
140 *
141 * These must be preserved until the leading ref drops to zero:
142 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
143 *
144 * When a query starts we take a reference on the current tail of
145 * the list, knowing that no already-buffered samples can possibly
146 * relate to the newly-started query. A pointer to this node is
147 * also saved in the query object's ->oa.samples_head.
148 *
149 * E.g. starting query A while there are two nodes in .sample_buffers:
150 * ________________A________
151 * |
152 *
153 * [ 0 ][ 1 ]
154 * ^_______ Add a reference and store pointer to node in
155 * A->oa.samples_head
156 *
157 * Moving forward to when the B query starts with no new buffer nodes:
158 * (for reference, i915 perf reads() are only done when queries finish)
159 * ________________A_______
160 * | ________B___
161 * | |
162 *
163 * [ 0 ][ 2 ]
164 * ^_______ Add a reference and store pointer to
165 * node in B->oa.samples_head
166 *
167 * Once a query is finished, after an OA query has become 'Ready',
168 * once the End OA report has landed and after we have processed
169 * all the intermediate periodic samples then we drop the
170 * ->oa.samples_head reference we took at the start.
171 *
172 * So when the B query has finished we have:
173 * ________________A________
174 * | ______B___________
175 * | | |
176 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
177 * ^_______ Drop B->oa.samples_head reference
178 *
179 * We still can't free these due to the A->oa.samples_head ref:
180 * [ 1 ][ 0 ][ 0 ][ 0 ]
181 *
182 * When the A query finishes: (note there's a new ref for C's samples_head)
183 * ________________A_________________
184 * | |
185 * | _____C_________
186 * | | |
187 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
188 * ^_______ Drop A->oa.samples_head reference
189 *
190 * And we can now reap these nodes up to the C->oa.samples_head:
191 * [ X ][ X ][ X ][ X ]
192 * keeping -> [ 1 ][ 0 ][ 0 ]
193 *
194 * We reap old sample buffers each time we finish processing an OA
195 * query by iterating the sample_buffers list from the head until we
196 * find a referenced node and stop.
197 *
198 * Reaped buffers move to a perfquery.free_sample_buffers list and
199 * when we come to read() we first look to recycle a buffer from the
200 * free_sample_buffers list before allocating a new buffer.
201 */
202 struct brw_oa_sample_buf {
203 struct exec_node link;
204 int refcount;
205 int len;
206 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
207 uint32_t last_timestamp;
208 };
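
/* A rough sketch of the reference protocol described above (the real code is
 * in brw_begin_perf_query(), drop_from_unaccumulated_query_list() and
 * reap_old_sample_buffers() below):
 *
 *    // Begin: pin the current tail so any later buffers stay around
 *    obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
 *    exec_node_data(struct brw_oa_sample_buf,
 *                   obj->oa.samples_head, link)->refcount++;
 *
 *    // Once the query's reports have been accumulated: unpin, then reap any
 *    // unreferenced buffers from the head of the list
 *    exec_node_data(struct brw_oa_sample_buf,
 *                   obj->oa.samples_head, link)->refcount--;
 *    obj->oa.samples_head = NULL;
 *    reap_old_sample_buffers(brw);
 */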
209
210 /** Downcasting convenience macro. */
211 static inline struct brw_perf_query_object *
212 brw_perf_query(struct gl_perf_query_object *o)
213 {
214 return (struct brw_perf_query_object *) o;
215 }
216
217 #define MI_RPC_BO_SIZE 4096
218 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
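
/* Layout of the MI_RPC bo (see brw_begin_perf_query()/brw_end_perf_query()):
 * the Begin snapshot is written at offset 0 and the End snapshot at
 * MI_RPC_BO_END_OFFSET_BYTES, each tagged with consecutive report ids so
 * spurious/lost reports can be detected:
 *
 *    0                            MI_RPC_BO_END_OFFSET_BYTES
 *    | begin report (id = N) .... | end report (id = N + 1) .... |
 */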
219
220 /******************************************************************************/
221
222 static bool
223 read_file_uint64(const char *file, uint64_t *val)
224 {
225 char buf[32];
226 int fd, n;
227
228 fd = open(file, 0);
229 if (fd < 0)
230 return false;
231 while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
232 errno == EINTR);
233 close(fd);
234 if (n < 0)
235 return false;
236
237 buf[n] = '\0';
238 *val = strtoull(buf, NULL, 0);
239
240 return true;
241 }
242
243 static bool
244 read_sysfs_drm_device_file_uint64(struct brw_context *brw,
245 const char *file,
246 uint64_t *value)
247 {
248 char buf[512];
249 int len;
250
251 len = snprintf(buf, sizeof(buf), "%s/%s",
252 brw->perfquery.sysfs_dev_dir, file);
253 if (len < 0 || len >= sizeof(buf)) {
254 DBG("Failed to concatenate sys filename to read u64 from\n");
255 return false;
256 }
257
258 return read_file_uint64(buf, value);
259 }
260
261 /******************************************************************************/
262
263 static bool
264 brw_is_perf_query_ready(struct gl_context *ctx,
265 struct gl_perf_query_object *o);
266
267 static void
268 dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
269 {
270 struct gl_context *ctx = brw_void;
271 struct gl_perf_query_object *o = query_void;
272 struct brw_perf_query_object *obj = query_void;
273
274 switch (obj->query->kind) {
275 case OA_COUNTERS:
276 DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
277 id,
278 o->Used ? "Dirty," : "New,",
279 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
280 obj->oa.bo ? "yes," : "no,",
281 brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
282 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
283 break;
284 case PIPELINE_STATS:
285 DBG("%4d: %-6s %-8s BO: %-4s\n",
286 id,
287 o->Used ? "Dirty," : "New,",
288 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
289 obj->pipeline_stats.bo ? "yes" : "no");
290 break;
291 default:
292 unreachable("Unknown query type");
293 break;
294 }
295 }
296
297 static void
298 dump_perf_queries(struct brw_context *brw)
299 {
300 struct gl_context *ctx = &brw->ctx;
301 DBG("Queries: (Open queries = %d, OA users = %d)\n",
302 brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
303 _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
304 }
305
306 /******************************************************************************/
307
308 static struct brw_oa_sample_buf *
309 get_free_sample_buf(struct brw_context *brw)
310 {
311 struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
312 struct brw_oa_sample_buf *buf;
313
314 if (node)
315 buf = exec_node_data(struct brw_oa_sample_buf, node, link);
316 else {
317 buf = ralloc_size(brw, sizeof(*buf));
318
319 exec_node_init(&buf->link);
320 buf->refcount = 0;
321 buf->len = 0;
322 }
323
324 return buf;
325 }
326
327 static void
328 reap_old_sample_buffers(struct brw_context *brw)
329 {
330 struct exec_node *tail_node =
331 exec_list_get_tail(&brw->perfquery.sample_buffers);
332 struct brw_oa_sample_buf *tail_buf =
333 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
334
335 /* Remove all old, unreferenced sample buffers walking forward from
336 * the head of the list, except always leave at least one node in
337 * the list so we always have a node to reference when we Begin
338 * a new query.
339 */
340 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
341 &brw->perfquery.sample_buffers)
342 {
343 if (buf->refcount == 0 && buf != tail_buf) {
344 exec_node_remove(&buf->link);
345 exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
346 } else
347 return;
348 }
349 }
350
351 static void
352 free_sample_bufs(struct brw_context *brw)
353 {
354 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
355 &brw->perfquery.free_sample_buffers)
356 ralloc_free(buf);
357
358 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
359 }
360
361 /******************************************************************************/
362
363 /**
364 * Driver hook for glGetPerfQueryInfoINTEL().
365 */
366 static void
367 brw_get_perf_query_info(struct gl_context *ctx,
368 unsigned query_index,
369 const char **name,
370 GLuint *data_size,
371 GLuint *n_counters,
372 GLuint *n_active)
373 {
374 struct brw_context *brw = brw_context(ctx);
375 const struct brw_perf_query_info *query =
376 &brw->perfquery.queries[query_index];
377
378 *name = query->name;
379 *data_size = query->data_size;
380 *n_counters = query->n_counters;
381
382 switch (query->kind) {
383 case OA_COUNTERS:
384 *n_active = brw->perfquery.n_active_oa_queries;
385 break;
386
387 case PIPELINE_STATS:
388 *n_active = brw->perfquery.n_active_pipeline_stats_queries;
389 break;
390
391 default:
392 unreachable("Unknown query type");
393 break;
394 }
395 }
396
397 /**
398 * Driver hook for glGetPerfCounterInfoINTEL().
399 */
400 static void
401 brw_get_perf_counter_info(struct gl_context *ctx,
402 unsigned query_index,
403 unsigned counter_index,
404 const char **name,
405 const char **desc,
406 GLuint *offset,
407 GLuint *data_size,
408 GLuint *type_enum,
409 GLuint *data_type_enum,
410 GLuint64 *raw_max)
411 {
412 struct brw_context *brw = brw_context(ctx);
413 const struct brw_perf_query_info *query =
414 &brw->perfquery.queries[query_index];
415 const struct brw_perf_query_counter *counter =
416 &query->counters[counter_index];
417
418 *name = counter->name;
419 *desc = counter->desc;
420 *offset = counter->offset;
421 *data_size = counter->size;
422 *type_enum = counter->type;
423 *data_type_enum = counter->data_type;
424 *raw_max = counter->raw_max;
425 }
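
/* For reference, a sketch of how an application might exercise the two hooks
 * above via the GL entry points (names per the GL_INTEL_performance_query
 * spec, not part of this driver):
 *
 *    GLuint query_id;
 *    glGetFirstPerfQueryIdINTEL(&query_id);
 *    while (query_id) {
 *       GLchar name[256];
 *       GLuint data_size, n_counters, n_instances, caps;
 *       glGetPerfQueryInfoINTEL(query_id, sizeof(name), name,
 *                               &data_size, &n_counters, &n_instances, &caps);
 *       // counter ids are expected to run from 1 to n_counters
 *       glGetNextPerfQueryIdINTEL(query_id, &query_id);
 *    }
 */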
426
427 /******************************************************************************/
428
429 /**
430 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
431 * pipeline statistics for the performance query object.
432 */
433 static void
434 snapshot_statistics_registers(struct brw_context *brw,
435 struct brw_perf_query_object *obj,
436 uint32_t offset_in_bytes)
437 {
438 const struct brw_perf_query_info *query = obj->query;
439 const int n_counters = query->n_counters;
440
441 for (int i = 0; i < n_counters; i++) {
442 const struct brw_perf_query_counter *counter = &query->counters[i];
443
444 assert(counter->data_type == GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL);
445
446 brw_store_register_mem64(brw, obj->pipeline_stats.bo,
447 counter->pipeline_stat.reg,
448 offset_in_bytes + i * sizeof(uint64_t));
449 }
450 }
451
452 /**
453 * Add a query to the global list of "unaccumulated queries."
454 *
455 * Queries are tracked here until all the associated OA reports have
456 * been accumulated via accumulate_oa_reports() after the end
457 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
458 */
459 static void
460 add_to_unaccumulated_query_list(struct brw_context *brw,
461 struct brw_perf_query_object *obj)
462 {
463 if (brw->perfquery.unaccumulated_elements >=
464 brw->perfquery.unaccumulated_array_size)
465 {
466 brw->perfquery.unaccumulated_array_size *= 1.5;
467 brw->perfquery.unaccumulated =
468 reralloc(brw, brw->perfquery.unaccumulated,
469 struct brw_perf_query_object *,
470 brw->perfquery.unaccumulated_array_size);
471 }
472
473 brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
474 }
475
476 /**
477 * Remove a query from the global list of unaccumulated queries once
478 * the OA reports associated with the query have been successfully
479 * accumulated in accumulate_oa_reports() or when discarding unwanted
480 * query results.
481 */
482 static void
483 drop_from_unaccumulated_query_list(struct brw_context *brw,
484 struct brw_perf_query_object *obj)
485 {
486 for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
487 if (brw->perfquery.unaccumulated[i] == obj) {
488 int last_elt = --brw->perfquery.unaccumulated_elements;
489
490 if (i == last_elt)
491 brw->perfquery.unaccumulated[i] = NULL;
492 else {
493 brw->perfquery.unaccumulated[i] =
494 brw->perfquery.unaccumulated[last_elt];
495 }
496
497 break;
498 }
499 }
500
501 /* Drop our samples_head reference so that associated periodic
502 * sample data buffers can potentially be reaped if they aren't
503 * referenced by any other queries...
504 */
505
506 struct brw_oa_sample_buf *buf =
507 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
508
509 assert(buf->refcount > 0);
510 buf->refcount--;
511
512 obj->oa.samples_head = NULL;
513
514 reap_old_sample_buffers(brw);
515 }
516
517 /**
518 * Given pointers to starting and ending OA snapshots, add the deltas for each
519 * counter to the results.
520 */
521 static void
522 add_deltas(struct brw_context *brw,
523 struct brw_perf_query_object *obj,
524 const uint32_t *start,
525 const uint32_t *end)
526 {
527 const struct brw_perf_query_info *query = obj->query;
528 uint64_t *accumulator = obj->oa.accumulator;
529 int idx = 0;
530 int i;
531
532 obj->oa.reports_accumulated++;
533
534 switch (query->oa_format) {
535 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
536 brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator + idx++); /* timestamp */
537 brw_perf_query_accumulate_uint32(start + 3, end + 3, accumulator + idx++); /* clock */
538
539 /* 32x 40bit A counters... */
540 for (i = 0; i < 32; i++)
541 brw_perf_query_accumulate_uint40(i, start, end, accumulator + idx++);
542
543 /* 4x 32bit A counters... */
544 for (i = 0; i < 4; i++)
545 brw_perf_query_accumulate_uint32(start + 36 + i, end + 36 + i,
546 accumulator + idx++);
547
548 /* 8x 32bit B counters + 8x 32bit C counters... */
549 for (i = 0; i < 16; i++)
550 brw_perf_query_accumulate_uint32(start + 48 + i, end + 48 + i,
551 accumulator + idx++);
552
553 break;
554 case I915_OA_FORMAT_A45_B8_C8:
555 brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */
556
557 for (i = 0; i < 61; i++)
558 brw_perf_query_accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);
559
560 break;
561 default:
562 unreachable("Can't accumulate OA counters in unknown format");
563 }
564 }
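
/* For reference, the uint32 helpers used above are expected to accumulate
 * deltas in a way that tolerates (at most) a single wrap of a 32bit counter
 * between two snapshots, conceptually something like:
 *
 *    static void
 *    accumulate_uint32(const uint32_t *start, const uint32_t *end,
 *                      uint64_t *accumulator)
 *    {
 *       *accumulator += (uint32_t)(*end - *start);
 *    }
 */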
565
566 static bool
567 inc_n_oa_users(struct brw_context *brw)
568 {
569 if (brw->perfquery.n_oa_users == 0 &&
570 drmIoctl(brw->perfquery.oa_stream_fd,
571 I915_PERF_IOCTL_ENABLE, 0) < 0)
572 {
573 return false;
574 }
575 ++brw->perfquery.n_oa_users;
576
577 return true;
578 }
579
580 static void
581 dec_n_oa_users(struct brw_context *brw)
582 {
583 /* Disabling the i915 perf stream will effectively disable the OA
584 * counters. Note it's important to be sure there are no outstanding
585 * MI_RPC commands at this point since they could stall the CS
586 * indefinitely once OACONTROL is disabled.
587 */
588 --brw->perfquery.n_oa_users;
589 if (brw->perfquery.n_oa_users == 0 &&
590 drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
591 {
592 DBG("WARNING: Error disabling i915 perf stream: %m\n");
593 }
594 }
595
596 /* In general, if we see anything spurious while accumulating results we
597 * don't try to continue accumulating the current query hoping for the
598 * best; we scrap anything outstanding and then hope for the best with
599 * new queries.
600 */
601 static void
602 discard_all_queries(struct brw_context *brw)
603 {
604 while (brw->perfquery.unaccumulated_elements) {
605 struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];
606
607 obj->oa.results_accumulated = true;
608 drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);
609
610 dec_n_oa_users(brw);
611 }
612 }
613
614 enum OaReadStatus {
615 OA_READ_STATUS_ERROR,
616 OA_READ_STATUS_UNFINISHED,
617 OA_READ_STATUS_FINISHED,
618 };
619
620 static enum OaReadStatus
621 read_oa_samples_until(struct brw_context *brw,
622 uint32_t start_timestamp,
623 uint32_t end_timestamp)
624 {
625 struct exec_node *tail_node =
626 exec_list_get_tail(&brw->perfquery.sample_buffers);
627 struct brw_oa_sample_buf *tail_buf =
628 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
629 uint32_t last_timestamp = tail_buf->last_timestamp;
630
631 while (1) {
632 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
633 uint32_t offset;
634 int len;
635
636 while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
637 sizeof(buf->buf))) < 0 && errno == EINTR)
638 ;
639
640 if (len <= 0) {
641 exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);
642
643 if (len < 0) {
644 if (errno == EAGAIN)
645 return ((last_timestamp - start_timestamp) >=
646 (end_timestamp - start_timestamp)) ?
647 OA_READ_STATUS_FINISHED :
648 OA_READ_STATUS_UNFINISHED;
649 else {
650 DBG("Error reading i915 perf samples: %m\n");
651 }
652 } else
653 DBG("Spurious EOF reading i915 perf samples\n");
654
655 return OA_READ_STATUS_ERROR;
656 }
657
658 buf->len = len;
659 exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);
660
661 /* Go through the reports and update the last timestamp. */
662 offset = 0;
663 while (offset < buf->len) {
664 const struct drm_i915_perf_record_header *header =
665 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
666 uint32_t *report = (uint32_t *) (header + 1);
667
668 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
669 last_timestamp = report[1];
670
671 offset += header->size;
672 }
673
674 buf->last_timestamp = last_timestamp;
675 }
676
677 unreachable("not reached");
678 return OA_READ_STATUS_ERROR;
679 }
680
681 /**
682 * Try to read all the reports until either the delimiting timestamp
683 * or an error arises.
684 */
685 static bool
686 read_oa_samples_for_query(struct brw_context *brw,
687 struct brw_perf_query_object *obj)
688 {
689 uint32_t *start;
690 uint32_t *last;
691 uint32_t *end;
692
693 /* We need the MI_REPORT_PERF_COUNT to land before we can start
694 * accumulating. */
695 assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
696 !brw_bo_busy(obj->oa.bo));
697
698 /* Map the BO once here and let accumulate_oa_reports() unmap
699 * it. */
700 if (obj->oa.map == NULL)
701 obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);
702
703 start = last = obj->oa.map;
704 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
705
706 if (start[0] != obj->oa.begin_report_id) {
707 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
708 return true;
709 }
710 if (end[0] != (obj->oa.begin_report_id + 1)) {
711 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
712 return true;
713 }
714
715 /* Read the reports until the end timestamp. */
716 switch (read_oa_samples_until(brw, start[1], end[1])) {
717 case OA_READ_STATUS_ERROR:
718 /* Fallthrough and let accumulate_oa_reports() deal with the
719 * error. */
720 case OA_READ_STATUS_FINISHED:
721 return true;
722 case OA_READ_STATUS_UNFINISHED:
723 return false;
724 }
725
726 unreachable("invalid read status");
727 return false;
728 }
729
730 /**
731 * Accumulate raw OA counter values based on deltas between pairs of
732 * OA reports.
733 *
734 * Accumulation starts from the first report captured via
735 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
736 * last MI_RPC report requested by brw_end_perf_query(). Between these
737 * two reports there may also be some number of periodically sampled OA
738 * reports collected via the i915 perf interface - depending on the
739 * duration of the query.
740 *
741 * These periodic snapshots help to ensure we handle counter overflow
742 * correctly by being frequent enough to ensure we don't miss multiple
743 * overflows of a counter between snapshots. For Gen8+ the i915 perf
744 * snapshots provide the extra context-switch reports that let us
745 * subtract out the progress of counters associated with other
746 * contexts running on the system.
747 */
748 static void
749 accumulate_oa_reports(struct brw_context *brw,
750 struct brw_perf_query_object *obj)
751 {
752 const struct gen_device_info *devinfo = &brw->screen->devinfo;
753 struct gl_perf_query_object *o = &obj->base;
754 uint32_t *start;
755 uint32_t *last;
756 uint32_t *end;
757 struct exec_node *first_samples_node;
758 bool in_ctx = true;
759 int out_duration = 0;
760
761 assert(o->Ready);
762 assert(obj->oa.map != NULL);
763
764 start = last = obj->oa.map;
765 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
766
767 if (start[0] != obj->oa.begin_report_id) {
768 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
769 goto error;
770 }
771 if (end[0] != (obj->oa.begin_report_id + 1)) {
772 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
773 goto error;
774 }
775
776 obj->oa.hw_id = start[2];
777
778 /* See if we have any periodic reports to accumulate too... */
779
780 /* N.B. The oa.samples_head was set when the query began and
781 * pointed to the tail of the brw->perfquery.sample_buffers list at
782 * the time the query started. Since the buffer existed before the
783 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
784 * that no data in this particular node's buffer can possibly be
785 * associated with the query - so skip ahead one...
786 */
787 first_samples_node = obj->oa.samples_head->next;
788
789 foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
790 &brw->perfquery.sample_buffers,
791 first_samples_node)
792 {
793 int offset = 0;
794
795 while (offset < buf->len) {
796 const struct drm_i915_perf_record_header *header =
797 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
798
799 assert(header->size != 0);
800 assert(header->size <= buf->len);
801
802 offset += header->size;
803
804 switch (header->type) {
805 case DRM_I915_PERF_RECORD_SAMPLE: {
806 uint32_t *report = (uint32_t *)(header + 1);
807 bool add = true;
808
809 /* Ignore reports that come before the start marker.
810 * (Note: takes care to allow overflow of 32bit timestamps)
811 */
812 if (brw_timebase_scale(brw, report[1] - start[1]) > 5000000000)
813 continue;
814
815 /* Ignore reports that come after the end marker.
816 * (Note: takes care to allow overflow of 32bit timestamps)
817 */
818 if (brw_timebase_scale(brw, report[1] - end[1]) <= 5000000000)
819 goto end;
820
821 /* For Gen8+ since the counters continue while other
822 * contexts are running we need to discount any unrelated
823 * deltas. The hardware automatically generates a report
824 * on context switch which gives us a new reference point
825 * to continue adding deltas from.
826 *
827 * For Haswell we can rely on the HW to stop the progress
828 * of OA counters while any other context is active.
829 */
830 if (devinfo->gen >= 8) {
831 if (in_ctx && report[2] != obj->oa.hw_id) {
832 DBG("i915 perf: Switch AWAY (observed by ID change)\n");
833 in_ctx = false;
834 out_duration = 0;
835 } else if (in_ctx == false && report[2] == obj->oa.hw_id) {
836 DBG("i915 perf: Switch TO\n");
837 in_ctx = true;
838
839 /* From experimentation in IGT, we found that the OA unit
840 * might label some report as "idle" (using an invalid
841 * context ID), right after a report for a given context.
842 * Deltas generated by those reports actually belong to the
843 * previous context, even though they're not labelled as
844 * such.
845 *
846 * We didn't *really* Switch AWAY in the case that we e.g.
847 * saw a single periodic report while idle...
848 */
849 if (out_duration >= 1)
850 add = false;
851 } else if (in_ctx) {
852 assert(report[2] == obj->oa.hw_id);
853 DBG("i915 perf: Continuation IN\n");
854 } else {
855 assert(report[2] != obj->oa.hw_id);
856 DBG("i915 perf: Continuation OUT\n");
857 add = false;
858 out_duration++;
859 }
860 }
861
862 if (add)
863 add_deltas(brw, obj, last, report);
864
865 last = report;
866
867 break;
868 }
869
870 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
871 DBG("i915 perf: OA error: all reports lost\n");
872 goto error;
873 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
874 DBG("i915 perf: OA report lost\n");
875 break;
876 }
877 }
878 }
879
880 end:
881
882 add_deltas(brw, obj, last, end);
883
884 DBG("Marking %d accumulated - results gathered\n", o->Id);
885
886 obj->oa.results_accumulated = true;
887 drop_from_unaccumulated_query_list(brw, obj);
888 dec_n_oa_users(brw);
889
890 return;
891
892 error:
893
894 discard_all_queries(brw);
895 }
896
897 /******************************************************************************/
898
899 static bool
900 open_i915_perf_oa_stream(struct brw_context *brw,
901 int metrics_set_id,
902 int report_format,
903 int period_exponent,
904 int drm_fd,
905 uint32_t ctx_id)
906 {
907 uint64_t properties[] = {
908 /* Single context sampling */
909 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
910
911 /* Include OA reports in samples */
912 DRM_I915_PERF_PROP_SAMPLE_OA, true,
913
914 /* OA unit configuration */
915 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
916 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
917 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
918 };
919 struct drm_i915_perf_open_param param = {
920 .flags = I915_PERF_FLAG_FD_CLOEXEC |
921 I915_PERF_FLAG_FD_NONBLOCK |
922 I915_PERF_FLAG_DISABLED,
923 .num_properties = ARRAY_SIZE(properties) / 2,
924 .properties_ptr = (uintptr_t) properties,
925 };
926 int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
927 if (fd == -1) {
928 DBG("Error opening i915 perf OA stream: %m\n");
929 return false;
930 }
931
932 brw->perfquery.oa_stream_fd = fd;
933
934 brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
935 brw->perfquery.current_oa_format = report_format;
936
937 return true;
938 }
939
940 static void
941 close_perf(struct brw_context *brw)
942 {
943 if (brw->perfquery.oa_stream_fd != -1) {
944 close(brw->perfquery.oa_stream_fd);
945 brw->perfquery.oa_stream_fd = -1;
946 }
947 }
948
949 /**
950 * Driver hook for glBeginPerfQueryINTEL().
951 */
952 static bool
953 brw_begin_perf_query(struct gl_context *ctx,
954 struct gl_perf_query_object *o)
955 {
956 struct brw_context *brw = brw_context(ctx);
957 struct brw_perf_query_object *obj = brw_perf_query(o);
958 const struct brw_perf_query_info *query = obj->query;
959
960 /* We can assume the frontend hides mistaken attempts to Begin a
961 * query object multiple times before its End. Similarly if an
962 * application reuses a query object before results have arrived
963 * the frontend will wait for prior results so we don't need
964 * to support abandoning in-flight results.
965 */
966 assert(!o->Active);
967 assert(!o->Used || o->Ready); /* no in-flight query to worry about */
968
969 DBG("Begin(%d)\n", o->Id);
970
971 /* XXX: We have to consider that the command parser unit that parses batch
972 * buffer commands and is used to capture begin/end counter snapshots isn't
973 * implicitly synchronized with what's currently running across other GPU
974 * units (such as the EUs running shaders) that the performance counters are
975 * associated with.
976 *
977 * The intention of performance queries is to measure the work associated
978 * with commands between the begin/end delimiters and so for that to be the
979 * case we need to explicitly synchronize the parsing of commands to capture
980 * Begin/End counter snapshots with what's running across other parts of the
981 * GPU.
982 *
983 * When the command parser reaches a Begin marker it effectively needs to
984 * drain everything currently running on the GPU until the hardware is idle
985 * before capturing the first snapshot of counters - otherwise the results
986 * would also be measuring the effects of earlier commands.
987 *
988 * When the command parser reaches an End marker it needs to stall until
989 * everything currently running on the GPU has finished before capturing the
990 * end snapshot - otherwise the results won't be a complete representation
991 * of the work.
992 *
993 * Theoretically there could be opportunities to minimize how much of the
994 * GPU pipeline is drained, or that we stall for, when we know what specific
995 * units the performance counters being queried relate to but we don't
996 * currently attempt to be clever here.
997 *
998 * Note: with our current simple approach here, for back-to-back queries
999 * we will redundantly emit duplicate commands to synchronize the command
1000 * streamer with the rest of the GPU pipeline, but we assume that in HW the
1001 * second synchronization is effectively a NOOP.
1002 *
1003 * N.B. The final results are based on deltas of counters between (inside)
1004 * Begin/End markers so even though the total wall clock time of the
1005 * workload is stretched by larger pipeline bubbles the bubbles themselves
1006 * are generally invisible to the query results. Whether that's a good or a
1007 * bad thing depends on the use case. For a lower real-time impact while
1008 * capturing metrics, periodic sampling may be a better choice than
1009 * INTEL_performance_query.
1010 *
1011 *
1012 * This is our Begin synchronization point to drain current work on the
1013 * GPU before we capture our first counter snapshot...
1014 */
1015 brw_emit_mi_flush(brw);
1016
1017 switch (query->kind) {
1018 case OA_COUNTERS:
1019
1020 /* Opening an i915 perf stream implies exclusive access to the OA unit
1021 * which will generate counter reports for a specific counter set with a
1022 * specific layout/format so we can't begin any OA based queries that
1023 * require a different counter set or format unless we get an opportunity
1024 * to close the stream and open a new one...
1025 */
1026 if (brw->perfquery.oa_stream_fd != -1 &&
1027 brw->perfquery.current_oa_metrics_set_id !=
1028 query->oa_metrics_set_id) {
1029
1030 if (brw->perfquery.n_oa_users != 0)
1031 return false;
1032 else
1033 close_perf(brw);
1034 }
1035
1036 /* If the OA counters aren't already on, enable them. */
1037 if (brw->perfquery.oa_stream_fd == -1) {
1038 __DRIscreen *screen = brw->screen->driScrnPriv;
1039 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1040
1041 /* The period_exponent gives a sampling period as follows:
1042 * sample_period = timestamp_period * 2^(period_exponent + 1)
1043 *
1044 * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1045 * ~83ns (GEN8/9).
1046 *
1047 * The counter overflow period is derived from the EuActive counter
1048 * which reads a counter that increments by the number of clock
1049 * cycles multiplied by the number of EUs. It can be calculated as:
1050 *
1051 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1052 *
1053 * (E.g. 40 EUs @ 1GHz = ~53ms)
1054 *
1055 * We select a sampling period lower than that overflow period to
1056 * ensure we cannot see more than 1 counter overflow, otherwise we
1057 * could lose information.
1058 */
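
/* For example (rough numbers): with 80ns timestamps an exponent of 16 gives
 * 80ns * 2^17 ~= 10.5ms between periodic reports, while an exponent of 19
 * gives ~84ms.
 */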
1059
1060 int a_counter_in_bits = 32;
1061 if (devinfo->gen >= 8)
1062 a_counter_in_bits = 40;
1063
1064 uint64_t overflow_period = pow(2, a_counter_in_bits) /
1065 (brw->perfquery.sys_vars.n_eus *
1066 /* drop 1GHz freq to have units in nanoseconds */
1067 2);
1068
1069 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1070 overflow_period, overflow_period / 1000000ul, brw->perfquery.sys_vars.n_eus);
1071
1072 int period_exponent = 0;
1073 uint64_t prev_sample_period, next_sample_period;
1074 for (int e = 0; e < 30; e++) {
1075 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1076 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1077
1078 /* Take the previous sampling period, lower than the overflow
1079 * period.
1080 */
1081 if (prev_sample_period < overflow_period &&
1082 next_sample_period > overflow_period)
1083 period_exponent = e + 1;
1084 }
1085
1086 if (period_exponent == 0) {
1087 DBG("WARNING: enable to find a sampling exponent\n");
1088 return false;
1089 }
1090
1091 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1092 prev_sample_period / 1000000ul);
1093
1094 if (!open_i915_perf_oa_stream(brw,
1095 query->oa_metrics_set_id,
1096 query->oa_format,
1097 period_exponent,
1098 screen->fd, /* drm fd */
1099 brw->hw_ctx))
1100 return false;
1101 } else {
1102 assert(brw->perfquery.current_oa_metrics_set_id ==
1103 query->oa_metrics_set_id &&
1104 brw->perfquery.current_oa_format ==
1105 query->oa_format);
1106 }
1107
1108 if (!inc_n_oa_users(brw)) {
1109 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1110 return false;
1111 }
1112
1113 if (obj->oa.bo) {
1114 brw_bo_unreference(obj->oa.bo);
1115 obj->oa.bo = NULL;
1116 }
1117
1118 obj->oa.bo =
1119 brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo", MI_RPC_BO_SIZE);
1120 #ifdef DEBUG
1121 /* Pre-filling the BO helps debug whether writes landed. */
1122 void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
1123 memset(map, 0x80, MI_RPC_BO_SIZE);
1124 brw_bo_unmap(obj->oa.bo);
1125 #endif
1126
1127 obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
1128 brw->perfquery.next_query_start_report_id += 2;
1129
1130 /* We flush the batchbuffer here to minimize the chances that MI_RPC
1131 * delimiting commands end up in different batchbuffers. If that's the
1132 * case, the measurement will include the time it takes for the kernel
1133 * scheduler to load a new request into the hardware. This is manifested in
1134 * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
1135 */
1136 intel_batchbuffer_flush(brw);
1137
1138 /* Take a starting OA counter snapshot. */
1139 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
1140 obj->oa.begin_report_id);
1141 ++brw->perfquery.n_active_oa_queries;
1142
1143 /* No already-buffered samples can possibly be associated with this query
1144 * so create a marker within the list of sample buffers enabling us to
1145 * easily ignore earlier samples when processing this query after
1146 * completion.
1147 */
1148 assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
1149 obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
1150
1151 struct brw_oa_sample_buf *buf =
1152 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
1153
1154 /* This reference will ensure that future/following sample
1155 * buffers (that may relate to this query) can't be freed until
1156 * this drops to zero.
1157 */
1158 buf->refcount++;
1159
1160 obj->oa.hw_id = 0xffffffff;
1161 memset(obj->oa.accumulator, 0, sizeof(obj->oa.accumulator));
1162 obj->oa.results_accumulated = false;
1163
1164 add_to_unaccumulated_query_list(brw, obj);
1165 break;
1166
1167 case PIPELINE_STATS:
1168 if (obj->pipeline_stats.bo) {
1169 brw_bo_unreference(obj->pipeline_stats.bo);
1170 obj->pipeline_stats.bo = NULL;
1171 }
1172
1173 obj->pipeline_stats.bo =
1174 brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
1175 STATS_BO_SIZE);
1176
1177 /* Take starting snapshots. */
1178 snapshot_statistics_registers(brw, obj, 0);
1179
1180 ++brw->perfquery.n_active_pipeline_stats_queries;
1181 break;
1182
1183 default:
1184 unreachable("Unknown query type");
1185 break;
1186 }
1187
1188 if (INTEL_DEBUG & DEBUG_PERFMON)
1189 dump_perf_queries(brw);
1190
1191 return true;
1192 }
1193
1194 /**
1195 * Driver hook for glEndPerfQueryINTEL().
1196 */
1197 static void
1198 brw_end_perf_query(struct gl_context *ctx,
1199 struct gl_perf_query_object *o)
1200 {
1201 struct brw_context *brw = brw_context(ctx);
1202 struct brw_perf_query_object *obj = brw_perf_query(o);
1203
1204 DBG("End(%d)\n", o->Id);
1205
1206 /* Ensure that the work associated with the queried commands will have
1207 * finished before taking our query end counter readings.
1208 *
1209 * For more details see comment in brw_begin_perf_query for
1210 * corresponding flush.
1211 */
1212 brw_emit_mi_flush(brw);
1213
1214 switch (obj->query->kind) {
1215 case OA_COUNTERS:
1216
1217 /* NB: It's possible that the query will have already been marked
1218 * as 'accumulated' if an error was seen while reading samples
1219 * from perf. In this case we mustn't try and emit a closing
1220 * MI_RPC command in case the OA unit has already been disabled
1221 */
1222 if (!obj->oa.results_accumulated) {
1223 /* Take an ending OA counter snapshot. */
1224 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
1225 MI_RPC_BO_END_OFFSET_BYTES,
1226 obj->oa.begin_report_id + 1);
1227 }
1228
1229 --brw->perfquery.n_active_oa_queries;
1230
1231 /* NB: even though the query has now ended, it can't be accumulated
1232 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1233 * to query->oa.bo
1234 */
1235 break;
1236
1237 case PIPELINE_STATS:
1238 snapshot_statistics_registers(brw, obj,
1239 STATS_BO_END_OFFSET_BYTES);
1240 --brw->perfquery.n_active_pipeline_stats_queries;
1241 break;
1242
1243 default:
1244 unreachable("Unknown query type");
1245 break;
1246 }
1247 }
1248
1249 static void
1250 brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
1251 {
1252 struct brw_context *brw = brw_context(ctx);
1253 struct brw_perf_query_object *obj = brw_perf_query(o);
1254 struct brw_bo *bo = NULL;
1255
1256 assert(!o->Ready);
1257
1258 switch (obj->query->kind) {
1259 case OA_COUNTERS:
1260 bo = obj->oa.bo;
1261 break;
1262
1263 case PIPELINE_STATS:
1264 bo = obj->pipeline_stats.bo;
1265 break;
1266
1267 default:
1268 unreachable("Unknown query type");
1269 break;
1270 }
1271
1272 if (bo == NULL)
1273 return;
1274
1275 /* If the current batch references our results bo then we need to
1276 * flush first...
1277 */
1278 if (brw_batch_references(&brw->batch, bo))
1279 intel_batchbuffer_flush(brw);
1280
1281 brw_bo_wait_rendering(bo);
1282
1283 /* Due to a race condition between the OA unit signaling report
1284 * availability and the report actually being written into memory,
1285 * we need to wait for all the reports to come in before we can
1286 * read them.
1287 */
1288 if (obj->query->kind == OA_COUNTERS) {
1289 while (!read_oa_samples_for_query(brw, obj))
1290 ;
1291 }
1292 }
1293
1294 static bool
1295 brw_is_perf_query_ready(struct gl_context *ctx,
1296 struct gl_perf_query_object *o)
1297 {
1298 struct brw_context *brw = brw_context(ctx);
1299 struct brw_perf_query_object *obj = brw_perf_query(o);
1300
1301 if (o->Ready)
1302 return true;
1303
1304 switch (obj->query->kind) {
1305 case OA_COUNTERS:
1306 return (obj->oa.results_accumulated ||
1307 (obj->oa.bo &&
1308 !brw_batch_references(&brw->batch, obj->oa.bo) &&
1309 !brw_bo_busy(obj->oa.bo) &&
1310 read_oa_samples_for_query(brw, obj)));
1311 case PIPELINE_STATS:
1312 return (obj->pipeline_stats.bo &&
1313 !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
1314 !brw_bo_busy(obj->pipeline_stats.bo));
1315
1316 default:
1317 unreachable("Unknown query type");
1318 break;
1319 }
1320
1321 return false;
1322 }
1323
1324 static int
1325 get_oa_counter_data(struct brw_context *brw,
1326 struct brw_perf_query_object *obj,
1327 size_t data_size,
1328 uint8_t *data)
1329 {
1330 const struct brw_perf_query_info *query = obj->query;
1331 int n_counters = query->n_counters;
1332 int written = 0;
1333
1334 if (!obj->oa.results_accumulated) {
1335 accumulate_oa_reports(brw, obj);
1336 assert(obj->oa.results_accumulated);
1337
1338 brw_bo_unmap(obj->oa.bo);
1339 obj->oa.map = NULL;
1340 }
1341
1342 for (int i = 0; i < n_counters; i++) {
1343 const struct brw_perf_query_counter *counter = &query->counters[i];
1344 uint64_t *out_uint64;
1345 float *out_float;
1346
1347 if (counter->size) {
1348 switch (counter->data_type) {
1349 case GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL:
1350 out_uint64 = (uint64_t *)(data + counter->offset);
1351 *out_uint64 = counter->oa_counter_read_uint64(brw, query,
1352 obj->oa.accumulator);
1353 break;
1354 case GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL:
1355 out_float = (float *)(data + counter->offset);
1356 *out_float = counter->oa_counter_read_float(brw, query,
1357 obj->oa.accumulator);
1358 break;
1359 default:
1360 /* So far we aren't using uint32, double or bool32... */
1361 unreachable("unexpected counter data type");
1362 }
1363 written = counter->offset + counter->size;
1364 }
1365 }
1366
1367 return written;
1368 }
1369
1370 static int
1371 get_pipeline_stats_data(struct brw_context *brw,
1372 struct brw_perf_query_object *obj,
1373 size_t data_size,
1374 uint8_t *data)
1375
1376 {
1377 const struct brw_perf_query_info *query = obj->query;
1378 int n_counters = obj->query->n_counters;
1379 uint8_t *p = data;
1380
1381 uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
1382 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
1383
1384 for (int i = 0; i < n_counters; i++) {
1385 const struct brw_perf_query_counter *counter = &query->counters[i];
1386 uint64_t value = end[i] - start[i];
1387
1388 if (counter->pipeline_stat.numerator !=
1389 counter->pipeline_stat.denominator) {
1390 value *= counter->pipeline_stat.numerator;
1391 value /= counter->pipeline_stat.denominator;
1392 }
1393
1394 *((uint64_t *)p) = value;
1395 p += 8;
1396 }
1397
1398 brw_bo_unmap(obj->pipeline_stats.bo);
1399
1400 return p - data;
1401 }
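
/* Worked example for the scaling above: a statistic registered with
 * numerator 1 and denominator 4 (e.g. PS_INVOCATION_COUNT on Haswell/Gen8,
 * see init_pipeline_statistic_query_registers()) that shows a raw delta of
 * 400 between the Begin and End snapshots is written out as 100.
 */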
1402
1403 /**
1404 * Driver hook for glGetPerfQueryDataINTEL().
1405 */
1406 static void
1407 brw_get_perf_query_data(struct gl_context *ctx,
1408 struct gl_perf_query_object *o,
1409 GLsizei data_size,
1410 GLuint *data,
1411 GLuint *bytes_written)
1412 {
1413 struct brw_context *brw = brw_context(ctx);
1414 struct brw_perf_query_object *obj = brw_perf_query(o);
1415 int written = 0;
1416
1417 assert(brw_is_perf_query_ready(ctx, o));
1418
1419 DBG("GetData(%d)\n", o->Id);
1420
1421 if (INTEL_DEBUG & DEBUG_PERFMON)
1422 dump_perf_queries(brw);
1423
1424 /* We expect that the frontend only calls this hook when it knows
1425 * that results are available.
1426 */
1427 assert(o->Ready);
1428
1429 switch (obj->query->kind) {
1430 case OA_COUNTERS:
1431 written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
1432 break;
1433
1434 case PIPELINE_STATS:
1435 written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
1436 break;
1437
1438 default:
1439 unreachable("Unknown query type");
1440 break;
1441 }
1442
1443 if (bytes_written)
1444 *bytes_written = written;
1445 }
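
/* For reference, the application-side flow that exercises the query hooks
 * (a sketch, entry point names per the GL_INTEL_performance_query spec):
 *
 *    GLuint handle;
 *    glCreatePerfQueryINTEL(query_id, &handle);
 *    glBeginPerfQueryINTEL(handle);
 *    // ... draw ...
 *    glEndPerfQueryINTEL(handle);
 *
 *    // data/data_size sized per glGetPerfQueryInfoINTEL()
 *    GLuint bytes_written;
 *    glGetPerfQueryDataINTEL(handle, GL_PERFQUERY_WAIT_INTEL,
 *                            data_size, data, &bytes_written);
 *    glDeletePerfQueryINTEL(handle);
 */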
1446
1447 static struct gl_perf_query_object *
1448 brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
1449 {
1450 struct brw_context *brw = brw_context(ctx);
1451 const struct brw_perf_query_info *query =
1452 &brw->perfquery.queries[query_index];
1453 struct brw_perf_query_object *obj =
1454 calloc(1, sizeof(struct brw_perf_query_object));
1455
1456 if (!obj)
1457 return NULL;
1458
1459 obj->query = query;
1460
1461 brw->perfquery.n_query_instances++;
1462
1463 return &obj->base;
1464 }
1465
1466 /**
1467 * Driver hook for glDeletePerfQueryINTEL().
1468 */
1469 static void
1470 brw_delete_perf_query(struct gl_context *ctx,
1471 struct gl_perf_query_object *o)
1472 {
1473 struct brw_context *brw = brw_context(ctx);
1474 struct brw_perf_query_object *obj = brw_perf_query(o);
1475
1476 /* We can assume that the frontend waits for a query to complete
1477 * before ever calling into here, so we don't have to worry about
1478 * deleting an in-flight query object.
1479 */
1480 assert(!o->Active);
1481 assert(!o->Used || o->Ready);
1482
1483 DBG("Delete(%d)\n", o->Id);
1484
1485 switch (obj->query->kind) {
1486 case OA_COUNTERS:
1487 if (obj->oa.bo) {
1488 if (!obj->oa.results_accumulated) {
1489 drop_from_unaccumulated_query_list(brw, obj);
1490 dec_n_oa_users(brw);
1491 }
1492
1493 brw_bo_unreference(obj->oa.bo);
1494 obj->oa.bo = NULL;
1495 }
1496
1497 obj->oa.results_accumulated = false;
1498 break;
1499
1500 case PIPELINE_STATS:
1501 if (obj->pipeline_stats.bo) {
1502 brw_bo_unreference(obj->pipeline_stats.bo);
1503 obj->pipeline_stats.bo = NULL;
1504 }
1505 break;
1506
1507 default:
1508 unreachable("Unknown query type");
1509 break;
1510 }
1511
1512 free(obj);
1513
1514 /* As an indication that the INTEL_performance_query extension is no
1515 * longer in use, it's a good time to free our cache of sample
1516 * buffers and close any current i915-perf stream.
1517 */
1518 if (--brw->perfquery.n_query_instances == 0) {
1519 free_sample_bufs(brw);
1520 close_perf(brw);
1521 }
1522 }
1523
1524 /******************************************************************************/
1525
1526 static void
1527 init_pipeline_statistic_query_registers(struct brw_context *brw)
1528 {
1529 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1530 struct brw_perf_query_info *query = brw_perf_query_append_query_info(brw);
1531
1532 query->kind = PIPELINE_STATS;
1533 query->name = "Pipeline Statistics Registers";
1534 query->n_counters = 0;
1535 query->counters =
1536 rzalloc_array(brw, struct brw_perf_query_counter, MAX_STAT_COUNTERS);
1537
1538 brw_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
1539 "N vertices submitted");
1540 brw_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1541 "N primitives submitted");
1542 brw_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1543 "N vertex shader invocations");
1544
1545 if (devinfo->gen == 6) {
1546 brw_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
1547 "SO_PRIM_STORAGE_NEEDED",
1548 "N geometry shader stream-out primitives (total)");
1549 brw_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
1550 "SO_NUM_PRIMS_WRITTEN",
1551 "N geometry shader stream-out primitives (written)");
1552 } else {
1553 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
1554 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
1555 "N stream-out (stream 0) primitives (total)");
1556 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
1557 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
1558 "N stream-out (stream 1) primitives (total)");
1559 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
1560 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
1561 "N stream-out (stream 2) primitives (total)");
1562 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
1563 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
1564 "N stream-out (stream 3) primitives (total)");
1565 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
1566 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
1567 "N stream-out (stream 0) primitives (written)");
1568 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
1569 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
1570 "N stream-out (stream 1) primitives (written)");
1571 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
1572 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
1573 "N stream-out (stream 2) primitives (written)");
1574 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
1575 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
1576 "N stream-out (stream 3) primitives (written)");
1577 }
1578
1579 brw_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1580 "N TCS shader invocations");
1581 brw_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1582 "N TES shader invocations");
1583
1584 brw_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1585 "N geometry shader invocations");
1586 brw_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1587 "N geometry shader primitives emitted");
1588
1589 brw_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1590 "N primitives entering clipping");
1591 brw_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1592 "N primitives leaving clipping");
1593
1594 if (devinfo->is_haswell || devinfo->gen == 8)
1595 brw_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1596 "N fragment shader invocations",
1597 "N fragment shader invocations");
1598 else
1599 brw_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1600 "N fragment shader invocations");
1601
1602 brw_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT, "N z-pass fragments");
1603
1604 if (devinfo->gen >= 7)
1605 brw_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1606 "N compute shader invocations");
1607
1608 query->data_size = sizeof(uint64_t) * query->n_counters;
1609 }
1610
1611 static void
1612 register_oa_config(struct brw_context *brw,
1613 const struct brw_perf_query_info *query,
1614 uint64_t config_id)
1615 {
1616 struct brw_perf_query_info *registered_query =
1617 brw_perf_query_append_query_info(brw);
1618
1619 *registered_query = *query;
1620 registered_query->oa_metrics_set_id = config_id;
1621 DBG("metric set registered: id = %" PRIu64", guid = %s\n",
1622 registered_query->oa_metrics_set_id, query->guid);
1623 }
1624
1625 static void
1626 enumerate_sysfs_metrics(struct brw_context *brw)
1627 {
1628 char buf[256];
1629 DIR *metricsdir = NULL;
1630 struct dirent *metric_entry;
1631 int len;
1632
1633 len = snprintf(buf, sizeof(buf), "%s/metrics", brw->perfquery.sysfs_dev_dir);
1634 if (len < 0 || len >= sizeof(buf)) {
1635 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
1636 return;
1637 }
1638
1639 metricsdir = opendir(buf);
1640 if (!metricsdir) {
1641 DBG("Failed to open %s: %m\n", buf);
1642 return;
1643 }
1644
1645 while ((metric_entry = readdir(metricsdir))) {
1646 struct hash_entry *entry;
1647
1648 if ((metric_entry->d_type != DT_DIR &&
1649 metric_entry->d_type != DT_LNK) ||
1650 metric_entry->d_name[0] == '.')
1651 continue;
1652
1653 DBG("metric set: %s\n", metric_entry->d_name);
1654 entry = _mesa_hash_table_search(brw->perfquery.oa_metrics_table,
1655 metric_entry->d_name);
1656 if (entry) {
1657 uint64_t id;
1658
1659 len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
1660 brw->perfquery.sysfs_dev_dir, metric_entry->d_name);
1661 if (len < 0 || len >= sizeof(buf)) {
1662 DBG("Failed to concatenate path to sysfs metric id file\n");
1663 continue;
1664 }
1665
1666 if (!read_file_uint64(buf, &id)) {
1667 DBG("Failed to read metric set id from %s: %m", buf);
1668 continue;
1669 }
1670
1671 register_oa_config(brw, (const struct brw_perf_query_info *)entry->data, id);
1672 } else
1673 DBG("metric set not known by mesa (skipping)\n");
1674 }
1675
1676 closedir(metricsdir);
1677 }
1678
1679 static bool
1680 kernel_has_dynamic_config_support(struct brw_context *brw)
1681 {
1682 __DRIscreen *screen = brw->screen->driScrnPriv;
1683 struct hash_entry *entry;
1684
1685 hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
1686 struct brw_perf_query_info *query = entry->data;
1687 char config_path[280];
1688 uint64_t config_id;
1689
1690 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
1691 brw->perfquery.sysfs_dev_dir, query->guid);
1692
1693 /* Look for the test config, which we know we can't replace. */
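/* A kernel with dynamic config support is expected to refuse to remove this
 * immutable config with ENOENT, while a kernel without the ioctl should fail
 * differently, so treat ENOENT as proof that the perf config ioctls are
 * available.
 */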
1694 if (read_file_uint64(config_path, &config_id) && config_id == 1) {
1695 return drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
1696 &config_id) < 0 && errno == ENOENT;
1697 }
1698 }
1699
1700 return false;
1701 }
1702
1703 static void
1704 init_oa_configs(struct brw_context *brw)
1705 {
1706 __DRIscreen *screen = brw->screen->driScrnPriv;
1707 struct hash_entry *entry;
1708
1709 hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
1710 const struct brw_perf_query_info *query = entry->data;
1711 struct drm_i915_perf_oa_config config;
1712 char config_path[280];
1713 uint64_t config_id;
1714 int ret;
1715
1716 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
1717 brw->perfquery.sysfs_dev_dir, query->guid);
1718
1719 /* Don't recreate already loaded configs. */
1720 if (read_file_uint64(config_path, &config_id)) {
1721 DBG("metric set: %s (already loaded)\n", query->guid);
1722 register_oa_config(brw, query, config_id);
1723 continue;
1724 }
1725
1726 memset(&config, 0, sizeof(config));
1727
1728 memcpy(config.uuid, query->guid, sizeof(config.uuid));
1729
1730 config.n_mux_regs = query->n_mux_regs;
1731 config.mux_regs_ptr = (uintptr_t) query->mux_regs;
1732
1733 config.n_boolean_regs = query->n_b_counter_regs;
1734 config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;
1735
1736 config.n_flex_regs = query->n_flex_regs;
1737 config.flex_regs_ptr = (uintptr_t) query->flex_regs;
1738
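/* On success the ADD_CONFIG ioctl returns the id of the newly created
 * metric set, which is what we register/advertise below.
 */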
1739 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
1740 if (ret < 0) {
1741 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
1742 query->name, query->guid, strerror(errno));
1743 continue;
1744 }
1745
1746 register_oa_config(brw, query, ret);
1747 DBG("metric set: %s (added)\n", query->guid);
1748 }
1749 }
1750
1751 static bool
1752 query_topology(struct brw_context *brw)
1753 {
1754 __DRIscreen *screen = brw->screen->driScrnPriv;
1755 struct drm_i915_query_item item = {
1756 .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
1757 };
1758 struct drm_i915_query query = {
1759 .num_items = 1,
1760 .items_ptr = (uintptr_t) &item,
1761 };
1762
1763 if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query))
1764 return false;
1765
1766 struct drm_i915_query_topology_info *topo_info =
1767 (struct drm_i915_query_topology_info *) calloc(1, item.length);
1768 item.data_ptr = (uintptr_t) topo_info;
1769
   if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query) ||
       item.length <= 0) {
      free(topo_info); /* don't leak the buffer if the second query fails */
      return false;
   }
1773
1774 gen_device_info_update_from_topology(&brw->screen->devinfo,
1775 topo_info);
1776
1777 free(topo_info);
1778
1779 return true;
1780 }
1781
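/* Fallback topology path for kernels without the query uAPI: build an
 * approximate topology from the I915_PARAM_SLICE_MASK and
 * I915_PARAM_SUBSLICE_MASK getparams plus the EU count the screen already
 * knows about.
 */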
1782 static bool
1783 getparam_topology(struct brw_context *brw)
1784 {
1785 __DRIscreen *screen = brw->screen->driScrnPriv;
1786 drm_i915_getparam_t gp;
1787 int ret;
1788
1789 int slice_mask = 0;
1790 gp.param = I915_PARAM_SLICE_MASK;
1791 gp.value = &slice_mask;
1792 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1793 if (ret)
1794 return false;
1795
1796 int subslice_mask = 0;
1797 gp.param = I915_PARAM_SUBSLICE_MASK;
1798 gp.value = &subslice_mask;
1799 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1800 if (ret)
1801 return false;
1802
1803 gen_device_info_update_from_masks(&brw->screen->devinfo,
1804 slice_mask,
1805 subslice_mask,
1806 brw->screen->eu_total);
1807
1808 return true;
1809 }
1810
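/* Derive the topology related system variables (slice/subslice masks, EU,
 * subslice and thread counts) referenced by the OA counter equations from
 * the device info.
 */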
1811 static void
1812 compute_topology_builtins(struct brw_context *brw)
1813 {
1814 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1815
1816 brw->perfquery.sys_vars.slice_mask = devinfo->slice_masks;
1817 brw->perfquery.sys_vars.n_eu_slices = devinfo->num_slices;
1818
   /* Count the enabled subslices across every byte of the mask array. */
   for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
1820 brw->perfquery.sys_vars.n_eu_sub_slices +=
1821 _mesa_bitcount(devinfo->subslice_masks[i]);
1822 }
1823
1824 for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
1825 brw->perfquery.sys_vars.n_eus += _mesa_bitcount(devinfo->eu_masks[i]);
1826
1827 brw->perfquery.sys_vars.eu_threads_count =
1828 brw->perfquery.sys_vars.n_eus * devinfo->num_thread_per_eu;
1829
   /* At the moment the subslice mask builtin has groups of 3 bits for each
1831 * slice.
1832 *
1833 * Ideally equations would be updated to have a slice/subslice query
1834 * function/operator.
1835 */
1836 brw->perfquery.sys_vars.subslice_mask = 0;
1837 for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
1838 for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
1839 if (gen_device_info_subslice_available(devinfo, s, ss))
1840 brw->perfquery.sys_vars.subslice_mask |= 1UL << (s * 3 + ss);
1841 }
1842 }
1843 }
1844
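/* Gather the remaining system variables needed to evaluate the OA counter
 * equations: GT min/max frequencies (sysfs reports MHz, we store Hz),
 * timestamp frequency, device revision and the topology builtins above.
 */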
1845 static bool
1846 init_oa_sys_vars(struct brw_context *brw)
1847 {
1848 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1849 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
1850 __DRIscreen *screen = brw->screen->driScrnPriv;
1851
1852 if (!read_sysfs_drm_device_file_uint64(brw, "gt_min_freq_mhz", &min_freq_mhz))
1853 return false;
1854
1855 if (!read_sysfs_drm_device_file_uint64(brw, "gt_max_freq_mhz", &max_freq_mhz))
1856 return false;
1857
1858 if (!query_topology(brw)) {
1859 /* We need the i915 query uAPI on CNL+ (kernel 4.17+). */
1860 if (devinfo->gen >= 10)
1861 return false;
1862
1863 if (!getparam_topology(brw)) {
1864 /* We need the SLICE_MASK/SUBSLICE_MASK on gen8+ (kernel 4.13+). */
1865 if (devinfo->gen >= 8)
1866 return false;
1867
1868 /* On Haswell, the values are already computed for us in
1869 * gen_device_info.
1870 */
1871 }
1872 }
1873
1874 memset(&brw->perfquery.sys_vars, 0, sizeof(brw->perfquery.sys_vars));
1875 brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
1876 brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;
1877 brw->perfquery.sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
1878 brw->perfquery.sys_vars.revision = intel_device_get_revision(screen->fd);
1879 compute_topology_builtins(brw);
1880
1881 return true;
1882 }
1883
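/* Resolve the sysfs directory of the DRM device backing our fd, something
 * like /sys/dev/char/226:0/device/drm/card0 (illustrative path), by
 * fstat()ing the fd for its major:minor numbers and then scanning that
 * directory for the cardX entry.
 */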
1884 static bool
1885 get_sysfs_dev_dir(struct brw_context *brw)
1886 {
1887 __DRIscreen *screen = brw->screen->driScrnPriv;
1888 struct stat sb;
1889 int min, maj;
1890 DIR *drmdir;
1891 struct dirent *drm_entry;
1892 int len;
1893
1894 brw->perfquery.sysfs_dev_dir[0] = '\0';
1895
1896 if (fstat(screen->fd, &sb)) {
1897 DBG("Failed to stat DRM fd\n");
1898 return false;
1899 }
1900
1901 maj = major(sb.st_rdev);
1902 min = minor(sb.st_rdev);
1903
1904 if (!S_ISCHR(sb.st_mode)) {
1905 DBG("DRM fd is not a character device as expected\n");
1906 return false;
1907 }
1908
1909 len = snprintf(brw->perfquery.sysfs_dev_dir,
1910 sizeof(brw->perfquery.sysfs_dev_dir),
1911 "/sys/dev/char/%d:%d/device/drm", maj, min);
1912 if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir)) {
1913 DBG("Failed to concatenate sysfs path to drm device\n");
1914 return false;
1915 }
1916
1917 drmdir = opendir(brw->perfquery.sysfs_dev_dir);
1918 if (!drmdir) {
1919 DBG("Failed to open %s: %m\n", brw->perfquery.sysfs_dev_dir);
1920 return false;
1921 }
1922
1923 while ((drm_entry = readdir(drmdir))) {
1924 if ((drm_entry->d_type == DT_DIR ||
1925 drm_entry->d_type == DT_LNK) &&
1926 strncmp(drm_entry->d_name, "card", 4) == 0)
1927 {
1928 len = snprintf(brw->perfquery.sysfs_dev_dir,
1929 sizeof(brw->perfquery.sysfs_dev_dir),
1930 "/sys/dev/char/%d:%d/device/drm/%s",
1931 maj, min, drm_entry->d_name);
1932 closedir(drmdir);
1933 if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir))
1934 return false;
1935 else
1936 return true;
1937 }
1938 }
1939
1940 closedir(drmdir);
1941
1942 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
1943 maj, min);
1944
1945 return false;
1946 }
1947
1948 typedef void (*perf_register_oa_queries_t)(struct brw_context *);
1949
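/* Return the generated, per-platform (and per-GT where it matters) function
 * that registers the OA metric sets Mesa was built with, or NULL if OA
 * support isn't wired up for this platform.
 */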
1950 static perf_register_oa_queries_t
1951 get_register_queries_function(const struct gen_device_info *devinfo)
1952 {
1953 if (devinfo->is_haswell)
1954 return brw_oa_register_queries_hsw;
1955 if (devinfo->is_cherryview)
1956 return brw_oa_register_queries_chv;
1957 if (devinfo->is_broadwell)
1958 return brw_oa_register_queries_bdw;
1959 if (devinfo->is_broxton)
1960 return brw_oa_register_queries_bxt;
1961 if (devinfo->is_skylake) {
1962 if (devinfo->gt == 2)
1963 return brw_oa_register_queries_sklgt2;
1964 if (devinfo->gt == 3)
1965 return brw_oa_register_queries_sklgt3;
1966 if (devinfo->gt == 4)
1967 return brw_oa_register_queries_sklgt4;
1968 }
1969 if (devinfo->is_kabylake) {
1970 if (devinfo->gt == 2)
1971 return brw_oa_register_queries_kblgt2;
1972 if (devinfo->gt == 3)
1973 return brw_oa_register_queries_kblgt3;
1974 }
1975 if (devinfo->is_geminilake)
1976 return brw_oa_register_queries_glk;
1977 if (devinfo->is_coffeelake) {
1978 if (devinfo->gt == 2)
1979 return brw_oa_register_queries_cflgt2;
1980 if (devinfo->gt == 3)
1981 return brw_oa_register_queries_cflgt3;
1982 }
1983 if (devinfo->is_cannonlake)
1984 return brw_oa_register_queries_cnl;
1985
1986 return NULL;
1987 }
1988
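/* ctx->Driver.InitPerfQueryInfo implementation: always registers the
 * pipeline statistics queries, and additionally the OA queries when the
 * i915 perf interface, a register function for this platform and the
 * required sysfs/system variables are all available. The result is cached,
 * so repeated calls just return the query count.
 */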
1989 static unsigned
1990 brw_init_perf_query_info(struct gl_context *ctx)
1991 {
1992 struct brw_context *brw = brw_context(ctx);
1993 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1994 bool i915_perf_oa_available = false;
1995 struct stat sb;
1996 perf_register_oa_queries_t oa_register;
1997
1998 if (brw->perfquery.n_queries)
1999 return brw->perfquery.n_queries;
2000
2001 init_pipeline_statistic_query_registers(brw);
2002
2003 oa_register = get_register_queries_function(devinfo);
2004
2005 /* The existence of this sysctl parameter implies the kernel supports
2006 * the i915 perf interface.
2007 */
2008 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
2009
      /* If perf_stream_paranoid == 1 then on Gen8+ we won't be able to
       * access OA metrics unless running as root.
       */
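      /* Note: an administrator can relax this with e.g.
       * `sysctl dev.i915.perf_stream_paranoid=0` (illustrative command);
       * we only ever read the setting here, never change it.
       */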
2013 if (devinfo->is_haswell)
2014 i915_perf_oa_available = true;
2015 else {
2016 uint64_t paranoid = 1;
2017
2018 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
2019
2020 if (paranoid == 0 || geteuid() == 0)
2021 i915_perf_oa_available = true;
2022 }
2023 }
2024
2025 if (i915_perf_oa_available &&
2026 oa_register &&
2027 get_sysfs_dev_dir(brw) &&
2028 init_oa_sys_vars(brw))
2029 {
2030 brw->perfquery.oa_metrics_table =
2031 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2032 _mesa_key_string_equal);
2033
2034 /* Index all the metric sets mesa knows about before looking to see what
2035 * the kernel is advertising.
2036 */
2037 oa_register(brw);
2038
2039 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
2040 kernel_has_dynamic_config_support(brw))
2041 init_oa_configs(brw);
2042 else
2043 enumerate_sysfs_metrics(brw);
2044 }
2045
2046 brw->perfquery.unaccumulated =
2047 ralloc_array(brw, struct brw_perf_query_object *, 2);
2048 brw->perfquery.unaccumulated_elements = 0;
2049 brw->perfquery.unaccumulated_array_size = 2;
2050
2051 exec_list_make_empty(&brw->perfquery.sample_buffers);
2052 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
2053
   /* It's convenient to guarantee that this linked list of sample buffers
    * is never empty, so we add an empty head; that way, when we begin an
    * OA query we can always take a reference on a buffer in this list.
    */
2059 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
2060 exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);
2061
2062 brw->perfquery.oa_stream_fd = -1;
2063
2064 brw->perfquery.next_query_start_report_id = 1000;
2065
2066 return brw->perfquery.n_queries;
2067 }
2068
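/* Plug the GL_INTEL_performance_query driver hooks into the context's
 * driver vtable.
 */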
2069 void
2070 brw_init_performance_queries(struct brw_context *brw)
2071 {
2072 struct gl_context *ctx = &brw->ctx;
2073
2074 ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
2075 ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
2076 ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
2077 ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
2078 ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
2079 ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
2080 ctx->Driver.EndPerfQuery = brw_end_perf_query;
2081 ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
2082 ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
2083 ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
2084 }