1 /*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file brw_performance_query.c
26 *
27 * Implementation of the GL_INTEL_performance_query extension.
28 *
29 * Currently there are two possible counter sources exposed here:
30 *
31 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
32 * that we can snapshot at the beginning and end of a query.
33 *
34 * On Gen7.5+ we have Observability Architecture counters which are
35  * covered in a separate document from the rest of the PRMs. It is available at:
36 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
37 * => 2013 Intel Core Processor Family => Observability Performance Counters
38 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
39 * though notably we currently only support OA counters for Haswell+)
40 */
41
42 #include <limits.h>
43 #include <dirent.h>
44
45 /* put before sys/types.h to silence glibc warnings */
46 #ifdef MAJOR_IN_MKDEV
47 #include <sys/mkdev.h>
48 #endif
49 #ifdef MAJOR_IN_SYSMACROS
50 #include <sys/sysmacros.h>
51 #endif
52 #include <sys/types.h>
53 #include <sys/stat.h>
54 #include <fcntl.h>
55 #include <sys/mman.h>
56 #include <sys/ioctl.h>
57
58 #include <xf86drm.h>
59 #include <i915_drm.h>
60
61 #include "main/hash.h"
62 #include "main/macros.h"
63 #include "main/mtypes.h"
64 #include "main/performance_query.h"
65
66 #include "util/bitset.h"
67 #include "util/ralloc.h"
68 #include "util/hash_table.h"
69 #include "util/list.h"
70
71 #include "brw_context.h"
72 #include "brw_defines.h"
73 #include "brw_performance_query.h"
74 #include "brw_oa_metrics.h"
75 #include "intel_batchbuffer.h"
76
77 #define FILE_DEBUG_FLAG DEBUG_PERFMON
78
79 #define OAREPORT_REASON_MASK 0x3f
80 #define OAREPORT_REASON_SHIFT 19
81 #define OAREPORT_REASON_TIMER (1<<0)
82 #define OAREPORT_REASON_TRIGGER1 (1<<1)
83 #define OAREPORT_REASON_TRIGGER2 (1<<2)
84 #define OAREPORT_REASON_CTX_SWITCH (1<<3)
85 #define OAREPORT_REASON_GO_TRANSITION (1<<4)
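/* Illustrative only: on Gen8+ the OA unit writes a report reason field into
 * the first dword of each OA report; it can be extracted with the masks
 * above, e.g.:
 *
 *   reason = (report[0] >> OAREPORT_REASON_SHIFT) & OAREPORT_REASON_MASK;
 *
 * and then tested against the OAREPORT_REASON_* flags.
 */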
86
87 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
88 256) /* OA counter report */
89
90 /**
91 * Periodic OA samples are read() into these buffer structures via the
92 * i915 perf kernel interface and appended to the
93 * brw->perfquery.sample_buffers linked list. When we process the
94 * results of an OA metrics query we need to consider all the periodic
95 * samples between the Begin and End MI_REPORT_PERF_COUNT command
96 * markers.
97 *
 98  * 'Periodic' is a simplification, as other automatic reports written
 99  * by the hardware are also buffered here.
100 *
101 * Considering three queries, A, B and C:
102 *
103 * Time ---->
104 * ________________A_________________
105 * | |
106 * | ________B_________ _____C___________
107 * | | | | | |
108 *
109 * And an illustration of sample buffers read over this time frame:
110 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
111 *
112 * These nodes may hold samples for query A:
113 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
114 *
115 * These nodes may hold samples for query B:
116 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
117 *
118 * These nodes may hold samples for query C:
119 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
120 *
121 * The illustration assumes we have an even distribution of periodic
122  * samples so all nodes have the same size plotted against time.
123 *
124 * Note, to simplify code, the list is never empty.
125 *
126 * With overlapping queries we can see that periodic OA reports may
127  * relate to multiple queries and care needs to be taken to keep
128 * track of sample buffers until there are no queries that might
129 * depend on their contents.
130 *
131 * We use a node ref counting system where a reference ensures that a
132 * node and all following nodes can't be freed/recycled until the
133 * reference drops to zero.
134 *
135 * E.g. with a ref of one here:
136 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
137 *
138 * These nodes could be freed or recycled ("reaped"):
139 * [ 0 ][ 0 ]
140 *
141 * These must be preserved until the leading ref drops to zero:
142 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
143 *
144 * When a query starts we take a reference on the current tail of
145 * the list, knowing that no already-buffered samples can possibly
146 * relate to the newly-started query. A pointer to this node is
147 * also saved in the query object's ->oa.samples_head.
148 *
149 * E.g. starting query A while there are two nodes in .sample_buffers:
150 * ________________A________
151 * |
152 *
153 * [ 0 ][ 1 ]
154 * ^_______ Add a reference and store pointer to node in
155 * A->oa.samples_head
156 *
157 * Moving forward to when the B query starts with no new buffer nodes:
158 * (for reference, i915 perf reads() are only done when queries finish)
159 * ________________A_______
160 * | ________B___
161 * | |
162 *
163 * [ 0 ][ 2 ]
164 * ^_______ Add a reference and store pointer to
165 * node in B->oa.samples_head
166 *
167 * Once a query is finished, after an OA query has become 'Ready',
168  * once the End OA report has landed and after we have processed
169 * all the intermediate periodic samples then we drop the
170 * ->oa.samples_head reference we took at the start.
171 *
172 * So when the B query has finished we have:
173 * ________________A________
174 * | ______B___________
175 * | | |
176 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
177 * ^_______ Drop B->oa.samples_head reference
178 *
179 * We still can't free these due to the A->oa.samples_head ref:
180 * [ 1 ][ 0 ][ 0 ][ 0 ]
181 *
182 * When the A query finishes: (note there's a new ref for C's samples_head)
183 * ________________A_________________
184 * | |
185 * | _____C_________
186 * | | |
187 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
188 * ^_______ Drop A->oa.samples_head reference
189 *
190 * And we can now reap these nodes up to the C->oa.samples_head:
191 * [ X ][ X ][ X ][ X ]
192 * keeping -> [ 1 ][ 0 ][ 0 ]
193 *
194 * We reap old sample buffers each time we finish processing an OA
195 * query by iterating the sample_buffers list from the head until we
196 * find a referenced node and stop.
197 *
198 * Reaped buffers move to a perfquery.free_sample_buffers list and
199 * when we come to read() we first look to recycle a buffer from the
200 * free_sample_buffers list before allocating a new buffer.
201 */
202 struct brw_oa_sample_buf {
203 struct exec_node link;
204 int refcount;
205 int len;
206 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
207 uint32_t last_timestamp;
208 };
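/* A minimal sketch (illustrative only, not used by the driver) of the
 * take-a-reference pattern described above: a query pins the current tail
 * node when it begins and drops that reference once its reports have been
 * accumulated, allowing reap_old_sample_buffers() to recycle the
 * unreferenced leading nodes.
 */
static inline struct brw_oa_sample_buf *
example_take_samples_head_ref(struct exec_list *sample_buffers)
{
   struct exec_node *tail_node = exec_list_get_tail(sample_buffers);
   struct brw_oa_sample_buf *buf =
      exec_node_data(struct brw_oa_sample_buf, tail_node, link);

   /* Later nodes can't be freed/recycled until this drops back to zero. */
   buf->refcount++;

   return buf;
}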
209
210 /** Downcasting convenience macro. */
211 static inline struct brw_perf_query_object *
212 brw_perf_query(struct gl_perf_query_object *o)
213 {
214 return (struct brw_perf_query_object *) o;
215 }
216
217 #define MI_RPC_BO_SIZE 4096
218 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
219 #define MI_FREQ_START_OFFSET_BYTES (3072)
220 #define MI_FREQ_END_OFFSET_BYTES (3076)
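/* Illustrative layout of the MI_RPC BO given the offsets above (report
 * sizes depend on the OA format in use):
 *
 *   0x000 (0):    Begin MI_REPORT_PERF_COUNT report
 *   0x800 (2048): End MI_REPORT_PERF_COUNT report
 *   0xc00 (3072): 32bit frequency (RPSTAT) snapshot taken at Begin
 *   0xc04 (3076): 32bit frequency (RPSTAT) snapshot taken at End
 */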
221
222 /******************************************************************************/
223
224 static bool
225 read_file_uint64(const char *file, uint64_t *val)
226 {
227 char buf[32];
228 int fd, n;
229
230 fd = open(file, 0);
231 if (fd < 0)
232 return false;
233 while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
234 errno == EINTR);
235 close(fd);
236 if (n < 0)
237 return false;
238
239 buf[n] = '\0';
240 *val = strtoull(buf, NULL, 0);
241
242 return true;
243 }
244
245 static bool
246 read_sysfs_drm_device_file_uint64(struct brw_context *brw,
247 const char *file,
248 uint64_t *value)
249 {
250 char buf[512];
251 int len;
252
253 len = snprintf(buf, sizeof(buf), "%s/%s",
254 brw->perfquery.sysfs_dev_dir, file);
255 if (len < 0 || len >= sizeof(buf)) {
256 DBG("Failed to concatenate sys filename to read u64 from\n");
257 return false;
258 }
259
260 return read_file_uint64(buf, value);
261 }
262
263 /******************************************************************************/
264
265 static bool
266 brw_is_perf_query_ready(struct gl_context *ctx,
267 struct gl_perf_query_object *o);
268
269 static void
270 dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
271 {
272 struct gl_context *ctx = brw_void;
273 struct gl_perf_query_object *o = query_void;
274 struct brw_perf_query_object *obj = query_void;
275
276 switch (obj->query->kind) {
277 case OA_COUNTERS:
278 DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
279 id,
280 o->Used ? "Dirty," : "New,",
281 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
282 obj->oa.bo ? "yes," : "no,",
283 brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
284 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
285 break;
286 case PIPELINE_STATS:
287 DBG("%4d: %-6s %-8s BO: %-4s\n",
288 id,
289 o->Used ? "Dirty," : "New,",
290 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
291 obj->pipeline_stats.bo ? "yes" : "no");
292 break;
293 default:
294 unreachable("Unknown query type");
295 break;
296 }
297 }
298
299 static void
300 dump_perf_queries(struct brw_context *brw)
301 {
302 struct gl_context *ctx = &brw->ctx;
303 DBG("Queries: (Open queries = %d, OA users = %d)\n",
304 brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
305 _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
306 }
307
308 /******************************************************************************/
309
310 static struct brw_oa_sample_buf *
311 get_free_sample_buf(struct brw_context *brw)
312 {
313 struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
314 struct brw_oa_sample_buf *buf;
315
316 if (node)
317 buf = exec_node_data(struct brw_oa_sample_buf, node, link);
318 else {
319 buf = ralloc_size(brw, sizeof(*buf));
320
321 exec_node_init(&buf->link);
322 buf->refcount = 0;
323 buf->len = 0;
324 }
325
326 return buf;
327 }
328
329 static void
330 reap_old_sample_buffers(struct brw_context *brw)
331 {
332 struct exec_node *tail_node =
333 exec_list_get_tail(&brw->perfquery.sample_buffers);
334 struct brw_oa_sample_buf *tail_buf =
335 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
336
337 /* Remove all old, unreferenced sample buffers walking forward from
338 * the head of the list, except always leave at least one node in
339 * the list so we always have a node to reference when we Begin
340 * a new query.
341 */
342 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
343 &brw->perfquery.sample_buffers)
344 {
345 if (buf->refcount == 0 && buf != tail_buf) {
346 exec_node_remove(&buf->link);
347 exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
348 } else
349 return;
350 }
351 }
352
353 static void
354 free_sample_bufs(struct brw_context *brw)
355 {
356 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
357 &brw->perfquery.free_sample_buffers)
358 ralloc_free(buf);
359
360 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
361 }
362
363 /******************************************************************************/
364
365 /**
366 * Driver hook for glGetPerfQueryInfoINTEL().
367 */
368 static void
369 brw_get_perf_query_info(struct gl_context *ctx,
370 unsigned query_index,
371 const char **name,
372 GLuint *data_size,
373 GLuint *n_counters,
374 GLuint *n_active)
375 {
376 struct brw_context *brw = brw_context(ctx);
377 const struct brw_perf_query_info *query =
378 &brw->perfquery.queries[query_index];
379
380 *name = query->name;
381 *data_size = query->data_size;
382 *n_counters = query->n_counters;
383
384 switch (query->kind) {
385 case OA_COUNTERS:
386 *n_active = brw->perfquery.n_active_oa_queries;
387 break;
388
389 case PIPELINE_STATS:
390 *n_active = brw->perfquery.n_active_pipeline_stats_queries;
391 break;
392
393 default:
394 unreachable("Unknown query type");
395 break;
396 }
397 }
398
399 /**
400 * Driver hook for glGetPerfCounterInfoINTEL().
401 */
402 static void
403 brw_get_perf_counter_info(struct gl_context *ctx,
404 unsigned query_index,
405 unsigned counter_index,
406 const char **name,
407 const char **desc,
408 GLuint *offset,
409 GLuint *data_size,
410 GLuint *type_enum,
411 GLuint *data_type_enum,
412 GLuint64 *raw_max)
413 {
414 struct brw_context *brw = brw_context(ctx);
415 const struct brw_perf_query_info *query =
416 &brw->perfquery.queries[query_index];
417 const struct brw_perf_query_counter *counter =
418 &query->counters[counter_index];
419
420 *name = counter->name;
421 *desc = counter->desc;
422 *offset = counter->offset;
423 *data_size = counter->size;
424 *type_enum = counter->type;
425 *data_type_enum = counter->data_type;
426 *raw_max = counter->raw_max;
427 }
428
429 /******************************************************************************/
430
431 /**
432 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
433 * pipeline statistics for the performance query object.
434 */
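/* Layout note (derived from the callers): the Begin snapshots are written at
 * offset 0 and the End snapshots at STATS_BO_END_OFFSET_BYTES, one uint64_t
 * per counter, so get_pipeline_stats_data() can later report
 * end[i] - start[i] for each counter.
 */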
435 static void
436 snapshot_statistics_registers(struct brw_context *brw,
437 struct brw_perf_query_object *obj,
438 uint32_t offset_in_bytes)
439 {
440 const struct brw_perf_query_info *query = obj->query;
441 const int n_counters = query->n_counters;
442
443 for (int i = 0; i < n_counters; i++) {
444 const struct brw_perf_query_counter *counter = &query->counters[i];
445
446 assert(counter->data_type == GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL);
447
448 brw_store_register_mem64(brw, obj->pipeline_stats.bo,
449 counter->pipeline_stat.reg,
450 offset_in_bytes + i * sizeof(uint64_t));
451 }
452 }
453
454 /**
455 * Add a query to the global list of "unaccumulated queries."
456 *
457 * Queries are tracked here until all the associated OA reports have
458 * been accumulated via accumulate_oa_reports() after the end
459 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
460 */
461 static void
462 add_to_unaccumulated_query_list(struct brw_context *brw,
463 struct brw_perf_query_object *obj)
464 {
465 if (brw->perfquery.unaccumulated_elements >=
466 brw->perfquery.unaccumulated_array_size)
467 {
468 brw->perfquery.unaccumulated_array_size *= 1.5;
469 brw->perfquery.unaccumulated =
470 reralloc(brw, brw->perfquery.unaccumulated,
471 struct brw_perf_query_object *,
472 brw->perfquery.unaccumulated_array_size);
473 }
474
475 brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
476 }
477
478 /**
479  * Remove a query from the global list of unaccumulated queries after
480  * successfully accumulating the OA reports associated with the
481 * query in accumulate_oa_reports() or when discarding unwanted query
482 * results.
483 */
484 static void
485 drop_from_unaccumulated_query_list(struct brw_context *brw,
486 struct brw_perf_query_object *obj)
487 {
488 for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
489 if (brw->perfquery.unaccumulated[i] == obj) {
490 int last_elt = --brw->perfquery.unaccumulated_elements;
491
492 if (i == last_elt)
493 brw->perfquery.unaccumulated[i] = NULL;
494 else {
495 brw->perfquery.unaccumulated[i] =
496 brw->perfquery.unaccumulated[last_elt];
497 }
498
499 break;
500 }
501 }
502
503 /* Drop our samples_head reference so that associated periodic
504 * sample data buffers can potentially be reaped if they aren't
505 * referenced by any other queries...
506 */
507
508 struct brw_oa_sample_buf *buf =
509 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
510
511 assert(buf->refcount > 0);
512 buf->refcount--;
513
514 obj->oa.samples_head = NULL;
515
516 reap_old_sample_buffers(brw);
517 }
518
519 /**
520 * Given pointers to starting and ending OA snapshots, add the deltas for each
521 * counter to the results.
522 */
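/* For reference: in both report formats handled below, dword 1 of a report
 * holds the raw 32bit OA timestamp and dword 2 holds the hw context ID that
 * accumulate_oa_reports() compares on Gen8+ to filter out other contexts;
 * the remaining dwords are accumulated according to the report format.
 */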
523 static void
524 add_deltas(struct brw_context *brw,
525 struct brw_perf_query_object *obj,
526 const uint32_t *start,
527 const uint32_t *end)
528 {
529 const struct brw_perf_query_info *query = obj->query;
530 uint64_t *accumulator = obj->oa.accumulator;
531 int idx = 0;
532 int i;
533
534 obj->oa.reports_accumulated++;
535
536 switch (query->oa_format) {
537 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
538 brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator + idx++); /* timestamp */
539 brw_perf_query_accumulate_uint32(start + 3, end + 3, accumulator + idx++); /* clock */
540
541 /* 32x 40bit A counters... */
542 for (i = 0; i < 32; i++)
543 brw_perf_query_accumulate_uint40(i, start, end, accumulator + idx++);
544
545 /* 4x 32bit A counters... */
546 for (i = 0; i < 4; i++)
547 brw_perf_query_accumulate_uint32(start + 36 + i, end + 36 + i,
548 accumulator + idx++);
549
550 /* 8x 32bit B counters + 8x 32bit C counters... */
551 for (i = 0; i < 16; i++)
552 brw_perf_query_accumulate_uint32(start + 48 + i, end + 48 + i,
553 accumulator + idx++);
554
555 break;
556 case I915_OA_FORMAT_A45_B8_C8:
557 brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */
558
559 for (i = 0; i < 61; i++)
560 brw_perf_query_accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);
561
562 break;
563 default:
564 unreachable("Can't accumulate OA counters in unknown format");
565 }
566 }
567
568 static bool
569 inc_n_oa_users(struct brw_context *brw)
570 {
571 if (brw->perfquery.n_oa_users == 0 &&
572 drmIoctl(brw->perfquery.oa_stream_fd,
573 I915_PERF_IOCTL_ENABLE, 0) < 0)
574 {
575 return false;
576 }
577 ++brw->perfquery.n_oa_users;
578
579 return true;
580 }
581
582 static void
583 dec_n_oa_users(struct brw_context *brw)
584 {
585 /* Disabling the i915 perf stream will effectively disable the OA
586 * counters. Note it's important to be sure there are no outstanding
587 * MI_RPC commands at this point since they could stall the CS
588 * indefinitely once OACONTROL is disabled.
589 */
590 --brw->perfquery.n_oa_users;
591 if (brw->perfquery.n_oa_users == 0 &&
592 drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
593 {
594 DBG("WARNING: Error disabling i915 perf stream: %m\n");
595 }
596 }
597
598 /* In general, if we see anything spurious while accumulating results we
599  * don't try to continue accumulating the current query hoping for the
600  * best; we scrap anything outstanding and then hope for the best with
601  * new queries.
602 */
603 static void
604 discard_all_queries(struct brw_context *brw)
605 {
606 while (brw->perfquery.unaccumulated_elements) {
607 struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];
608
609 obj->oa.results_accumulated = true;
610 drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);
611
612 dec_n_oa_users(brw);
613 }
614 }
615
616 enum OaReadStatus {
617 OA_READ_STATUS_ERROR,
618 OA_READ_STATUS_UNFINISHED,
619 OA_READ_STATUS_FINISHED,
620 };
621
622 static enum OaReadStatus
623 read_oa_samples_until(struct brw_context *brw,
624 uint32_t start_timestamp,
625 uint32_t end_timestamp)
626 {
627 struct exec_node *tail_node =
628 exec_list_get_tail(&brw->perfquery.sample_buffers);
629 struct brw_oa_sample_buf *tail_buf =
630 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
631 uint32_t last_timestamp = tail_buf->last_timestamp;
632
633 while (1) {
634 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
635 uint32_t offset;
636 int len;
637
638 while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
639 sizeof(buf->buf))) < 0 && errno == EINTR)
640 ;
641
642 if (len <= 0) {
643 exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);
644
645 if (len < 0) {
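            /* Note: the unsigned 32bit subtractions below compare the last
             * read timestamp and the end timestamp as offsets from the
             * start timestamp, so the check stays correct even if the raw
             * OA timestamp wraps between Begin and End.
             */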
646 if (errno == EAGAIN)
647 return ((last_timestamp - start_timestamp) >=
648 (end_timestamp - start_timestamp)) ?
649 OA_READ_STATUS_FINISHED :
650 OA_READ_STATUS_UNFINISHED;
651 else {
652 DBG("Error reading i915 perf samples: %m\n");
653 }
654 } else
655 DBG("Spurious EOF reading i915 perf samples\n");
656
657 return OA_READ_STATUS_ERROR;
658 }
659
660 buf->len = len;
661 exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);
662
663 /* Go through the reports and update the last timestamp. */
664 offset = 0;
665 while (offset < buf->len) {
666 const struct drm_i915_perf_record_header *header =
667 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
668 uint32_t *report = (uint32_t *) (header + 1);
669
670 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
671 last_timestamp = report[1];
672
673 offset += header->size;
674 }
675
676 buf->last_timestamp = last_timestamp;
677 }
678
679 unreachable("not reached");
680 return OA_READ_STATUS_ERROR;
681 }
682
683 /**
684  * Try to read all the reports until either the delimiting timestamp
685  * is reached or an error arises.
686 */
687 static bool
688 read_oa_samples_for_query(struct brw_context *brw,
689 struct brw_perf_query_object *obj)
690 {
691 uint32_t *start;
692 uint32_t *last;
693 uint32_t *end;
694
695 /* We need the MI_REPORT_PERF_COUNT to land before we can start
696     * accumulating. */
697 assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
698 !brw_bo_busy(obj->oa.bo));
699
700 /* Map the BO once here and let accumulate_oa_reports() unmap
701 * it. */
702 if (obj->oa.map == NULL)
703 obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);
704
705 start = last = obj->oa.map;
706 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
707
708 if (start[0] != obj->oa.begin_report_id) {
709 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
710 return true;
711 }
712 if (end[0] != (obj->oa.begin_report_id + 1)) {
713 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
714 return true;
715 }
716
717 /* Read the reports until the end timestamp. */
718 switch (read_oa_samples_until(brw, start[1], end[1])) {
719 case OA_READ_STATUS_ERROR:
720 /* Fallthrough and let accumulate_oa_reports() deal with the
721 * error. */
722 case OA_READ_STATUS_FINISHED:
723 return true;
724 case OA_READ_STATUS_UNFINISHED:
725 return false;
726 }
727
728 unreachable("invalid read status");
729 return false;
730 }
731
732 /**
733 * Accumulate raw OA counter values based on deltas between pairs of
734 * OA reports.
735 *
736 * Accumulation starts from the first report captured via
737 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
738 * last MI_RPC report requested by brw_end_perf_query(). Between these
739  * two reports there may also be some number of periodically sampled OA
740 * reports collected via the i915 perf interface - depending on the
741 * duration of the query.
742 *
743 * These periodic snapshots help to ensure we handle counter overflow
744 * correctly by being frequent enough to ensure we don't miss multiple
745 * overflows of a counter between snapshots. For Gen8+ the i915 perf
746 * snapshots provide the extra context-switch reports that let us
747 * subtract out the progress of counters associated with other
748 * contexts running on the system.
749 */
750 static void
751 accumulate_oa_reports(struct brw_context *brw,
752 struct brw_perf_query_object *obj)
753 {
754 const struct gen_device_info *devinfo = &brw->screen->devinfo;
755 struct gl_perf_query_object *o = &obj->base;
756 uint32_t *start;
757 uint32_t *last;
758 uint32_t *end;
759 struct exec_node *first_samples_node;
760 bool in_ctx = true;
761 int out_duration = 0;
762
763 assert(o->Ready);
764 assert(obj->oa.map != NULL);
765
766 start = last = obj->oa.map;
767 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
768
769 if (start[0] != obj->oa.begin_report_id) {
770 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
771 goto error;
772 }
773 if (end[0] != (obj->oa.begin_report_id + 1)) {
774 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
775 goto error;
776 }
777
778 obj->oa.hw_id = start[2];
779
780 /* See if we have any periodic reports to accumulate too... */
781
782 /* N.B. The oa.samples_head was set when the query began and
783 * pointed to the tail of the brw->perfquery.sample_buffers list at
784 * the time the query started. Since the buffer existed before the
785 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
786 * that no data in this particular node's buffer can possibly be
787 * associated with the query - so skip ahead one...
788 */
789 first_samples_node = obj->oa.samples_head->next;
790
791 foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
792 &brw->perfquery.sample_buffers,
793 first_samples_node)
794 {
795 int offset = 0;
796
797 while (offset < buf->len) {
798 const struct drm_i915_perf_record_header *header =
799 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
800
801 assert(header->size != 0);
802 assert(header->size <= buf->len);
803
804 offset += header->size;
805
806 switch (header->type) {
807 case DRM_I915_PERF_RECORD_SAMPLE: {
808 uint32_t *report = (uint32_t *)(header + 1);
809 bool add = true;
810
811 /* Ignore reports that come before the start marker.
812 * (Note: takes care to allow overflow of 32bit timestamps)
813 */
814 if (brw_timebase_scale(brw, report[1] - start[1]) > 5000000000)
815 continue;
816
817 /* Ignore reports that come after the end marker.
818 * (Note: takes care to allow overflow of 32bit timestamps)
819 */
820 if (brw_timebase_scale(brw, report[1] - end[1]) <= 5000000000)
821 goto end;
822
823 /* For Gen8+ since the counters continue while other
824 * contexts are running we need to discount any unrelated
825 * deltas. The hardware automatically generates a report
826 * on context switch which gives us a new reference point
827              * to continue adding deltas from.
828              *
829              * For Haswell we can rely on the HW to stop the progress
830              * of OA counters while any other context is active.
831 */
832 if (devinfo->gen >= 8) {
833 if (in_ctx && report[2] != obj->oa.hw_id) {
834 DBG("i915 perf: Switch AWAY (observed by ID change)\n");
835 in_ctx = false;
836 out_duration = 0;
837 } else if (in_ctx == false && report[2] == obj->oa.hw_id) {
838 DBG("i915 perf: Switch TO\n");
839 in_ctx = true;
840
841 /* From experimentation in IGT, we found that the OA unit
842 * might label some report as "idle" (using an invalid
843 * context ID), right after a report for a given context.
844 * Deltas generated by those reports actually belong to the
845 * previous context, even though they're not labelled as
846 * such.
847 *
848 * We didn't *really* Switch AWAY in the case that we e.g.
849 * saw a single periodic report while idle...
850 */
851 if (out_duration >= 1)
852 add = false;
853 } else if (in_ctx) {
854 assert(report[2] == obj->oa.hw_id);
855 DBG("i915 perf: Continuation IN\n");
856 } else {
857 assert(report[2] != obj->oa.hw_id);
858 DBG("i915 perf: Continuation OUT\n");
859 add = false;
860 out_duration++;
861 }
862 }
863
864 if (add)
865 add_deltas(brw, obj, last, report);
866
867 last = report;
868
869 break;
870 }
871
872 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
873 DBG("i915 perf: OA error: all reports lost\n");
874 goto error;
875 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
876 DBG("i915 perf: OA report lost\n");
877 break;
878 }
879 }
880 }
881
882 end:
883
884 add_deltas(brw, obj, last, end);
885
886 DBG("Marking %d accumulated - results gathered\n", o->Id);
887
888 obj->oa.results_accumulated = true;
889 drop_from_unaccumulated_query_list(brw, obj);
890 dec_n_oa_users(brw);
891
892 return;
893
894 error:
895
896 discard_all_queries(brw);
897 }
898
899 /******************************************************************************/
900
901 static bool
902 open_i915_perf_oa_stream(struct brw_context *brw,
903 int metrics_set_id,
904 int report_format,
905 int period_exponent,
906 int drm_fd,
907 uint32_t ctx_id)
908 {
909 uint64_t properties[] = {
910 /* Single context sampling */
911 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
912
913 /* Include OA reports in samples */
914 DRM_I915_PERF_PROP_SAMPLE_OA, true,
915
916 /* OA unit configuration */
917 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
918 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
919 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
920 };
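   /* Note: properties[] is a flat list of (key, value) pairs, which is why
    * num_properties below is ARRAY_SIZE(properties) / 2.
    */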
921 struct drm_i915_perf_open_param param = {
922 .flags = I915_PERF_FLAG_FD_CLOEXEC |
923 I915_PERF_FLAG_FD_NONBLOCK |
924 I915_PERF_FLAG_DISABLED,
925 .num_properties = ARRAY_SIZE(properties) / 2,
926 .properties_ptr = (uintptr_t) properties,
927 };
928 int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
929 if (fd == -1) {
930 DBG("Error opening i915 perf OA stream: %m\n");
931 return false;
932 }
933
934 brw->perfquery.oa_stream_fd = fd;
935
936 brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
937 brw->perfquery.current_oa_format = report_format;
938
939 return true;
940 }
941
942 static void
943 close_perf(struct brw_context *brw)
944 {
945 if (brw->perfquery.oa_stream_fd != -1) {
946 close(brw->perfquery.oa_stream_fd);
947 brw->perfquery.oa_stream_fd = -1;
948 }
949 }
950
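/* Snapshot the current GT frequency register (GEN7_RPSTAT1 or GEN9_RPSTAT0)
 * into @bo at @bo_offset; the raw value is decoded into Hz later by
 * read_gt_frequency() once the query results are gathered.
 */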
951 static void
952 capture_frequency_stat_register(struct brw_context *brw,
953 struct brw_bo *bo,
954 uint32_t bo_offset)
955 {
956 const struct gen_device_info *devinfo = &brw->screen->devinfo;
957
958 if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
959 !devinfo->is_baytrail && !devinfo->is_cherryview) {
960 brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
961 } else if (devinfo->gen >= 9) {
962 brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
963 }
964 }
965
966 /**
967 * Driver hook for glBeginPerfQueryINTEL().
968 */
969 static bool
970 brw_begin_perf_query(struct gl_context *ctx,
971 struct gl_perf_query_object *o)
972 {
973 struct brw_context *brw = brw_context(ctx);
974 struct brw_perf_query_object *obj = brw_perf_query(o);
975 const struct brw_perf_query_info *query = obj->query;
976
977 /* We can assume the frontend hides mistaken attempts to Begin a
978 * query object multiple times before its End. Similarly if an
979 * application reuses a query object before results have arrived
980 * the frontend will wait for prior results so we don't need
981 * to support abandoning in-flight results.
982 */
983 assert(!o->Active);
984 assert(!o->Used || o->Ready); /* no in-flight query to worry about */
985
986 DBG("Begin(%d)\n", o->Id);
987
988 /* XXX: We have to consider that the command parser unit that parses batch
989 * buffer commands and is used to capture begin/end counter snapshots isn't
990 * implicitly synchronized with what's currently running across other GPU
991 * units (such as the EUs running shaders) that the performance counters are
992 * associated with.
993 *
994 * The intention of performance queries is to measure the work associated
995 * with commands between the begin/end delimiters and so for that to be the
996 * case we need to explicitly synchronize the parsing of commands to capture
997 * Begin/End counter snapshots with what's running across other parts of the
998 * GPU.
999 *
1000 * When the command parser reaches a Begin marker it effectively needs to
1001 * drain everything currently running on the GPU until the hardware is idle
1002 * before capturing the first snapshot of counters - otherwise the results
1003 * would also be measuring the effects of earlier commands.
1004 *
1005 * When the command parser reaches an End marker it needs to stall until
1006 * everything currently running on the GPU has finished before capturing the
1007 * end snapshot - otherwise the results won't be a complete representation
1008 * of the work.
1009 *
1010 * Theoretically there could be opportunities to minimize how much of the
1011 * GPU pipeline is drained, or that we stall for, when we know what specific
1012 * units the performance counters being queried relate to but we don't
1013 * currently attempt to be clever here.
1014 *
1015 * Note: with our current simple approach here then for back-to-back queries
1016 * we will redundantly emit duplicate commands to synchronize the command
1017 * streamer with the rest of the GPU pipeline, but we assume that in HW the
1018 * second synchronization is effectively a NOOP.
1019 *
1020 * N.B. The final results are based on deltas of counters between (inside)
1021 * Begin/End markers so even though the total wall clock time of the
1022 * workload is stretched by larger pipeline bubbles the bubbles themselves
1023 * are generally invisible to the query results. Whether that's a good or a
1024 * bad thing depends on the use case. For a lower real-time impact while
1025 * capturing metrics then periodic sampling may be a better choice than
1026 * INTEL_performance_query.
1027 *
1028 *
1029 * This is our Begin synchronization point to drain current work on the
1030 * GPU before we capture our first counter snapshot...
1031 */
1032 brw_emit_mi_flush(brw);
1033
1034 switch (query->kind) {
1035 case OA_COUNTERS:
1036
1037 /* Opening an i915 perf stream implies exclusive access to the OA unit
1038 * which will generate counter reports for a specific counter set with a
1039 * specific layout/format so we can't begin any OA based queries that
1040 * require a different counter set or format unless we get an opportunity
1041 * to close the stream and open a new one...
1042 */
1043 if (brw->perfquery.oa_stream_fd != -1 &&
1044 brw->perfquery.current_oa_metrics_set_id !=
1045 query->oa_metrics_set_id) {
1046
1047 if (brw->perfquery.n_oa_users != 0)
1048 return false;
1049 else
1050 close_perf(brw);
1051 }
1052
1053 /* If the OA counters aren't already on, enable them. */
1054 if (brw->perfquery.oa_stream_fd == -1) {
1055 __DRIscreen *screen = brw->screen->driScrnPriv;
1056 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1057
1058 /* The period_exponent gives a sampling period as follows:
1059 * sample_period = timestamp_period * 2^(period_exponent + 1)
1060 *
1061          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1062 * ~83ns (GEN8/9).
1063 *
1064          * The counter overflow period is derived from the EuActive counter,
1065          * which increments by the number of clock
1066 * cycles multiplied by the number of EUs. It can be calculated as:
1067 *
1068 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1069 *
1070 * (E.g. 40 EUs @ 1GHz = ~53ms)
1071 *
1072          * We select a sampling period shorter than that overflow period to
1073          * ensure we cannot see more than 1 counter overflow; otherwise we
1074          * could lose information.
1075 */
1076
1077 int a_counter_in_bits = 32;
1078 if (devinfo->gen >= 8)
1079 a_counter_in_bits = 40;
1080
1081 uint64_t overflow_period = pow(2, a_counter_in_bits) /
1082 (brw->perfquery.sys_vars.n_eus *
1083 /* drop 1GHz freq to have units in nanoseconds */
1084 2);
1085
1086 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1087 overflow_period, overflow_period / 1000000ul, brw->perfquery.sys_vars.n_eus);
1088
1089 int period_exponent = 0;
1090 uint64_t prev_sample_period, next_sample_period;
1091 for (int e = 0; e < 30; e++) {
1092 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1093 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1094
1095 /* Take the previous sampling period, lower than the overflow
1096 * period.
1097 */
1098 if (prev_sample_period < overflow_period &&
1099 next_sample_period > overflow_period)
1100 period_exponent = e + 1;
1101 }
1102
1103 if (period_exponent == 0) {
1104             DBG("WARNING: unable to find a sampling exponent\n");
1105 return false;
1106 }
1107
1108 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1109 prev_sample_period / 1000000ul);
1110
1111 if (!open_i915_perf_oa_stream(brw,
1112 query->oa_metrics_set_id,
1113 query->oa_format,
1114 period_exponent,
1115 screen->fd, /* drm fd */
1116 brw->hw_ctx))
1117 return false;
1118 } else {
1119 assert(brw->perfquery.current_oa_metrics_set_id ==
1120 query->oa_metrics_set_id &&
1121 brw->perfquery.current_oa_format ==
1122 query->oa_format);
1123 }
1124
1125 if (!inc_n_oa_users(brw)) {
1126 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1127 return false;
1128 }
1129
1130 if (obj->oa.bo) {
1131 brw_bo_unreference(obj->oa.bo);
1132 obj->oa.bo = NULL;
1133 }
1134
1135 obj->oa.bo =
1136 brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo", MI_RPC_BO_SIZE);
1137 #ifdef DEBUG
1138 /* Pre-filling the BO helps debug whether writes landed. */
1139 void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
1140 memset(map, 0x80, MI_RPC_BO_SIZE);
1141 brw_bo_unmap(obj->oa.bo);
1142 #endif
1143
1144 obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
1145 brw->perfquery.next_query_start_report_id += 2;
1146
1147 /* We flush the batchbuffer here to minimize the chances that MI_RPC
1148 * delimiting commands end up in different batchbuffers. If that's the
1149 * case, the measurement will include the time it takes for the kernel
1150 * scheduler to load a new request into the hardware. This is manifested in
1151 * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
1152 */
1153 intel_batchbuffer_flush(brw);
1154
1155 /* Take a starting OA counter snapshot. */
1156 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
1157 obj->oa.begin_report_id);
1158 capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_START_OFFSET_BYTES);
1159
1160 ++brw->perfquery.n_active_oa_queries;
1161
1162 /* No already-buffered samples can possibly be associated with this query
1163 * so create a marker within the list of sample buffers enabling us to
1164 * easily ignore earlier samples when processing this query after
1165 * completion.
1166 */
1167 assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
1168 obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
1169
1170 struct brw_oa_sample_buf *buf =
1171 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
1172
1173 /* This reference will ensure that future/following sample
1174 * buffers (that may relate to this query) can't be freed until
1175 * this drops to zero.
1176 */
1177 buf->refcount++;
1178
1179 obj->oa.hw_id = 0xffffffff;
1180 memset(obj->oa.accumulator, 0, sizeof(obj->oa.accumulator));
1181 obj->oa.results_accumulated = false;
1182
1183 add_to_unaccumulated_query_list(brw, obj);
1184 break;
1185
1186 case PIPELINE_STATS:
1187 if (obj->pipeline_stats.bo) {
1188 brw_bo_unreference(obj->pipeline_stats.bo);
1189 obj->pipeline_stats.bo = NULL;
1190 }
1191
1192 obj->pipeline_stats.bo =
1193 brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
1194 STATS_BO_SIZE);
1195
1196 /* Take starting snapshots. */
1197 snapshot_statistics_registers(brw, obj, 0);
1198
1199 ++brw->perfquery.n_active_pipeline_stats_queries;
1200 break;
1201
1202 default:
1203 unreachable("Unknown query type");
1204 break;
1205 }
1206
1207 if (INTEL_DEBUG & DEBUG_PERFMON)
1208 dump_perf_queries(brw);
1209
1210 return true;
1211 }
1212
1213 /**
1214 * Driver hook for glEndPerfQueryINTEL().
1215 */
1216 static void
1217 brw_end_perf_query(struct gl_context *ctx,
1218 struct gl_perf_query_object *o)
1219 {
1220 struct brw_context *brw = brw_context(ctx);
1221 struct brw_perf_query_object *obj = brw_perf_query(o);
1222
1223 DBG("End(%d)\n", o->Id);
1224
1225 /* Ensure that the work associated with the queried commands will have
1226 * finished before taking our query end counter readings.
1227 *
1228 * For more details see comment in brw_begin_perf_query for
1229 * corresponding flush.
1230 */
1231 brw_emit_mi_flush(brw);
1232
1233 switch (obj->query->kind) {
1234 case OA_COUNTERS:
1235
1236 /* NB: It's possible that the query will have already been marked
1237 * as 'accumulated' if an error was seen while reading samples
1238 * from perf. In this case we mustn't try and emit a closing
1239 * MI_RPC command in case the OA unit has already been disabled
1240 */
1241 if (!obj->oa.results_accumulated) {
1242 /* Take an ending OA counter snapshot. */
1243 capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_END_OFFSET_BYTES);
1244 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
1245 MI_RPC_BO_END_OFFSET_BYTES,
1246 obj->oa.begin_report_id + 1);
1247 }
1248
1249 --brw->perfquery.n_active_oa_queries;
1250
1251 /* NB: even though the query has now ended, it can't be accumulated
1252 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1253 * to query->oa.bo
1254 */
1255 break;
1256
1257 case PIPELINE_STATS:
1258 snapshot_statistics_registers(brw, obj,
1259 STATS_BO_END_OFFSET_BYTES);
1260 --brw->perfquery.n_active_pipeline_stats_queries;
1261 break;
1262
1263 default:
1264 unreachable("Unknown query type");
1265 break;
1266 }
1267 }
1268
1269 static void
1270 brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
1271 {
1272 struct brw_context *brw = brw_context(ctx);
1273 struct brw_perf_query_object *obj = brw_perf_query(o);
1274 struct brw_bo *bo = NULL;
1275
1276 assert(!o->Ready);
1277
1278 switch (obj->query->kind) {
1279 case OA_COUNTERS:
1280 bo = obj->oa.bo;
1281 break;
1282
1283 case PIPELINE_STATS:
1284 bo = obj->pipeline_stats.bo;
1285 break;
1286
1287 default:
1288 unreachable("Unknown query type");
1289 break;
1290 }
1291
1292 if (bo == NULL)
1293 return;
1294
1295 /* If the current batch references our results bo then we need to
1296 * flush first...
1297 */
1298 if (brw_batch_references(&brw->batch, bo))
1299 intel_batchbuffer_flush(brw);
1300
1301 brw_bo_wait_rendering(bo);
1302
1303 /* Due to a race condition between the OA unit signaling report
1304 * availability and the report actually being written into memory,
1305 * we need to wait for all the reports to come in before we can
1306 * read them.
1307 */
1308 if (obj->query->kind == OA_COUNTERS) {
1309 while (!read_oa_samples_for_query(brw, obj))
1310 ;
1311 }
1312 }
1313
1314 static bool
1315 brw_is_perf_query_ready(struct gl_context *ctx,
1316 struct gl_perf_query_object *o)
1317 {
1318 struct brw_context *brw = brw_context(ctx);
1319 struct brw_perf_query_object *obj = brw_perf_query(o);
1320
1321 if (o->Ready)
1322 return true;
1323
1324 switch (obj->query->kind) {
1325 case OA_COUNTERS:
1326 return (obj->oa.results_accumulated ||
1327 (obj->oa.bo &&
1328 !brw_batch_references(&brw->batch, obj->oa.bo) &&
1329 !brw_bo_busy(obj->oa.bo) &&
1330 read_oa_samples_for_query(brw, obj)));
1331 case PIPELINE_STATS:
1332 return (obj->pipeline_stats.bo &&
1333 !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
1334 !brw_bo_busy(obj->pipeline_stats.bo));
1335
1336 default:
1337 unreachable("Unknown query type");
1338 break;
1339 }
1340
1341 return false;
1342 }
1343
1344 static void
1345 read_gt_frequency(struct brw_context *brw,
1346 struct brw_perf_query_object *obj)
1347 {
1348 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1349 uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
1350 end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
1351
1352 switch (devinfo->gen) {
1353 case 7:
1354 case 8:
1355 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
1356 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
1357 break;
1358 case 9:
1359 case 10:
1360 case 11:
1361 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
1362 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
1363 break;
1364 default:
1365 unreachable("unexpected gen");
1366 }
1367
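   /* Worked example (illustrative): on Gen9 a raw CURR_GT_FREQ field value
    * of 36 decodes to 36 * 50 / 3 = 600, i.e. 600MHz once scaled to Hz
    * below.
    */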
1368 /* Put the numbers into Hz. */
1369 obj->oa.gt_frequency[0] *= 1000000ULL;
1370 obj->oa.gt_frequency[1] *= 1000000ULL;
1371 }
1372
1373 static int
1374 get_oa_counter_data(struct brw_context *brw,
1375 struct brw_perf_query_object *obj,
1376 size_t data_size,
1377 uint8_t *data)
1378 {
1379 const struct brw_perf_query_info *query = obj->query;
1380 int n_counters = query->n_counters;
1381 int written = 0;
1382
1383 if (!obj->oa.results_accumulated) {
1384 read_gt_frequency(brw, obj);
1385 accumulate_oa_reports(brw, obj);
1386 assert(obj->oa.results_accumulated);
1387
1388 brw_bo_unmap(obj->oa.bo);
1389 obj->oa.map = NULL;
1390 }
1391
1392 for (int i = 0; i < n_counters; i++) {
1393 const struct brw_perf_query_counter *counter = &query->counters[i];
1394 uint64_t *out_uint64;
1395 float *out_float;
1396
1397 if (counter->size) {
1398 switch (counter->data_type) {
1399 case GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL:
1400 out_uint64 = (uint64_t *)(data + counter->offset);
1401 *out_uint64 = counter->oa_counter_read_uint64(brw, query,
1402 obj->oa.accumulator);
1403 break;
1404 case GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL:
1405 out_float = (float *)(data + counter->offset);
1406 *out_float = counter->oa_counter_read_float(brw, query,
1407 obj->oa.accumulator);
1408 break;
1409 default:
1410 /* So far we aren't using uint32, double or bool32... */
1411 unreachable("unexpected counter data type");
1412 }
1413 written = counter->offset + counter->size;
1414 }
1415 }
1416
1417 return written;
1418 }
1419
1420 static int
1421 get_pipeline_stats_data(struct brw_context *brw,
1422 struct brw_perf_query_object *obj,
1423 size_t data_size,
1424 uint8_t *data)
1425
1426 {
1427 const struct brw_perf_query_info *query = obj->query;
1428 int n_counters = obj->query->n_counters;
1429 uint8_t *p = data;
1430
1431 uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
1432 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
1433
1434 for (int i = 0; i < n_counters; i++) {
1435 const struct brw_perf_query_counter *counter = &query->counters[i];
1436 uint64_t value = end[i] - start[i];
1437
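      /* Some counters are registered with a numerator/denominator pair used
       * to rescale the raw HW delta; e.g. PS_INVOCATION_COUNT is added with
       * 1/4 on Haswell/Gen8 in init_pipeline_statistic_query_registers().
       */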
1438 if (counter->pipeline_stat.numerator !=
1439 counter->pipeline_stat.denominator) {
1440 value *= counter->pipeline_stat.numerator;
1441 value /= counter->pipeline_stat.denominator;
1442 }
1443
1444 *((uint64_t *)p) = value;
1445 p += 8;
1446 }
1447
1448 brw_bo_unmap(obj->pipeline_stats.bo);
1449
1450 return p - data;
1451 }
1452
1453 /**
1454 * Driver hook for glGetPerfQueryDataINTEL().
1455 */
1456 static void
1457 brw_get_perf_query_data(struct gl_context *ctx,
1458 struct gl_perf_query_object *o,
1459 GLsizei data_size,
1460 GLuint *data,
1461 GLuint *bytes_written)
1462 {
1463 struct brw_context *brw = brw_context(ctx);
1464 struct brw_perf_query_object *obj = brw_perf_query(o);
1465 int written = 0;
1466
1467 assert(brw_is_perf_query_ready(ctx, o));
1468
1469 DBG("GetData(%d)\n", o->Id);
1470
1471 if (INTEL_DEBUG & DEBUG_PERFMON)
1472 dump_perf_queries(brw);
1473
1474 /* We expect that the frontend only calls this hook when it knows
1475 * that results are available.
1476 */
1477 assert(o->Ready);
1478
1479 switch (obj->query->kind) {
1480 case OA_COUNTERS:
1481 written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
1482 break;
1483
1484 case PIPELINE_STATS:
1485 written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
1486 break;
1487
1488 default:
1489 unreachable("Unknown query type");
1490 break;
1491 }
1492
1493 if (bytes_written)
1494 *bytes_written = written;
1495 }
1496
1497 static struct gl_perf_query_object *
1498 brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
1499 {
1500 struct brw_context *brw = brw_context(ctx);
1501 const struct brw_perf_query_info *query =
1502 &brw->perfquery.queries[query_index];
1503 struct brw_perf_query_object *obj =
1504 calloc(1, sizeof(struct brw_perf_query_object));
1505
1506 if (!obj)
1507 return NULL;
1508
1509 obj->query = query;
1510
1511 brw->perfquery.n_query_instances++;
1512
1513 return &obj->base;
1514 }
1515
1516 /**
1517 * Driver hook for glDeletePerfQueryINTEL().
1518 */
1519 static void
1520 brw_delete_perf_query(struct gl_context *ctx,
1521 struct gl_perf_query_object *o)
1522 {
1523 struct brw_context *brw = brw_context(ctx);
1524 struct brw_perf_query_object *obj = brw_perf_query(o);
1525
1526 /* We can assume that the frontend waits for a query to complete
1527 * before ever calling into here, so we don't have to worry about
1528 * deleting an in-flight query object.
1529 */
1530 assert(!o->Active);
1531 assert(!o->Used || o->Ready);
1532
1533 DBG("Delete(%d)\n", o->Id);
1534
1535 switch (obj->query->kind) {
1536 case OA_COUNTERS:
1537 if (obj->oa.bo) {
1538 if (!obj->oa.results_accumulated) {
1539 drop_from_unaccumulated_query_list(brw, obj);
1540 dec_n_oa_users(brw);
1541 }
1542
1543 brw_bo_unreference(obj->oa.bo);
1544 obj->oa.bo = NULL;
1545 }
1546
1547 obj->oa.results_accumulated = false;
1548 break;
1549
1550 case PIPELINE_STATS:
1551 if (obj->pipeline_stats.bo) {
1552 brw_bo_unreference(obj->pipeline_stats.bo);
1553 obj->pipeline_stats.bo = NULL;
1554 }
1555 break;
1556
1557 default:
1558 unreachable("Unknown query type");
1559 break;
1560 }
1561
1562 free(obj);
1563
1564    /* The last query instance going away is a good indication that the
1565     * INTEL_performance_query extension is no longer in use, so free our
1566     * cache of sample buffers and close any current i915-perf stream.
1567     */
1568 if (--brw->perfquery.n_query_instances == 0) {
1569 free_sample_bufs(brw);
1570 close_perf(brw);
1571 }
1572 }
1573
1574 /******************************************************************************/
1575
1576 static void
1577 init_pipeline_statistic_query_registers(struct brw_context *brw)
1578 {
1579 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1580 struct brw_perf_query_info *query = brw_perf_query_append_query_info(brw);
1581
1582 query->kind = PIPELINE_STATS;
1583 query->name = "Pipeline Statistics Registers";
1584 query->n_counters = 0;
1585 query->counters =
1586 rzalloc_array(brw, struct brw_perf_query_counter, MAX_STAT_COUNTERS);
1587
1588 brw_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
1589 "N vertices submitted");
1590 brw_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1591 "N primitives submitted");
1592 brw_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1593 "N vertex shader invocations");
1594
1595 if (devinfo->gen == 6) {
1596 brw_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
1597 "SO_PRIM_STORAGE_NEEDED",
1598 "N geometry shader stream-out primitives (total)");
1599 brw_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
1600 "SO_NUM_PRIMS_WRITTEN",
1601 "N geometry shader stream-out primitives (written)");
1602 } else {
1603 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
1604 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
1605 "N stream-out (stream 0) primitives (total)");
1606 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
1607 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
1608 "N stream-out (stream 1) primitives (total)");
1609 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
1610 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
1611 "N stream-out (stream 2) primitives (total)");
1612 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
1613 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
1614 "N stream-out (stream 3) primitives (total)");
1615 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
1616 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
1617 "N stream-out (stream 0) primitives (written)");
1618 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
1619 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
1620 "N stream-out (stream 1) primitives (written)");
1621 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
1622 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
1623 "N stream-out (stream 2) primitives (written)");
1624 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
1625 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
1626 "N stream-out (stream 3) primitives (written)");
1627 }
1628
1629 brw_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1630 "N TCS shader invocations");
1631 brw_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1632 "N TES shader invocations");
1633
1634 brw_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1635 "N geometry shader invocations");
1636 brw_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1637 "N geometry shader primitives emitted");
1638
1639 brw_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1640 "N primitives entering clipping");
1641 brw_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1642 "N primitives leaving clipping");
1643
1644 if (devinfo->is_haswell || devinfo->gen == 8)
1645 brw_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1646 "N fragment shader invocations",
1647 "N fragment shader invocations");
1648 else
1649 brw_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1650 "N fragment shader invocations");
1651
1652 brw_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT, "N z-pass fragments");
1653
1654 if (devinfo->gen >= 7)
1655 brw_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1656 "N compute shader invocations");
1657
1658 query->data_size = sizeof(uint64_t) * query->n_counters;
1659 }
1660
1661 static void
1662 register_oa_config(struct brw_context *brw,
1663 const struct brw_perf_query_info *query,
1664 uint64_t config_id)
1665 {
1666    struct brw_perf_query_info *registered_query =
1667       brw_perf_query_append_query_info(brw);
1668
1669    *registered_query = *query;
1670    registered_query->oa_metrics_set_id = config_id;
1671    DBG("metric set registered: id = %" PRIu64", guid = %s\n",
1672        registered_query->oa_metrics_set_id, query->guid);
1673 }
1674
1675 static void
1676 enumerate_sysfs_metrics(struct brw_context *brw)
1677 {
1678 char buf[256];
1679 DIR *metricsdir = NULL;
1680 struct dirent *metric_entry;
1681 int len;
1682
1683 len = snprintf(buf, sizeof(buf), "%s/metrics", brw->perfquery.sysfs_dev_dir);
1684 if (len < 0 || len >= sizeof(buf)) {
1685 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
1686 return;
1687 }
1688
1689 metricsdir = opendir(buf);
1690 if (!metricsdir) {
1691 DBG("Failed to open %s: %m\n", buf);
1692 return;
1693 }
1694
1695 while ((metric_entry = readdir(metricsdir))) {
1696 struct hash_entry *entry;
1697
1698 if ((metric_entry->d_type != DT_DIR &&
1699 metric_entry->d_type != DT_LNK) ||
1700 metric_entry->d_name[0] == '.')
1701 continue;
1702
1703 DBG("metric set: %s\n", metric_entry->d_name);
1704 entry = _mesa_hash_table_search(brw->perfquery.oa_metrics_table,
1705 metric_entry->d_name);
1706 if (entry) {
1707 uint64_t id;
1708
1709 len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
1710 brw->perfquery.sysfs_dev_dir, metric_entry->d_name);
1711 if (len < 0 || len >= sizeof(buf)) {
1712 DBG("Failed to concatenate path to sysfs metric id file\n");
1713 continue;
1714 }
1715
1716 if (!read_file_uint64(buf, &id)) {
1717             DBG("Failed to read metric set id from %s: %m\n", buf);
1718 continue;
1719 }
1720
1721 register_oa_config(brw, (const struct brw_perf_query_info *)entry->data, id);
1722 } else
1723 DBG("metric set not known by mesa (skipping)\n");
1724 }
1725
1726 closedir(metricsdir);
1727 }
1728
1729 static bool
1730 kernel_has_dynamic_config_support(struct brw_context *brw)
1731 {
1732 __DRIscreen *screen = brw->screen->driScrnPriv;
1733 struct hash_entry *entry;
1734
1735 hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
1736 struct brw_perf_query_info *query = entry->data;
1737 char config_path[280];
1738 uint64_t config_id;
1739
1740 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
1741 brw->perfquery.sysfs_dev_dir, query->guid);
1742
1743 /* Look for the test config, which we know we can't replace. */
1744 if (read_file_uint64(config_path, &config_id) && config_id == 1) {
1745 return drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
1746 &config_id) < 0 && errno == ENOENT;
1747 }
1748 }
1749
1750 return false;
1751 }
1752
1753 static void
1754 init_oa_configs(struct brw_context *brw)
1755 {
1756 __DRIscreen *screen = brw->screen->driScrnPriv;
1757 struct hash_entry *entry;
1758
1759 hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
1760 const struct brw_perf_query_info *query = entry->data;
1761 struct drm_i915_perf_oa_config config;
1762 char config_path[280];
1763 uint64_t config_id;
1764 int ret;
1765
1766 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
1767 brw->perfquery.sysfs_dev_dir, query->guid);
1768
1769 /* Don't recreate already loaded configs. */
1770 if (read_file_uint64(config_path, &config_id)) {
1771 DBG("metric set: %s (already loaded)\n", query->guid);
1772 register_oa_config(brw, query, config_id);
1773 continue;
1774 }
1775
1776 memset(&config, 0, sizeof(config));
1777
1778 memcpy(config.uuid, query->guid, sizeof(config.uuid));
1779
1780 config.n_mux_regs = query->n_mux_regs;
1781 config.mux_regs_ptr = (uintptr_t) query->mux_regs;
1782
1783 config.n_boolean_regs = query->n_b_counter_regs;
1784 config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;
1785
1786 config.n_flex_regs = query->n_flex_regs;
1787 config.flex_regs_ptr = (uintptr_t) query->flex_regs;
1788
1789 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
1790 if (ret < 0) {
1791 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
1792 query->name, query->guid, strerror(errno));
1793 continue;
1794 }
1795
1796 register_oa_config(brw, query, ret);
1797 DBG("metric set: %s (added)\n", query->guid);
1798 }
1799 }
1800
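/**
 * Query slice/subslice/EU topology through the i915 query uAPI
 * (DRM_I915_QUERY_TOPOLOGY_INFO) and fold it into gen_device_info.
 *
 * The ioctl is issued twice: the first call only reports the required
 * buffer size in item.length, the second call fills the allocated buffer.
 */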
1801 static bool
1802 query_topology(struct brw_context *brw)
1803 {
1804 __DRIscreen *screen = brw->screen->driScrnPriv;
1805 struct drm_i915_query_item item = {
1806 .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
1807 };
1808 struct drm_i915_query query = {
1809 .num_items = 1,
1810 .items_ptr = (uintptr_t) &item,
1811 };
1812
1813 if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query))
1814 return false;
1815
   struct drm_i915_query_topology_info *topo_info =
      (struct drm_i915_query_topology_info *) calloc(1, item.length);
   if (!topo_info)
      return false;
   item.data_ptr = (uintptr_t) topo_info;

   if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query) ||
       item.length <= 0) {
      free(topo_info);
      return false;
   }
1823
1824 gen_device_info_update_from_topology(&brw->screen->devinfo,
1825 topo_info);
1826
1827 free(topo_info);
1828
1829 return true;
1830 }
1831
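/**
 * Topology fallback for kernels without the i915 query uAPI: fetch the
 * slice and subslice masks via I915_PARAM_SLICE_MASK and
 * I915_PARAM_SUBSLICE_MASK and let gen_device_info derive the rest from
 * them together with the total EU count.
 */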
1832 static bool
1833 getparam_topology(struct brw_context *brw)
1834 {
1835 __DRIscreen *screen = brw->screen->driScrnPriv;
1836 drm_i915_getparam_t gp;
1837 int ret;
1838
1839 int slice_mask = 0;
1840 gp.param = I915_PARAM_SLICE_MASK;
1841 gp.value = &slice_mask;
1842 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1843 if (ret)
1844 return false;
1845
1846 int subslice_mask = 0;
1847 gp.param = I915_PARAM_SUBSLICE_MASK;
1848 gp.value = &subslice_mask;
1849 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1850 if (ret)
1851 return false;
1852
1853 gen_device_info_update_from_masks(&brw->screen->devinfo,
1854 slice_mask,
1855 subslice_mask,
1856 brw->screen->eu_total);
1857
1858 return true;
1859 }
1860
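/**
 * Derive the topology builtins referenced by the OA metric equations
 * (slice/subslice masks, EU, subslice and EU-thread counts) from the
 * topology recorded in gen_device_info.
 */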
1861 static void
1862 compute_topology_builtins(struct brw_context *brw)
1863 {
1864 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1865
1866 brw->perfquery.sys_vars.slice_mask = devinfo->slice_masks;
1867 brw->perfquery.sys_vars.n_eu_slices = devinfo->num_slices;
1868
   for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
      brw->perfquery.sys_vars.n_eu_sub_slices +=
         _mesa_bitcount(devinfo->subslice_masks[i]);
   }
1873
1874 for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
1875 brw->perfquery.sys_vars.n_eus += _mesa_bitcount(devinfo->eu_masks[i]);
1876
1877 brw->perfquery.sys_vars.eu_threads_count =
1878 brw->perfquery.sys_vars.n_eus * devinfo->num_thread_per_eu;
1879
   /* At the moment the subslice mask builtin packs the subslices of each
    * slice into groups of 3 bits, i.e. slice s / subslice ss maps to bit
    * (s * 3 + ss).
    *
    * Ideally the metric equations would be updated to use a slice/subslice
    * query function/operator instead.
    */
1886 brw->perfquery.sys_vars.subslice_mask = 0;
1887 for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
1888 for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
1889 if (gen_device_info_subslice_available(devinfo, s, ss))
1890 brw->perfquery.sys_vars.subslice_mask |= 1UL << (s * 3 + ss);
1891 }
1892 }
1893 }
1894
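/**
 * Initialize the system variables used to normalize OA counters: GT
 * min/max frequencies read from sysfs, the timestamp frequency, the device
 * revision and the topology builtins.
 *
 * Topology comes from the i915 query uAPI when available, falling back to
 * getparams on gen8+, and to the static gen_device_info values on Haswell.
 */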
1895 static bool
1896 init_oa_sys_vars(struct brw_context *brw)
1897 {
1898 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1899 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
1900 __DRIscreen *screen = brw->screen->driScrnPriv;
1901
1902 if (!read_sysfs_drm_device_file_uint64(brw, "gt_min_freq_mhz", &min_freq_mhz))
1903 return false;
1904
1905 if (!read_sysfs_drm_device_file_uint64(brw, "gt_max_freq_mhz", &max_freq_mhz))
1906 return false;
1907
1908 if (!query_topology(brw)) {
1909 /* We need the i915 query uAPI on CNL+ (kernel 4.17+). */
1910 if (devinfo->gen >= 10)
1911 return false;
1912
1913 if (!getparam_topology(brw)) {
1914 /* We need the SLICE_MASK/SUBSLICE_MASK on gen8+ (kernel 4.13+). */
1915 if (devinfo->gen >= 8)
1916 return false;
1917
1918 /* On Haswell, the values are already computed for us in
1919 * gen_device_info.
1920 */
1921 }
1922 }
1923
1924 memset(&brw->perfquery.sys_vars, 0, sizeof(brw->perfquery.sys_vars));
1925 brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
1926 brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;
1927 brw->perfquery.sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
1928 brw->perfquery.sys_vars.revision = intel_device_get_revision(screen->fd);
1929 compute_topology_builtins(brw);
1930
1931 return true;
1932 }
1933
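/**
 * Resolve the sysfs directory backing the DRM fd, i.e. something of the
 * form /sys/dev/char/<major>:<minor>/device/drm/card0 (the card entry is
 * found by scanning the drm/ directory), so that the GT frequency files
 * and the metrics/ directory can be read from it later.
 */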
1934 static bool
1935 get_sysfs_dev_dir(struct brw_context *brw)
1936 {
1937 __DRIscreen *screen = brw->screen->driScrnPriv;
1938 struct stat sb;
1939 int min, maj;
1940 DIR *drmdir;
1941 struct dirent *drm_entry;
1942 int len;
1943
1944 brw->perfquery.sysfs_dev_dir[0] = '\0';
1945
1946 if (fstat(screen->fd, &sb)) {
1947 DBG("Failed to stat DRM fd\n");
1948 return false;
1949 }
1950
1951 maj = major(sb.st_rdev);
1952 min = minor(sb.st_rdev);
1953
1954 if (!S_ISCHR(sb.st_mode)) {
1955 DBG("DRM fd is not a character device as expected\n");
1956 return false;
1957 }
1958
1959 len = snprintf(brw->perfquery.sysfs_dev_dir,
1960 sizeof(brw->perfquery.sysfs_dev_dir),
1961 "/sys/dev/char/%d:%d/device/drm", maj, min);
1962 if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir)) {
1963 DBG("Failed to concatenate sysfs path to drm device\n");
1964 return false;
1965 }
1966
1967 drmdir = opendir(brw->perfquery.sysfs_dev_dir);
1968 if (!drmdir) {
1969 DBG("Failed to open %s: %m\n", brw->perfquery.sysfs_dev_dir);
1970 return false;
1971 }
1972
1973 while ((drm_entry = readdir(drmdir))) {
1974 if ((drm_entry->d_type == DT_DIR ||
1975 drm_entry->d_type == DT_LNK) &&
1976 strncmp(drm_entry->d_name, "card", 4) == 0)
1977 {
1978 len = snprintf(brw->perfquery.sysfs_dev_dir,
1979 sizeof(brw->perfquery.sysfs_dev_dir),
1980 "/sys/dev/char/%d:%d/device/drm/%s",
1981 maj, min, drm_entry->d_name);
1982 closedir(drmdir);
1983 if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir))
1984 return false;
1985 else
1986 return true;
1987 }
1988 }
1989
1990 closedir(drmdir);
1991
1992 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
1993 maj, min);
1994
1995 return false;
1996 }
1997
1998 typedef void (*perf_register_oa_queries_t)(struct brw_context *);
1999
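/**
 * Return the generated per-platform function that registers the OA metric
 * sets Mesa ships for this device, or NULL when Mesa has no OA metrics for
 * the platform.
 */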
2000 static perf_register_oa_queries_t
2001 get_register_queries_function(const struct gen_device_info *devinfo)
2002 {
2003 if (devinfo->is_haswell)
2004 return brw_oa_register_queries_hsw;
2005 if (devinfo->is_cherryview)
2006 return brw_oa_register_queries_chv;
2007 if (devinfo->is_broadwell)
2008 return brw_oa_register_queries_bdw;
2009 if (devinfo->is_broxton)
2010 return brw_oa_register_queries_bxt;
2011 if (devinfo->is_skylake) {
2012 if (devinfo->gt == 2)
2013 return brw_oa_register_queries_sklgt2;
2014 if (devinfo->gt == 3)
2015 return brw_oa_register_queries_sklgt3;
2016 if (devinfo->gt == 4)
2017 return brw_oa_register_queries_sklgt4;
2018 }
2019 if (devinfo->is_kabylake) {
2020 if (devinfo->gt == 2)
2021 return brw_oa_register_queries_kblgt2;
2022 if (devinfo->gt == 3)
2023 return brw_oa_register_queries_kblgt3;
2024 }
2025 if (devinfo->is_geminilake)
2026 return brw_oa_register_queries_glk;
2027 if (devinfo->is_coffeelake) {
2028 if (devinfo->gt == 2)
2029 return brw_oa_register_queries_cflgt2;
2030 if (devinfo->gt == 3)
2031 return brw_oa_register_queries_cflgt3;
2032 }
2033 if (devinfo->is_cannonlake)
2034 return brw_oa_register_queries_cnl;
2035
2036 return NULL;
2037 }
2038
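/**
 * Driver hook backing GL_INTEL_performance_query's query enumeration.
 *
 * The pipeline statistics query is always registered. OA queries are only
 * registered when the kernel exposes the i915 perf interface and OA access
 * is permitted (always on Haswell; otherwise when perf_stream_paranoid is
 * 0 or we run as root). For example (illustrative only), unprivileged
 * access can be granted with:
 *
 *    echo 0 > /proc/sys/dev/i915/perf_stream_paranoid
 *
 * Mesa's OA metric sets are then either uploaded as dynamic configurations
 * or matched against the sets advertised in sysfs. Returns the number of
 * queries exposed.
 */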
2039 static unsigned
2040 brw_init_perf_query_info(struct gl_context *ctx)
2041 {
2042 struct brw_context *brw = brw_context(ctx);
2043 const struct gen_device_info *devinfo = &brw->screen->devinfo;
2044 bool i915_perf_oa_available = false;
2045 struct stat sb;
2046 perf_register_oa_queries_t oa_register;
2047
2048 if (brw->perfquery.n_queries)
2049 return brw->perfquery.n_queries;
2050
2051 init_pipeline_statistic_query_registers(brw);
2052
2053 oa_register = get_register_queries_function(devinfo);
2054
2055 /* The existence of this sysctl parameter implies the kernel supports
2056 * the i915 perf interface.
2057 */
2058 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
2059
2060 /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
2061 * metrics unless running as root.
2062 */
2063 if (devinfo->is_haswell)
2064 i915_perf_oa_available = true;
2065 else {
2066 uint64_t paranoid = 1;
2067
2068 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
2069
2070 if (paranoid == 0 || geteuid() == 0)
2071 i915_perf_oa_available = true;
2072 }
2073 }
2074
2075 if (i915_perf_oa_available &&
2076 oa_register &&
2077 get_sysfs_dev_dir(brw) &&
2078 init_oa_sys_vars(brw))
2079 {
2080 brw->perfquery.oa_metrics_table =
2081 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2082 _mesa_key_string_equal);
2083
2084 /* Index all the metric sets mesa knows about before looking to see what
2085 * the kernel is advertising.
2086 */
2087 oa_register(brw);
2088
2089 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
2090 kernel_has_dynamic_config_support(brw))
2091 init_oa_configs(brw);
2092 else
2093 enumerate_sysfs_metrics(brw);
2094 }
2095
2096 brw->perfquery.unaccumulated =
2097 ralloc_array(brw, struct brw_perf_query_object *, 2);
2098 brw->perfquery.unaccumulated_elements = 0;
2099 brw->perfquery.unaccumulated_array_size = 2;
2100
2101 exec_list_make_empty(&brw->perfquery.sample_buffers);
2102 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
2103
   /* It's convenient to guarantee that this linked list of sample buffers
    * is never empty, so we add an empty head. That way, when we begin an
    * OA query, we can always take a reference on a buffer in this list.
    */
2109 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
2110 exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);
2111
2112 brw->perfquery.oa_stream_fd = -1;
2113
2114 brw->perfquery.next_query_start_report_id = 1000;
2115
2116 return brw->perfquery.n_queries;
2117 }
2118
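/**
 * Plug the GL_INTEL_performance_query implementation above into the
 * ctx->Driver hook table.
 */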
2119 void
2120 brw_init_performance_queries(struct brw_context *brw)
2121 {
2122 struct gl_context *ctx = &brw->ctx;
2123
2124 ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
2125 ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
2126 ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
2127 ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
2128 ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
2129 ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
2130 ctx->Driver.EndPerfQuery = brw_end_perf_query;
2131 ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
2132 ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
2133 ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
2134 }