i965: perf: keep on reading reports until delimiting timestamp
src/mesa/drivers/dri/i965/brw_performance_query.c
1 /*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file brw_performance_query.c
26 *
27 * Implementation of the GL_INTEL_performance_query extension.
28 *
29 * Currently there are two possible counter sources exposed here:
30 *
31 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
32 * that we can snapshot at the beginning and end of a query.
33 *
34 * On Gen7.5+ we have Observability Architecture counters which are
 35  * covered in a separate document from the rest of the PRMs. It is available at:
36 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
37 * => 2013 Intel Core Processor Family => Observability Performance Counters
38 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
39 * though notably we currently only support OA counters for Haswell+)
40 */
41
42 #include <limits.h>
43 #include <dirent.h>
44
45 /* put before sys/types.h to silence glibc warnings */
46 #ifdef MAJOR_IN_MKDEV
47 #include <sys/mkdev.h>
48 #endif
49 #ifdef MAJOR_IN_SYSMACROS
50 #include <sys/sysmacros.h>
51 #endif
52 #include <sys/types.h>
53 #include <sys/stat.h>
54 #include <fcntl.h>
55 #include <sys/mman.h>
56 #include <sys/ioctl.h>
57
58 #include <xf86drm.h>
59 #include <i915_drm.h>
60
61 #include "main/hash.h"
62 #include "main/macros.h"
63 #include "main/mtypes.h"
64 #include "main/performance_query.h"
65
66 #include "util/bitset.h"
67 #include "util/ralloc.h"
68 #include "util/hash_table.h"
69 #include "util/list.h"
70
71 #include "brw_context.h"
72 #include "brw_defines.h"
73 #include "brw_performance_query.h"
74 #include "brw_oa_hsw.h"
75 #include "brw_oa_bdw.h"
76 #include "brw_oa_chv.h"
77 #include "brw_oa_sklgt2.h"
78 #include "brw_oa_sklgt3.h"
79 #include "brw_oa_sklgt4.h"
80 #include "brw_oa_bxt.h"
81 #include "intel_batchbuffer.h"
82
83 #define FILE_DEBUG_FLAG DEBUG_PERFMON
84
85 /*
86 * The largest OA formats we can use include:
87 * For Haswell:
88 * 1 timestamp, 45 A counters, 8 B counters and 8 C counters.
89 * For Gen8+
90 * 1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
91 */
92 #define MAX_OA_REPORT_COUNTERS 62
93
94 #define OAREPORT_REASON_MASK 0x3f
95 #define OAREPORT_REASON_SHIFT 19
96 #define OAREPORT_REASON_TIMER (1<<0)
97 #define OAREPORT_REASON_TRIGGER1 (1<<1)
98 #define OAREPORT_REASON_TRIGGER2 (1<<2)
99 #define OAREPORT_REASON_CTX_SWITCH (1<<3)
100 #define OAREPORT_REASON_GO_TRANSITION (1<<4)
101
102 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
103 256) /* OA counter report */
104
105 /**
106 * Periodic OA samples are read() into these buffer structures via the
107 * i915 perf kernel interface and appended to the
108 * brw->perfquery.sample_buffers linked list. When we process the
109 * results of an OA metrics query we need to consider all the periodic
110 * samples between the Begin and End MI_REPORT_PERF_COUNT command
111 * markers.
112 *
 113  * 'Periodic' is a simplification, as other automatic reports written
 114  * by the hardware are also buffered here.
115 *
116 * Considering three queries, A, B and C:
117 *
118 * Time ---->
119 * ________________A_________________
120 * | |
121 * | ________B_________ _____C___________
122 * | | | | | |
123 *
124 * And an illustration of sample buffers read over this time frame:
125 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
126 *
127 * These nodes may hold samples for query A:
128 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
129 *
130 * These nodes may hold samples for query B:
131 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
132 *
133 * These nodes may hold samples for query C:
134 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
135 *
136 * The illustration assumes we have an even distribution of periodic
 137  * samples so all nodes have the same size plotted against time.
138 *
139 * Note, to simplify code, the list is never empty.
140 *
141 * With overlapping queries we can see that periodic OA reports may
 142  * relate to multiple queries and care needs to be taken to keep
143 * track of sample buffers until there are no queries that might
144 * depend on their contents.
145 *
146 * We use a node ref counting system where a reference ensures that a
147 * node and all following nodes can't be freed/recycled until the
148 * reference drops to zero.
149 *
150 * E.g. with a ref of one here:
151 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
152 *
153 * These nodes could be freed or recycled ("reaped"):
154 * [ 0 ][ 0 ]
155 *
156 * These must be preserved until the leading ref drops to zero:
157 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
158 *
159 * When a query starts we take a reference on the current tail of
160 * the list, knowing that no already-buffered samples can possibly
161 * relate to the newly-started query. A pointer to this node is
162 * also saved in the query object's ->oa.samples_head.
163 *
164 * E.g. starting query A while there are two nodes in .sample_buffers:
165 * ________________A________
166 * |
167 *
168 * [ 0 ][ 1 ]
169 * ^_______ Add a reference and store pointer to node in
170 * A->oa.samples_head
171 *
172 * Moving forward to when the B query starts with no new buffer nodes:
173 * (for reference, i915 perf reads() are only done when queries finish)
174 * ________________A_______
175 * | ________B___
176 * | |
177 *
178 * [ 0 ][ 2 ]
179 * ^_______ Add a reference and store pointer to
180 * node in B->oa.samples_head
181 *
182 * Once a query is finished, after an OA query has become 'Ready',
 183  * once the End OA report has landed and after we have processed
184 * all the intermediate periodic samples then we drop the
185 * ->oa.samples_head reference we took at the start.
186 *
187 * So when the B query has finished we have:
188 * ________________A________
189 * | ______B___________
190 * | | |
191 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
192 * ^_______ Drop B->oa.samples_head reference
193 *
194 * We still can't free these due to the A->oa.samples_head ref:
195 * [ 1 ][ 0 ][ 0 ][ 0 ]
196 *
197 * When the A query finishes: (note there's a new ref for C's samples_head)
198 * ________________A_________________
199 * | |
200 * | _____C_________
201 * | | |
202 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
203 * ^_______ Drop A->oa.samples_head reference
204 *
205 * And we can now reap these nodes up to the C->oa.samples_head:
206 * [ X ][ X ][ X ][ X ]
207 * keeping -> [ 1 ][ 0 ][ 0 ]
208 *
209 * We reap old sample buffers each time we finish processing an OA
210 * query by iterating the sample_buffers list from the head until we
211 * find a referenced node and stop.
212 *
213 * Reaped buffers move to a perfquery.free_sample_buffers list and
214 * when we come to read() we first look to recycle a buffer from the
215 * free_sample_buffers list before allocating a new buffer.
216 */
217 struct brw_oa_sample_buf {
218 struct exec_node link;
219 int refcount;
220 int len;
221 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
222 uint32_t last_timestamp;
223 };
224
225 /**
226 * i965 representation of a performance query object.
227 *
228 * NB: We want to keep this structure relatively lean considering that
229 * applications may expect to allocate enough objects to be able to
230 * query around all draw calls in a frame.
231 */
232 struct brw_perf_query_object
233 {
234 struct gl_perf_query_object base;
235
236 const struct brw_perf_query_info *query;
237
238 /* See query->kind to know which state below is in use... */
239 union {
240 struct {
241
242 /**
243 * BO containing OA counter snapshots at query Begin/End time.
244 */
245 struct brw_bo *bo;
246
247 /**
 248          * Address of the mapped @bo
249 */
250 void *map;
251
252 /**
253 * The MI_REPORT_PERF_COUNT command lets us specify a unique
254 * ID that will be reflected in the resulting OA report
255 * that's written by the GPU. This is the ID we're expecting
 256          * in the begin report and the end report should be
257 * @begin_report_id + 1.
258 */
259 int begin_report_id;
260
261 /**
262 * Reference the head of the brw->perfquery.sample_buffers
263 * list at the time that the query started (so we only need
264 * to look at nodes after this point when looking for samples
265 * related to this query)
266 *
267 * (See struct brw_oa_sample_buf description for more details)
268 */
269 struct exec_node *samples_head;
270
271 /**
272 * Storage for the final accumulated OA counters.
273 */
274 uint64_t accumulator[MAX_OA_REPORT_COUNTERS];
275
276 /**
277 * false while in the unaccumulated_elements list, and set to
278 * true when the final, end MI_RPC snapshot has been
279 * accumulated.
280 */
281 bool results_accumulated;
282
283 } oa;
284
285 struct {
286 /**
287 * BO containing starting and ending snapshots for the
288 * statistics counters.
289 */
290 struct brw_bo *bo;
291 } pipeline_stats;
292 };
293 };
294
295 /** Downcasting convenience macro. */
296 static inline struct brw_perf_query_object *
297 brw_perf_query(struct gl_perf_query_object *o)
298 {
299 return (struct brw_perf_query_object *) o;
300 }
301
302 #define STATS_BO_SIZE 4096
303 #define STATS_BO_END_OFFSET_BYTES (STATS_BO_SIZE / 2)
304 #define MAX_STAT_COUNTERS (STATS_BO_END_OFFSET_BYTES / 8)
305
306 #define MI_RPC_BO_SIZE 4096
307 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
308
309 /******************************************************************************/
310
311 static bool
312 brw_is_perf_query_ready(struct gl_context *ctx,
313 struct gl_perf_query_object *o);
314
315 static void
316 dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
317 {
318 struct gl_context *ctx = brw_void;
319 struct gl_perf_query_object *o = query_void;
320 struct brw_perf_query_object *obj = query_void;
321
322 switch (obj->query->kind) {
323 case OA_COUNTERS:
324 DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
325 id,
326 o->Used ? "Dirty," : "New,",
327 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
328 obj->oa.bo ? "yes," : "no,",
329 brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
330 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
331 break;
332 case PIPELINE_STATS:
333 DBG("%4d: %-6s %-8s BO: %-4s\n",
334 id,
335 o->Used ? "Dirty," : "New,",
336 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
337 obj->pipeline_stats.bo ? "yes" : "no");
338 break;
339 }
340 }
341
342 static void
343 dump_perf_queries(struct brw_context *brw)
344 {
345 struct gl_context *ctx = &brw->ctx;
346 DBG("Queries: (Open queries = %d, OA users = %d)\n",
347 brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
348 _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
349 }
350
351 /******************************************************************************/
352
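/**
 * Take a sample buffer from the free_sample_buffers list if one is
 * available, otherwise allocate and initialize a new one.
 */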
353 static struct brw_oa_sample_buf *
354 get_free_sample_buf(struct brw_context *brw)
355 {
356 struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
357 struct brw_oa_sample_buf *buf;
358
359 if (node)
360 buf = exec_node_data(struct brw_oa_sample_buf, node, link);
361 else {
362 buf = ralloc_size(brw, sizeof(*buf));
363
364 exec_node_init(&buf->link);
365 buf->refcount = 0;
366 buf->len = 0;
367 }
368
369 return buf;
370 }
371
372 static void
373 reap_old_sample_buffers(struct brw_context *brw)
374 {
375 struct exec_node *tail_node =
376 exec_list_get_tail(&brw->perfquery.sample_buffers);
377 struct brw_oa_sample_buf *tail_buf =
378 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
379
380 /* Remove all old, unreferenced sample buffers walking forward from
381 * the head of the list, except always leave at least one node in
382 * the list so we always have a node to reference when we Begin
383 * a new query.
384 */
385 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
386 &brw->perfquery.sample_buffers)
387 {
388 if (buf->refcount == 0 && buf != tail_buf) {
389 exec_node_remove(&buf->link);
390 exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
391 } else
392 return;
393 }
394 }
395
396 static void
397 free_sample_bufs(struct brw_context *brw)
398 {
399 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
400 &brw->perfquery.free_sample_buffers)
401 ralloc_free(buf);
402
403 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
404 }
405
406 /******************************************************************************/
407
408 /**
409 * Driver hook for glGetPerfQueryInfoINTEL().
410 */
411 static void
412 brw_get_perf_query_info(struct gl_context *ctx,
413 unsigned query_index,
414 const char **name,
415 GLuint *data_size,
416 GLuint *n_counters,
417 GLuint *n_active)
418 {
419 struct brw_context *brw = brw_context(ctx);
420 const struct brw_perf_query_info *query =
421 &brw->perfquery.queries[query_index];
422
423 *name = query->name;
424 *data_size = query->data_size;
425 *n_counters = query->n_counters;
426
427 switch (query->kind) {
428 case OA_COUNTERS:
429 *n_active = brw->perfquery.n_active_oa_queries;
430 break;
431
432 case PIPELINE_STATS:
433 *n_active = brw->perfquery.n_active_pipeline_stats_queries;
434 break;
435 }
436 }
437
438 /**
439 * Driver hook for glGetPerfCounterInfoINTEL().
440 */
441 static void
442 brw_get_perf_counter_info(struct gl_context *ctx,
443 unsigned query_index,
444 unsigned counter_index,
445 const char **name,
446 const char **desc,
447 GLuint *offset,
448 GLuint *data_size,
449 GLuint *type_enum,
450 GLuint *data_type_enum,
451 GLuint64 *raw_max)
452 {
453 struct brw_context *brw = brw_context(ctx);
454 const struct brw_perf_query_info *query =
455 &brw->perfquery.queries[query_index];
456 const struct brw_perf_query_counter *counter =
457 &query->counters[counter_index];
458
459 *name = counter->name;
460 *desc = counter->desc;
461 *offset = counter->offset;
462 *data_size = counter->size;
463 *type_enum = counter->type;
464 *data_type_enum = counter->data_type;
465 *raw_max = counter->raw_max;
466 }
467
468 /******************************************************************************/
469
470 /**
471 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
472 * pipeline statistics for the performance query object.
473 */
474 static void
475 snapshot_statistics_registers(struct brw_context *brw,
476 struct brw_perf_query_object *obj,
477 uint32_t offset_in_bytes)
478 {
479 const struct brw_perf_query_info *query = obj->query;
480 const int n_counters = query->n_counters;
481
482 for (int i = 0; i < n_counters; i++) {
483 const struct brw_perf_query_counter *counter = &query->counters[i];
484
485 assert(counter->data_type == GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL);
486
487 brw_store_register_mem64(brw, obj->pipeline_stats.bo,
488 counter->pipeline_stat.reg,
489 offset_in_bytes + i * sizeof(uint64_t));
490 }
491 }
492
493 /**
494 * Add a query to the global list of "unaccumulated queries."
495 *
496 * Queries are tracked here until all the associated OA reports have
497 * been accumulated via accumulate_oa_reports() after the end
498 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
499 */
500 static void
501 add_to_unaccumulated_query_list(struct brw_context *brw,
502 struct brw_perf_query_object *obj)
503 {
504 if (brw->perfquery.unaccumulated_elements >=
505 brw->perfquery.unaccumulated_array_size)
506 {
507 brw->perfquery.unaccumulated_array_size *= 1.5;
508 brw->perfquery.unaccumulated =
509 reralloc(brw, brw->perfquery.unaccumulated,
510 struct brw_perf_query_object *,
511 brw->perfquery.unaccumulated_array_size);
512 }
513
514 brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
515 }
516
517 /**
 518  * Remove a query from the global list of unaccumulated queries, either
 519  * after successfully accumulating the OA reports associated with the
520 * query in accumulate_oa_reports() or when discarding unwanted query
521 * results.
522 */
523 static void
524 drop_from_unaccumulated_query_list(struct brw_context *brw,
525 struct brw_perf_query_object *obj)
526 {
527 for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
528 if (brw->perfquery.unaccumulated[i] == obj) {
529 int last_elt = --brw->perfquery.unaccumulated_elements;
530
531 if (i == last_elt)
532 brw->perfquery.unaccumulated[i] = NULL;
533 else {
534 brw->perfquery.unaccumulated[i] =
535 brw->perfquery.unaccumulated[last_elt];
536 }
537
538 break;
539 }
540 }
541
542 /* Drop our samples_head reference so that associated periodic
543 * sample data buffers can potentially be reaped if they aren't
544 * referenced by any other queries...
545 */
546
547 struct brw_oa_sample_buf *buf =
548 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
549
550 assert(buf->refcount > 0);
551 buf->refcount--;
552
553 obj->oa.samples_head = NULL;
554
555 reap_old_sample_buffers(brw);
556 }
557
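/**
 * Scale a raw, unsigned 32bit GPU timestamp delta into nanoseconds,
 * based on the device's timestamp frequency.
 */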
558 static uint64_t
559 timebase_scale(struct brw_context *brw, uint32_t u32_time_delta)
560 {
561 const struct gen_device_info *devinfo = &brw->screen->devinfo;
562 uint64_t tmp = ((uint64_t)u32_time_delta) * 1000000000ull;
563
564 return tmp ? tmp / devinfo->timestamp_frequency : 0;
565 }
566
567 static void
568 accumulate_uint32(const uint32_t *report0,
569 const uint32_t *report1,
570 uint64_t *accumulator)
571 {
572 *accumulator += (uint32_t)(*report1 - *report0);
573 }
574
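/**
 * Accumulate the delta for one of the 40bit A counters found in the Gen8+
 * report format: the low 32 bits live in dwords 4..35 of the report and
 * the high 8 bits are packed as bytes starting at dword 40. A single wrap
 * of the 40bit value between the two reports is accounted for.
 */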
575 static void
576 accumulate_uint40(int a_index,
577 const uint32_t *report0,
578 const uint32_t *report1,
579 uint64_t *accumulator)
580 {
581 const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
582 const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
583 uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
584 uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
585 uint64_t value0 = report0[a_index + 4] | high0;
586 uint64_t value1 = report1[a_index + 4] | high1;
587 uint64_t delta;
588
589 if (value0 > value1)
590 delta = (1ULL << 40) + value1 - value0;
591 else
592 delta = value1 - value0;
593
594 *accumulator += delta;
595 }
596
597 /**
598 * Given pointers to starting and ending OA snapshots, add the deltas for each
599 * counter to the results.
600 */
601 static void
602 add_deltas(struct brw_context *brw,
603 struct brw_perf_query_object *obj,
604 const uint32_t *start,
605 const uint32_t *end)
606 {
607 const struct brw_perf_query_info *query = obj->query;
608 uint64_t *accumulator = obj->oa.accumulator;
609 int idx = 0;
610 int i;
611
612 switch (query->oa_format) {
613 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
614 accumulate_uint32(start + 1, end + 1, accumulator + idx++); /* timestamp */
615 accumulate_uint32(start + 3, end + 3, accumulator + idx++); /* clock */
616
617 /* 32x 40bit A counters... */
618 for (i = 0; i < 32; i++)
619 accumulate_uint40(i, start, end, accumulator + idx++);
620
621 /* 4x 32bit A counters... */
622 for (i = 0; i < 4; i++)
623 accumulate_uint32(start + 36 + i, end + 36 + i, accumulator + idx++);
624
625 /* 8x 32bit B counters + 8x 32bit C counters... */
626 for (i = 0; i < 16; i++)
627 accumulate_uint32(start + 48 + i, end + 48 + i, accumulator + idx++);
628
629 break;
630 case I915_OA_FORMAT_A45_B8_C8:
631 accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */
632
633 for (i = 0; i < 61; i++)
634 accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);
635
636 break;
637 default:
638 unreachable("Can't accumulate OA counters in unknown format");
639 }
640 }
641
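/**
 * The i915 perf stream is opened in a disabled state; enable it when the
 * first OA query user appears and keep a reference count so that it is
 * only disabled again (in dec_n_oa_users()) once the last user is done.
 */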
642 static bool
643 inc_n_oa_users(struct brw_context *brw)
644 {
645 if (brw->perfquery.n_oa_users == 0 &&
646 drmIoctl(brw->perfquery.oa_stream_fd,
647 I915_PERF_IOCTL_ENABLE, 0) < 0)
648 {
649 return false;
650 }
651 ++brw->perfquery.n_oa_users;
652
653 return true;
654 }
655
656 static void
657 dec_n_oa_users(struct brw_context *brw)
658 {
659 /* Disabling the i915 perf stream will effectively disable the OA
660 * counters. Note it's important to be sure there are no outstanding
661 * MI_RPC commands at this point since they could stall the CS
662 * indefinitely once OACONTROL is disabled.
663 */
664 --brw->perfquery.n_oa_users;
665 if (brw->perfquery.n_oa_users == 0 &&
666 drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
667 {
668 DBG("WARNING: Error disabling i915 perf stream: %m\n");
669 }
670 }
671
 672 /* In general, if we see anything spurious while accumulating results
 673  * we don't try to continue accumulating the current query hoping for
 674  * the best; we scrap anything outstanding and then hope for the best
 675  * with new queries.
676 */
677 static void
678 discard_all_queries(struct brw_context *brw)
679 {
680 while (brw->perfquery.unaccumulated_elements) {
681 struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];
682
683 obj->oa.results_accumulated = true;
684 drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);
685
686 dec_n_oa_users(brw);
687 }
688 }
689
690 enum OaReadStatus {
691 OA_READ_STATUS_ERROR,
692 OA_READ_STATUS_UNFINISHED,
693 OA_READ_STATUS_FINISHED,
694 };
695
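/**
 * Keep reading periodic OA samples from the i915 perf stream (appending
 * them to the sample_buffers list) until a report with a timestamp at or
 * beyond end_timestamp has been buffered, taking care to allow for 32bit
 * timestamp wrap by working with deltas from start_timestamp.
 *
 * Returns OA_READ_STATUS_UNFINISHED if no more data is currently
 * available but the delimiting timestamp hasn't been seen yet.
 */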
696 static enum OaReadStatus
697 read_oa_samples_until(struct brw_context *brw,
698 uint32_t start_timestamp,
699 uint32_t end_timestamp)
700 {
701 struct exec_node *tail_node =
702 exec_list_get_tail(&brw->perfquery.sample_buffers);
703 struct brw_oa_sample_buf *tail_buf =
704 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
705 uint32_t last_timestamp = tail_buf->last_timestamp;
706
707 while (1) {
708 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
709 uint32_t offset;
710 int len;
711
712 while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
713 sizeof(buf->buf))) < 0 && errno == EINTR)
714 ;
715
716 if (len <= 0) {
717 exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);
718
719 if (len < 0) {
720 if (errno == EAGAIN)
721 return ((last_timestamp - start_timestamp) >=
722 (end_timestamp - start_timestamp)) ?
723 OA_READ_STATUS_FINISHED :
724 OA_READ_STATUS_UNFINISHED;
725 else {
726 DBG("Error reading i915 perf samples: %m\n");
727 }
728 } else
729 DBG("Spurious EOF reading i915 perf samples\n");
730
731 return OA_READ_STATUS_ERROR;
732 }
733
734 buf->len = len;
735 exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);
736
737 /* Go through the reports and update the last timestamp. */
738 offset = 0;
739 while (offset < buf->len) {
740 const struct drm_i915_perf_record_header *header =
741 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
742 uint32_t *report = (uint32_t *) (header + 1);
743
744 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
745 last_timestamp = report[1];
746
747 offset += header->size;
748 }
749
750 buf->last_timestamp = last_timestamp;
751 }
752
753 unreachable("not reached");
754 return OA_READ_STATUS_ERROR;
755 }
756
757 /**
758 * Try to read all the reports until either the delimiting timestamp
759 * or an error arises.
760 */
761 static bool
762 read_oa_samples_for_query(struct brw_context *brw,
763 struct brw_perf_query_object *obj)
764 {
765 uint32_t *start;
766 uint32_t *last;
767 uint32_t *end;
768
769 /* We need the MI_REPORT_PERF_COUNT to land before we can start
 770     * accumulating. */
771 assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
772 !brw_bo_busy(obj->oa.bo));
773
774 /* Map the BO once here and let accumulate_oa_reports() unmap
775 * it. */
776 if (obj->oa.map == NULL)
777 obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);
778
779 start = last = obj->oa.map;
780 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
781
782 if (start[0] != obj->oa.begin_report_id) {
783 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
784 return true;
785 }
786 if (end[0] != (obj->oa.begin_report_id + 1)) {
787 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
788 return true;
789 }
790
791 /* Read the reports until the end timestamp. */
792 switch (read_oa_samples_until(brw, start[1], end[1])) {
793 case OA_READ_STATUS_ERROR:
794 /* Fallthrough and let accumulate_oa_reports() deal with the
795 * error. */
796 case OA_READ_STATUS_FINISHED:
797 return true;
798 case OA_READ_STATUS_UNFINISHED:
799 return false;
800 }
801
802 unreachable("invalid read status");
803 return false;
804 }
805
806 /**
807 * Accumulate raw OA counter values based on deltas between pairs of
808 * OA reports.
809 *
810 * Accumulation starts from the first report captured via
811 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
812 * last MI_RPC report requested by brw_end_perf_query(). Between these
 813  * two reports there may also be some number of periodically sampled OA
814 * reports collected via the i915 perf interface - depending on the
815 * duration of the query.
816 *
817 * These periodic snapshots help to ensure we handle counter overflow
 818  * correctly by being frequent enough that we don't miss multiple
819 * overflows of a counter between snapshots. For Gen8+ the i915 perf
820 * snapshots provide the extra context-switch reports that let us
821 * subtract out the progress of counters associated with other
822 * contexts running on the system.
823 */
824 static void
825 accumulate_oa_reports(struct brw_context *brw,
826 struct brw_perf_query_object *obj)
827 {
828 struct gl_perf_query_object *o = &obj->base;
829 uint32_t *start;
830 uint32_t *last;
831 uint32_t *end;
832 struct exec_node *first_samples_node;
833 bool in_ctx = true;
834 uint32_t ctx_id;
835
836 assert(o->Ready);
837 assert(obj->oa.map != NULL);
838
839 start = last = obj->oa.map;
840 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
841
842 if (start[0] != obj->oa.begin_report_id) {
843 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
844 goto error;
845 }
846 if (end[0] != (obj->oa.begin_report_id + 1)) {
847 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
848 goto error;
849 }
850
851 ctx_id = start[2];
852
853 /* See if we have any periodic reports to accumulate too... */
854
855 /* N.B. The oa.samples_head was set when the query began and
856 * pointed to the tail of the brw->perfquery.sample_buffers list at
857 * the time the query started. Since the buffer existed before the
858 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
859 * that no data in this particular node's buffer can possibly be
860 * associated with the query - so skip ahead one...
861 */
862 first_samples_node = obj->oa.samples_head->next;
863
864 foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
865 &brw->perfquery.sample_buffers,
866 first_samples_node)
867 {
868 int offset = 0;
869
870 while (offset < buf->len) {
871 const struct drm_i915_perf_record_header *header =
872 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
873
874 assert(header->size != 0);
875 assert(header->size <= buf->len);
876
877 offset += header->size;
878
879 switch (header->type) {
880 case DRM_I915_PERF_RECORD_SAMPLE: {
881 uint32_t *report = (uint32_t *)(header + 1);
882 bool add = true;
883
884 /* Ignore reports that come before the start marker.
885 * (Note: takes care to allow overflow of 32bit timestamps)
886 */
887 if (timebase_scale(brw, report[1] - start[1]) > 5000000000)
888 continue;
889
890 /* Ignore reports that come after the end marker.
891 * (Note: takes care to allow overflow of 32bit timestamps)
892 */
893 if (timebase_scale(brw, report[1] - end[1]) <= 5000000000)
894 goto end;
895
896 /* For Gen8+ since the counters continue while other
897 * contexts are running we need to discount any unrelated
898 * deltas. The hardware automatically generates a report
899 * on context switch which gives us a new reference point
 900              * to continue adding deltas from.
901 *
902 * For Haswell we can rely on the HW to stop the progress
 903              * of OA counters while any other context is active.
904 */
905 if (brw->gen >= 8) {
906 if (in_ctx && report[2] != ctx_id) {
907 DBG("i915 perf: Switch AWAY (observed by ID change)\n");
908 in_ctx = false;
909 } else if (in_ctx == false && report[2] == ctx_id) {
910 DBG("i915 perf: Switch TO\n");
911 in_ctx = true;
912 add = false;
913 } else if (in_ctx) {
914 assert(report[2] == ctx_id);
915 DBG("i915 perf: Continuation IN\n");
916 } else {
917 assert(report[2] != ctx_id);
918 DBG("i915 perf: Continuation OUT\n");
919 add = false;
920 }
921 }
922
923 if (add)
924 add_deltas(brw, obj, last, report);
925
926 last = report;
927
928 break;
929 }
930
931 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
932 DBG("i915 perf: OA error: all reports lost\n");
933 goto error;
934 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
935 DBG("i915 perf: OA report lost\n");
936 break;
937 }
938 }
939 }
940
941 end:
942
943 add_deltas(brw, obj, last, end);
944
945 DBG("Marking %d accumulated - results gathered\n", o->Id);
946
947 brw_bo_unmap(obj->oa.bo);
948 obj->oa.map = NULL;
949 obj->oa.results_accumulated = true;
950 drop_from_unaccumulated_query_list(brw, obj);
951 dec_n_oa_users(brw);
952
953 return;
954
955 error:
956
957 brw_bo_unmap(obj->oa.bo);
958 obj->oa.map = NULL;
959 discard_all_queries(brw);
960 }
961
962 /******************************************************************************/
963
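/**
 * Open an i915 perf stream via DRM_IOCTL_I915_PERF_OPEN, configured for
 * single-context OA sampling of the given metrics set with the given
 * report format and periodic sampling exponent. The stream starts off
 * disabled (I915_PERF_FLAG_DISABLED) and is enabled later by
 * inc_n_oa_users().
 */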
964 static bool
965 open_i915_perf_oa_stream(struct brw_context *brw,
966 int metrics_set_id,
967 int report_format,
968 int period_exponent,
969 int drm_fd,
970 uint32_t ctx_id)
971 {
972 uint64_t properties[] = {
973 /* Single context sampling */
974 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
975
976 /* Include OA reports in samples */
977 DRM_I915_PERF_PROP_SAMPLE_OA, true,
978
979 /* OA unit configuration */
980 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
981 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
982 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
983 };
984 struct drm_i915_perf_open_param param = {
985 .flags = I915_PERF_FLAG_FD_CLOEXEC |
986 I915_PERF_FLAG_FD_NONBLOCK |
987 I915_PERF_FLAG_DISABLED,
988 .num_properties = ARRAY_SIZE(properties) / 2,
989 .properties_ptr = (uintptr_t) properties,
990 };
991 int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
992 if (fd == -1) {
993 DBG("Error opening i915 perf OA stream: %m\n");
994 return false;
995 }
996
997 brw->perfquery.oa_stream_fd = fd;
998
999 brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
1000 brw->perfquery.current_oa_format = report_format;
1001
1002 return true;
1003 }
1004
1005 static void
1006 close_perf(struct brw_context *brw)
1007 {
1008 if (brw->perfquery.oa_stream_fd != -1) {
1009 close(brw->perfquery.oa_stream_fd);
1010 brw->perfquery.oa_stream_fd = -1;
1011 }
1012 }
1013
1014 /**
1015 * Driver hook for glBeginPerfQueryINTEL().
1016 */
1017 static bool
1018 brw_begin_perf_query(struct gl_context *ctx,
1019 struct gl_perf_query_object *o)
1020 {
1021 struct brw_context *brw = brw_context(ctx);
1022 struct brw_perf_query_object *obj = brw_perf_query(o);
1023 const struct brw_perf_query_info *query = obj->query;
1024
1025 /* We can assume the frontend hides mistaken attempts to Begin a
1026 * query object multiple times before its End. Similarly if an
1027 * application reuses a query object before results have arrived
1028 * the frontend will wait for prior results so we don't need
1029 * to support abandoning in-flight results.
1030 */
1031 assert(!o->Active);
1032 assert(!o->Used || o->Ready); /* no in-flight query to worry about */
1033
1034 DBG("Begin(%d)\n", o->Id);
1035
1036 /* XXX: We have to consider that the command parser unit that parses batch
1037 * buffer commands and is used to capture begin/end counter snapshots isn't
1038 * implicitly synchronized with what's currently running across other GPU
1039 * units (such as the EUs running shaders) that the performance counters are
1040 * associated with.
1041 *
1042 * The intention of performance queries is to measure the work associated
1043 * with commands between the begin/end delimiters and so for that to be the
1044 * case we need to explicitly synchronize the parsing of commands to capture
1045 * Begin/End counter snapshots with what's running across other parts of the
1046 * GPU.
1047 *
1048 * When the command parser reaches a Begin marker it effectively needs to
1049 * drain everything currently running on the GPU until the hardware is idle
1050 * before capturing the first snapshot of counters - otherwise the results
1051 * would also be measuring the effects of earlier commands.
1052 *
1053 * When the command parser reaches an End marker it needs to stall until
1054 * everything currently running on the GPU has finished before capturing the
1055 * end snapshot - otherwise the results won't be a complete representation
1056 * of the work.
1057 *
1058 * Theoretically there could be opportunities to minimize how much of the
1059 * GPU pipeline is drained, or that we stall for, when we know what specific
1060 * units the performance counters being queried relate to but we don't
1061 * currently attempt to be clever here.
1062 *
 1063  * Note: with our current simple approach, for back-to-back queries
1064 * we will redundantly emit duplicate commands to synchronize the command
1065 * streamer with the rest of the GPU pipeline, but we assume that in HW the
1066 * second synchronization is effectively a NOOP.
1067 *
1068 * N.B. The final results are based on deltas of counters between (inside)
1069 * Begin/End markers so even though the total wall clock time of the
1070 * workload is stretched by larger pipeline bubbles the bubbles themselves
1071 * are generally invisible to the query results. Whether that's a good or a
1072 * bad thing depends on the use case. For a lower real-time impact while
 1073  * capturing metrics, periodic sampling may be a better choice than
1074 * INTEL_performance_query.
1075 *
1076 *
1077 * This is our Begin synchronization point to drain current work on the
1078 * GPU before we capture our first counter snapshot...
1079 */
1080 brw_emit_mi_flush(brw);
1081
1082 switch (query->kind) {
1083 case OA_COUNTERS:
1084
1085 /* Opening an i915 perf stream implies exclusive access to the OA unit
1086 * which will generate counter reports for a specific counter set with a
1087 * specific layout/format so we can't begin any OA based queries that
1088 * require a different counter set or format unless we get an opportunity
1089 * to close the stream and open a new one...
1090 */
1091 if (brw->perfquery.oa_stream_fd != -1 &&
1092 brw->perfquery.current_oa_metrics_set_id !=
1093 query->oa_metrics_set_id) {
1094
1095 if (brw->perfquery.n_oa_users != 0)
1096 return false;
1097 else
1098 close_perf(brw);
1099 }
1100
1101 /* If the OA counters aren't already on, enable them. */
1102 if (brw->perfquery.oa_stream_fd == -1) {
1103 __DRIscreen *screen = brw->screen->driScrnPriv;
1104 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1105
1106 /* The period_exponent gives a sampling period as follows:
1107 * sample_period = timestamp_period * 2^(period_exponent + 1)
1108 *
 1109        * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1110 * ~83ns (GEN8/9).
1111 *
1112 * The counter overflow period is derived from the EuActive counter
1113 * which reads a counter that increments by the number of clock
1114 * cycles multiplied by the number of EUs. It can be calculated as:
1115 *
1116 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1117 *
1118 * (E.g. 40 EUs @ 1GHz = ~53ms)
1119 *
 1120        * We select a sampling period shorter than that overflow period to
 1121        * ensure we cannot see more than 1 counter overflow; otherwise we
 1122        * could lose information.
1123 */
1124
1125 int a_counter_in_bits = 32;
1126 if (devinfo->gen >= 8)
1127 a_counter_in_bits = 40;
1128
1129 uint64_t overflow_period = pow(2, a_counter_in_bits) /
1130 (brw->perfquery.sys_vars.n_eus *
1131 /* drop 1GHz freq to have units in nanoseconds */
1132 2);
1133
1134 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1135 overflow_period, overflow_period / 1000000ul, brw->perfquery.sys_vars.n_eus);
1136
1137 int period_exponent = 0;
1138 uint64_t prev_sample_period, next_sample_period;
1139 for (int e = 0; e < 30; e++) {
1140 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1141 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1142
1143 /* Take the previous sampling period, lower than the overflow
1144 * period.
1145 */
1146 if (prev_sample_period < overflow_period &&
1147 next_sample_period > overflow_period)
1148 period_exponent = e + 1;
1149 }
1150
1151 if (period_exponent == 0) {
 1152          DBG("WARNING: unable to find a sampling exponent\n");
1153 return false;
1154 }
1155
1156 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1157 prev_sample_period / 1000000ul);
1158
1159 if (!open_i915_perf_oa_stream(brw,
1160 query->oa_metrics_set_id,
1161 query->oa_format,
1162 period_exponent,
1163 screen->fd, /* drm fd */
1164 brw->hw_ctx))
1165 return false;
1166 } else {
1167 assert(brw->perfquery.current_oa_metrics_set_id ==
1168 query->oa_metrics_set_id &&
1169 brw->perfquery.current_oa_format ==
1170 query->oa_format);
1171 }
1172
1173 if (!inc_n_oa_users(brw)) {
1174 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1175 return false;
1176 }
1177
1178 if (obj->oa.bo) {
1179 brw_bo_unreference(obj->oa.bo);
1180 obj->oa.bo = NULL;
1181 }
1182
1183 obj->oa.bo =
1184 brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
1185 MI_RPC_BO_SIZE, 64);
1186 #ifdef DEBUG
1187 /* Pre-filling the BO helps debug whether writes landed. */
1188 void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
1189 memset(map, 0x80, MI_RPC_BO_SIZE);
1190 brw_bo_unmap(obj->oa.bo);
1191 #endif
1192
1193 obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
1194 brw->perfquery.next_query_start_report_id += 2;
1195
1196 /* Take a starting OA counter snapshot. */
1197 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
1198 obj->oa.begin_report_id);
1199 ++brw->perfquery.n_active_oa_queries;
1200
1201 /* No already-buffered samples can possibly be associated with this query
1202 * so create a marker within the list of sample buffers enabling us to
1203 * easily ignore earlier samples when processing this query after
1204 * completion.
1205 */
1206 assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
1207 obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
1208
1209 struct brw_oa_sample_buf *buf =
1210 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
1211
1212 /* This reference will ensure that future/following sample
1213 * buffers (that may relate to this query) can't be freed until
1214 * this drops to zero.
1215 */
1216 buf->refcount++;
1217
1218 memset(obj->oa.accumulator, 0, sizeof(obj->oa.accumulator));
1219 obj->oa.results_accumulated = false;
1220
1221 add_to_unaccumulated_query_list(brw, obj);
1222 break;
1223
1224 case PIPELINE_STATS:
1225 if (obj->pipeline_stats.bo) {
1226 brw_bo_unreference(obj->pipeline_stats.bo);
1227 obj->pipeline_stats.bo = NULL;
1228 }
1229
1230 obj->pipeline_stats.bo =
1231 brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
1232 STATS_BO_SIZE, 64);
1233
1234 /* Take starting snapshots. */
1235 snapshot_statistics_registers(brw, obj, 0);
1236
1237 ++brw->perfquery.n_active_pipeline_stats_queries;
1238 break;
1239 }
1240
1241 if (INTEL_DEBUG & DEBUG_PERFMON)
1242 dump_perf_queries(brw);
1243
1244 return true;
1245 }
1246
1247 /**
1248 * Driver hook for glEndPerfQueryINTEL().
1249 */
1250 static void
1251 brw_end_perf_query(struct gl_context *ctx,
1252 struct gl_perf_query_object *o)
1253 {
1254 struct brw_context *brw = brw_context(ctx);
1255 struct brw_perf_query_object *obj = brw_perf_query(o);
1256
1257 DBG("End(%d)\n", o->Id);
1258
1259 /* Ensure that the work associated with the queried commands will have
1260 * finished before taking our query end counter readings.
1261 *
1262 * For more details see comment in brw_begin_perf_query for
1263 * corresponding flush.
1264 */
1265 brw_emit_mi_flush(brw);
1266
1267 switch (obj->query->kind) {
1268 case OA_COUNTERS:
1269
1270 /* NB: It's possible that the query will have already been marked
1271 * as 'accumulated' if an error was seen while reading samples
 1272       * from perf. In this case we mustn't try to emit a closing
1273 * MI_RPC command in case the OA unit has already been disabled
1274 */
1275 if (!obj->oa.results_accumulated) {
1276 /* Take an ending OA counter snapshot. */
1277 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
1278 MI_RPC_BO_END_OFFSET_BYTES,
1279 obj->oa.begin_report_id + 1);
1280 }
1281
1282 /* We flush the batchbuffer here to minimize the chances that MI_RPC
1283 * delimiting commands end up in different batchbuffers. If that's the
1284 * case, the measurement will include the time it takes for the kernel
1285 * scheduler to load a new request into the hardware. This is manifested
1286 * in tools like frameretrace by spikes in the "GPU Core Clocks"
1287 * counter.
1288 */
1289 intel_batchbuffer_flush(brw);
1290 --brw->perfquery.n_active_oa_queries;
1291
1292 /* NB: even though the query has now ended, it can't be accumulated
1293 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1294 * to query->oa.bo
1295 */
1296 break;
1297
1298 case PIPELINE_STATS:
1299 snapshot_statistics_registers(brw, obj,
1300 STATS_BO_END_OFFSET_BYTES);
1301 --brw->perfquery.n_active_pipeline_stats_queries;
1302 break;
1303 }
1304 }
1305
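/**
 * Block until the query's results are available: flush the batchbuffer
 * if it still references the results BO, wait for the GPU to finish
 * writing it and then, for OA queries, keep reading i915 perf samples
 * until the report with the delimiting end timestamp has been buffered.
 */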
1306 static void
1307 brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
1308 {
1309 struct brw_context *brw = brw_context(ctx);
1310 struct brw_perf_query_object *obj = brw_perf_query(o);
1311 struct brw_bo *bo = NULL;
1312
1313 assert(!o->Ready);
1314
1315 switch (obj->query->kind) {
1316 case OA_COUNTERS:
1317 bo = obj->oa.bo;
1318 break;
1319
1320 case PIPELINE_STATS:
1321 bo = obj->pipeline_stats.bo;
1322 break;
1323 }
1324
1325 if (bo == NULL)
1326 return;
1327
1328 /* If the current batch references our results bo then we need to
1329 * flush first...
1330 */
1331 if (brw_batch_references(&brw->batch, bo))
1332 intel_batchbuffer_flush(brw);
1333
1334 brw_bo_wait_rendering(brw, bo);
1335
1336 /* Due to a race condition between the OA unit signaling report
1337 * availability and the report actually being written into memory,
1338 * we need to wait for all the reports to come in before we can
1339 * read them.
1340 */
1341 if (obj->query->kind == OA_COUNTERS) {
1342 while (!read_oa_samples_for_query(brw, obj))
1343 ;
1344 }
1345 }
1346
1347 static bool
1348 brw_is_perf_query_ready(struct gl_context *ctx,
1349 struct gl_perf_query_object *o)
1350 {
1351 struct brw_context *brw = brw_context(ctx);
1352 struct brw_perf_query_object *obj = brw_perf_query(o);
1353
1354 if (o->Ready)
1355 return true;
1356
1357 switch (obj->query->kind) {
1358 case OA_COUNTERS:
1359 return (obj->oa.results_accumulated ||
1360 (obj->oa.bo &&
1361 !brw_batch_references(&brw->batch, obj->oa.bo) &&
1362 !brw_bo_busy(obj->oa.bo) &&
1363 read_oa_samples_for_query(brw, obj)));
1364 case PIPELINE_STATS:
1365 return (obj->pipeline_stats.bo &&
1366 !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
1367 !brw_bo_busy(obj->pipeline_stats.bo));
1368 }
1369
1370 unreachable("missing ready check for unknown query kind");
1371 return false;
1372 }
1373
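/**
 * Accumulate the OA reports for the query if that hasn't happened yet,
 * then write each counter value into the application's data buffer via
 * the counter's read callback, returning the number of bytes written.
 */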
1374 static int
1375 get_oa_counter_data(struct brw_context *brw,
1376 struct brw_perf_query_object *obj,
1377 size_t data_size,
1378 uint8_t *data)
1379 {
1380 const struct brw_perf_query_info *query = obj->query;
1381 int n_counters = query->n_counters;
1382 int written = 0;
1383
1384 if (!obj->oa.results_accumulated) {
1385 accumulate_oa_reports(brw, obj);
1386 assert(obj->oa.results_accumulated);
1387 }
1388
1389 for (int i = 0; i < n_counters; i++) {
1390 const struct brw_perf_query_counter *counter = &query->counters[i];
1391 uint64_t *out_uint64;
1392 float *out_float;
1393
1394 if (counter->size) {
1395 switch (counter->data_type) {
1396 case GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL:
1397 out_uint64 = (uint64_t *)(data + counter->offset);
1398 *out_uint64 = counter->oa_counter_read_uint64(brw, query,
1399 obj->oa.accumulator);
1400 break;
1401 case GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL:
1402 out_float = (float *)(data + counter->offset);
1403 *out_float = counter->oa_counter_read_float(brw, query,
1404 obj->oa.accumulator);
1405 break;
1406 default:
1407 /* So far we aren't using uint32, double or bool32... */
1408 unreachable("unexpected counter data type");
1409 }
1410 written = counter->offset + counter->size;
1411 }
1412 }
1413
1414 return written;
1415 }
1416
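/**
 * Map the pipeline statistics BO, compute the end - start delta for each
 * counter (applying the numerator/denominator scaling, e.g. the 1/4
 * fixup for PS_INVOCATION_COUNT on Haswell/Gen8) and write the values
 * into the application's data buffer, returning the number of bytes
 * written.
 */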
1417 static int
1418 get_pipeline_stats_data(struct brw_context *brw,
1419 struct brw_perf_query_object *obj,
1420 size_t data_size,
1421 uint8_t *data)
1422
1423 {
1424 const struct brw_perf_query_info *query = obj->query;
1425 int n_counters = obj->query->n_counters;
1426 uint8_t *p = data;
1427
1428 uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
1429 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
1430
1431 for (int i = 0; i < n_counters; i++) {
1432 const struct brw_perf_query_counter *counter = &query->counters[i];
1433 uint64_t value = end[i] - start[i];
1434
1435 if (counter->pipeline_stat.numerator !=
1436 counter->pipeline_stat.denominator) {
1437 value *= counter->pipeline_stat.numerator;
1438 value /= counter->pipeline_stat.denominator;
1439 }
1440
1441 *((uint64_t *)p) = value;
1442 p += 8;
1443 }
1444
1445 brw_bo_unmap(obj->pipeline_stats.bo);
1446
1447 return p - data;
1448 }
1449
1450 /**
1451 * Driver hook for glGetPerfQueryDataINTEL().
1452 */
1453 static void
1454 brw_get_perf_query_data(struct gl_context *ctx,
1455 struct gl_perf_query_object *o,
1456 GLsizei data_size,
1457 GLuint *data,
1458 GLuint *bytes_written)
1459 {
1460 struct brw_context *brw = brw_context(ctx);
1461 struct brw_perf_query_object *obj = brw_perf_query(o);
1462 int written = 0;
1463
1464 assert(brw_is_perf_query_ready(ctx, o));
1465
1466 DBG("GetData(%d)\n", o->Id);
1467
1468 if (INTEL_DEBUG & DEBUG_PERFMON)
1469 dump_perf_queries(brw);
1470
1471 /* We expect that the frontend only calls this hook when it knows
1472 * that results are available.
1473 */
1474 assert(o->Ready);
1475
1476 switch (obj->query->kind) {
1477 case OA_COUNTERS:
1478 written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
1479 break;
1480
1481 case PIPELINE_STATS:
1482 written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
1483 break;
1484 }
1485
1486 if (bytes_written)
1487 *bytes_written = written;
1488 }
1489
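/**
 * Driver hook for glCreatePerfQueryINTEL().
 */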
1490 static struct gl_perf_query_object *
1491 brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
1492 {
1493 struct brw_context *brw = brw_context(ctx);
1494 const struct brw_perf_query_info *query =
1495 &brw->perfquery.queries[query_index];
1496 struct brw_perf_query_object *obj =
1497 calloc(1, sizeof(struct brw_perf_query_object));
1498
1499 if (!obj)
1500 return NULL;
1501
1502 obj->query = query;
1503
1504 brw->perfquery.n_query_instances++;
1505
1506 return &obj->base;
1507 }
1508
1509 /**
1510 * Driver hook for glDeletePerfQueryINTEL().
1511 */
1512 static void
1513 brw_delete_perf_query(struct gl_context *ctx,
1514 struct gl_perf_query_object *o)
1515 {
1516 struct brw_context *brw = brw_context(ctx);
1517 struct brw_perf_query_object *obj = brw_perf_query(o);
1518
1519 /* We can assume that the frontend waits for a query to complete
1520 * before ever calling into here, so we don't have to worry about
1521 * deleting an in-flight query object.
1522 */
1523 assert(!o->Active);
1524 assert(!o->Used || o->Ready);
1525
1526 DBG("Delete(%d)\n", o->Id);
1527
1528 switch (obj->query->kind) {
1529 case OA_COUNTERS:
1530 if (obj->oa.bo) {
1531 if (!obj->oa.results_accumulated) {
1532 drop_from_unaccumulated_query_list(brw, obj);
1533 dec_n_oa_users(brw);
1534 }
1535
1536 brw_bo_unreference(obj->oa.bo);
1537 obj->oa.bo = NULL;
1538 }
1539
1540 obj->oa.results_accumulated = false;
1541 break;
1542
1543 case PIPELINE_STATS:
1544 if (obj->pipeline_stats.bo) {
1545 brw_bo_unreference(obj->pipeline_stats.bo);
1546 obj->pipeline_stats.bo = NULL;
1547 }
1548 break;
1549 }
1550
1551 free(obj);
1552
 1553    /* The deletion of the last query object is a good indication that the
 1554     * INTEL_performance_query extension is no longer in use: free our cache
 1555     * of sample buffers and close any current i915-perf stream.
1556 */
1557 if (--brw->perfquery.n_query_instances == 0) {
1558 free_sample_bufs(brw);
1559 close_perf(brw);
1560 }
1561 }
1562
1563 /******************************************************************************/
1564
1565 static struct brw_perf_query_info *
1566 append_query_info(struct brw_context *brw)
1567 {
1568 brw->perfquery.queries =
1569 reralloc(brw, brw->perfquery.queries,
1570 struct brw_perf_query_info, ++brw->perfquery.n_queries);
1571
1572 return &brw->perfquery.queries[brw->perfquery.n_queries - 1];
1573 }
1574
1575 static void
1576 add_stat_reg(struct brw_perf_query_info *query,
1577 uint32_t reg,
1578 uint32_t numerator,
1579 uint32_t denominator,
1580 const char *name,
1581 const char *description)
1582 {
1583 struct brw_perf_query_counter *counter;
1584
1585 assert(query->n_counters < MAX_STAT_COUNTERS);
1586
1587 counter = &query->counters[query->n_counters];
1588 counter->name = name;
1589 counter->desc = description;
1590 counter->type = GL_PERFQUERY_COUNTER_RAW_INTEL;
1591 counter->data_type = GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
1592 counter->size = sizeof(uint64_t);
1593 counter->offset = sizeof(uint64_t) * query->n_counters;
1594 counter->pipeline_stat.reg = reg;
1595 counter->pipeline_stat.numerator = numerator;
1596 counter->pipeline_stat.denominator = denominator;
1597
1598 query->n_counters++;
1599 }
1600
1601 static void
1602 add_basic_stat_reg(struct brw_perf_query_info *query,
1603 uint32_t reg, const char *name)
1604 {
1605 add_stat_reg(query, reg, 1, 1, name, name);
1606 }
1607
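/**
 * Register the PIPELINE_STATS query, enumerating the pipeline statistic
 * registers (IA/VS/HS/DS/GS/CL/PS/CS counts and the SO_* stream-out
 * registers) that get snapshotted with MI_STORE_REGISTER_MEM at query
 * Begin/End time.
 */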
1608 static void
1609 init_pipeline_statistic_query_registers(struct brw_context *brw)
1610 {
1611 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1612 struct brw_perf_query_info *query = append_query_info(brw);
1613
1614 query->kind = PIPELINE_STATS;
1615 query->name = "Pipeline Statistics Registers";
1616 query->n_counters = 0;
1617 query->counters =
1618 rzalloc_array(brw, struct brw_perf_query_counter, MAX_STAT_COUNTERS);
1619
1620 add_basic_stat_reg(query, IA_VERTICES_COUNT,
1621 "N vertices submitted");
1622 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1623 "N primitives submitted");
1624 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1625 "N vertex shader invocations");
1626
1627 if (devinfo->gen == 6) {
1628 add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
1629 "SO_PRIM_STORAGE_NEEDED",
1630 "N geometry shader stream-out primitives (total)");
1631 add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
1632 "SO_NUM_PRIMS_WRITTEN",
1633 "N geometry shader stream-out primitives (written)");
1634 } else {
1635 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
1636 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
1637 "N stream-out (stream 0) primitives (total)");
1638 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
1639 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
1640 "N stream-out (stream 1) primitives (total)");
1641 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
1642 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
1643 "N stream-out (stream 2) primitives (total)");
1644 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
1645 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
1646 "N stream-out (stream 3) primitives (total)");
1647 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
1648 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
1649 "N stream-out (stream 0) primitives (written)");
1650 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
1651 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
1652 "N stream-out (stream 1) primitives (written)");
1653 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
1654 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
1655 "N stream-out (stream 2) primitives (written)");
1656 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
1657 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
1658 "N stream-out (stream 3) primitives (written)");
1659 }
1660
1661 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1662 "N TCS shader invocations");
1663 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1664 "N TES shader invocations");
1665
1666 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1667 "N geometry shader invocations");
1668 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1669 "N geometry shader primitives emitted");
1670
1671 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1672 "N primitives entering clipping");
1673 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1674 "N primitives leaving clipping");
1675
1676 if (devinfo->is_haswell || devinfo->gen == 8)
1677 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1678 "N fragment shader invocations",
1679 "N fragment shader invocations");
1680 else
1681 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1682 "N fragment shader invocations");
1683
1684 add_basic_stat_reg(query, PS_DEPTH_COUNT, "N z-pass fragments");
1685
1686 if (devinfo->gen >= 7)
1687 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1688 "N compute shader invocations");
1689
1690 query->data_size = sizeof(uint64_t) * query->n_counters;
1691 }
1692
1693 static bool
1694 read_file_uint64(const char *file, uint64_t *val)
1695 {
1696 char buf[32];
1697 int fd, n;
1698
1699 fd = open(file, 0);
1700 if (fd < 0)
1701 return false;
1702 n = read(fd, buf, sizeof (buf) - 1);
1703 close(fd);
1704 if (n < 0)
1705 return false;
1706
1707 buf[n] = '\0';
1708 *val = strtoull(buf, NULL, 0);
1709
1710 return true;
1711 }
1712
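/**
 * Walk <sysfs_dev_dir>/metrics/ and, for each advertised metric set whose
 * name (GUID) is known to Mesa via perfquery.oa_metrics_table, read the
 * kernel's id for that set and register a corresponding OA query.
 */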
1713 static void
1714 enumerate_sysfs_metrics(struct brw_context *brw, const char *sysfs_dev_dir)
1715 {
1716 char buf[256];
1717 DIR *metricsdir = NULL;
1718 struct dirent *metric_entry;
1719 int len;
1720
1721 len = snprintf(buf, sizeof(buf), "%s/metrics", sysfs_dev_dir);
1722 if (len < 0 || len >= sizeof(buf)) {
1723 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
1724 return;
1725 }
1726
1727 metricsdir = opendir(buf);
1728 if (!metricsdir) {
1729 DBG("Failed to open %s: %m\n", buf);
1730 return;
1731 }
1732
1733 while ((metric_entry = readdir(metricsdir))) {
1734 struct hash_entry *entry;
1735
1736 if ((metric_entry->d_type != DT_DIR &&
1737 metric_entry->d_type != DT_LNK) ||
1738 metric_entry->d_name[0] == '.')
1739 continue;
1740
1741 DBG("metric set: %s\n", metric_entry->d_name);
1742 entry = _mesa_hash_table_search(brw->perfquery.oa_metrics_table,
1743 metric_entry->d_name);
1744 if (entry) {
1745 struct brw_perf_query_info *query;
1746 uint64_t id;
1747
1748 len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
1749 sysfs_dev_dir, metric_entry->d_name);
1750 if (len < 0 || len >= sizeof(buf)) {
1751 DBG("Failed to concatenate path to sysfs metric id file\n");
1752 continue;
1753 }
1754
1755 if (!read_file_uint64(buf, &id)) {
1756 DBG("Failed to read metric set id from %s: %m", buf);
1757 continue;
1758 }
1759
1760 query = append_query_info(brw);
1761 *query = *(struct brw_perf_query_info *)entry->data;
1762 query->oa_metrics_set_id = id;
1763
1764 DBG("metric set known by mesa: id = %" PRIu64"\n",
1765 query->oa_metrics_set_id);
1766 } else
1767 DBG("metric set not known by mesa (skipping)\n");
1768 }
1769
1770 closedir(metricsdir);
1771 }
1772
1773 static bool
1774 read_sysfs_drm_device_file_uint64(struct brw_context *brw,
1775 const char *sysfs_dev_dir,
1776 const char *file,
1777 uint64_t *value)
1778 {
1779 char buf[512];
1780 int len;
1781
1782 len = snprintf(buf, sizeof(buf), "%s/%s", sysfs_dev_dir, file);
1783 if (len < 0 || len >= sizeof(buf)) {
1784 DBG("Failed to concatenate sys filename to read u64 from\n");
1785 return false;
1786 }
1787
1788 return read_file_uint64(buf, value);
1789 }
1790
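/* Gather the system/topology variables (EU count, slice and subslice masks,
 * GT min/max frequency and timestamp frequency) referenced by the counter
 * descriptions (e.g. the $SubsliceMask checks noted below).  On Haswell
 * these are hard-coded per GT; on Gen8/9 they are queried from the kernel
 * via I915_PARAM_EU_TOTAL, I915_PARAM_SLICE_MASK and
 * I915_PARAM_SUBSLICE_MASK.
 */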
1791 static bool
1792 init_oa_sys_vars(struct brw_context *brw, const char *sysfs_dev_dir)
1793 {
1794 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1795 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
1796
1797 if (!read_sysfs_drm_device_file_uint64(brw, sysfs_dev_dir,
1798 "gt_min_freq_mhz",
1799 &min_freq_mhz))
1800 return false;
1801
1802 if (!read_sysfs_drm_device_file_uint64(brw, sysfs_dev_dir,
1803 "gt_max_freq_mhz",
1804 &max_freq_mhz))
1805 return false;
1806
1807 brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
1808 brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;
1809 brw->perfquery.sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
1810
1811 if (devinfo->is_haswell) {
1812 if (devinfo->gt == 1) {
1813 brw->perfquery.sys_vars.n_eus = 10;
1814 brw->perfquery.sys_vars.n_eu_slices = 1;
1815 brw->perfquery.sys_vars.n_eu_sub_slices = 1;
1816 brw->perfquery.sys_vars.slice_mask = 0x1;
1817 brw->perfquery.sys_vars.subslice_mask = 0x1;
1818 } else if (devinfo->gt == 2) {
1819 brw->perfquery.sys_vars.n_eus = 20;
1820 brw->perfquery.sys_vars.n_eu_slices = 1;
1821 brw->perfquery.sys_vars.n_eu_sub_slices = 2;
1822 brw->perfquery.sys_vars.slice_mask = 0x1;
1823 brw->perfquery.sys_vars.subslice_mask = 0x3;
1824 } else if (devinfo->gt == 3) {
1825 brw->perfquery.sys_vars.n_eus = 40;
1826 brw->perfquery.sys_vars.n_eu_slices = 2;
1827 brw->perfquery.sys_vars.n_eu_sub_slices = 2;
1828 brw->perfquery.sys_vars.slice_mask = 0x3;
1829 brw->perfquery.sys_vars.subslice_mask = 0xf;
1830 } else
1831 unreachable("not reached");
1832 } else {
1833 __DRIscreen *screen = brw->screen->driScrnPriv;
1834 drm_i915_getparam_t gp;
1835 int ret;
1836 int n_eus = 0;
1837 int slice_mask = 0;
1838 int ss_mask = 0;
1839 int s_max = devinfo->num_slices; /* maximum number of slices */
1840 int ss_max = 0; /* maximum number of subslices per slice */
1841 uint64_t subslice_mask = 0;
1842 int s;
1843
1844 if (devinfo->gen == 8) {
1845 if (devinfo->gt == 1) {
1846 ss_max = 2;
1847 } else {
1848 ss_max = 3;
1849 }
1850 } else if (devinfo->gen == 9) {
1851 /* XXX: beware that the kernel (as of writing) actually works as if
1852 * ss_max == 4 since the HW register that reports the global subslice
1853 * mask has 4 bits while in practice the limit is 3. It's also
1854 * important that we initialize $SubsliceMask with 3 bits per slice
1855 * since that's what the counter availability expressions in XML
1856 * expect.
1857 */
1858 ss_max = 3;
1859 } else
1860 return false;
1861
1862 gp.param = I915_PARAM_EU_TOTAL;
1863 gp.value = &n_eus;
1864 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1865 if (ret)
1866 return false;
1867
1868 gp.param = I915_PARAM_SLICE_MASK;
1869 gp.value = &slice_mask;
1870 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1871 if (ret)
1872 return false;
1873
1874 gp.param = I915_PARAM_SUBSLICE_MASK;
1875 gp.value = &ss_mask;
1876 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1877 if (ret)
1878 return false;
1879
1880 brw->perfquery.sys_vars.n_eus = n_eus;
1881 brw->perfquery.sys_vars.n_eu_slices = __builtin_popcount(slice_mask);
1882 brw->perfquery.sys_vars.slice_mask = slice_mask;
1883
1884 /* Note: the _SUBSLICE_MASK param only reports a global subslice mask
1885 * which applies to all slices.
1886 *
1887 * Note: some of the metrics we have (as described in XML) are
1888       * Note: some of the metrics we have (as described in XML) are
1889       * conditional on a $SubsliceMask variable which is also expected to
1890       * reflect the slice mask by packing together the subslice masks for
1891       * each slice into one value.
1891 */
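      /* Illustrative example (hypothetical values): with slice_mask = 0x3,
       * ss_mask = 0x7 and ss_max = 3, the loop below packs
       * subslice_mask = 0x7 | (0x7 << 3) = 0x3f.
       */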
1892 for (s = 0; s < s_max; s++) {
1893 if (slice_mask & (1<<s)) {
1894 subslice_mask |= ss_mask << (ss_max * s);
1895 }
1896 }
1897
1898 brw->perfquery.sys_vars.subslice_mask = subslice_mask;
1899 brw->perfquery.sys_vars.n_eu_sub_slices =
1900 __builtin_popcount(subslice_mask);
1901 }
1902
1903 brw->perfquery.sys_vars.eu_threads_count =
1904 brw->perfquery.sys_vars.n_eus * devinfo->num_thread_per_eu;
1905
1906 return true;
1907 }
1908
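/* Find the sysfs directory for the DRM device backing the screen's fd by
 * following /sys/dev/char/<maj>:<min>/device/drm/ to its card* entry.  The
 * path written to path_buf looks something like (illustrative only):
 *
 *   /sys/dev/char/226:0/device/drm/card0
 */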
1909 static bool
1910 get_sysfs_dev_dir(struct brw_context *brw,
1911 char *path_buf,
1912 int path_buf_len)
1913 {
1914 __DRIscreen *screen = brw->screen->driScrnPriv;
1915 struct stat sb;
1916 int min, maj;
1917 DIR *drmdir;
1918 struct dirent *drm_entry;
1919 int len;
1920
1921 assert(path_buf);
1922 assert(path_buf_len);
1923 path_buf[0] = '\0';
1924
1925 if (fstat(screen->fd, &sb)) {
1926 DBG("Failed to stat DRM fd\n");
1927 return false;
1928 }
1929
1930 maj = major(sb.st_rdev);
1931 min = minor(sb.st_rdev);
1932
1933 if (!S_ISCHR(sb.st_mode)) {
1934 DBG("DRM fd is not a character device as expected\n");
1935 return false;
1936 }
1937
1938 len = snprintf(path_buf, path_buf_len,
1939 "/sys/dev/char/%d:%d/device/drm", maj, min);
1940 if (len < 0 || len >= path_buf_len) {
1941 DBG("Failed to concatenate sysfs path to drm device\n");
1942 return false;
1943 }
1944
1945 drmdir = opendir(path_buf);
1946 if (!drmdir) {
1947 DBG("Failed to open %s: %m\n", path_buf);
1948 return false;
1949 }
1950
1951 while ((drm_entry = readdir(drmdir))) {
1952 if ((drm_entry->d_type == DT_DIR ||
1953 drm_entry->d_type == DT_LNK) &&
1954 strncmp(drm_entry->d_name, "card", 4) == 0)
1955 {
1956 len = snprintf(path_buf, path_buf_len,
1957 "/sys/dev/char/%d:%d/device/drm/%s",
1958 maj, min, drm_entry->d_name);
1959 closedir(drmdir);
1960 if (len < 0 || len >= path_buf_len)
1961 return false;
1962 else
1963 return true;
1964 }
1965 }
1966
1967 closedir(drmdir);
1968
1969 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
1970 maj, min);
1971
1972 return false;
1973 }
1974
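/* Each supported generation has its own brw_oa_*.h query registration
 * function; pick the one matching the device, or return NULL if OA metrics
 * aren't supported on this hardware.
 */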
1975 typedef void (*perf_register_oa_queries_t)(struct brw_context *);
1976
1977 static perf_register_oa_queries_t
1978 get_register_queries_function(const struct gen_device_info *devinfo)
1979 {
1980 if (devinfo->is_haswell)
1981 return brw_oa_register_queries_hsw;
1982 if (devinfo->is_cherryview)
1983 return brw_oa_register_queries_chv;
1984 if (devinfo->is_broadwell)
1985 return brw_oa_register_queries_bdw;
1986 if (devinfo->is_broxton)
1987 return brw_oa_register_queries_bxt;
1988 if (devinfo->is_skylake) {
1989 if (devinfo->gt == 2)
1990 return brw_oa_register_queries_sklgt2;
1991 if (devinfo->gt == 3)
1992 return brw_oa_register_queries_sklgt3;
1993 if (devinfo->gt == 4)
1994 return brw_oa_register_queries_sklgt4;
1995 }
1996 return NULL;
1997 }
1998
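/* Driver hook for ctx->Driver.InitPerfQueryInfo: registers the pipeline
 * statistics queries unconditionally, and additionally registers OA counter
 * queries when the kernel advertises the i915 perf interface and the
 * perf_stream_paranoid setting (or running as root) permits using it.
 * Returns the number of queries available.
 */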
1999 static unsigned
2000 brw_init_perf_query_info(struct gl_context *ctx)
2001 {
2002 struct brw_context *brw = brw_context(ctx);
2003 const struct gen_device_info *devinfo = &brw->screen->devinfo;
2004 bool i915_perf_oa_available = false;
2005 struct stat sb;
2006 char sysfs_dev_dir[128];
2007 perf_register_oa_queries_t oa_register;
2008
2009 if (brw->perfquery.n_queries)
2010 return brw->perfquery.n_queries;
2011
2012 init_pipeline_statistic_query_registers(brw);
2013
2014 oa_register = get_register_queries_function(devinfo);
2015
2016 /* The existence of this sysctl parameter implies the kernel supports
2017 * the i915 perf interface.
2018 */
2019 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
2020
2021 /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
2022 * metrics unless running as root.
2023 */
2024 if (devinfo->is_haswell)
2025 i915_perf_oa_available = true;
2026 else {
2027 uint64_t paranoid = 1;
2028
2029 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
2030
2031 if (paranoid == 0 || geteuid() == 0)
2032 i915_perf_oa_available = true;
2033 }
2034 }
2035
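   /* Note: on Gen8+, unless running as root, the OA queries below will only
    * be advertised if the administrator has relaxed the paranoid setting,
    * e.g. (illustrative command):
    *
    *   sysctl dev.i915.perf_stream_paranoid=0
    */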
2036 if (i915_perf_oa_available &&
2037 oa_register &&
2038 get_sysfs_dev_dir(brw, sysfs_dev_dir, sizeof(sysfs_dev_dir)) &&
2039 init_oa_sys_vars(brw, sysfs_dev_dir))
2040 {
2041 brw->perfquery.oa_metrics_table =
2042 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2043 _mesa_key_string_equal);
2044
2045 /* Index all the metric sets mesa knows about before looking to see what
2046 * the kernel is advertising.
2047 */
2048 oa_register(brw);
2049
2050 enumerate_sysfs_metrics(brw, sysfs_dev_dir);
2051 }
2052
2053 brw->perfquery.unaccumulated =
2054 ralloc_array(brw, struct brw_perf_query_object *, 2);
2055 brw->perfquery.unaccumulated_elements = 0;
2056 brw->perfquery.unaccumulated_array_size = 2;
2057
2058 exec_list_make_empty(&brw->perfquery.sample_buffers);
2059 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
2060
2061    /* It's convenient to guarantee that this linked list of sample
2062     * buffers is never empty, so we add an empty buffer as its head;
2063     * that way, when we begin an OA query we can always take a
2064     * reference on a buffer in this list.
2065     */
2066 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
2067 exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);
2068
2069 brw->perfquery.oa_stream_fd = -1;
2070
2071 brw->perfquery.next_query_start_report_id = 1000;
2072
2073 return brw->perfquery.n_queries;
2074 }
2075
2076 void
2077 brw_init_performance_queries(struct brw_context *brw)
2078 {
2079 struct gl_context *ctx = &brw->ctx;
2080
2081 ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
2082 ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
2083 ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
2084 ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
2085 ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
2086 ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
2087 ctx->Driver.EndPerfQuery = brw_end_perf_query;
2088 ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
2089 ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
2090 ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
2091 }