i965: add a debug option to disable oa config loading
[mesa.git] src/mesa/drivers/dri/i965/brw_performance_query.c
1 /*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file brw_performance_query.c
26 *
27 * Implementation of the GL_INTEL_performance_query extension.
28 *
29 * Currently there are two possible counter sources exposed here:
30 *
31 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
32 * that we can snapshot at the beginning and end of a query.
33 *
34 * On Gen7.5+ we have Observability Architecture counters which are
35  * covered in a separate document from the rest of the PRMs, available at:
36 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
37 * => 2013 Intel Core Processor Family => Observability Performance Counters
38 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
39 * though notably we currently only support OA counters for Haswell+)
40 */
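
/*
 * Editor's note: a rough sketch of how an application is expected to drive
 * this extension, using the GL_INTEL_performance_query entry points (names
 * as defined by the extension spec; error handling and query enumeration by
 * name omitted). This is illustrative only and not part of the driver:
 *
 *    GLuint query_id, handle, bytes_written;
 *    uint8_t data[4096];
 *
 *    glGetFirstPerfQueryIdINTEL(&query_id);
 *    glCreatePerfQueryINTEL(query_id, &handle);
 *
 *    glBeginPerfQueryINTEL(handle);
 *    ... issue the GL commands to be measured ...
 *    glEndPerfQueryINTEL(handle);
 *
 *    glGetPerfQueryDataINTEL(handle, GL_PERFQUERY_WAIT_INTEL,
 *                            sizeof(data), data, &bytes_written);
 *    glDeletePerfQueryINTEL(handle);
 *
 * The driver hooks below (brw_begin_perf_query(), brw_end_perf_query(),
 * brw_get_perf_query_data(), ...) implement the backend of those calls.
 */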
41
42 #include <limits.h>
43 #include <dirent.h>
44
45 /* put before sys/types.h to silence glibc warnings */
46 #ifdef MAJOR_IN_MKDEV
47 #include <sys/mkdev.h>
48 #endif
49 #ifdef MAJOR_IN_SYSMACROS
50 #include <sys/sysmacros.h>
51 #endif
52 #include <sys/types.h>
53 #include <sys/stat.h>
54 #include <fcntl.h>
55 #include <sys/mman.h>
56 #include <sys/ioctl.h>
57
58 #include <xf86drm.h>
59 #include <i915_drm.h>
60
61 #include "main/hash.h"
62 #include "main/macros.h"
63 #include "main/mtypes.h"
64 #include "main/performance_query.h"
65
66 #include "util/bitset.h"
67 #include "util/ralloc.h"
68 #include "util/hash_table.h"
69 #include "util/list.h"
70
71 #include "brw_context.h"
72 #include "brw_defines.h"
73 #include "brw_performance_query.h"
74 #include "brw_oa_hsw.h"
75 #include "brw_oa_bdw.h"
76 #include "brw_oa_chv.h"
77 #include "brw_oa_sklgt2.h"
78 #include "brw_oa_sklgt3.h"
79 #include "brw_oa_sklgt4.h"
80 #include "brw_oa_bxt.h"
81 #include "brw_oa_kblgt2.h"
82 #include "brw_oa_kblgt3.h"
83 #include "brw_oa_glk.h"
84 #include "intel_batchbuffer.h"
85
86 #define FILE_DEBUG_FLAG DEBUG_PERFMON
87
88 /*
89 * The largest OA formats we can use include:
90 * For Haswell:
91 * 1 timestamp, 45 A counters, 8 B counters and 8 C counters.
92 * For Gen8+
93 * 1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
94 */
95 #define MAX_OA_REPORT_COUNTERS 62
96
97 #define OAREPORT_REASON_MASK 0x3f
98 #define OAREPORT_REASON_SHIFT 19
99 #define OAREPORT_REASON_TIMER (1<<0)
100 #define OAREPORT_REASON_TRIGGER1 (1<<1)
101 #define OAREPORT_REASON_TRIGGER2 (1<<2)
102 #define OAREPORT_REASON_CTX_SWITCH (1<<3)
103 #define OAREPORT_REASON_GO_TRANSITION (1<<4)
104
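/*
 * Editor's note: an illustrative helper (not used by the driver code in this
 * file) showing how the reason bits above can be pulled out of the first
 * dword of a Gen8+ OA report header:
 */
static inline uint32_t
example_oa_report_reason(const uint32_t *report)
{
   /* e.g. (reason & OAREPORT_REASON_CTX_SWITCH) tests for a context switch */
   return (report[0] >> OAREPORT_REASON_SHIFT) & OAREPORT_REASON_MASK;
}
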
105 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
106 256) /* OA counter report */
107
108 /**
109 * Periodic OA samples are read() into these buffer structures via the
110 * i915 perf kernel interface and appended to the
111 * brw->perfquery.sample_buffers linked list. When we process the
112 * results of an OA metrics query we need to consider all the periodic
113 * samples between the Begin and End MI_REPORT_PERF_COUNT command
114 * markers.
115 *
116 * 'Periodic' is a simplification as there are other automatic reports
117  * written by the hardware that are also buffered here.
118 *
119 * Considering three queries, A, B and C:
120 *
121 * Time ---->
122 * ________________A_________________
123 * | |
124 * | ________B_________ _____C___________
125 * | | | | | |
126 *
127 * And an illustration of sample buffers read over this time frame:
128 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
129 *
130 * These nodes may hold samples for query A:
131 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
132 *
133 * These nodes may hold samples for query B:
134 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
135 *
136 * These nodes may hold samples for query C:
137 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
138 *
139 * The illustration assumes we have an even distribution of periodic
140 * samples so all nodes have the same size plotted against time:
141 *
142 * Note, to simplify code, the list is never empty.
143 *
144 * With overlapping queries we can see that periodic OA reports may
145  * relate to multiple queries and care needs to be taken to keep
146 * track of sample buffers until there are no queries that might
147 * depend on their contents.
148 *
149 * We use a node ref counting system where a reference ensures that a
150 * node and all following nodes can't be freed/recycled until the
151 * reference drops to zero.
152 *
153 * E.g. with a ref of one here:
154 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
155 *
156 * These nodes could be freed or recycled ("reaped"):
157 * [ 0 ][ 0 ]
158 *
159 * These must be preserved until the leading ref drops to zero:
160 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
161 *
162 * When a query starts we take a reference on the current tail of
163 * the list, knowing that no already-buffered samples can possibly
164 * relate to the newly-started query. A pointer to this node is
165 * also saved in the query object's ->oa.samples_head.
166 *
167 * E.g. starting query A while there are two nodes in .sample_buffers:
168 * ________________A________
169 * |
170 *
171 * [ 0 ][ 1 ]
172 * ^_______ Add a reference and store pointer to node in
173 * A->oa.samples_head
174 *
175 * Moving forward to when the B query starts with no new buffer nodes:
176 * (for reference, i915 perf reads() are only done when queries finish)
177 * ________________A_______
178 * | ________B___
179 * | |
180 *
181 * [ 0 ][ 2 ]
182 * ^_______ Add a reference and store pointer to
183 * node in B->oa.samples_head
184 *
185  * Once a query is finished, i.e. once the OA query has become 'Ready',
186  * the End OA report has landed and we have processed all the
187  * intermediate periodic samples, we drop the
188  * ->oa.samples_head reference we took at the start.
189 *
190 * So when the B query has finished we have:
191 * ________________A________
192 * | ______B___________
193 * | | |
194 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
195 * ^_______ Drop B->oa.samples_head reference
196 *
197 * We still can't free these due to the A->oa.samples_head ref:
198 * [ 1 ][ 0 ][ 0 ][ 0 ]
199 *
200 * When the A query finishes: (note there's a new ref for C's samples_head)
201 * ________________A_________________
202 * | |
203 * | _____C_________
204 * | | |
205 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
206 * ^_______ Drop A->oa.samples_head reference
207 *
208 * And we can now reap these nodes up to the C->oa.samples_head:
209 * [ X ][ X ][ X ][ X ]
210 * keeping -> [ 1 ][ 0 ][ 0 ]
211 *
212 * We reap old sample buffers each time we finish processing an OA
213 * query by iterating the sample_buffers list from the head until we
214 * find a referenced node and stop.
215 *
216 * Reaped buffers move to a perfquery.free_sample_buffers list and
217 * when we come to read() we first look to recycle a buffer from the
218 * free_sample_buffers list before allocating a new buffer.
219 */
220 struct brw_oa_sample_buf {
221 struct exec_node link;
222 int refcount;
223 int len;
224 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
225 uint32_t last_timestamp;
226 };
227
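/*
 * Editor's note: a condensed view of the buffer lifecycle described above,
 * expressed in terms of the helpers defined later in this file:
 *
 *    Begin:       buf = tail of brw->perfquery.sample_buffers;
 *                 buf->refcount++;
 *                 obj->oa.samples_head = &buf->link;
 *    read():      append freshly read brw_oa_sample_buf nodes to
 *                 sample_buffers, recycling nodes from free_sample_buffers
 *                 via get_free_sample_buf() when possible;
 *    accumulate:  drop the obj->oa.samples_head reference, then
 *                 reap_old_sample_buffers() moves leading refcount == 0
 *                 nodes (all but the tail) onto free_sample_buffers.
 */
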
228 /**
229 * i965 representation of a performance query object.
230 *
231 * NB: We want to keep this structure relatively lean considering that
232 * applications may expect to allocate enough objects to be able to
233 * query around all draw calls in a frame.
234 */
235 struct brw_perf_query_object
236 {
237 struct gl_perf_query_object base;
238
239 const struct brw_perf_query_info *query;
240
241 /* See query->kind to know which state below is in use... */
242 union {
243 struct {
244
245 /**
246 * BO containing OA counter snapshots at query Begin/End time.
247 */
248 struct brw_bo *bo;
249
250 /**
251          * Address of the mapped @bo
252 */
253 void *map;
254
255 /**
256 * The MI_REPORT_PERF_COUNT command lets us specify a unique
257 * ID that will be reflected in the resulting OA report
258 * that's written by the GPU. This is the ID we're expecting
259          * in the begin report, and the end report should be
260 * @begin_report_id + 1.
261 */
262 int begin_report_id;
263
264 /**
265          * Reference to the tail of the brw->perfquery.sample_buffers
266 * list at the time that the query started (so we only need
267 * to look at nodes after this point when looking for samples
268 * related to this query)
269 *
270 * (See struct brw_oa_sample_buf description for more details)
271 */
272 struct exec_node *samples_head;
273
274 /**
275 * Storage for the final accumulated OA counters.
276 */
277 uint64_t accumulator[MAX_OA_REPORT_COUNTERS];
278
279 /**
280          * false while in the unaccumulated list, and set to
281 * true when the final, end MI_RPC snapshot has been
282 * accumulated.
283 */
284 bool results_accumulated;
285
286 } oa;
287
288 struct {
289 /**
290 * BO containing starting and ending snapshots for the
291 * statistics counters.
292 */
293 struct brw_bo *bo;
294 } pipeline_stats;
295 };
296 };
297
298 /** Downcasting convenience helper. */
299 static inline struct brw_perf_query_object *
300 brw_perf_query(struct gl_perf_query_object *o)
301 {
302 return (struct brw_perf_query_object *) o;
303 }
304
305 #define STATS_BO_SIZE 4096
306 #define STATS_BO_END_OFFSET_BYTES (STATS_BO_SIZE / 2)
307 #define MAX_STAT_COUNTERS (STATS_BO_END_OFFSET_BYTES / 8)
308
309 #define MI_RPC_BO_SIZE 4096
310 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
311
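/*
 * Editor's note: a sketch of the layout implied by the defines above. Both
 * BOs are split in half, with the Begin snapshot written at offset 0 and the
 * End snapshot written at the corresponding *_END_OFFSET_BYTES mark:
 *
 *    stats BO (4096 bytes):  [ begin: up to 256 x uint64 ][ end: up to 256 x uint64 ]
 *    MI_RPC BO (4096 bytes): [ begin: 256 byte OA report ][ end: 256 byte OA report ]
 */
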
312 /******************************************************************************/
313
314 static bool
315 brw_is_perf_query_ready(struct gl_context *ctx,
316 struct gl_perf_query_object *o);
317
318 static void
319 dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
320 {
321 struct gl_context *ctx = brw_void;
322 struct gl_perf_query_object *o = query_void;
323 struct brw_perf_query_object *obj = query_void;
324
325 switch (obj->query->kind) {
326 case OA_COUNTERS:
327 DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
328 id,
329 o->Used ? "Dirty," : "New,",
330 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
331 obj->oa.bo ? "yes," : "no,",
332 brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
333 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
334 break;
335 case PIPELINE_STATS:
336 DBG("%4d: %-6s %-8s BO: %-4s\n",
337 id,
338 o->Used ? "Dirty," : "New,",
339 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
340 obj->pipeline_stats.bo ? "yes" : "no");
341 break;
342 }
343 }
344
345 static void
346 dump_perf_queries(struct brw_context *brw)
347 {
348 struct gl_context *ctx = &brw->ctx;
349 DBG("Queries: (Open queries = %d, OA users = %d)\n",
350 brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
351 _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
352 }
353
354 /******************************************************************************/
355
356 static struct brw_oa_sample_buf *
357 get_free_sample_buf(struct brw_context *brw)
358 {
359 struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
360 struct brw_oa_sample_buf *buf;
361
362 if (node)
363 buf = exec_node_data(struct brw_oa_sample_buf, node, link);
364 else {
365 buf = ralloc_size(brw, sizeof(*buf));
366
367 exec_node_init(&buf->link);
368 buf->refcount = 0;
369 buf->len = 0;
370 }
371
372 return buf;
373 }
374
375 static void
376 reap_old_sample_buffers(struct brw_context *brw)
377 {
378 struct exec_node *tail_node =
379 exec_list_get_tail(&brw->perfquery.sample_buffers);
380 struct brw_oa_sample_buf *tail_buf =
381 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
382
383 /* Remove all old, unreferenced sample buffers walking forward from
384 * the head of the list, except always leave at least one node in
385 * the list so we always have a node to reference when we Begin
386 * a new query.
387 */
388 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
389 &brw->perfquery.sample_buffers)
390 {
391 if (buf->refcount == 0 && buf != tail_buf) {
392 exec_node_remove(&buf->link);
393 exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
394 } else
395 return;
396 }
397 }
398
399 static void
400 free_sample_bufs(struct brw_context *brw)
401 {
402 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
403 &brw->perfquery.free_sample_buffers)
404 ralloc_free(buf);
405
406 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
407 }
408
409 /******************************************************************************/
410
411 /**
412 * Driver hook for glGetPerfQueryInfoINTEL().
413 */
414 static void
415 brw_get_perf_query_info(struct gl_context *ctx,
416 unsigned query_index,
417 const char **name,
418 GLuint *data_size,
419 GLuint *n_counters,
420 GLuint *n_active)
421 {
422 struct brw_context *brw = brw_context(ctx);
423 const struct brw_perf_query_info *query =
424 &brw->perfquery.queries[query_index];
425
426 *name = query->name;
427 *data_size = query->data_size;
428 *n_counters = query->n_counters;
429
430 switch (query->kind) {
431 case OA_COUNTERS:
432 *n_active = brw->perfquery.n_active_oa_queries;
433 break;
434
435 case PIPELINE_STATS:
436 *n_active = brw->perfquery.n_active_pipeline_stats_queries;
437 break;
438 }
439 }
440
441 /**
442 * Driver hook for glGetPerfCounterInfoINTEL().
443 */
444 static void
445 brw_get_perf_counter_info(struct gl_context *ctx,
446 unsigned query_index,
447 unsigned counter_index,
448 const char **name,
449 const char **desc,
450 GLuint *offset,
451 GLuint *data_size,
452 GLuint *type_enum,
453 GLuint *data_type_enum,
454 GLuint64 *raw_max)
455 {
456 struct brw_context *brw = brw_context(ctx);
457 const struct brw_perf_query_info *query =
458 &brw->perfquery.queries[query_index];
459 const struct brw_perf_query_counter *counter =
460 &query->counters[counter_index];
461
462 *name = counter->name;
463 *desc = counter->desc;
464 *offset = counter->offset;
465 *data_size = counter->size;
466 *type_enum = counter->type;
467 *data_type_enum = counter->data_type;
468 *raw_max = counter->raw_max;
469 }
470
471 /******************************************************************************/
472
473 /**
474 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
475 * pipeline statistics for the performance query object.
476 */
477 static void
478 snapshot_statistics_registers(struct brw_context *brw,
479 struct brw_perf_query_object *obj,
480 uint32_t offset_in_bytes)
481 {
482 const struct brw_perf_query_info *query = obj->query;
483 const int n_counters = query->n_counters;
484
485 for (int i = 0; i < n_counters; i++) {
486 const struct brw_perf_query_counter *counter = &query->counters[i];
487
488 assert(counter->data_type == GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL);
489
490 brw_store_register_mem64(brw, obj->pipeline_stats.bo,
491 counter->pipeline_stat.reg,
492 offset_in_bytes + i * sizeof(uint64_t));
493 }
494 }
495
496 /**
497 * Add a query to the global list of "unaccumulated queries."
498 *
499 * Queries are tracked here until all the associated OA reports have
500 * been accumulated via accumulate_oa_reports() after the end
501 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
502 */
503 static void
504 add_to_unaccumulated_query_list(struct brw_context *brw,
505 struct brw_perf_query_object *obj)
506 {
507 if (brw->perfquery.unaccumulated_elements >=
508 brw->perfquery.unaccumulated_array_size)
509 {
510 brw->perfquery.unaccumulated_array_size *= 1.5;
511 brw->perfquery.unaccumulated =
512 reralloc(brw, brw->perfquery.unaccumulated,
513 struct brw_perf_query_object *,
514 brw->perfquery.unaccumulated_array_size);
515 }
516
517 brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
518 }
519
520 /**
521  * Remove a query from the global list of unaccumulated queries once
522  * the OA reports associated with the query have been successfully
523  * accumulated in accumulate_oa_reports(), or when discarding unwanted
524  * query results.
525 */
526 static void
527 drop_from_unaccumulated_query_list(struct brw_context *brw,
528 struct brw_perf_query_object *obj)
529 {
530 for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
531 if (brw->perfquery.unaccumulated[i] == obj) {
532 int last_elt = --brw->perfquery.unaccumulated_elements;
533
534 if (i == last_elt)
535 brw->perfquery.unaccumulated[i] = NULL;
536 else {
537 brw->perfquery.unaccumulated[i] =
538 brw->perfquery.unaccumulated[last_elt];
539 }
540
541 break;
542 }
543 }
544
545 /* Drop our samples_head reference so that associated periodic
546 * sample data buffers can potentially be reaped if they aren't
547 * referenced by any other queries...
548 */
549
550 struct brw_oa_sample_buf *buf =
551 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
552
553 assert(buf->refcount > 0);
554 buf->refcount--;
555
556 obj->oa.samples_head = NULL;
557
558 reap_old_sample_buffers(brw);
559 }
560
561 static uint64_t
562 timebase_scale(struct brw_context *brw, uint32_t u32_time_delta)
563 {
564 const struct gen_device_info *devinfo = &brw->screen->devinfo;
565 uint64_t tmp = ((uint64_t)u32_time_delta) * 1000000000ull;
566
567 return tmp ? tmp / devinfo->timestamp_frequency : 0;
568 }
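
/*
 * Editor's note: a worked example of the scaling above, assuming the 80ns
 * (i.e. 12500000Hz) timestamp period quoted for Haswell later in this file:
 * a raw delta of 1000 ticks scales to 1000 * 1000000000 / 12500000 = 80000ns.
 */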
569
570 static void
571 accumulate_uint32(const uint32_t *report0,
572 const uint32_t *report1,
573 uint64_t *accumulator)
574 {
575 *accumulator += (uint32_t)(*report1 - *report0);
576 }
577
578 static void
579 accumulate_uint40(int a_index,
580 const uint32_t *report0,
581 const uint32_t *report1,
582 uint64_t *accumulator)
583 {
584 const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
585 const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
586 uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
587 uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
588 uint64_t value0 = report0[a_index + 4] | high0;
589 uint64_t value1 = report1[a_index + 4] | high1;
590 uint64_t delta;
591
592 if (value0 > value1)
593 delta = (1ULL << 40) + value1 - value0;
594 else
595 delta = value1 - value0;
596
597 *accumulator += delta;
598 }
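
/*
 * Editor's note: a worked example of the 40bit wrap handling above: with
 * value0 = 0xfffffffff0 and value1 = 0x10 the counter wrapped between the
 * two reports, so the delta is (1ULL << 40) + 0x10 - 0xfffffffff0 = 0x20.
 */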
599
600 /**
601 * Given pointers to starting and ending OA snapshots, add the deltas for each
602 * counter to the results.
603 */
604 static void
605 add_deltas(struct brw_context *brw,
606 struct brw_perf_query_object *obj,
607 const uint32_t *start,
608 const uint32_t *end)
609 {
610 const struct brw_perf_query_info *query = obj->query;
611 uint64_t *accumulator = obj->oa.accumulator;
612 int idx = 0;
613 int i;
614
615 switch (query->oa_format) {
616 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
617 accumulate_uint32(start + 1, end + 1, accumulator + idx++); /* timestamp */
618 accumulate_uint32(start + 3, end + 3, accumulator + idx++); /* clock */
619
620 /* 32x 40bit A counters... */
621 for (i = 0; i < 32; i++)
622 accumulate_uint40(i, start, end, accumulator + idx++);
623
624 /* 4x 32bit A counters... */
625 for (i = 0; i < 4; i++)
626 accumulate_uint32(start + 36 + i, end + 36 + i, accumulator + idx++);
627
628 /* 8x 32bit B counters + 8x 32bit C counters... */
629 for (i = 0; i < 16; i++)
630 accumulate_uint32(start + 48 + i, end + 48 + i, accumulator + idx++);
631
632 break;
633 case I915_OA_FORMAT_A45_B8_C8:
634 accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */
635
636 for (i = 0; i < 61; i++)
637 accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);
638
639 break;
640 default:
641 unreachable("Can't accumulate OA counters in unknown format");
642 }
643 }
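
/*
 * Editor's note: a summary of the A32u40_A4u32_B8_C8 report layout implied
 * by the offsets used above, in 32bit dwords: [0] report id/reason,
 * [1] timestamp, [2] context id, [3] gpu clock, [4..35] low 32 bits of the
 * 40bit A counters, [36..39] 32bit A counters, [40..47] packed high bytes of
 * the 40bit A counters, [48..55] B counters, [56..63] C counters.
 */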
644
645 static bool
646 inc_n_oa_users(struct brw_context *brw)
647 {
648 if (brw->perfquery.n_oa_users == 0 &&
649 drmIoctl(brw->perfquery.oa_stream_fd,
650 I915_PERF_IOCTL_ENABLE, 0) < 0)
651 {
652 return false;
653 }
654 ++brw->perfquery.n_oa_users;
655
656 return true;
657 }
658
659 static void
660 dec_n_oa_users(struct brw_context *brw)
661 {
662 /* Disabling the i915 perf stream will effectively disable the OA
663 * counters. Note it's important to be sure there are no outstanding
664 * MI_RPC commands at this point since they could stall the CS
665 * indefinitely once OACONTROL is disabled.
666 */
667 --brw->perfquery.n_oa_users;
668 if (brw->perfquery.n_oa_users == 0 &&
669 drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
670 {
671 DBG("WARNING: Error disabling i915 perf stream: %m\n");
672 }
673 }
674
675 /* In general, if we see anything spurious while accumulating results,
676  * we don't try to continue accumulating the current query; instead we
677  * scrap anything outstanding and then hope for the best with new
678  * queries.
679 */
680 static void
681 discard_all_queries(struct brw_context *brw)
682 {
683 while (brw->perfquery.unaccumulated_elements) {
684 struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];
685
686 obj->oa.results_accumulated = true;
687 drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);
688
689 dec_n_oa_users(brw);
690 }
691 }
692
693 enum OaReadStatus {
694 OA_READ_STATUS_ERROR,
695 OA_READ_STATUS_UNFINISHED,
696 OA_READ_STATUS_FINISHED,
697 };
698
699 static enum OaReadStatus
700 read_oa_samples_until(struct brw_context *brw,
701 uint32_t start_timestamp,
702 uint32_t end_timestamp)
703 {
704 struct exec_node *tail_node =
705 exec_list_get_tail(&brw->perfquery.sample_buffers);
706 struct brw_oa_sample_buf *tail_buf =
707 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
708 uint32_t last_timestamp = tail_buf->last_timestamp;
709
710 while (1) {
711 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
712 uint32_t offset;
713 int len;
714
715 while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
716 sizeof(buf->buf))) < 0 && errno == EINTR)
717 ;
718
719 if (len <= 0) {
720 exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);
721
722 if (len < 0) {
723 if (errno == EAGAIN)
724 return ((last_timestamp - start_timestamp) >=
725 (end_timestamp - start_timestamp)) ?
726 OA_READ_STATUS_FINISHED :
727 OA_READ_STATUS_UNFINISHED;
728 else {
729 DBG("Error reading i915 perf samples: %m\n");
730 }
731 } else
732 DBG("Spurious EOF reading i915 perf samples\n");
733
734 return OA_READ_STATUS_ERROR;
735 }
736
737 buf->len = len;
738 exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);
739
740 /* Go through the reports and update the last timestamp. */
741 offset = 0;
742 while (offset < buf->len) {
743 const struct drm_i915_perf_record_header *header =
744 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
745 uint32_t *report = (uint32_t *) (header + 1);
746
747 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
748 last_timestamp = report[1];
749
750 offset += header->size;
751 }
752
753 buf->last_timestamp = last_timestamp;
754 }
755
756 unreachable("not reached");
757 return OA_READ_STATUS_ERROR;
758 }
759
760 /**
761 * Try to read all the reports until either the delimiting timestamp
762 * or an error arises.
763 */
764 static bool
765 read_oa_samples_for_query(struct brw_context *brw,
766 struct brw_perf_query_object *obj)
767 {
768 uint32_t *start;
769 uint32_t *last;
770 uint32_t *end;
771
772 /* We need the MI_REPORT_PERF_COUNT to land before we can start
773     * to accumulate. */
774 assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
775 !brw_bo_busy(obj->oa.bo));
776
777 /* Map the BO once here and let accumulate_oa_reports() unmap
778 * it. */
779 if (obj->oa.map == NULL)
780 obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);
781
782 start = last = obj->oa.map;
783 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
784
785 if (start[0] != obj->oa.begin_report_id) {
786 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
787 return true;
788 }
789 if (end[0] != (obj->oa.begin_report_id + 1)) {
790 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
791 return true;
792 }
793
794 /* Read the reports until the end timestamp. */
795 switch (read_oa_samples_until(brw, start[1], end[1])) {
796 case OA_READ_STATUS_ERROR:
797 /* Fallthrough and let accumulate_oa_reports() deal with the
798 * error. */
799 case OA_READ_STATUS_FINISHED:
800 return true;
801 case OA_READ_STATUS_UNFINISHED:
802 return false;
803 }
804
805 unreachable("invalid read status");
806 return false;
807 }
808
809 /**
810 * Accumulate raw OA counter values based on deltas between pairs of
811 * OA reports.
812 *
813 * Accumulation starts from the first report captured via
814 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
815 * last MI_RPC report requested by brw_end_perf_query(). Between these
816  * two reports there may also be some number of periodically sampled OA
817 * reports collected via the i915 perf interface - depending on the
818 * duration of the query.
819 *
820 * These periodic snapshots help to ensure we handle counter overflow
821  * correctly by being frequent enough that we don't miss multiple
822 * overflows of a counter between snapshots. For Gen8+ the i915 perf
823 * snapshots provide the extra context-switch reports that let us
824 * subtract out the progress of counters associated with other
825 * contexts running on the system.
826 */
827 static void
828 accumulate_oa_reports(struct brw_context *brw,
829 struct brw_perf_query_object *obj)
830 {
831 const struct gen_device_info *devinfo = &brw->screen->devinfo;
832 struct gl_perf_query_object *o = &obj->base;
833 uint32_t *start;
834 uint32_t *last;
835 uint32_t *end;
836 struct exec_node *first_samples_node;
837 bool in_ctx = true;
838 uint32_t ctx_id;
839 int out_duration = 0;
840
841 assert(o->Ready);
842 assert(obj->oa.map != NULL);
843
844 start = last = obj->oa.map;
845 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
846
847 if (start[0] != obj->oa.begin_report_id) {
848 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
849 goto error;
850 }
851 if (end[0] != (obj->oa.begin_report_id + 1)) {
852 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
853 goto error;
854 }
855
856 ctx_id = start[2];
857
858 /* See if we have any periodic reports to accumulate too... */
859
860 /* N.B. The oa.samples_head was set when the query began and
861 * pointed to the tail of the brw->perfquery.sample_buffers list at
862 * the time the query started. Since the buffer existed before the
863 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
864 * that no data in this particular node's buffer can possibly be
865 * associated with the query - so skip ahead one...
866 */
867 first_samples_node = obj->oa.samples_head->next;
868
869 foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
870 &brw->perfquery.sample_buffers,
871 first_samples_node)
872 {
873 int offset = 0;
874
875 while (offset < buf->len) {
876 const struct drm_i915_perf_record_header *header =
877 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
878
879 assert(header->size != 0);
880 assert(header->size <= buf->len);
881
882 offset += header->size;
883
884 switch (header->type) {
885 case DRM_I915_PERF_RECORD_SAMPLE: {
886 uint32_t *report = (uint32_t *)(header + 1);
887 bool add = true;
888
889 /* Ignore reports that come before the start marker.
890 * (Note: takes care to allow overflow of 32bit timestamps)
891 */
892 if (timebase_scale(brw, report[1] - start[1]) > 5000000000)
893 continue;
894
895 /* Ignore reports that come after the end marker.
896 * (Note: takes care to allow overflow of 32bit timestamps)
897 */
898 if (timebase_scale(brw, report[1] - end[1]) <= 5000000000)
899 goto end;
900
901 /* For Gen8+ since the counters continue while other
902 * contexts are running we need to discount any unrelated
903 * deltas. The hardware automatically generates a report
904 * on context switch which gives us a new reference point
905              * to continue adding deltas from.
906 *
907 * For Haswell we can rely on the HW to stop the progress
908              * of OA counters while any other context is active.
909 */
910 if (devinfo->gen >= 8) {
911 if (in_ctx && report[2] != ctx_id) {
912 DBG("i915 perf: Switch AWAY (observed by ID change)\n");
913 in_ctx = false;
914 out_duration = 0;
915 } else if (in_ctx == false && report[2] == ctx_id) {
916 DBG("i915 perf: Switch TO\n");
917 in_ctx = true;
918
919 /* From experimentation in IGT, we found that the OA unit
920 * might label some report as "idle" (using an invalid
921 * context ID), right after a report for a given context.
922 * Deltas generated by those reports actually belong to the
923 * previous context, even though they're not labelled as
924 * such.
925 *
926 * We didn't *really* Switch AWAY in the case that we e.g.
927 * saw a single periodic report while idle...
928 */
929 if (out_duration >= 1)
930 add = false;
931 } else if (in_ctx) {
932 assert(report[2] == ctx_id);
933 DBG("i915 perf: Continuation IN\n");
934 } else {
935 assert(report[2] != ctx_id);
936 DBG("i915 perf: Continuation OUT\n");
937 add = false;
938 out_duration++;
939 }
940 }
941
942 if (add)
943 add_deltas(brw, obj, last, report);
944
945 last = report;
946
947 break;
948 }
949
950 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
951 DBG("i915 perf: OA error: all reports lost\n");
952 goto error;
953 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
954 DBG("i915 perf: OA report lost\n");
955 break;
956 }
957 }
958 }
959
960 end:
961
962 add_deltas(brw, obj, last, end);
963
964 DBG("Marking %d accumulated - results gathered\n", o->Id);
965
966 brw_bo_unmap(obj->oa.bo);
967 obj->oa.map = NULL;
968 obj->oa.results_accumulated = true;
969 drop_from_unaccumulated_query_list(brw, obj);
970 dec_n_oa_users(brw);
971
972 return;
973
974 error:
975
976 brw_bo_unmap(obj->oa.bo);
977 obj->oa.map = NULL;
978 discard_all_queries(brw);
979 }
980
981 /******************************************************************************/
982
983 static bool
984 open_i915_perf_oa_stream(struct brw_context *brw,
985 int metrics_set_id,
986 int report_format,
987 int period_exponent,
988 int drm_fd,
989 uint32_t ctx_id)
990 {
991 uint64_t properties[] = {
992 /* Single context sampling */
993 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
994
995 /* Include OA reports in samples */
996 DRM_I915_PERF_PROP_SAMPLE_OA, true,
997
998 /* OA unit configuration */
999 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
1000 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
1001 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
1002 };
1003 struct drm_i915_perf_open_param param = {
1004 .flags = I915_PERF_FLAG_FD_CLOEXEC |
1005 I915_PERF_FLAG_FD_NONBLOCK |
1006 I915_PERF_FLAG_DISABLED,
1007 .num_properties = ARRAY_SIZE(properties) / 2,
1008 .properties_ptr = (uintptr_t) properties,
1009 };
1010 int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
1011 if (fd == -1) {
1012 DBG("Error opening i915 perf OA stream: %m\n");
1013 return false;
1014 }
1015
1016 brw->perfquery.oa_stream_fd = fd;
1017
1018 brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
1019 brw->perfquery.current_oa_format = report_format;
1020
1021 return true;
1022 }
1023
1024 static void
1025 close_perf(struct brw_context *brw)
1026 {
1027 if (brw->perfquery.oa_stream_fd != -1) {
1028 close(brw->perfquery.oa_stream_fd);
1029 brw->perfquery.oa_stream_fd = -1;
1030 }
1031 }
1032
1033 /**
1034 * Driver hook for glBeginPerfQueryINTEL().
1035 */
1036 static bool
1037 brw_begin_perf_query(struct gl_context *ctx,
1038 struct gl_perf_query_object *o)
1039 {
1040 struct brw_context *brw = brw_context(ctx);
1041 struct brw_perf_query_object *obj = brw_perf_query(o);
1042 const struct brw_perf_query_info *query = obj->query;
1043
1044 /* We can assume the frontend hides mistaken attempts to Begin a
1045 * query object multiple times before its End. Similarly if an
1046 * application reuses a query object before results have arrived
1047 * the frontend will wait for prior results so we don't need
1048 * to support abandoning in-flight results.
1049 */
1050 assert(!o->Active);
1051 assert(!o->Used || o->Ready); /* no in-flight query to worry about */
1052
1053 DBG("Begin(%d)\n", o->Id);
1054
1055 /* XXX: We have to consider that the command parser unit that parses batch
1056 * buffer commands and is used to capture begin/end counter snapshots isn't
1057 * implicitly synchronized with what's currently running across other GPU
1058 * units (such as the EUs running shaders) that the performance counters are
1059 * associated with.
1060 *
1061 * The intention of performance queries is to measure the work associated
1062 * with commands between the begin/end delimiters and so for that to be the
1063 * case we need to explicitly synchronize the parsing of commands to capture
1064 * Begin/End counter snapshots with what's running across other parts of the
1065 * GPU.
1066 *
1067 * When the command parser reaches a Begin marker it effectively needs to
1068 * drain everything currently running on the GPU until the hardware is idle
1069 * before capturing the first snapshot of counters - otherwise the results
1070 * would also be measuring the effects of earlier commands.
1071 *
1072 * When the command parser reaches an End marker it needs to stall until
1073 * everything currently running on the GPU has finished before capturing the
1074 * end snapshot - otherwise the results won't be a complete representation
1075 * of the work.
1076 *
1077 * Theoretically there could be opportunities to minimize how much of the
1078 * GPU pipeline is drained, or that we stall for, when we know what specific
1079 * units the performance counters being queried relate to but we don't
1080 * currently attempt to be clever here.
1081 *
1082     * Note: with our current simple approach, for back-to-back queries
1083 * we will redundantly emit duplicate commands to synchronize the command
1084 * streamer with the rest of the GPU pipeline, but we assume that in HW the
1085 * second synchronization is effectively a NOOP.
1086 *
1087 * N.B. The final results are based on deltas of counters between (inside)
1088 * Begin/End markers so even though the total wall clock time of the
1089 * workload is stretched by larger pipeline bubbles the bubbles themselves
1090 * are generally invisible to the query results. Whether that's a good or a
1091 * bad thing depends on the use case. For a lower real-time impact while
1092     * capturing metrics, periodic sampling may be a better choice than
1093 * INTEL_performance_query.
1094 *
1095 *
1096 * This is our Begin synchronization point to drain current work on the
1097 * GPU before we capture our first counter snapshot...
1098 */
1099 brw_emit_mi_flush(brw);
1100
1101 switch (query->kind) {
1102 case OA_COUNTERS:
1103
1104 /* Opening an i915 perf stream implies exclusive access to the OA unit
1105 * which will generate counter reports for a specific counter set with a
1106 * specific layout/format so we can't begin any OA based queries that
1107 * require a different counter set or format unless we get an opportunity
1108 * to close the stream and open a new one...
1109 */
1110 if (brw->perfquery.oa_stream_fd != -1 &&
1111 brw->perfquery.current_oa_metrics_set_id !=
1112 query->oa_metrics_set_id) {
1113
1114 if (brw->perfquery.n_oa_users != 0)
1115 return false;
1116 else
1117 close_perf(brw);
1118 }
1119
1120 /* If the OA counters aren't already on, enable them. */
1121 if (brw->perfquery.oa_stream_fd == -1) {
1122 __DRIscreen *screen = brw->screen->driScrnPriv;
1123 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1124
1125 /* The period_exponent gives a sampling period as follows:
1126 * sample_period = timestamp_period * 2^(period_exponent + 1)
1127 *
1128          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1129 * ~83ns (GEN8/9).
1130 *
1131 * The counter overflow period is derived from the EuActive counter
1132 * which reads a counter that increments by the number of clock
1133 * cycles multiplied by the number of EUs. It can be calculated as:
1134 *
1135 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1136 *
1137 * (E.g. 40 EUs @ 1GHz = ~53ms)
1138 *
1139          * We select a sampling period lower than that overflow period to
1140          * ensure we cannot see more than 1 counter overflow, otherwise we
1141          * could lose information.
1142 */
1143
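         /* Editor's note: plugging the example quoted above into the
          * calculation below: with a 32bit A counter and 40 EUs this gives
          * 2^32 / (40 * 2) ~= 53687091ns, i.e. the ~53ms figure mentioned.
          */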
1144 int a_counter_in_bits = 32;
1145 if (devinfo->gen >= 8)
1146 a_counter_in_bits = 40;
1147
1148 uint64_t overflow_period = pow(2, a_counter_in_bits) /
1149 (brw->perfquery.sys_vars.n_eus *
1150 /* drop 1GHz freq to have units in nanoseconds */
1151 2);
1152
1153 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1154 overflow_period, overflow_period / 1000000ul, brw->perfquery.sys_vars.n_eus);
1155
1156 int period_exponent = 0;
1157 uint64_t prev_sample_period, next_sample_period;
1158 for (int e = 0; e < 30; e++) {
1159 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1160 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1161
1162 /* Take the previous sampling period, lower than the overflow
1163 * period.
1164 */
1165 if (prev_sample_period < overflow_period &&
1166 next_sample_period > overflow_period)
1167 period_exponent = e + 1;
1168 }
1169
1170 if (period_exponent == 0) {
1171             DBG("WARNING: unable to find a sampling exponent\n");
1172 return false;
1173 }
1174
1175 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1176 prev_sample_period / 1000000ul);
1177
1178 if (!open_i915_perf_oa_stream(brw,
1179 query->oa_metrics_set_id,
1180 query->oa_format,
1181 period_exponent,
1182 screen->fd, /* drm fd */
1183 brw->hw_ctx))
1184 return false;
1185 } else {
1186 assert(brw->perfquery.current_oa_metrics_set_id ==
1187 query->oa_metrics_set_id &&
1188 brw->perfquery.current_oa_format ==
1189 query->oa_format);
1190 }
1191
1192 if (!inc_n_oa_users(brw)) {
1193 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1194 return false;
1195 }
1196
1197 if (obj->oa.bo) {
1198 brw_bo_unreference(obj->oa.bo);
1199 obj->oa.bo = NULL;
1200 }
1201
1202 obj->oa.bo =
1203 brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
1204 MI_RPC_BO_SIZE, 64);
1205 #ifdef DEBUG
1206 /* Pre-filling the BO helps debug whether writes landed. */
1207 void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
1208 memset(map, 0x80, MI_RPC_BO_SIZE);
1209 brw_bo_unmap(obj->oa.bo);
1210 #endif
1211
1212 obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
1213 brw->perfquery.next_query_start_report_id += 2;
1214
1215 /* We flush the batchbuffer here to minimize the chances that MI_RPC
1216 * delimiting commands end up in different batchbuffers. If that's the
1217 * case, the measurement will include the time it takes for the kernel
1218 * scheduler to load a new request into the hardware. This is manifested in
1219 * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
1220 */
1221 intel_batchbuffer_flush(brw);
1222
1223 /* Take a starting OA counter snapshot. */
1224 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
1225 obj->oa.begin_report_id);
1226 ++brw->perfquery.n_active_oa_queries;
1227
1228 /* No already-buffered samples can possibly be associated with this query
1229 * so create a marker within the list of sample buffers enabling us to
1230 * easily ignore earlier samples when processing this query after
1231 * completion.
1232 */
1233 assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
1234 obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
1235
1236 struct brw_oa_sample_buf *buf =
1237 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
1238
1239 /* This reference will ensure that future/following sample
1240 * buffers (that may relate to this query) can't be freed until
1241 * this drops to zero.
1242 */
1243 buf->refcount++;
1244
1245 memset(obj->oa.accumulator, 0, sizeof(obj->oa.accumulator));
1246 obj->oa.results_accumulated = false;
1247
1248 add_to_unaccumulated_query_list(brw, obj);
1249 break;
1250
1251 case PIPELINE_STATS:
1252 if (obj->pipeline_stats.bo) {
1253 brw_bo_unreference(obj->pipeline_stats.bo);
1254 obj->pipeline_stats.bo = NULL;
1255 }
1256
1257 obj->pipeline_stats.bo =
1258 brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
1259 STATS_BO_SIZE, 64);
1260
1261 /* Take starting snapshots. */
1262 snapshot_statistics_registers(brw, obj, 0);
1263
1264 ++brw->perfquery.n_active_pipeline_stats_queries;
1265 break;
1266 }
1267
1268 if (INTEL_DEBUG & DEBUG_PERFMON)
1269 dump_perf_queries(brw);
1270
1271 return true;
1272 }
1273
1274 /**
1275 * Driver hook for glEndPerfQueryINTEL().
1276 */
1277 static void
1278 brw_end_perf_query(struct gl_context *ctx,
1279 struct gl_perf_query_object *o)
1280 {
1281 struct brw_context *brw = brw_context(ctx);
1282 struct brw_perf_query_object *obj = brw_perf_query(o);
1283
1284 DBG("End(%d)\n", o->Id);
1285
1286 /* Ensure that the work associated with the queried commands will have
1287 * finished before taking our query end counter readings.
1288 *
1289 * For more details see comment in brw_begin_perf_query for
1290 * corresponding flush.
1291 */
1292 brw_emit_mi_flush(brw);
1293
1294 switch (obj->query->kind) {
1295 case OA_COUNTERS:
1296
1297 /* NB: It's possible that the query will have already been marked
1298 * as 'accumulated' if an error was seen while reading samples
1299       * from perf. In this case we mustn't try to emit a closing
1300       * MI_RPC command in case the OA unit has already been disabled.
1301 */
1302 if (!obj->oa.results_accumulated) {
1303 /* Take an ending OA counter snapshot. */
1304 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
1305 MI_RPC_BO_END_OFFSET_BYTES,
1306 obj->oa.begin_report_id + 1);
1307 }
1308
1309 --brw->perfquery.n_active_oa_queries;
1310
1311 /* NB: even though the query has now ended, it can't be accumulated
1312 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1313 * to query->oa.bo
1314 */
1315 break;
1316
1317 case PIPELINE_STATS:
1318 snapshot_statistics_registers(brw, obj,
1319 STATS_BO_END_OFFSET_BYTES);
1320 --brw->perfquery.n_active_pipeline_stats_queries;
1321 break;
1322 }
1323 }
1324
1325 static void
1326 brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
1327 {
1328 struct brw_context *brw = brw_context(ctx);
1329 struct brw_perf_query_object *obj = brw_perf_query(o);
1330 struct brw_bo *bo = NULL;
1331
1332 assert(!o->Ready);
1333
1334 switch (obj->query->kind) {
1335 case OA_COUNTERS:
1336 bo = obj->oa.bo;
1337 break;
1338
1339 case PIPELINE_STATS:
1340 bo = obj->pipeline_stats.bo;
1341 break;
1342 }
1343
1344 if (bo == NULL)
1345 return;
1346
1347 /* If the current batch references our results bo then we need to
1348 * flush first...
1349 */
1350 if (brw_batch_references(&brw->batch, bo))
1351 intel_batchbuffer_flush(brw);
1352
1353 brw_bo_wait_rendering(bo);
1354
1355 /* Due to a race condition between the OA unit signaling report
1356 * availability and the report actually being written into memory,
1357 * we need to wait for all the reports to come in before we can
1358 * read them.
1359 */
1360 if (obj->query->kind == OA_COUNTERS) {
1361 while (!read_oa_samples_for_query(brw, obj))
1362 ;
1363 }
1364 }
1365
1366 static bool
1367 brw_is_perf_query_ready(struct gl_context *ctx,
1368 struct gl_perf_query_object *o)
1369 {
1370 struct brw_context *brw = brw_context(ctx);
1371 struct brw_perf_query_object *obj = brw_perf_query(o);
1372
1373 if (o->Ready)
1374 return true;
1375
1376 switch (obj->query->kind) {
1377 case OA_COUNTERS:
1378 return (obj->oa.results_accumulated ||
1379 (obj->oa.bo &&
1380 !brw_batch_references(&brw->batch, obj->oa.bo) &&
1381 !brw_bo_busy(obj->oa.bo) &&
1382 read_oa_samples_for_query(brw, obj)));
1383 case PIPELINE_STATS:
1384 return (obj->pipeline_stats.bo &&
1385 !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
1386 !brw_bo_busy(obj->pipeline_stats.bo));
1387 }
1388
1389 unreachable("missing ready check for unknown query kind");
1390 return false;
1391 }
1392
1393 static int
1394 get_oa_counter_data(struct brw_context *brw,
1395 struct brw_perf_query_object *obj,
1396 size_t data_size,
1397 uint8_t *data)
1398 {
1399 const struct brw_perf_query_info *query = obj->query;
1400 int n_counters = query->n_counters;
1401 int written = 0;
1402
1403 if (!obj->oa.results_accumulated) {
1404 accumulate_oa_reports(brw, obj);
1405 assert(obj->oa.results_accumulated);
1406 }
1407
1408 for (int i = 0; i < n_counters; i++) {
1409 const struct brw_perf_query_counter *counter = &query->counters[i];
1410 uint64_t *out_uint64;
1411 float *out_float;
1412
1413 if (counter->size) {
1414 switch (counter->data_type) {
1415 case GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL:
1416 out_uint64 = (uint64_t *)(data + counter->offset);
1417 *out_uint64 = counter->oa_counter_read_uint64(brw, query,
1418 obj->oa.accumulator);
1419 break;
1420 case GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL:
1421 out_float = (float *)(data + counter->offset);
1422 *out_float = counter->oa_counter_read_float(brw, query,
1423 obj->oa.accumulator);
1424 break;
1425 default:
1426 /* So far we aren't using uint32, double or bool32... */
1427 unreachable("unexpected counter data type");
1428 }
1429 written = counter->offset + counter->size;
1430 }
1431 }
1432
1433 return written;
1434 }
1435
1436 static int
1437 get_pipeline_stats_data(struct brw_context *brw,
1438 struct brw_perf_query_object *obj,
1439 size_t data_size,
1440 uint8_t *data)
1441
1442 {
1443 const struct brw_perf_query_info *query = obj->query;
1444 int n_counters = obj->query->n_counters;
1445 uint8_t *p = data;
1446
1447 uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
1448 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
1449
1450 for (int i = 0; i < n_counters; i++) {
1451 const struct brw_perf_query_counter *counter = &query->counters[i];
1452 uint64_t value = end[i] - start[i];
1453
1454 if (counter->pipeline_stat.numerator !=
1455 counter->pipeline_stat.denominator) {
1456 value *= counter->pipeline_stat.numerator;
1457 value /= counter->pipeline_stat.denominator;
1458 }
1459
1460 *((uint64_t *)p) = value;
1461 p += 8;
1462 }
1463
1464 brw_bo_unmap(obj->pipeline_stats.bo);
1465
1466 return p - data;
1467 }
1468
1469 /**
1470 * Driver hook for glGetPerfQueryDataINTEL().
1471 */
1472 static void
1473 brw_get_perf_query_data(struct gl_context *ctx,
1474 struct gl_perf_query_object *o,
1475 GLsizei data_size,
1476 GLuint *data,
1477 GLuint *bytes_written)
1478 {
1479 struct brw_context *brw = brw_context(ctx);
1480 struct brw_perf_query_object *obj = brw_perf_query(o);
1481 int written = 0;
1482
1483 assert(brw_is_perf_query_ready(ctx, o));
1484
1485 DBG("GetData(%d)\n", o->Id);
1486
1487 if (INTEL_DEBUG & DEBUG_PERFMON)
1488 dump_perf_queries(brw);
1489
1490 /* We expect that the frontend only calls this hook when it knows
1491 * that results are available.
1492 */
1493 assert(o->Ready);
1494
1495 switch (obj->query->kind) {
1496 case OA_COUNTERS:
1497 written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
1498 break;
1499
1500 case PIPELINE_STATS:
1501 written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
1502 break;
1503 }
1504
1505 if (bytes_written)
1506 *bytes_written = written;
1507 }
1508
1509 static struct gl_perf_query_object *
1510 brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
1511 {
1512 struct brw_context *brw = brw_context(ctx);
1513 const struct brw_perf_query_info *query =
1514 &brw->perfquery.queries[query_index];
1515 struct brw_perf_query_object *obj =
1516 calloc(1, sizeof(struct brw_perf_query_object));
1517
1518 if (!obj)
1519 return NULL;
1520
1521 obj->query = query;
1522
1523 brw->perfquery.n_query_instances++;
1524
1525 return &obj->base;
1526 }
1527
1528 /**
1529 * Driver hook for glDeletePerfQueryINTEL().
1530 */
1531 static void
1532 brw_delete_perf_query(struct gl_context *ctx,
1533 struct gl_perf_query_object *o)
1534 {
1535 struct brw_context *brw = brw_context(ctx);
1536 struct brw_perf_query_object *obj = brw_perf_query(o);
1537
1538 /* We can assume that the frontend waits for a query to complete
1539 * before ever calling into here, so we don't have to worry about
1540 * deleting an in-flight query object.
1541 */
1542 assert(!o->Active);
1543 assert(!o->Used || o->Ready);
1544
1545 DBG("Delete(%d)\n", o->Id);
1546
1547 switch (obj->query->kind) {
1548 case OA_COUNTERS:
1549 if (obj->oa.bo) {
1550 if (!obj->oa.results_accumulated) {
1551 drop_from_unaccumulated_query_list(brw, obj);
1552 dec_n_oa_users(brw);
1553 }
1554
1555 brw_bo_unreference(obj->oa.bo);
1556 obj->oa.bo = NULL;
1557 }
1558
1559 obj->oa.results_accumulated = false;
1560 break;
1561
1562 case PIPELINE_STATS:
1563 if (obj->pipeline_stats.bo) {
1564 brw_bo_unreference(obj->pipeline_stats.bo);
1565 obj->pipeline_stats.bo = NULL;
1566 }
1567 break;
1568 }
1569
1570 free(obj);
1571
1572 /* As an indication that the INTEL_performance_query extension is no
1573 * longer in use, it's a good time to free our cache of sample
1574 * buffers and close any current i915-perf stream.
1575 */
1576 if (--brw->perfquery.n_query_instances == 0) {
1577 free_sample_bufs(brw);
1578 close_perf(brw);
1579 }
1580 }
1581
1582 /******************************************************************************/
1583
1584 static struct brw_perf_query_info *
1585 append_query_info(struct brw_context *brw)
1586 {
1587 brw->perfquery.queries =
1588 reralloc(brw, brw->perfquery.queries,
1589 struct brw_perf_query_info, ++brw->perfquery.n_queries);
1590
1591 return &brw->perfquery.queries[brw->perfquery.n_queries - 1];
1592 }
1593
1594 static void
1595 add_stat_reg(struct brw_perf_query_info *query,
1596 uint32_t reg,
1597 uint32_t numerator,
1598 uint32_t denominator,
1599 const char *name,
1600 const char *description)
1601 {
1602 struct brw_perf_query_counter *counter;
1603
1604 assert(query->n_counters < MAX_STAT_COUNTERS);
1605
1606 counter = &query->counters[query->n_counters];
1607 counter->name = name;
1608 counter->desc = description;
1609 counter->type = GL_PERFQUERY_COUNTER_RAW_INTEL;
1610 counter->data_type = GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
1611 counter->size = sizeof(uint64_t);
1612 counter->offset = sizeof(uint64_t) * query->n_counters;
1613 counter->pipeline_stat.reg = reg;
1614 counter->pipeline_stat.numerator = numerator;
1615 counter->pipeline_stat.denominator = denominator;
1616
1617 query->n_counters++;
1618 }
1619
1620 static void
1621 add_basic_stat_reg(struct brw_perf_query_info *query,
1622 uint32_t reg, const char *name)
1623 {
1624 add_stat_reg(query, reg, 1, 1, name, name);
1625 }
1626
1627 static void
1628 init_pipeline_statistic_query_registers(struct brw_context *brw)
1629 {
1630 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1631 struct brw_perf_query_info *query = append_query_info(brw);
1632
1633 query->kind = PIPELINE_STATS;
1634 query->name = "Pipeline Statistics Registers";
1635 query->n_counters = 0;
1636 query->counters =
1637 rzalloc_array(brw, struct brw_perf_query_counter, MAX_STAT_COUNTERS);
1638
1639 add_basic_stat_reg(query, IA_VERTICES_COUNT,
1640 "N vertices submitted");
1641 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1642 "N primitives submitted");
1643 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1644 "N vertex shader invocations");
1645
1646 if (devinfo->gen == 6) {
1647 add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
1648 "SO_PRIM_STORAGE_NEEDED",
1649 "N geometry shader stream-out primitives (total)");
1650 add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
1651 "SO_NUM_PRIMS_WRITTEN",
1652 "N geometry shader stream-out primitives (written)");
1653 } else {
1654 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
1655 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
1656 "N stream-out (stream 0) primitives (total)");
1657 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
1658 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
1659 "N stream-out (stream 1) primitives (total)");
1660 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
1661 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
1662 "N stream-out (stream 2) primitives (total)");
1663 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
1664 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
1665 "N stream-out (stream 3) primitives (total)");
1666 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
1667 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
1668 "N stream-out (stream 0) primitives (written)");
1669 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
1670 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
1671 "N stream-out (stream 1) primitives (written)");
1672 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
1673 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
1674 "N stream-out (stream 2) primitives (written)");
1675 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
1676 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
1677 "N stream-out (stream 3) primitives (written)");
1678 }
1679
1680 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1681 "N TCS shader invocations");
1682 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1683 "N TES shader invocations");
1684
1685 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1686 "N geometry shader invocations");
1687 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1688 "N geometry shader primitives emitted");
1689
1690 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1691 "N primitives entering clipping");
1692 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1693 "N primitives leaving clipping");
1694
1695 if (devinfo->is_haswell || devinfo->gen == 8)
1696 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1697 "N fragment shader invocations",
1698 "N fragment shader invocations");
1699 else
1700 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1701 "N fragment shader invocations");
1702
1703 add_basic_stat_reg(query, PS_DEPTH_COUNT, "N z-pass fragments");
1704
1705 if (devinfo->gen >= 7)
1706 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1707 "N compute shader invocations");
1708
1709 query->data_size = sizeof(uint64_t) * query->n_counters;
1710 }
1711
1712 static bool
1713 read_file_uint64(const char *file, uint64_t *val)
1714 {
1715 char buf[32];
1716 int fd, n;
1717
1718 fd = open(file, 0);
1719 if (fd < 0)
1720 return false;
1721 n = read(fd, buf, sizeof (buf) - 1);
1722 close(fd);
1723 if (n < 0)
1724 return false;
1725
1726 buf[n] = '\0';
1727 *val = strtoull(buf, NULL, 0);
1728
1729 return true;
1730 }
1731
1732 static void
1733 register_oa_config(struct brw_context *brw,
1734 const struct brw_perf_query_info *query,
1735 uint64_t config_id)
1736 {
1737    struct brw_perf_query_info *registered_query = append_query_info(brw);
1738    *registered_query = *query;
1739    registered_query->oa_metrics_set_id = config_id;
1740    DBG("metric set registered: id = %" PRIu64 ", guid = %s\n",
1741        registered_query->oa_metrics_set_id, query->guid);
1742 }
1743
1744 static void
1745 enumerate_sysfs_metrics(struct brw_context *brw, const char *sysfs_dev_dir)
1746 {
1747 char buf[256];
1748 DIR *metricsdir = NULL;
1749 struct dirent *metric_entry;
1750 int len;
1751
1752 len = snprintf(buf, sizeof(buf), "%s/metrics", sysfs_dev_dir);
1753 if (len < 0 || len >= sizeof(buf)) {
1754 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
1755 return;
1756 }
1757
1758 metricsdir = opendir(buf);
1759 if (!metricsdir) {
1760 DBG("Failed to open %s: %m\n", buf);
1761 return;
1762 }
1763
1764 while ((metric_entry = readdir(metricsdir))) {
1765 struct hash_entry *entry;
1766
1767 if ((metric_entry->d_type != DT_DIR &&
1768 metric_entry->d_type != DT_LNK) ||
1769 metric_entry->d_name[0] == '.')
1770 continue;
1771
1772 DBG("metric set: %s\n", metric_entry->d_name);
1773 entry = _mesa_hash_table_search(brw->perfquery.oa_metrics_table,
1774 metric_entry->d_name);
1775 if (entry) {
1776 uint64_t id;
1777
1778 len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
1779 sysfs_dev_dir, metric_entry->d_name);
1780 if (len < 0 || len >= sizeof(buf)) {
1781 DBG("Failed to concatenate path to sysfs metric id file\n");
1782 continue;
1783 }
1784
1785 if (!read_file_uint64(buf, &id)) {
1786 DBG("Failed to read metric set id from %s: %m", buf);
1787 continue;
1788 }
1789
1790 register_oa_config(brw, (const struct brw_perf_query_info *)entry->data, id);
1791 } else
1792 DBG("metric set not known by mesa (skipping)\n");
1793 }
1794
1795 closedir(metricsdir);
1796 }
1797
1798 static bool
1799 read_sysfs_drm_device_file_uint64(struct brw_context *brw,
1800 const char *sysfs_dev_dir,
1801 const char *file,
1802 uint64_t *value)
1803 {
1804 char buf[512];
1805 int len;
1806
1807 len = snprintf(buf, sizeof(buf), "%s/%s", sysfs_dev_dir, file);
1808 if (len < 0 || len >= sizeof(buf)) {
1809 DBG("Failed to concatenate sys filename to read u64 from\n");
1810 return false;
1811 }
1812
1813 return read_file_uint64(buf, value);
1814 }
1815
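/* Probe for the kernel's dynamic OA config interface: find the test
 * config (id 1), which we know we can't replace, and try to remove it
 * anyway.  If DRM_IOCTL_I915_PERF_REMOVE_CONFIG fails with ENOENT, the
 * kernel recognised the ioctl (it looked the config up and refused),
 * which we take as evidence that dynamically added configs are
 * supported.  (The drm_i915_perf_oa_config set up below isn't actually
 * passed to the ioctl; only the config id is.)
 */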
1816 static bool
1817 kernel_has_dynamic_config_support(struct brw_context *brw,
1818 const char *sysfs_dev_dir)
1819 {
1820 __DRIscreen *screen = brw->screen->driScrnPriv;
1821 struct hash_entry *entry;
1822
1823 hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
1824 struct brw_perf_query_info *query = entry->data;
1825 char config_path[256];
1826 uint64_t config_id;
1827
1828 snprintf(config_path, sizeof(config_path),
1829 "%s/metrics/%s/id", sysfs_dev_dir, query->guid);
1830
1831 /* Look for the test config, which we know we can't replace. */
1832 if (read_file_uint64(config_path, &config_id) && config_id == 1) {
1833 uint32_t mux_regs[] = { 0x9888 /* NOA_WRITE */, 0x0 };
1834 struct drm_i915_perf_oa_config config;
1835
1836 memset(&config, 0, sizeof(config));
1837
1838 memcpy(config.uuid, query->guid, sizeof(config.uuid));
1839
1840 config.n_mux_regs = 1;
1841 config.mux_regs_ptr = (uintptr_t) mux_regs;
1842
1843 if (ioctl(screen->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config_id) < 0 &&
1844 errno == ENOENT)
1845 return true;
1846
1847 break;
1848 }
1849 }
1850
1851 return false;
1852 }
1853
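/* Upload each metric set mesa knows about to the kernel with
 * DRM_IOCTL_I915_PERF_ADD_CONFIG, unless a config with the same GUID is
 * already present in sysfs, in which case its existing id is reused.
 */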
1854 static void
1855 init_oa_configs(struct brw_context *brw, const char *sysfs_dev_dir)
1856 {
1857 __DRIscreen *screen = brw->screen->driScrnPriv;
1858 struct hash_entry *entry;
1859
1860 hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
1861 const struct brw_perf_query_info *query = entry->data;
1862 struct drm_i915_perf_oa_config config;
1863 char config_path[256];
1864 uint64_t config_id;
1865 int ret;
1866
1867 snprintf(config_path, sizeof(config_path),
1868 "%s/metrics/%s/id", sysfs_dev_dir, query->guid);
1869
1870 /* Don't recreate already loaded configs. */
1871 if (read_file_uint64(config_path, &config_id)) {
1872 register_oa_config(brw, query, config_id);
1873 continue;
1874 }
1875
1876 memset(&config, 0, sizeof(config));
1877
1878 memcpy(config.uuid, query->guid, sizeof(config.uuid));
1879
1880 config.n_mux_regs = query->n_mux_regs;
1881 config.mux_regs_ptr = (uintptr_t) query->mux_regs;
1882
1883 config.n_boolean_regs = query->n_b_counter_regs;
1884 config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;
1885
1886 config.n_flex_regs = query->n_flex_regs;
1887 config.flex_regs_ptr = (uintptr_t) query->flex_regs;
1888
1889 ret = ioctl(screen->fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
1890 if (ret < 0) {
1891 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
1892 query->name, query->guid, strerror(errno));
1893 continue;
1894 }
1895
1896       register_oa_config(brw, query, ret); /* ADD_CONFIG returns the new metric set id */
1897 }
1898 }
1899
1900 static bool
1901 init_oa_sys_vars(struct brw_context *brw, const char *sysfs_dev_dir)
1902 {
1903 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1904 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
1905 __DRIscreen *screen = brw->screen->driScrnPriv;
1906
1907 if (!read_sysfs_drm_device_file_uint64(brw, sysfs_dev_dir,
1908 "gt_min_freq_mhz",
1909 &min_freq_mhz))
1910 return false;
1911
1912 if (!read_sysfs_drm_device_file_uint64(brw, sysfs_dev_dir,
1913 "gt_max_freq_mhz",
1914 &max_freq_mhz))
1915 return false;
1916
1917 brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
1918 brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;
1919 brw->perfquery.sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
1920
1921 brw->perfquery.sys_vars.revision = intel_device_get_revision(screen->fd);
1922 brw->perfquery.sys_vars.n_eu_slices = devinfo->num_slices;
1923 /* Assuming a uniform distribution of subslices per slice. */
1924 brw->perfquery.sys_vars.n_eu_sub_slices = devinfo->num_subslices[0];
1925
1926 if (devinfo->is_haswell) {
1927 brw->perfquery.sys_vars.slice_mask = 0;
1928 brw->perfquery.sys_vars.subslice_mask = 0;
1929
1930 for (int s = 0; s < devinfo->num_slices; s++)
1931 brw->perfquery.sys_vars.slice_mask |= 1U << s;
1932 for (int ss = 0; ss < devinfo->num_subslices[0]; ss++)
1933 brw->perfquery.sys_vars.subslice_mask |= 1U << ss;
1934
1935 if (devinfo->gt == 1) {
1936 brw->perfquery.sys_vars.n_eus = 10;
1937 } else if (devinfo->gt == 2) {
1938 brw->perfquery.sys_vars.n_eus = 20;
1939 } else if (devinfo->gt == 3) {
1940 brw->perfquery.sys_vars.n_eus = 40;
1941 } else
1942 unreachable("not reached");
1943 } else {
1944 drm_i915_getparam_t gp;
1945 int ret;
1946 int slice_mask = 0;
1947 int ss_mask = 0;
1948 /* maximum number of slices */
1949 int s_max = devinfo->num_slices;
1950 /* maximum number of subslices per slice (assuming a uniform
1951  * subslice count per slice)
1952  */
1953 int ss_max = devinfo->num_subslices[0];
1954 uint64_t subslice_mask = 0;
1955 int s;
1956
1957 gp.param = I915_PARAM_SLICE_MASK;
1958 gp.value = &slice_mask;
1959 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1960 if (ret)
1961 return false;
1962
1963 gp.param = I915_PARAM_SUBSLICE_MASK;
1964 gp.value = &ss_mask;
1965 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1966 if (ret)
1967 return false;
1968
1969 brw->perfquery.sys_vars.n_eus = brw->screen->eu_total;
1970 brw->perfquery.sys_vars.n_eu_slices = __builtin_popcount(slice_mask);
1971 brw->perfquery.sys_vars.slice_mask = slice_mask;
1972
1973 /* Note: the _SUBSLICE_MASK param only reports a global subslice mask
1974 * which applies to all slices.
1975 *
1976 * Note: some of the metrics we have (as described in XML) are
1977 * conditional on a $SubsliceMask variable which is expected to also
1978 * reflect the slice mask by packing together subslice masks for each
1979 * slice in one value.
1980 */
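/* For example (hypothetical masks, purely illustrative): with ss_max = 3,
 * slice_mask = 0x3 and ss_mask = 0x7, this packs to
 * subslice_mask = 0x7 | (0x7 << 3) = 0x3f.
 */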
1981 for (s = 0; s < s_max; s++) {
1982 if (slice_mask & (1<<s)) {
1983 subslice_mask |= ss_mask << (ss_max * s);
1984 }
1985 }
1986
1987 brw->perfquery.sys_vars.subslice_mask = subslice_mask;
1988 brw->perfquery.sys_vars.n_eu_sub_slices =
1989 __builtin_popcount(subslice_mask);
1990 }
1991
1992 brw->perfquery.sys_vars.eu_threads_count =
1993 brw->perfquery.sys_vars.n_eus * devinfo->num_thread_per_eu;
1994
1995 return true;
1996 }
1997
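/* Resolve the sysfs directory for the DRM device backing the screen's fd,
 * i.e. /sys/dev/char/<major>:<minor>/device/drm/card<N>, writing the
 * resulting path into path_buf.
 */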
1998 static bool
1999 get_sysfs_dev_dir(struct brw_context *brw,
2000 char *path_buf,
2001 int path_buf_len)
2002 {
2003 __DRIscreen *screen = brw->screen->driScrnPriv;
2004 struct stat sb;
2005 int min, maj;
2006 DIR *drmdir;
2007 struct dirent *drm_entry;
2008 int len;
2009
2010 assert(path_buf);
2011 assert(path_buf_len);
2012 path_buf[0] = '\0';
2013
2014 if (fstat(screen->fd, &sb)) {
2015 DBG("Failed to stat DRM fd\n");
2016 return false;
2017 }
2018
2019 maj = major(sb.st_rdev);
2020 min = minor(sb.st_rdev);
2021
2022 if (!S_ISCHR(sb.st_mode)) {
2023 DBG("DRM fd is not a character device as expected\n");
2024 return false;
2025 }
2026
2027 len = snprintf(path_buf, path_buf_len,
2028 "/sys/dev/char/%d:%d/device/drm", maj, min);
2029 if (len < 0 || len >= path_buf_len) {
2030 DBG("Failed to concatenate sysfs path to drm device\n");
2031 return false;
2032 }
2033
2034 drmdir = opendir(path_buf);
2035 if (!drmdir) {
2036 DBG("Failed to open %s: %m\n", path_buf);
2037 return false;
2038 }
2039
2040 while ((drm_entry = readdir(drmdir))) {
2041 if ((drm_entry->d_type == DT_DIR ||
2042 drm_entry->d_type == DT_LNK) &&
2043 strncmp(drm_entry->d_name, "card", 4) == 0)
2044 {
2045 len = snprintf(path_buf, path_buf_len,
2046 "/sys/dev/char/%d:%d/device/drm/%s",
2047 maj, min, drm_entry->d_name);
2048 closedir(drmdir);
2049 if (len < 0 || len >= path_buf_len)
2050 return false;
2051 else
2052 return true;
2053 }
2054 }
2055
2056 closedir(drmdir);
2057
2058 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
2059 maj, min);
2060
2061 return false;
2062 }
2063
2064 typedef void (*perf_register_oa_queries_t)(struct brw_context *);
2065
2066 static perf_register_oa_queries_t
2067 get_register_queries_function(const struct gen_device_info *devinfo)
2068 {
2069 if (devinfo->is_haswell)
2070 return brw_oa_register_queries_hsw;
2071 if (devinfo->is_cherryview)
2072 return brw_oa_register_queries_chv;
2073 if (devinfo->is_broadwell)
2074 return brw_oa_register_queries_bdw;
2075 if (devinfo->is_broxton)
2076 return brw_oa_register_queries_bxt;
2077 if (devinfo->is_skylake) {
2078 if (devinfo->gt == 2)
2079 return brw_oa_register_queries_sklgt2;
2080 if (devinfo->gt == 3)
2081 return brw_oa_register_queries_sklgt3;
2082 if (devinfo->gt == 4)
2083 return brw_oa_register_queries_sklgt4;
2084 }
2085 if (devinfo->is_kabylake) {
2086 if (devinfo->gt == 2)
2087 return brw_oa_register_queries_kblgt2;
2088 if (devinfo->gt == 3)
2089 return brw_oa_register_queries_kblgt3;
2090 }
2091 if (devinfo->is_geminilake)
2092 return brw_oa_register_queries_glk;
2093 return NULL;
2094 }
2095
2096 static unsigned
2097 brw_init_perf_query_info(struct gl_context *ctx)
2098 {
2099 struct brw_context *brw = brw_context(ctx);
2100 const struct gen_device_info *devinfo = &brw->screen->devinfo;
2101 bool i915_perf_oa_available = false;
2102 struct stat sb;
2103 char sysfs_dev_dir[128];
2104 perf_register_oa_queries_t oa_register;
2105
2106 if (brw->perfquery.n_queries)
2107 return brw->perfquery.n_queries;
2108
2109 init_pipeline_statistic_query_registers(brw);
2110
2111 oa_register = get_register_queries_function(devinfo);
2112
2113 /* The existence of this sysctl parameter implies the kernel supports
2114 * the i915 perf interface.
2115 */
2116 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
2117
2118 /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
2119 * metrics unless running as root.
2120 */
2121 if (devinfo->is_haswell)
2122 i915_perf_oa_available = true;
2123 else {
2124 uint64_t paranoid = 1;
2125
2126 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
2127
2128 if (paranoid == 0 || geteuid() == 0)
2129 i915_perf_oa_available = true;
2130 }
2131 }
2132
2133 if (i915_perf_oa_available &&
2134 oa_register &&
2135 get_sysfs_dev_dir(brw, sysfs_dev_dir, sizeof(sysfs_dev_dir)) &&
2136 init_oa_sys_vars(brw, sysfs_dev_dir))
2137 {
2138 brw->perfquery.oa_metrics_table =
2139 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2140 _mesa_key_string_equal);
2141
2142 /* Index all the metric sets mesa knows about before looking to see what
2143 * the kernel is advertising.
2144 */
2145 oa_register(brw);
2146
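/* If the kernel supports dynamically loaded configs, and the user hasn't
 * disabled that with the DEBUG_NO_OACONFIG option, upload mesa's metric
 * sets; otherwise fall back to enumerating whatever the kernel already
 * advertises in sysfs.
 */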
2147 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
2148 kernel_has_dynamic_config_support(brw, sysfs_dev_dir))
2149 init_oa_configs(brw, sysfs_dev_dir);
2150 else
2151 enumerate_sysfs_metrics(brw, sysfs_dev_dir);
2152 }
2153
2154 brw->perfquery.unaccumulated =
2155 ralloc_array(brw, struct brw_perf_query_object *, 2);
2156 brw->perfquery.unaccumulated_elements = 0;
2157 brw->perfquery.unaccumulated_array_size = 2;
2158
2159 exec_list_make_empty(&brw->perfquery.sample_buffers);
2160 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
2161
2162 /* It's convenient to guarantee that this linked list of sample
2163 * buffers is never empty, so we add an empty head; that way, when
2164 * we begin an OA query we can always take a reference on a buffer
2165 * in this list.
2166 */
2167 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
2168 exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);
2169
2170 brw->perfquery.oa_stream_fd = -1;
2171
2172 brw->perfquery.next_query_start_report_id = 1000;
2173
2174 return brw->perfquery.n_queries;
2175 }
2176
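/* Wire the GL_INTEL_performance_query driver hooks into the context.
 *
 * Rough client-side sketch of how these hooks end up being exercised
 * (based on the extension spec, not on code in this file; the buffer
 * size below is illustrative only):
 *
 *    GLuint query_id, handle;
 *    glGetFirstPerfQueryIdINTEL(&query_id);
 *    glCreatePerfQueryINTEL(query_id, &handle);
 *
 *    glBeginPerfQueryINTEL(handle);
 *    ... draw ...
 *    glEndPerfQueryINTEL(handle);
 *
 *    char data[4096];
 *    GLuint bytes_written = 0;
 *    glGetPerfQueryDataINTEL(handle, GL_PERFQUERY_WAIT_INTEL,
 *                            sizeof(data), data, &bytes_written);
 *    glDeletePerfQueryINTEL(handle);
 */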
2177 void
2178 brw_init_performance_queries(struct brw_context *brw)
2179 {
2180 struct gl_context *ctx = &brw->ctx;
2181
2182 ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
2183 ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
2184 ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
2185 ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
2186 ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
2187 ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
2188 ctx->Driver.EndPerfQuery = brw_end_perf_query;
2189 ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
2190 ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
2191 ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
2192 }