[mesa.git] / src/mesa/drivers/dri/i965/brw_performance_query.c
1 /*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file brw_performance_query.c
26 *
27 * Implementation of the GL_INTEL_performance_query extension.
28 *
29 * Currently there are two possible counter sources exposed here:
30 *
31 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
32 * that we can snapshot at the beginning and end of a query.
33 *
34 * On Gen7.5+ we have Observability Architecture counters which are
35 * covered in a separate document from the rest of the PRMs. It is available at:
36 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
37 * => 2013 Intel Core Processor Family => Observability Performance Counters
38 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
39 * though notably we currently only support OA counters for Haswell+)
40 */
41
42 #include <limits.h>
43 #include <dirent.h>
44
45 /* put before sys/types.h to silence glibc warnings */
46 #ifdef MAJOR_IN_MKDEV
47 #include <sys/mkdev.h>
48 #endif
49 #ifdef MAJOR_IN_SYSMACROS
50 #include <sys/sysmacros.h>
51 #endif
52 #include <sys/types.h>
53 #include <sys/stat.h>
54 #include <fcntl.h>
55 #include <sys/mman.h>
56 #include <sys/ioctl.h>
57
58 #include <xf86drm.h>
59 #include "drm-uapi/i915_drm.h"
60
61 #include "main/hash.h"
62 #include "main/macros.h"
63 #include "main/mtypes.h"
64 #include "main/performance_query.h"
65
66 #include "util/bitset.h"
67 #include "util/ralloc.h"
68 #include "util/hash_table.h"
69 #include "util/list.h"
70 #include "util/u_math.h"
71
72 #include "brw_context.h"
73 #include "brw_defines.h"
74 #include "brw_performance_query.h"
75 #include "brw_oa_metrics.h"
76 #include "intel_batchbuffer.h"
77
78 #define FILE_DEBUG_FLAG DEBUG_PERFMON
79
80 #define OAREPORT_REASON_MASK 0x3f
81 #define OAREPORT_REASON_SHIFT 19
82 #define OAREPORT_REASON_TIMER (1<<0)
83 #define OAREPORT_REASON_TRIGGER1 (1<<1)
84 #define OAREPORT_REASON_TRIGGER2 (1<<2)
85 #define OAREPORT_REASON_CTX_SWITCH (1<<3)
86 #define OAREPORT_REASON_GO_TRANSITION (1<<4)
87
88 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
89 256) /* OA counter report */
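/* Note: each brw_oa_sample_buf below is sized to hold up to 10 records of
 * this size (see its buf[] member), which bounds how much a single read()
 * from the i915 perf stream can return into one buffer.
 */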
90
91 /**
92 * Periodic OA samples are read() into these buffer structures via the
93 * i915 perf kernel interface and appended to the
94 * brw->perfquery.sample_buffers linked list. When we process the
95 * results of an OA metrics query we need to consider all the periodic
96 * samples between the Begin and End MI_REPORT_PERF_COUNT command
97 * markers.
98 *
99 * 'Periodic' is a simplification as there are other automatic reports
100 * written by the hardware also buffered here.
101 *
102 * Considering three queries, A, B and C:
103 *
104 * Time ---->
105 * ________________A_________________
106 * | |
107 * | ________B_________ _____C___________
108 * | | | | | |
109 *
110 * And an illustration of sample buffers read over this time frame:
111 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
112 *
113 * These nodes may hold samples for query A:
114 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
115 *
116 * These nodes may hold samples for query B:
117 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
118 *
119 * These nodes may hold samples for query C:
120 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
121 *
122 * The illustration assumes we have an even distribution of periodic
123 * samples so all nodes have the same size plotted against time.
124 *
125 * Note, to simplify code, the list is never empty.
126 *
127 * With overlapping queries we can see that periodic OA reports may
128 * relate to multiple queries and care needs to be taken to keep
129 * track of sample buffers until there are no queries that might
130 * depend on their contents.
131 *
132 * We use a node ref counting system where a reference ensures that a
133 * node and all following nodes can't be freed/recycled until the
134 * reference drops to zero.
135 *
136 * E.g. with a ref of one here:
137 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
138 *
139 * These nodes could be freed or recycled ("reaped"):
140 * [ 0 ][ 0 ]
141 *
142 * These must be preserved until the leading ref drops to zero:
143 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
144 *
145 * When a query starts we take a reference on the current tail of
146 * the list, knowing that no already-buffered samples can possibly
147 * relate to the newly-started query. A pointer to this node is
148 * also saved in the query object's ->oa.samples_head.
149 *
150 * E.g. starting query A while there are two nodes in .sample_buffers:
151 * ________________A________
152 * |
153 *
154 * [ 0 ][ 1 ]
155 * ^_______ Add a reference and store pointer to node in
156 * A->oa.samples_head
157 *
158 * Moving forward to when the B query starts with no new buffer nodes:
159 * (for reference, i915 perf reads() are only done when queries finish)
160 * ________________A_______
161 * | ________B___
162 * | |
163 *
164 * [ 0 ][ 2 ]
165 * ^_______ Add a reference and store pointer to
166 * node in B->oa.samples_head
167 *
168 * Once a query is finished, after an OA query has become 'Ready',
169 * once the End OA report has landed and after we have processed
170 * all the intermediate periodic samples then we drop the
171 * ->oa.samples_head reference we took at the start.
172 *
173 * So when the B query has finished we have:
174 * ________________A________
175 * | ______B___________
176 * | | |
177 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
178 * ^_______ Drop B->oa.samples_head reference
179 *
180 * We still can't free these due to the A->oa.samples_head ref:
181 * [ 1 ][ 0 ][ 0 ][ 0 ]
182 *
183 * When the A query finishes: (note there's a new ref for C's samples_head)
184 * ________________A_________________
185 * | |
186 * | _____C_________
187 * | | |
188 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
189 * ^_______ Drop A->oa.samples_head reference
190 *
191 * And we can now reap these nodes up to the C->oa.samples_head:
192 * [ X ][ X ][ X ][ X ]
193 * keeping -> [ 1 ][ 0 ][ 0 ]
194 *
195 * We reap old sample buffers each time we finish processing an OA
196 * query by iterating the sample_buffers list from the head until we
197 * find a referenced node and stop.
198 *
199 * Reaped buffers move to a perfquery.free_sample_buffers list and
200 * when we come to read() we first look to recycle a buffer from the
201 * free_sample_buffers list before allocating a new buffer.
202 */
203 struct brw_oa_sample_buf {
204 struct exec_node link;
205 int refcount;
206 int len;
207 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
208 uint32_t last_timestamp;
209 };
210
211 /** Downcasting convenience macro. */
212 static inline struct brw_perf_query_object *
213 brw_perf_query(struct gl_perf_query_object *o)
214 {
215 return (struct brw_perf_query_object *) o;
216 }
217
218 #define MI_RPC_BO_SIZE 4096
219 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
220 #define MI_FREQ_START_OFFSET_BYTES (3072)
221 #define MI_FREQ_END_OFFSET_BYTES (3076)
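/* Rough layout of the MI_RPC BO implied by the offsets above (a sketch,
 * not a hardware requirement):
 *
 *   0x000: Begin MI_REPORT_PERF_COUNT snapshot
 *   0x800: End MI_REPORT_PERF_COUNT snapshot
 *   0xc00: RPSTAT register snapshot taken at Begin (32 bits)
 *   0xc04: RPSTAT register snapshot taken at End (32 bits)
 */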
222
223 /******************************************************************************/
224
225 static bool
226 read_file_uint64(const char *file, uint64_t *val)
227 {
228 char buf[32];
229 int fd, n;
230
231 fd = open(file, 0);
232 if (fd < 0)
233 return false;
234 while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
235 errno == EINTR);
236 close(fd);
237 if (n < 0)
238 return false;
239
240 buf[n] = '\0';
241 *val = strtoull(buf, NULL, 0);
242
243 return true;
244 }
245
246 static bool
247 read_sysfs_drm_device_file_uint64(struct brw_context *brw,
248 const char *file,
249 uint64_t *value)
250 {
251 char buf[512];
252 int len;
253
254 len = snprintf(buf, sizeof(buf), "%s/%s",
255 brw->perfquery.sysfs_dev_dir, file);
256 if (len < 0 || len >= sizeof(buf)) {
257 DBG("Failed to concatenate sys filename to read u64 from\n");
258 return false;
259 }
260
261 return read_file_uint64(buf, value);
262 }
263
264 /******************************************************************************/
265
266 static bool
267 brw_is_perf_query_ready(struct gl_context *ctx,
268 struct gl_perf_query_object *o);
269
270 static uint64_t
271 brw_perf_query_get_metric_id(struct brw_context *brw,
272 const struct brw_perf_query_info *query)
273 {
274 /* These queries are known never to change, their config ID has been
275 * loaded upon the first query creation. No need to look them up again.
276 */
277 if (query->kind == OA_COUNTERS)
278 return query->oa_metrics_set_id;
279
280 assert(query->kind == OA_COUNTERS_RAW);
281
282 /* Raw queries can be reprogrammed by an external application/library.
283 * When a raw query is used for the first time its ID is set to a value !=
284 * 0. When it stops being used the ID returns to 0. No need to reload the
285 * ID when it's already loaded.
286 */
287 if (query->oa_metrics_set_id != 0) {
288 DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
289 query->name, query->guid, query->oa_metrics_set_id);
290 return query->oa_metrics_set_id;
291 }
292
293 char metric_id_file[280];
294 snprintf(metric_id_file, sizeof(metric_id_file),
295 "%s/metrics/%s/id", brw->perfquery.sysfs_dev_dir, query->guid);
296
297 struct brw_perf_query_info *raw_query = (struct brw_perf_query_info *)query;
298 if (!read_file_uint64(metric_id_file, &raw_query->oa_metrics_set_id)) {
299 DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
300 raw_query->oa_metrics_set_id = 1ULL;
301 } else {
302 DBG("Raw query '%s'guid=%s loaded ID: %"PRIu64"\n",
303 query->name, query->guid, query->oa_metrics_set_id);
304 }
305 return query->oa_metrics_set_id;
306 }
307
308 static void
309 dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
310 {
311 struct gl_context *ctx = brw_void;
312 struct gl_perf_query_object *o = query_void;
313 struct brw_perf_query_object *obj = query_void;
314
315 switch (obj->query->kind) {
316 case OA_COUNTERS:
317 case OA_COUNTERS_RAW:
318 DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
319 id,
320 o->Used ? "Dirty," : "New,",
321 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
322 obj->oa.bo ? "yes," : "no,",
323 brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
324 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
325 break;
326 case PIPELINE_STATS:
327 DBG("%4d: %-6s %-8s BO: %-4s\n",
328 id,
329 o->Used ? "Dirty," : "New,",
330 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
331 obj->pipeline_stats.bo ? "yes" : "no");
332 break;
333 default:
334 unreachable("Unknown query type");
335 break;
336 }
337 }
338
339 static void
340 dump_perf_queries(struct brw_context *brw)
341 {
342 struct gl_context *ctx = &brw->ctx;
343 DBG("Queries: (Open queries = %d, OA users = %d)\n",
344 brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
345 _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
346 }
347
348 /******************************************************************************/
349
350 static struct brw_oa_sample_buf *
351 get_free_sample_buf(struct brw_context *brw)
352 {
353 struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
354 struct brw_oa_sample_buf *buf;
355
356 if (node)
357 buf = exec_node_data(struct brw_oa_sample_buf, node, link);
358 else {
359 buf = ralloc_size(brw, sizeof(*buf));
360
361 exec_node_init(&buf->link);
362 buf->refcount = 0;
363 buf->len = 0;
364 }
365
366 return buf;
367 }
368
369 static void
370 reap_old_sample_buffers(struct brw_context *brw)
371 {
372 struct exec_node *tail_node =
373 exec_list_get_tail(&brw->perfquery.sample_buffers);
374 struct brw_oa_sample_buf *tail_buf =
375 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
376
377 /* Remove all old, unreferenced sample buffers walking forward from
378 * the head of the list, except always leave at least one node in
379 * the list so we always have a node to reference when we Begin
380 * a new query.
381 */
382 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
383 &brw->perfquery.sample_buffers)
384 {
385 if (buf->refcount == 0 && buf != tail_buf) {
386 exec_node_remove(&buf->link);
387 exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
388 } else
389 return;
390 }
391 }
392
393 static void
394 free_sample_bufs(struct brw_context *brw)
395 {
396 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
397 &brw->perfquery.free_sample_buffers)
398 ralloc_free(buf);
399
400 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
401 }
402
403 /******************************************************************************/
404
405 /**
406 * Driver hook for glGetPerfQueryInfoINTEL().
407 */
408 static void
409 brw_get_perf_query_info(struct gl_context *ctx,
410 unsigned query_index,
411 const char **name,
412 GLuint *data_size,
413 GLuint *n_counters,
414 GLuint *n_active)
415 {
416 struct brw_context *brw = brw_context(ctx);
417 const struct brw_perf_query_info *query =
418 &brw->perfquery.queries[query_index];
419
420 *name = query->name;
421 *data_size = query->data_size;
422 *n_counters = query->n_counters;
423
424 switch (query->kind) {
425 case OA_COUNTERS:
426 case OA_COUNTERS_RAW:
427 *n_active = brw->perfquery.n_active_oa_queries;
428 break;
429
430 case PIPELINE_STATS:
431 *n_active = brw->perfquery.n_active_pipeline_stats_queries;
432 break;
433
434 default:
435 unreachable("Unknown query type");
436 break;
437 }
438 }
439
440 /**
441 * Driver hook for glGetPerfCounterInfoINTEL().
442 */
443 static void
444 brw_get_perf_counter_info(struct gl_context *ctx,
445 unsigned query_index,
446 unsigned counter_index,
447 const char **name,
448 const char **desc,
449 GLuint *offset,
450 GLuint *data_size,
451 GLuint *type_enum,
452 GLuint *data_type_enum,
453 GLuint64 *raw_max)
454 {
455 struct brw_context *brw = brw_context(ctx);
456 const struct brw_perf_query_info *query =
457 &brw->perfquery.queries[query_index];
458 const struct brw_perf_query_counter *counter =
459 &query->counters[counter_index];
460
461 *name = counter->name;
462 *desc = counter->desc;
463 *offset = counter->offset;
464 *data_size = counter->size;
465 *type_enum = counter->type;
466 *data_type_enum = counter->data_type;
467 *raw_max = counter->raw_max;
468 }
469
470 /******************************************************************************/
471
472 /**
473 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
474 * pipeline statistics for the performance query object.
475 */
476 static void
477 snapshot_statistics_registers(struct brw_context *brw,
478 struct brw_perf_query_object *obj,
479 uint32_t offset_in_bytes)
480 {
481 const struct brw_perf_query_info *query = obj->query;
482 const int n_counters = query->n_counters;
483
484 for (int i = 0; i < n_counters; i++) {
485 const struct brw_perf_query_counter *counter = &query->counters[i];
486
487 assert(counter->data_type == GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL);
488
489 brw_store_register_mem64(brw, obj->pipeline_stats.bo,
490 counter->pipeline_stat.reg,
491 offset_in_bytes + i * sizeof(uint64_t));
492 }
493 }
494
495 /**
496 * Add a query to the global list of "unaccumulated queries."
497 *
498 * Queries are tracked here until all the associated OA reports have
499 * been accumulated via accumulate_oa_reports() after the end
500 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
501 */
502 static void
503 add_to_unaccumulated_query_list(struct brw_context *brw,
504 struct brw_perf_query_object *obj)
505 {
506 if (brw->perfquery.unaccumulated_elements >=
507 brw->perfquery.unaccumulated_array_size)
508 {
509 brw->perfquery.unaccumulated_array_size *= 1.5;
510 brw->perfquery.unaccumulated =
511 reralloc(brw, brw->perfquery.unaccumulated,
512 struct brw_perf_query_object *,
513 brw->perfquery.unaccumulated_array_size);
514 }
515
516 brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
517 }
518
519 /**
520 * Remove a query from the global list of unaccumulated queries once
521 * the OA reports associated with the query have been accumulated in
522 * accumulate_oa_reports() or when discarding unwanted query
523 * results.
524 */
525 static void
526 drop_from_unaccumulated_query_list(struct brw_context *brw,
527 struct brw_perf_query_object *obj)
528 {
529 for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
530 if (brw->perfquery.unaccumulated[i] == obj) {
531 int last_elt = --brw->perfquery.unaccumulated_elements;
532
533 if (i == last_elt)
534 brw->perfquery.unaccumulated[i] = NULL;
535 else {
536 brw->perfquery.unaccumulated[i] =
537 brw->perfquery.unaccumulated[last_elt];
538 }
539
540 break;
541 }
542 }
543
544 /* Drop our samples_head reference so that associated periodic
545 * sample data buffers can potentially be reaped if they aren't
546 * referenced by any other queries...
547 */
548
549 struct brw_oa_sample_buf *buf =
550 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
551
552 assert(buf->refcount > 0);
553 buf->refcount--;
554
555 obj->oa.samples_head = NULL;
556
557 reap_old_sample_buffers(brw);
558 }
559
560 /**
561 * Given pointers to starting and ending OA snapshots, add the deltas for each
562 * counter to the results.
563 */
564 static void
565 add_deltas(struct brw_context *brw,
566 struct brw_perf_query_object *obj,
567 const uint32_t *start,
568 const uint32_t *end)
569 {
570 const struct brw_perf_query_info *query = obj->query;
571 uint64_t *accumulator = obj->oa.accumulator;
572 int idx = 0;
573 int i;
574
575 obj->oa.reports_accumulated++;
576
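/* For the A32u40_A4u32_B8_C8 format handled below, the accumulator slots
 * end up laid out as: [0] timestamp, [1] gpu clock, [2..33] 40bit A
 * counters, [34..37] 32bit A counters, [38..53] B + C counters.
 */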
577 switch (query->oa_format) {
578 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
579 brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator + idx++); /* timestamp */
580 brw_perf_query_accumulate_uint32(start + 3, end + 3, accumulator + idx++); /* clock */
581
582 /* 32x 40bit A counters... */
583 for (i = 0; i < 32; i++)
584 brw_perf_query_accumulate_uint40(i, start, end, accumulator + idx++);
585
586 /* 4x 32bit A counters... */
587 for (i = 0; i < 4; i++)
588 brw_perf_query_accumulate_uint32(start + 36 + i, end + 36 + i,
589 accumulator + idx++);
590
591 /* 8x 32bit B counters + 8x 32bit C counters... */
592 for (i = 0; i < 16; i++)
593 brw_perf_query_accumulate_uint32(start + 48 + i, end + 48 + i,
594 accumulator + idx++);
595
596 break;
597 case I915_OA_FORMAT_A45_B8_C8:
598 brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */
599
600 for (i = 0; i < 61; i++)
601 brw_perf_query_accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);
602
603 break;
604 default:
605 unreachable("Can't accumulate OA counters in unknown format");
606 }
607 }
608
609 static bool
610 inc_n_oa_users(struct brw_context *brw)
611 {
612 if (brw->perfquery.n_oa_users == 0 &&
613 drmIoctl(brw->perfquery.oa_stream_fd,
614 I915_PERF_IOCTL_ENABLE, 0) < 0)
615 {
616 return false;
617 }
618 ++brw->perfquery.n_oa_users;
619
620 return true;
621 }
622
623 static void
624 dec_n_oa_users(struct brw_context *brw)
625 {
626 /* Disabling the i915 perf stream will effectively disable the OA
627 * counters. Note it's important to be sure there are no outstanding
628 * MI_RPC commands at this point since they could stall the CS
629 * indefinitely once OACONTROL is disabled.
630 */
631 --brw->perfquery.n_oa_users;
632 if (brw->perfquery.n_oa_users == 0 &&
633 drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
634 {
635 DBG("WARNING: Error disabling i915 perf stream: %m\n");
636 }
637 }
638
639 /* In general, if we see anything spurious while accumulating results,
640 * we don't try to continue accumulating the current query hoping
641 * for the best; we scrap anything outstanding, and then hope for the
642 * best with new queries.
643 */
644 static void
645 discard_all_queries(struct brw_context *brw)
646 {
647 while (brw->perfquery.unaccumulated_elements) {
648 struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];
649
650 obj->oa.results_accumulated = true;
651 drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);
652
653 dec_n_oa_users(brw);
654 }
655 }
656
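/* Status values returned by read_oa_samples_until() below:
 *
 *   OA_READ_STATUS_ERROR:      read() failed or we saw a spurious EOF
 *   OA_READ_STATUS_UNFINISHED: no more data available yet (EAGAIN) and the
 *                              end timestamp hasn't been reached
 *   OA_READ_STATUS_FINISHED:   samples covering up to the end timestamp
 *                              have been buffered
 */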
657 enum OaReadStatus {
658 OA_READ_STATUS_ERROR,
659 OA_READ_STATUS_UNFINISHED,
660 OA_READ_STATUS_FINISHED,
661 };
662
663 static enum OaReadStatus
664 read_oa_samples_until(struct brw_context *brw,
665 uint32_t start_timestamp,
666 uint32_t end_timestamp)
667 {
668 struct exec_node *tail_node =
669 exec_list_get_tail(&brw->perfquery.sample_buffers);
670 struct brw_oa_sample_buf *tail_buf =
671 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
672 uint32_t last_timestamp = tail_buf->last_timestamp;
673
674 while (1) {
675 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
676 uint32_t offset;
677 int len;
678
679 while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
680 sizeof(buf->buf))) < 0 && errno == EINTR)
681 ;
682
683 if (len <= 0) {
684 exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);
685
686 if (len < 0) {
687 if (errno == EAGAIN)
688 return ((last_timestamp - start_timestamp) >=
689 (end_timestamp - start_timestamp)) ?
690 OA_READ_STATUS_FINISHED :
691 OA_READ_STATUS_UNFINISHED;
692 else {
693 DBG("Error reading i915 perf samples: %m\n");
694 }
695 } else
696 DBG("Spurious EOF reading i915 perf samples\n");
697
698 return OA_READ_STATUS_ERROR;
699 }
700
701 buf->len = len;
702 exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);
703
704 /* Go through the reports and update the last timestamp. */
705 offset = 0;
706 while (offset < buf->len) {
707 const struct drm_i915_perf_record_header *header =
708 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
709 uint32_t *report = (uint32_t *) (header + 1);
710
711 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
712 last_timestamp = report[1];
713
714 offset += header->size;
715 }
716
717 buf->last_timestamp = last_timestamp;
718 }
719
720 unreachable("not reached");
721 return OA_READ_STATUS_ERROR;
722 }
723
724 /**
725 * Try to read all the reports until either the delimiting timestamp
726 * or an error arises.
727 */
728 static bool
729 read_oa_samples_for_query(struct brw_context *brw,
730 struct brw_perf_query_object *obj)
731 {
732 uint32_t *start;
733 uint32_t *last;
734 uint32_t *end;
735
736 /* We need the MI_REPORT_PERF_COUNT to land before we can start
737 * accumulating. */
738 assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
739 !brw_bo_busy(obj->oa.bo));
740
741 /* Map the BO once here and let accumulate_oa_reports() unmap
742 * it. */
743 if (obj->oa.map == NULL)
744 obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);
745
746 start = last = obj->oa.map;
747 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
748
749 if (start[0] != obj->oa.begin_report_id) {
750 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
751 return true;
752 }
753 if (end[0] != (obj->oa.begin_report_id + 1)) {
754 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
755 return true;
756 }
757
758 /* Read the reports until the end timestamp. */
759 switch (read_oa_samples_until(brw, start[1], end[1])) {
760 case OA_READ_STATUS_ERROR:
761 /* Fallthrough and let accumulate_oa_reports() deal with the
762 * error. */
763 case OA_READ_STATUS_FINISHED:
764 return true;
765 case OA_READ_STATUS_UNFINISHED:
766 return false;
767 }
768
769 unreachable("invalid read status");
770 return false;
771 }
772
773 /**
774 * Accumulate raw OA counter values based on deltas between pairs of
775 * OA reports.
776 *
777 * Accumulation starts from the first report captured via
778 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
779 * last MI_RPC report requested by brw_end_perf_query(). Between these
780 * two reports there may also be some number of periodically sampled OA
781 * reports collected via the i915 perf interface - depending on the
782 * duration of the query.
783 *
784 * These periodic snapshots help to ensure we handle counter overflow
785 * correctly by being frequent enough to ensure we don't miss multiple
786 * overflows of a counter between snapshots. For Gen8+ the i915 perf
787 * snapshots provide the extra context-switch reports that let us
788 * subtract out the progress of counters associated with other
789 * contexts running on the system.
790 */
791 static void
792 accumulate_oa_reports(struct brw_context *brw,
793 struct brw_perf_query_object *obj)
794 {
795 const struct gen_device_info *devinfo = &brw->screen->devinfo;
796 struct gl_perf_query_object *o = &obj->base;
797 uint32_t *start;
798 uint32_t *last;
799 uint32_t *end;
800 struct exec_node *first_samples_node;
801 bool in_ctx = true;
802 int out_duration = 0;
803
804 assert(o->Ready);
805 assert(obj->oa.map != NULL);
806
807 start = last = obj->oa.map;
808 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
809
810 if (start[0] != obj->oa.begin_report_id) {
811 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
812 goto error;
813 }
814 if (end[0] != (obj->oa.begin_report_id + 1)) {
815 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
816 goto error;
817 }
818
819 obj->oa.hw_id = start[2];
820
821 /* See if we have any periodic reports to accumulate too... */
822
823 /* N.B. The oa.samples_head was set when the query began and
824 * pointed to the tail of the brw->perfquery.sample_buffers list at
825 * the time the query started. Since the buffer existed before the
826 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
827 * that no data in this particular node's buffer can possibly be
828 * associated with the query - so skip ahead one...
829 */
830 first_samples_node = obj->oa.samples_head->next;
831
832 foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
833 &brw->perfquery.sample_buffers,
834 first_samples_node)
835 {
836 int offset = 0;
837
838 while (offset < buf->len) {
839 const struct drm_i915_perf_record_header *header =
840 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
841
842 assert(header->size != 0);
843 assert(header->size <= buf->len);
844
845 offset += header->size;
846
847 switch (header->type) {
848 case DRM_I915_PERF_RECORD_SAMPLE: {
849 uint32_t *report = (uint32_t *)(header + 1);
850 bool add = true;
851
852 /* Ignore reports that come before the start marker.
853 * (Note: takes care to allow overflow of 32bit timestamps)
854 */
855 if (brw_timebase_scale(brw, report[1] - start[1]) > 5000000000)
856 continue;
857
858 /* Ignore reports that come after the end marker.
859 * (Note: takes care to allow overflow of 32bit timestamps)
860 */
861 if (brw_timebase_scale(brw, report[1] - end[1]) <= 5000000000)
862 goto end;
863
864 /* For Gen8+ since the counters continue while other
865 * contexts are running we need to discount any unrelated
866 * deltas. The hardware automatically generates a report
867 * on context switch which gives us a new reference point
868 * to continue adding deltas from.
869 *
870 * For Haswell we can rely on the HW to stop the progress
871 * of OA counters while any other context is active.
872 */
873 if (devinfo->gen >= 8) {
874 if (in_ctx && report[2] != obj->oa.hw_id) {
875 DBG("i915 perf: Switch AWAY (observed by ID change)\n");
876 in_ctx = false;
877 out_duration = 0;
878 } else if (in_ctx == false && report[2] == obj->oa.hw_id) {
879 DBG("i915 perf: Switch TO\n");
880 in_ctx = true;
881
882 /* From experimentation in IGT, we found that the OA unit
883 * might label some report as "idle" (using an invalid
884 * context ID), right after a report for a given context.
885 * Deltas generated by those reports actually belong to the
886 * previous context, even though they're not labelled as
887 * such.
888 *
889 * We didn't *really* Switch AWAY in the case that we e.g.
890 * saw a single periodic report while idle...
891 */
892 if (out_duration >= 1)
893 add = false;
894 } else if (in_ctx) {
895 assert(report[2] == obj->oa.hw_id);
896 DBG("i915 perf: Continuation IN\n");
897 } else {
898 assert(report[2] != obj->oa.hw_id);
899 DBG("i915 perf: Continuation OUT\n");
900 add = false;
901 out_duration++;
902 }
903 }
904
905 if (add)
906 add_deltas(brw, obj, last, report);
907
908 last = report;
909
910 break;
911 }
912
913 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
914 DBG("i915 perf: OA error: all reports lost\n");
915 goto error;
916 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
917 DBG("i915 perf: OA report lost\n");
918 break;
919 }
920 }
921 }
922
923 end:
924
925 add_deltas(brw, obj, last, end);
926
927 DBG("Marking %d accumulated - results gathered\n", o->Id);
928
929 obj->oa.results_accumulated = true;
930 drop_from_unaccumulated_query_list(brw, obj);
931 dec_n_oa_users(brw);
932
933 return;
934
935 error:
936
937 discard_all_queries(brw);
938 }
939
940 /******************************************************************************/
941
942 static bool
943 open_i915_perf_oa_stream(struct brw_context *brw,
944 int metrics_set_id,
945 int report_format,
946 int period_exponent,
947 int drm_fd,
948 uint32_t ctx_id)
949 {
950 uint64_t properties[] = {
951 /* Single context sampling */
952 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
953
954 /* Include OA reports in samples */
955 DRM_I915_PERF_PROP_SAMPLE_OA, true,
956
957 /* OA unit configuration */
958 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
959 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
960 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
961 };
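/* Note the properties array above is a flat list of (key, value) pairs,
 * which is why num_properties below is ARRAY_SIZE(properties) / 2.
 */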
962 struct drm_i915_perf_open_param param = {
963 .flags = I915_PERF_FLAG_FD_CLOEXEC |
964 I915_PERF_FLAG_FD_NONBLOCK |
965 I915_PERF_FLAG_DISABLED,
966 .num_properties = ARRAY_SIZE(properties) / 2,
967 .properties_ptr = (uintptr_t) properties,
968 };
969 int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
970 if (fd == -1) {
971 DBG("Error opening i915 perf OA stream: %m\n");
972 return false;
973 }
974
975 brw->perfquery.oa_stream_fd = fd;
976
977 brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
978 brw->perfquery.current_oa_format = report_format;
979
980 return true;
981 }
982
983 static void
984 close_perf(struct brw_context *brw,
985 const struct brw_perf_query_info *query)
986 {
987 if (brw->perfquery.oa_stream_fd != -1) {
988 close(brw->perfquery.oa_stream_fd);
989 brw->perfquery.oa_stream_fd = -1;
990 }
991 if (query->kind == OA_COUNTERS_RAW) {
992 struct brw_perf_query_info *raw_query =
993 (struct brw_perf_query_info *) query;
994 raw_query->oa_metrics_set_id = 0;
995 }
996 }
997
998 static void
999 capture_frequency_stat_register(struct brw_context *brw,
1000 struct brw_bo *bo,
1001 uint32_t bo_offset)
1002 {
1003 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1004
1005 if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
1006 !devinfo->is_baytrail && !devinfo->is_cherryview) {
1007 brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
1008 } else if (devinfo->gen >= 9) {
1009 brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
1010 }
1011 }
1012
1013 /**
1014 * Driver hook for glBeginPerfQueryINTEL().
1015 */
1016 static bool
1017 brw_begin_perf_query(struct gl_context *ctx,
1018 struct gl_perf_query_object *o)
1019 {
1020 struct brw_context *brw = brw_context(ctx);
1021 struct brw_perf_query_object *obj = brw_perf_query(o);
1022 const struct brw_perf_query_info *query = obj->query;
1023
1024 /* We can assume the frontend hides mistaken attempts to Begin a
1025 * query object multiple times before its End. Similarly if an
1026 * application reuses a query object before results have arrived
1027 * the frontend will wait for prior results so we don't need
1028 * to support abandoning in-flight results.
1029 */
1030 assert(!o->Active);
1031 assert(!o->Used || o->Ready); /* no in-flight query to worry about */
1032
1033 DBG("Begin(%d)\n", o->Id);
1034
1035 /* XXX: We have to consider that the command parser unit that parses batch
1036 * buffer commands and is used to capture begin/end counter snapshots isn't
1037 * implicitly synchronized with what's currently running across other GPU
1038 * units (such as the EUs running shaders) that the performance counters are
1039 * associated with.
1040 *
1041 * The intention of performance queries is to measure the work associated
1042 * with commands between the begin/end delimiters and so for that to be the
1043 * case we need to explicitly synchronize the parsing of commands to capture
1044 * Begin/End counter snapshots with what's running across other parts of the
1045 * GPU.
1046 *
1047 * When the command parser reaches a Begin marker it effectively needs to
1048 * drain everything currently running on the GPU until the hardware is idle
1049 * before capturing the first snapshot of counters - otherwise the results
1050 * would also be measuring the effects of earlier commands.
1051 *
1052 * When the command parser reaches an End marker it needs to stall until
1053 * everything currently running on the GPU has finished before capturing the
1054 * end snapshot - otherwise the results won't be a complete representation
1055 * of the work.
1056 *
1057 * Theoretically there could be opportunities to minimize how much of the
1058 * GPU pipeline is drained, or that we stall for, when we know what specific
1059 * units the performance counters being queried relate to but we don't
1060 * currently attempt to be clever here.
1061 *
1062 * Note: with our current simple approach here then for back-to-back queries
1063 * we will redundantly emit duplicate commands to synchronize the command
1064 * streamer with the rest of the GPU pipeline, but we assume that in HW the
1065 * second synchronization is effectively a NOOP.
1066 *
1067 * N.B. The final results are based on deltas of counters between (inside)
1068 * Begin/End markers so even though the total wall clock time of the
1069 * workload is stretched by larger pipeline bubbles the bubbles themselves
1070 * are generally invisible to the query results. Whether that's a good or a
1071 * bad thing depends on the use case. For a lower real-time impact while
1072 * capturing metrics then periodic sampling may be a better choice than
1073 * INTEL_performance_query.
1074 *
1075 *
1076 * This is our Begin synchronization point to drain current work on the
1077 * GPU before we capture our first counter snapshot...
1078 */
1079 brw_emit_mi_flush(brw);
1080
1081 switch (query->kind) {
1082 case OA_COUNTERS:
1083 case OA_COUNTERS_RAW: {
1084
1085 /* Opening an i915 perf stream implies exclusive access to the OA unit
1086 * which will generate counter reports for a specific counter set with a
1087 * specific layout/format so we can't begin any OA based queries that
1088 * require a different counter set or format unless we get an opportunity
1089 * to close the stream and open a new one...
1090 */
1091 uint64_t metric_id = brw_perf_query_get_metric_id(brw, query);
1092
1093 if (brw->perfquery.oa_stream_fd != -1 &&
1094 brw->perfquery.current_oa_metrics_set_id != metric_id) {
1095
1096 if (brw->perfquery.n_oa_users != 0) {
1097 DBG("WARNING: Begin(%d) failed already using perf config=%i/%"PRIu64"\n",
1098 o->Id, brw->perfquery.current_oa_metrics_set_id, metric_id);
1099 return false;
1100 } else
1101 close_perf(brw, query);
1102 }
1103
1104 /* If the OA counters aren't already on, enable them. */
1105 if (brw->perfquery.oa_stream_fd == -1) {
1106 __DRIscreen *screen = brw->screen->driScrnPriv;
1107 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1108
1109 /* The period_exponent gives a sampling period as follows:
1110 * sample_period = timestamp_period * 2^(period_exponent + 1)
1111 *
1112 * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1113 * ~83ns (GEN8/9).
1114 *
1115 * The counter overflow period is derived from the EuActive counter
1116 * which reads a counter that increments by the number of clock
1117 * cycles multiplied by the number of EUs. It can be calculated as:
1118 *
1119 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1120 *
1121 * (E.g. 40 EUs @ 1GHz = ~53ms)
1122 *
1123 * We select a sampling period lower than that overflow period to
1124 * ensure we cannot see more than one counter overflow, otherwise we
1125 * could lose information.
1126 */
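/* A worked example of the formula above, using the ~1GHz approximation
 * applied below: a 32bit A counter with 40 EUs overflows after roughly
 * 2^32 / (40 * 2) ns ~= 53.7e6 ns ~= 53ms (the figure quoted above); a
 * 40bit counter pushes that out by a factor of 2^8 to ~13.7s.
 */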
1127
1128 int a_counter_in_bits = 32;
1129 if (devinfo->gen >= 8)
1130 a_counter_in_bits = 40;
1131
1132 uint64_t overflow_period = pow(2, a_counter_in_bits) /
1133 (brw->perfquery.sys_vars.n_eus *
1134 /* drop 1GHz freq to have units in nanoseconds */
1135 2);
1136
1137 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1138 overflow_period, overflow_period / 1000000ul, brw->perfquery.sys_vars.n_eus);
1139
1140 int period_exponent = 0;
1141 uint64_t prev_sample_period, next_sample_period;
1142 for (int e = 0; e < 30; e++) {
1143 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1144 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1145
1146 /* Pick the largest sampling period that is still lower than the
1147 * overflow period.
1148 */
1149 if (prev_sample_period < overflow_period &&
1150 next_sample_period > overflow_period)
1151 period_exponent = e + 1;
1152 }
1153
1154 if (period_exponent == 0) {
1155 DBG("WARNING: enable to find a sampling exponent\n");
1156 return false;
1157 }
1158
1159 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1160 prev_sample_period / 1000000ul);
1161
1162 if (!open_i915_perf_oa_stream(brw,
1163 metric_id,
1164 query->oa_format,
1165 period_exponent,
1166 screen->fd, /* drm fd */
1167 brw->hw_ctx))
1168 return false;
1169 } else {
1170 assert(brw->perfquery.current_oa_metrics_set_id == metric_id &&
1171 brw->perfquery.current_oa_format == query->oa_format);
1172 }
1173
1174 if (!inc_n_oa_users(brw)) {
1175 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1176 return false;
1177 }
1178
1179 if (obj->oa.bo) {
1180 brw_bo_unreference(obj->oa.bo);
1181 obj->oa.bo = NULL;
1182 }
1183
1184 obj->oa.bo =
1185 brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo", MI_RPC_BO_SIZE,
1186 BRW_MEMZONE_OTHER);
1187 #ifdef DEBUG
1188 /* Pre-filling the BO helps debug whether writes landed. */
1189 void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
1190 memset(map, 0x80, MI_RPC_BO_SIZE);
1191 brw_bo_unmap(obj->oa.bo);
1192 #endif
1193
1194 obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
1195 brw->perfquery.next_query_start_report_id += 2;
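/* The Begin snapshot is tagged with begin_report_id and the End snapshot
 * with begin_report_id + 1 (see brw_end_perf_query()), hence incrementing
 * by 2 per query above.
 */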
1196
1197 /* We flush the batchbuffer here to minimize the chances that MI_RPC
1198 * delimiting commands end up in different batchbuffers. If that's the
1199 * case, the measurement will include the time it takes for the kernel
1200 * scheduler to load a new request into the hardware. This is manifested in
1201 * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
1202 */
1203 intel_batchbuffer_flush(brw);
1204
1205 /* Take a starting OA counter snapshot. */
1206 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
1207 obj->oa.begin_report_id);
1208 capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_START_OFFSET_BYTES);
1209
1210 ++brw->perfquery.n_active_oa_queries;
1211
1212 /* No already-buffered samples can possibly be associated with this query
1213 * so create a marker within the list of sample buffers enabling us to
1214 * easily ignore earlier samples when processing this query after
1215 * completion.
1216 */
1217 assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
1218 obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
1219
1220 struct brw_oa_sample_buf *buf =
1221 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
1222
1223 /* This reference will ensure that future/following sample
1224 * buffers (that may relate to this query) can't be freed until
1225 * this drops to zero.
1226 */
1227 buf->refcount++;
1228
1229 obj->oa.hw_id = 0xffffffff;
1230 memset(obj->oa.accumulator, 0, sizeof(obj->oa.accumulator));
1231 obj->oa.results_accumulated = false;
1232
1233 add_to_unaccumulated_query_list(brw, obj);
1234 break;
1235 }
1236
1237 case PIPELINE_STATS:
1238 if (obj->pipeline_stats.bo) {
1239 brw_bo_unreference(obj->pipeline_stats.bo);
1240 obj->pipeline_stats.bo = NULL;
1241 }
1242
1243 obj->pipeline_stats.bo =
1244 brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
1245 STATS_BO_SIZE, BRW_MEMZONE_OTHER);
1246
1247 /* Take starting snapshots. */
1248 snapshot_statistics_registers(brw, obj, 0);
1249
1250 ++brw->perfquery.n_active_pipeline_stats_queries;
1251 break;
1252
1253 default:
1254 unreachable("Unknown query type");
1255 break;
1256 }
1257
1258 if (INTEL_DEBUG & DEBUG_PERFMON)
1259 dump_perf_queries(brw);
1260
1261 return true;
1262 }
1263
1264 /**
1265 * Driver hook for glEndPerfQueryINTEL().
1266 */
1267 static void
1268 brw_end_perf_query(struct gl_context *ctx,
1269 struct gl_perf_query_object *o)
1270 {
1271 struct brw_context *brw = brw_context(ctx);
1272 struct brw_perf_query_object *obj = brw_perf_query(o);
1273
1274 DBG("End(%d)\n", o->Id);
1275
1276 /* Ensure that the work associated with the queried commands will have
1277 * finished before taking our query end counter readings.
1278 *
1279 * For more details see comment in brw_begin_perf_query for
1280 * corresponding flush.
1281 */
1282 brw_emit_mi_flush(brw);
1283
1284 switch (obj->query->kind) {
1285 case OA_COUNTERS:
1286 case OA_COUNTERS_RAW:
1287
1288 /* NB: It's possible that the query will have already been marked
1289 * as 'accumulated' if an error was seen while reading samples
1290 * from perf. In this case we mustn't try and emit a closing
1291 * MI_RPC command in case the OA unit has already been disabled
1292 */
1293 if (!obj->oa.results_accumulated) {
1294 /* Take an ending OA counter snapshot. */
1295 capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_END_OFFSET_BYTES);
1296 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
1297 MI_RPC_BO_END_OFFSET_BYTES,
1298 obj->oa.begin_report_id + 1);
1299 }
1300
1301 --brw->perfquery.n_active_oa_queries;
1302
1303 /* NB: even though the query has now ended, it can't be accumulated
1304 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1305 * to query->oa.bo
1306 */
1307 break;
1308
1309 case PIPELINE_STATS:
1310 snapshot_statistics_registers(brw, obj,
1311 STATS_BO_END_OFFSET_BYTES);
1312 --brw->perfquery.n_active_pipeline_stats_queries;
1313 break;
1314
1315 default:
1316 unreachable("Unknown query type");
1317 break;
1318 }
1319 }
1320
1321 static void
1322 brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
1323 {
1324 struct brw_context *brw = brw_context(ctx);
1325 struct brw_perf_query_object *obj = brw_perf_query(o);
1326 struct brw_bo *bo = NULL;
1327
1328 assert(!o->Ready);
1329
1330 switch (obj->query->kind) {
1331 case OA_COUNTERS:
1332 case OA_COUNTERS_RAW:
1333 bo = obj->oa.bo;
1334 break;
1335
1336 case PIPELINE_STATS:
1337 bo = obj->pipeline_stats.bo;
1338 break;
1339
1340 default:
1341 unreachable("Unknown query type");
1342 break;
1343 }
1344
1345 if (bo == NULL)
1346 return;
1347
1348 /* If the current batch references our results bo then we need to
1349 * flush first...
1350 */
1351 if (brw_batch_references(&brw->batch, bo))
1352 intel_batchbuffer_flush(brw);
1353
1354 brw_bo_wait_rendering(bo);
1355
1356 /* Due to a race condition between the OA unit signaling report
1357 * availability and the report actually being written into memory,
1358 * we need to wait for all the reports to come in before we can
1359 * read them.
1360 */
1361 if (obj->query->kind == OA_COUNTERS ||
1362 obj->query->kind == OA_COUNTERS_RAW) {
1363 while (!read_oa_samples_for_query(brw, obj))
1364 ;
1365 }
1366 }
1367
1368 static bool
1369 brw_is_perf_query_ready(struct gl_context *ctx,
1370 struct gl_perf_query_object *o)
1371 {
1372 struct brw_context *brw = brw_context(ctx);
1373 struct brw_perf_query_object *obj = brw_perf_query(o);
1374
1375 if (o->Ready)
1376 return true;
1377
1378 switch (obj->query->kind) {
1379 case OA_COUNTERS:
1380 case OA_COUNTERS_RAW:
1381 return (obj->oa.results_accumulated ||
1382 (obj->oa.bo &&
1383 !brw_batch_references(&brw->batch, obj->oa.bo) &&
1384 !brw_bo_busy(obj->oa.bo) &&
1385 read_oa_samples_for_query(brw, obj)));
1386 case PIPELINE_STATS:
1387 return (obj->pipeline_stats.bo &&
1388 !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
1389 !brw_bo_busy(obj->pipeline_stats.bo));
1390
1391 default:
1392 unreachable("Unknown query type");
1393 break;
1394 }
1395
1396 return false;
1397 }
1398
1399 static void
1400 gen8_read_report_clock_ratios(const uint32_t *report,
1401 uint64_t *slice_freq_hz,
1402 uint64_t *unslice_freq_hz)
1403 {
1404 /* The lower 16bits of the RPT_ID field of the OA reports contain a
1405 * snapshot of the bits coming from the RP_FREQ_NORMAL register and are
1406 * divided this way:
1407 *
1408 * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
1409 * RPT_ID[10:9]: RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
1410 * RPT_ID[8:0]: RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
1411 *
1412 * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
1413 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
1414 *
1415 * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
1416 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
1417 */
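/* Illustrative decoding (made-up field values): an unslice ratio field of
 * 36 maps to 36 * 16.67MHz ~= 600MHz, and a combined slice ratio of 54 to
 * ~900MHz, via the 16666667ULL scaling below.
 */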
1418
1419 uint32_t unslice_freq = report[0] & 0x1ff;
1420 uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
1421 uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
1422 uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);
1423
1424 *slice_freq_hz = slice_freq * 16666667ULL;
1425 *unslice_freq_hz = unslice_freq * 16666667ULL;
1426 }
1427
1428 static void
1429 read_slice_unslice_frequencies(struct brw_context *brw,
1430 struct brw_perf_query_object *obj)
1431 {
1432 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1433 uint32_t *begin_report, *end_report;
1434
1435 /* Slice/Unslice frequency is only available in the OA reports when the
1436 * "Disable OA reports due to clock ratio change" field in
1437 * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
1438 * global register (see drivers/gpu/drm/i915/i915_perf.c)
1439 *
1440 * Documentation says this should be available on Gen9+ but experimentation
1441 * shows that Gen8 reports similar values, so we enable it there too.
1442 */
1443 if (devinfo->gen < 8)
1444 return;
1445
1446 begin_report = obj->oa.map;
1447 end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
1448
1449 gen8_read_report_clock_ratios(begin_report,
1450 &obj->oa.slice_frequency[0],
1451 &obj->oa.unslice_frequency[0]);
1452 gen8_read_report_clock_ratios(end_report,
1453 &obj->oa.slice_frequency[1],
1454 &obj->oa.unslice_frequency[1]);
1455 }
1456
1457 static void
1458 read_gt_frequency(struct brw_context *brw,
1459 struct brw_perf_query_object *obj)
1460 {
1461 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1462 uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
1463 end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
1464
1465 switch (devinfo->gen) {
1466 case 7:
1467 case 8:
1468 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
1469 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
1470 break;
1471 case 9:
1472 case 10:
1473 case 11:
1474 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
1475 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
1476 break;
1477 default:
1478 unreachable("unexpected gen");
1479 }
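/* Illustrative conversion (made-up field value): a raw ratio of 18 yields
 * 18 * 50 = 900MHz on Gen7/8, or 18 * 50 / 3 = 300MHz on Gen9+ where the
 * field is in 16.66MHz units.
 */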
1480
1481 /* Put the numbers into Hz. */
1482 obj->oa.gt_frequency[0] *= 1000000ULL;
1483 obj->oa.gt_frequency[1] *= 1000000ULL;
1484 }
1485
1486 static int
1487 get_oa_counter_data(struct brw_context *brw,
1488 struct brw_perf_query_object *obj,
1489 size_t data_size,
1490 uint8_t *data)
1491 {
1492 const struct brw_perf_query_info *query = obj->query;
1493 int n_counters = query->n_counters;
1494 int written = 0;
1495
1496 for (int i = 0; i < n_counters; i++) {
1497 const struct brw_perf_query_counter *counter = &query->counters[i];
1498 uint64_t *out_uint64;
1499 float *out_float;
1500
1501 if (counter->size) {
1502 switch (counter->data_type) {
1503 case GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL:
1504 out_uint64 = (uint64_t *)(data + counter->offset);
1505 *out_uint64 = counter->oa_counter_read_uint64(brw, query,
1506 obj->oa.accumulator);
1507 break;
1508 case GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL:
1509 out_float = (float *)(data + counter->offset);
1510 *out_float = counter->oa_counter_read_float(brw, query,
1511 obj->oa.accumulator);
1512 break;
1513 default:
1514 /* So far we aren't using uint32, double or bool32... */
1515 unreachable("unexpected counter data type");
1516 }
1517 written = counter->offset + counter->size;
1518 }
1519 }
1520
1521 return written;
1522 }
1523
1524 static int
1525 get_pipeline_stats_data(struct brw_context *brw,
1526 struct brw_perf_query_object *obj,
1527 size_t data_size,
1528 uint8_t *data)
1529
1530 {
1531 const struct brw_perf_query_info *query = obj->query;
1532 int n_counters = obj->query->n_counters;
1533 uint8_t *p = data;
1534
1535 uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
1536 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
1537
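/* Compute the raw HW delta for each counter; some counters are registered
 * with a numerator/denominator ratio (see
 * init_pipeline_statistic_query_registers()), which is applied to the raw
 * delta here.
 */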
1538 for (int i = 0; i < n_counters; i++) {
1539 const struct brw_perf_query_counter *counter = &query->counters[i];
1540 uint64_t value = end[i] - start[i];
1541
1542 if (counter->pipeline_stat.numerator !=
1543 counter->pipeline_stat.denominator) {
1544 value *= counter->pipeline_stat.numerator;
1545 value /= counter->pipeline_stat.denominator;
1546 }
1547
1548 *((uint64_t *)p) = value;
1549 p += 8;
1550 }
1551
1552 brw_bo_unmap(obj->pipeline_stats.bo);
1553
1554 return p - data;
1555 }
1556
1557 /**
1558 * Driver hook for glGetPerfQueryDataINTEL().
1559 */
1560 static void
1561 brw_get_perf_query_data(struct gl_context *ctx,
1562 struct gl_perf_query_object *o,
1563 GLsizei data_size,
1564 GLuint *data,
1565 GLuint *bytes_written)
1566 {
1567 struct brw_context *brw = brw_context(ctx);
1568 struct brw_perf_query_object *obj = brw_perf_query(o);
1569 int written = 0;
1570
1571 assert(brw_is_perf_query_ready(ctx, o));
1572
1573 DBG("GetData(%d)\n", o->Id);
1574
1575 if (INTEL_DEBUG & DEBUG_PERFMON)
1576 dump_perf_queries(brw);
1577
1578 /* We expect that the frontend only calls this hook when it knows
1579 * that results are available.
1580 */
1581 assert(o->Ready);
1582
1583 switch (obj->query->kind) {
1584 case OA_COUNTERS:
1585 case OA_COUNTERS_RAW:
1586 if (!obj->oa.results_accumulated) {
1587 read_gt_frequency(brw, obj);
1588 read_slice_unslice_frequencies(brw, obj);
1589 accumulate_oa_reports(brw, obj);
1590 assert(obj->oa.results_accumulated);
1591
1592 brw_bo_unmap(obj->oa.bo);
1593 obj->oa.map = NULL;
1594 }
1595 if (obj->query->kind == OA_COUNTERS)
1596 written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
1597 else
1598 written = brw_perf_query_get_mdapi_oa_data(brw, obj, data_size, (uint8_t *)data);
1599 break;
1600
1601 case PIPELINE_STATS:
1602 written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
1603 break;
1604
1605 default:
1606 unreachable("Unknown query type");
1607 break;
1608 }
1609
1610 if (bytes_written)
1611 *bytes_written = written;
1612 }
1613
1614 static struct gl_perf_query_object *
1615 brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
1616 {
1617 struct brw_context *brw = brw_context(ctx);
1618 const struct brw_perf_query_info *query =
1619 &brw->perfquery.queries[query_index];
1620 struct brw_perf_query_object *obj =
1621 calloc(1, sizeof(struct brw_perf_query_object));
1622
1623 if (!obj)
1624 return NULL;
1625
1626 obj->query = query;
1627
1628 brw->perfquery.n_query_instances++;
1629
1630 return &obj->base;
1631 }
1632
1633 /**
1634 * Driver hook for glDeletePerfQueryINTEL().
1635 */
1636 static void
1637 brw_delete_perf_query(struct gl_context *ctx,
1638 struct gl_perf_query_object *o)
1639 {
1640 struct brw_context *brw = brw_context(ctx);
1641 struct brw_perf_query_object *obj = brw_perf_query(o);
1642
1643 /* We can assume that the frontend waits for a query to complete
1644 * before ever calling into here, so we don't have to worry about
1645 * deleting an in-flight query object.
1646 */
1647 assert(!o->Active);
1648 assert(!o->Used || o->Ready);
1649
1650 DBG("Delete(%d)\n", o->Id);
1651
1652 switch (obj->query->kind) {
1653 case OA_COUNTERS:
1654 case OA_COUNTERS_RAW:
1655 if (obj->oa.bo) {
1656 if (!obj->oa.results_accumulated) {
1657 drop_from_unaccumulated_query_list(brw, obj);
1658 dec_n_oa_users(brw);
1659 }
1660
1661 brw_bo_unreference(obj->oa.bo);
1662 obj->oa.bo = NULL;
1663 }
1664
1665 obj->oa.results_accumulated = false;
1666 break;
1667
1668 case PIPELINE_STATS:
1669 if (obj->pipeline_stats.bo) {
1670 brw_bo_unreference(obj->pipeline_stats.bo);
1671 obj->pipeline_stats.bo = NULL;
1672 }
1673 break;
1674
1675 default:
1676 unreachable("Unknown query type");
1677 break;
1678 }
1679
1680 /* As an indication that the INTEL_performance_query extension is no
1681 * longer in use, it's a good time to free our cache of sample
1682 * buffers and close any current i915-perf stream.
1683 */
1684 if (--brw->perfquery.n_query_instances == 0) {
1685 free_sample_bufs(brw);
1686 close_perf(brw, obj->query);
1687 }
1688
1689 free(obj);
1690 }
1691
1692 /******************************************************************************/
1693
1694 static void
1695 init_pipeline_statistic_query_registers(struct brw_context *brw)
1696 {
1697 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1698 struct brw_perf_query_info *query = brw_perf_query_append_query_info(brw);
1699
1700 query->kind = PIPELINE_STATS;
1701 query->name = "Pipeline Statistics Registers";
1702 query->n_counters = 0;
1703 query->counters =
1704 rzalloc_array(brw, struct brw_perf_query_counter, MAX_STAT_COUNTERS);
1705
1706 brw_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
1707 "N vertices submitted");
1708 brw_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1709 "N primitives submitted");
1710 brw_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1711 "N vertex shader invocations");
1712
1713 if (devinfo->gen == 6) {
1714 brw_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
1715 "SO_PRIM_STORAGE_NEEDED",
1716 "N geometry shader stream-out primitives (total)");
1717 brw_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
1718 "SO_NUM_PRIMS_WRITTEN",
1719 "N geometry shader stream-out primitives (written)");
1720 } else {
1721 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
1722 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
1723 "N stream-out (stream 0) primitives (total)");
1724 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
1725 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
1726 "N stream-out (stream 1) primitives (total)");
1727 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
1728 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
1729 "N stream-out (stream 2) primitives (total)");
1730 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
1731 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
1732 "N stream-out (stream 3) primitives (total)");
1733 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
1734 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
1735 "N stream-out (stream 0) primitives (written)");
1736 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
1737 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
1738 "N stream-out (stream 1) primitives (written)");
1739 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
1740 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
1741 "N stream-out (stream 2) primitives (written)");
1742 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
1743 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
1744 "N stream-out (stream 3) primitives (written)");
1745 }
1746
1747 brw_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1748 "N TCS shader invocations");
1749 brw_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1750 "N TES shader invocations");
1751
1752 brw_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1753 "N geometry shader invocations");
1754 brw_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1755 "N geometry shader primitives emitted");
1756
1757 brw_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1758 "N primitives entering clipping");
1759 brw_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1760 "N primitives leaving clipping");
1761
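/* Note: the 1/4 numerator/denominator pair below scales the raw value down
 * by four, matching the WaDividePSInvocationCountBy4:HSW,BDW workaround for
 * the over-counting PS_INVOCATION_COUNT register on those parts.
 */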
1762 if (devinfo->is_haswell || devinfo->gen == 8)
1763 brw_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1764 "N fragment shader invocations",
1765 "N fragment shader invocations");
1766 else
1767 brw_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1768 "N fragment shader invocations");
1769
1770 brw_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT, "N z-pass fragments");
1771
1772 if (devinfo->gen >= 7)
1773 brw_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1774 "N compute shader invocations");
1775
1776 query->data_size = sizeof(uint64_t) * query->n_counters;
1777 }
1778
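/* Duplicate one of Mesa's statically-described OA queries and bind it to
 * the metric set id the kernel assigned to that configuration; the id is
 * what later selects the set when an i915-perf stream is opened.
 */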
1779 static void
1780 register_oa_config(struct brw_context *brw,
1781 const struct brw_perf_query_info *query,
1782 uint64_t config_id)
1783 {
1784 struct brw_perf_query_info *registered_query =
1785    brw_perf_query_append_query_info(brw);
1786 
1787 *registered_query = *query;
1788 registered_query->oa_metrics_set_id = config_id;
1789 DBG("metric set registered: id = %" PRIu64", guid = %s\n",
1790     registered_query->oa_metrics_set_id, query->guid);
1791 }
1792
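/* For kernels without the add-config ioctl, walk the sysfs metrics/
 * directory: each entry is named by a metric set GUID and contains an "id"
 * file holding the kernel's metric set id for that configuration. Only
 * GUIDs Mesa already knows about get registered.
 */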
1793 static void
1794 enumerate_sysfs_metrics(struct brw_context *brw)
1795 {
1796 char buf[256];
1797 DIR *metricsdir = NULL;
1798 struct dirent *metric_entry;
1799 int len;
1800
1801 len = snprintf(buf, sizeof(buf), "%s/metrics", brw->perfquery.sysfs_dev_dir);
1802 if (len < 0 || len >= sizeof(buf)) {
1803 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
1804 return;
1805 }
1806
1807 metricsdir = opendir(buf);
1808 if (!metricsdir) {
1809 DBG("Failed to open %s: %m\n", buf);
1810 return;
1811 }
1812
1813 while ((metric_entry = readdir(metricsdir))) {
1814 struct hash_entry *entry;
1815
1816 if ((metric_entry->d_type != DT_DIR &&
1817 metric_entry->d_type != DT_LNK) ||
1818 metric_entry->d_name[0] == '.')
1819 continue;
1820
1821 DBG("metric set: %s\n", metric_entry->d_name);
1822 entry = _mesa_hash_table_search(brw->perfquery.oa_metrics_table,
1823 metric_entry->d_name);
1824 if (entry) {
1825 uint64_t id;
1826
1827 len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
1828 brw->perfquery.sysfs_dev_dir, metric_entry->d_name);
1829 if (len < 0 || len >= sizeof(buf)) {
1830 DBG("Failed to concatenate path to sysfs metric id file\n");
1831 continue;
1832 }
1833
1834 if (!read_file_uint64(buf, &id)) {
1835 DBG("Failed to read metric set id from %s: %m\n", buf);
1836 continue;
1837 }
1838
1839 register_oa_config(brw, (const struct brw_perf_query_info *)entry->data, id);
1840 } else
1841 DBG("metric set not known by mesa (skipping)\n");
1842 }
1843
1844 closedir(metricsdir);
1845 }
1846
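/* Probe for DRM_IOCTL_I915_PERF_REMOVE_CONFIG support by attempting to
 * remove the kernel's built-in test configuration (metric set id 1), which
 * we know cannot be replaced: a kernel that understands the ioctl is
 * expected to fail with ENOENT, while older kernels reject the unknown
 * ioctl with a different errno.
 */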
1847 static bool
1848 kernel_has_dynamic_config_support(struct brw_context *brw)
1849 {
1850 __DRIscreen *screen = brw->screen->driScrnPriv;
1851
1852 hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
1853 struct brw_perf_query_info *query = entry->data;
1854 char config_path[280];
1855 uint64_t config_id;
1856
1857 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
1858 brw->perfquery.sysfs_dev_dir, query->guid);
1859
1860 /* Look for the test config, which we know we can't replace. */
1861 if (read_file_uint64(config_path, &config_id) && config_id == 1) {
1862 return drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
1863 &config_id) < 0 && errno == ENOENT;
1864 }
1865 }
1866
1867 return false;
1868 }
1869
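/* Upload Mesa's metric set definitions (mux, boolean and flex register
 * programming) to the kernel with DRM_IOCTL_I915_PERF_ADD_CONFIG. On
 * success the ioctl returns the id of the newly added metric set, which we
 * register alongside any configs that were already loaded.
 */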
1870 static void
1871 init_oa_configs(struct brw_context *brw)
1872 {
1873 __DRIscreen *screen = brw->screen->driScrnPriv;
1874
1875 hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
1876 const struct brw_perf_query_info *query = entry->data;
1877 struct drm_i915_perf_oa_config config;
1878 char config_path[280];
1879 uint64_t config_id;
1880 int ret;
1881
1882 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
1883 brw->perfquery.sysfs_dev_dir, query->guid);
1884
1885 /* Don't recreate already loaded configs. */
1886 if (read_file_uint64(config_path, &config_id)) {
1887 DBG("metric set: %s (already loaded)\n", query->guid);
1888 register_oa_config(brw, query, config_id);
1889 continue;
1890 }
1891
1892 memset(&config, 0, sizeof(config));
1893
1894 memcpy(config.uuid, query->guid, sizeof(config.uuid));
1895
1896 config.n_mux_regs = query->n_mux_regs;
1897 config.mux_regs_ptr = (uintptr_t) query->mux_regs;
1898
1899 config.n_boolean_regs = query->n_b_counter_regs;
1900 config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;
1901
1902 config.n_flex_regs = query->n_flex_regs;
1903 config.flex_regs_ptr = (uintptr_t) query->flex_regs;
1904
1905 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
1906 if (ret < 0) {
1907 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
1908 query->name, query->guid, strerror(errno));
1909 continue;
1910 }
1911
1912 register_oa_config(brw, query, ret);
1913 DBG("metric set: %s (added)\n", query->guid);
1914 }
1915 }
1916
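/* Read the slice/subslice/EU topology through the i915 query uAPI. The
 * first DRM_IOCTL_I915_QUERY call only fills in item.length; we then
 * allocate a buffer of that size and issue the ioctl again to get the
 * drm_i915_query_topology_info data itself.
 */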
1917 static bool
1918 query_topology(struct brw_context *brw)
1919 {
1920 __DRIscreen *screen = brw->screen->driScrnPriv;
1921 struct drm_i915_query_item item = {
1922 .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
1923 };
1924 struct drm_i915_query query = {
1925 .num_items = 1,
1926 .items_ptr = (uintptr_t) &item,
1927 };
1928
1929 if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query))
1930 return false;
1931
1932 struct drm_i915_query_topology_info *topo_info =
1933 (struct drm_i915_query_topology_info *) calloc(1, item.length);
1934 item.data_ptr = (uintptr_t) topo_info;
1935
1936    if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query) ||
1937        item.length <= 0) {
1938       free(topo_info);  /* don't leak the topology buffer on the error path */
         return false;
      }
1939
1940 gen_device_info_update_from_topology(&brw->screen->devinfo,
1941 topo_info);
1942
1943 free(topo_info);
1944
1945 return true;
1946 }
1947
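/* Fallback topology path for older kernels: fetch the slice and subslice
 * masks with the I915_PARAM_SLICE_MASK / I915_PARAM_SUBSLICE_MASK getparams
 * and let gen_device_info derive the rest from them and the total EU count.
 */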
1948 static bool
1949 getparam_topology(struct brw_context *brw)
1950 {
1951 __DRIscreen *screen = brw->screen->driScrnPriv;
1952 drm_i915_getparam_t gp;
1953 int ret;
1954
1955 int slice_mask = 0;
1956 gp.param = I915_PARAM_SLICE_MASK;
1957 gp.value = &slice_mask;
1958 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1959 if (ret)
1960 return false;
1961
1962 int subslice_mask = 0;
1963 gp.param = I915_PARAM_SUBSLICE_MASK;
1964 gp.value = &subslice_mask;
1965 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1966 if (ret)
1967 return false;
1968
1969 gen_device_info_update_from_masks(&brw->screen->devinfo,
1970 slice_mask,
1971 subslice_mask,
1972 brw->screen->eu_total);
1973
1974 return true;
1975 }
1976
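/* Derive the topology-related system variables (slice/subslice masks, EU
 * and thread counts) that the generated OA counter equations reference when
 * normalizing raw counter deltas.
 */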
1977 static void
1978 compute_topology_builtins(struct brw_context *brw)
1979 {
1980 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1981
1982 brw->perfquery.sys_vars.slice_mask = devinfo->slice_masks;
1983 brw->perfquery.sys_vars.n_eu_slices = devinfo->num_slices;
1984
1985 for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
1986 brw->perfquery.sys_vars.n_eu_sub_slices +=
1987 util_bitcount(devinfo->subslice_masks[i]);
1988 }
1989
1990 for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
1991 brw->perfquery.sys_vars.n_eus += util_bitcount(devinfo->eu_masks[i]);
1992
1993 brw->perfquery.sys_vars.eu_threads_count =
1994 brw->perfquery.sys_vars.n_eus * devinfo->num_thread_per_eu;
1995
1996 /* The subslice mask builtin contains bits for all slices. Prior to Gen11
1997  * it had groups of 3 bits for each slice; on Gen11 each slice gets
1998  * 8 bits.
1999  *
2000  * Ideally the equations would be updated to use a proper slice/subslice
2001  * query function/operator.
2002 */
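/* For illustration (hypothetical Gen9 part with two slices of three
 * subslices each): slice 0 occupies bits 0..2 and slice 1 bits 3..5, so
 * slice 1 / subslice 2 lands at bit 1 * 3 + 2 = 5. On Gen11 the same
 * subslice would land at bit 1 * 8 + 2 = 10.
 */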
2003 brw->perfquery.sys_vars.subslice_mask = 0;
2004
2005 int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;
2006
2007 for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
2008 for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
2009 if (gen_device_info_subslice_available(devinfo, s, ss))
2010 brw->perfquery.sys_vars.subslice_mask |= 1UL << (s * bits_per_subslice + ss);
2011 }
2012 }
2013 }
2014
2015 static bool
2016 init_oa_sys_vars(struct brw_context *brw)
2017 {
2018 const struct gen_device_info *devinfo = &brw->screen->devinfo;
2019 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
2020
2021 if (!read_sysfs_drm_device_file_uint64(brw, "gt_min_freq_mhz", &min_freq_mhz))
2022 return false;
2023
2024 if (!read_sysfs_drm_device_file_uint64(brw, "gt_max_freq_mhz", &max_freq_mhz))
2025 return false;
2026
2027 if (!query_topology(brw)) {
2028 /* We need the i915 query uAPI on CNL+ (kernel 4.17+). */
2029 if (devinfo->gen >= 10)
2030 return false;
2031
2032 if (!getparam_topology(brw)) {
2033 /* We need the SLICE_MASK/SUBSLICE_MASK on gen8+ (kernel 4.13+). */
2034 if (devinfo->gen >= 8)
2035 return false;
2036
2037 /* On Haswell, the values are already computed for us in
2038 * gen_device_info.
2039 */
2040 }
2041 }
2042
2043 memset(&brw->perfquery.sys_vars, 0, sizeof(brw->perfquery.sys_vars));
2044 brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
2045 brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;
2046 brw->perfquery.sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
2047 brw->perfquery.sys_vars.revision = devinfo->revision;
2048 compute_topology_builtins(brw);
2049
2050 return true;
2051 }
2052
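/* Resolve the sysfs directory for the DRM device behind our fd from its
 * character device major:minor numbers, e.g. (illustrative)
 * /sys/dev/char/226:0/device/drm/card0. This is where the i915 metrics/
 * directory and the gt_min/max_freq_mhz files live.
 */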
2053 static bool
2054 get_sysfs_dev_dir(struct brw_context *brw)
2055 {
2056 __DRIscreen *screen = brw->screen->driScrnPriv;
2057 struct stat sb;
2058 int min, maj;
2059 DIR *drmdir;
2060 struct dirent *drm_entry;
2061 int len;
2062
2063 brw->perfquery.sysfs_dev_dir[0] = '\0';
2064
2065 if (fstat(screen->fd, &sb)) {
2066 DBG("Failed to stat DRM fd\n");
2067 return false;
2068 }
2069
2070 maj = major(sb.st_rdev);
2071 min = minor(sb.st_rdev);
2072
2073 if (!S_ISCHR(sb.st_mode)) {
2074 DBG("DRM fd is not a character device as expected\n");
2075 return false;
2076 }
2077
2078 len = snprintf(brw->perfquery.sysfs_dev_dir,
2079 sizeof(brw->perfquery.sysfs_dev_dir),
2080 "/sys/dev/char/%d:%d/device/drm", maj, min);
2081 if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir)) {
2082 DBG("Failed to concatenate sysfs path to drm device\n");
2083 return false;
2084 }
2085
2086 drmdir = opendir(brw->perfquery.sysfs_dev_dir);
2087 if (!drmdir) {
2088 DBG("Failed to open %s: %m\n", brw->perfquery.sysfs_dev_dir);
2089 return false;
2090 }
2091
2092 while ((drm_entry = readdir(drmdir))) {
2093 if ((drm_entry->d_type == DT_DIR ||
2094 drm_entry->d_type == DT_LNK) &&
2095 strncmp(drm_entry->d_name, "card", 4) == 0)
2096 {
2097 len = snprintf(brw->perfquery.sysfs_dev_dir,
2098 sizeof(brw->perfquery.sysfs_dev_dir),
2099 "/sys/dev/char/%d:%d/device/drm/%s",
2100 maj, min, drm_entry->d_name);
2101 closedir(drmdir);
2102 if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir))
2103 return false;
2104 else
2105 return true;
2106 }
2107 }
2108
2109 closedir(drmdir);
2110
2111 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
2112 maj, min);
2113
2114 return false;
2115 }
2116
2117 typedef void (*perf_register_oa_queries_t)(struct brw_context *);
2118
2119 static perf_register_oa_queries_t
2120 get_register_queries_function(const struct gen_device_info *devinfo)
2121 {
2122 if (devinfo->is_haswell)
2123 return brw_oa_register_queries_hsw;
2124 if (devinfo->is_cherryview)
2125 return brw_oa_register_queries_chv;
2126 if (devinfo->is_broadwell)
2127 return brw_oa_register_queries_bdw;
2128 if (devinfo->is_broxton)
2129 return brw_oa_register_queries_bxt;
2130 if (devinfo->is_skylake) {
2131 if (devinfo->gt == 2)
2132 return brw_oa_register_queries_sklgt2;
2133 if (devinfo->gt == 3)
2134 return brw_oa_register_queries_sklgt3;
2135 if (devinfo->gt == 4)
2136 return brw_oa_register_queries_sklgt4;
2137 }
2138 if (devinfo->is_kabylake) {
2139 if (devinfo->gt == 2)
2140 return brw_oa_register_queries_kblgt2;
2141 if (devinfo->gt == 3)
2142 return brw_oa_register_queries_kblgt3;
2143 }
2144 if (devinfo->is_geminilake)
2145 return brw_oa_register_queries_glk;
2146 if (devinfo->is_coffeelake) {
2147 if (devinfo->gt == 2)
2148 return brw_oa_register_queries_cflgt2;
2149 if (devinfo->gt == 3)
2150 return brw_oa_register_queries_cflgt3;
2151 }
2152 if (devinfo->is_cannonlake)
2153 return brw_oa_register_queries_cnl;
2154 if (devinfo->gen == 11)
2155 return brw_oa_register_queries_icl;
2156
2157 return NULL;
2158 }
2159
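/* Driver hook backing ctx->Driver.InitPerfQueryInfo: build the pipeline
 * statistics and OA query descriptions on first use and return how many
 * queries are available to the GL_INTEL_performance_query frontend.
 */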
2160 static unsigned
2161 brw_init_perf_query_info(struct gl_context *ctx)
2162 {
2163 struct brw_context *brw = brw_context(ctx);
2164 const struct gen_device_info *devinfo = &brw->screen->devinfo;
2165 bool i915_perf_oa_available = false;
2166 struct stat sb;
2167 perf_register_oa_queries_t oa_register;
2168
2169 if (brw->perfquery.n_queries)
2170 return brw->perfquery.n_queries;
2171
2172 init_pipeline_statistic_query_registers(brw);
2173 brw_perf_query_register_mdapi_statistic_query(brw);
2174
2175 oa_register = get_register_queries_function(devinfo);
2176
2177 /* The existence of this sysctl parameter implies the kernel supports
2178 * the i915 perf interface.
2179 */
2180 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
2181
2182 /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
2183 * metrics unless running as root.
2184 */
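/* (The restriction can typically be relaxed by an administrator writing 0
 * to /proc/sys/dev/i915/perf_stream_paranoid, e.g. via
 * `sysctl dev.i915.perf_stream_paranoid=0`.)
 */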
2185 if (devinfo->is_haswell)
2186 i915_perf_oa_available = true;
2187 else {
2188 uint64_t paranoid = 1;
2189
2190 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
2191
2192 if (paranoid == 0 || geteuid() == 0)
2193 i915_perf_oa_available = true;
2194 }
2195 }
2196
2197 if (i915_perf_oa_available &&
2198 oa_register &&
2199 get_sysfs_dev_dir(brw) &&
2200 init_oa_sys_vars(brw))
2201 {
2202 brw->perfquery.oa_metrics_table =
2203 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2204 _mesa_key_string_equal);
2205
2206 /* Index all the metric sets mesa knows about before looking to see what
2207 * the kernel is advertising.
2208 */
2209 oa_register(brw);
2210
2211 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
2212 kernel_has_dynamic_config_support(brw))
2213 init_oa_configs(brw);
2214 else
2215 enumerate_sysfs_metrics(brw);
2216
2217 brw_perf_query_register_mdapi_oa_query(brw);
2218 }
2219
2220 brw->perfquery.unaccumulated =
2221 ralloc_array(brw, struct brw_perf_query_object *, 2);
2222 brw->perfquery.unaccumulated_elements = 0;
2223 brw->perfquery.unaccumulated_array_size = 2;
2224
2225 exec_list_make_empty(&brw->perfquery.sample_buffers);
2226 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
2227
2228 /* It's convenient to guarantee that this linked list of sample
2229  * buffers is never empty, so we add an empty head buffer; that way,
2230  * when we Begin an OA query we can always take a reference on a
2231  * buffer in this list.
2232  */
2233 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
2234 exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);
2235
2236 brw->perfquery.oa_stream_fd = -1;
2237
2238 brw->perfquery.next_query_start_report_id = 1000;
2239
2240 return brw->perfquery.n_queries;
2241 }
2242
2243 void
2244 brw_init_performance_queries(struct brw_context *brw)
2245 {
2246 struct gl_context *ctx = &brw->ctx;
2247
2248 ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
2249 ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
2250 ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
2251 ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
2252 ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
2253 ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
2254 ctx->Driver.EndPerfQuery = brw_end_perf_query;
2255 ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
2256 ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
2257 ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
2258 }