1 /*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file brw_performance_query.c
26 *
27 * Implementation of the GL_INTEL_performance_query extension.
28 *
29 * Currently there are two possible counter sources exposed here:
30 *
31 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
32 * that we can snapshot at the beginning and end of a query.
33 *
34 * On Gen7.5+ we have Observability Architecture counters which are
35 * covered in a separate document from the rest of the PRMs. It is available at:
36 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
37 * => 2013 Intel Core Processor Family => Observability Performance Counters
38 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
39 * though notably we currently only support OA counters for Haswell+)
40 */
41
42 #include <limits.h>
43 #include <dirent.h>
44
45 /* put before sys/types.h to silence glibc warnings */
46 #ifdef MAJOR_IN_MKDEV
47 #include <sys/mkdev.h>
48 #endif
49 #ifdef MAJOR_IN_SYSMACROS
50 #include <sys/sysmacros.h>
51 #endif
52 #include <sys/types.h>
53 #include <sys/stat.h>
54 #include <fcntl.h>
55 #include <sys/mman.h>
56 #include <sys/ioctl.h>
57
58 #include <xf86drm.h>
59 #include <i915_drm.h>
60
61 #include "main/hash.h"
62 #include "main/macros.h"
63 #include "main/mtypes.h"
64 #include "main/performance_query.h"
65
66 #include "util/bitset.h"
67 #include "util/ralloc.h"
68 #include "util/hash_table.h"
69 #include "util/list.h"
70
71 #include "brw_context.h"
72 #include "brw_defines.h"
73 #include "brw_performance_query.h"
74 #include "brw_oa_metrics.h"
75 #include "intel_batchbuffer.h"
76
77 #define FILE_DEBUG_FLAG DEBUG_PERFMON
78
79 #define OAREPORT_REASON_MASK 0x3f
80 #define OAREPORT_REASON_SHIFT 19
81 #define OAREPORT_REASON_TIMER (1<<0)
82 #define OAREPORT_REASON_TRIGGER1 (1<<1)
83 #define OAREPORT_REASON_TRIGGER2 (1<<2)
84 #define OAREPORT_REASON_CTX_SWITCH (1<<3)
85 #define OAREPORT_REASON_GO_TRANSITION (1<<4)
86
87 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
88 256) /* OA counter report */
89
90 /**
91 * Periodic OA samples are read() into these buffer structures via the
92 * i915 perf kernel interface and appended to the
93 * brw->perfquery.sample_buffers linked list. When we process the
94 * results of an OA metrics query we need to consider all the periodic
95 * samples between the Begin and End MI_REPORT_PERF_COUNT command
96 * markers.
97 *
98 * 'Periodic' is a simplification as there are other automatic reports
99 * written by the hardware also buffered here.
100 *
101 * Considering three queries, A, B and C:
102 *
103 * Time ---->
104 * ________________A_________________
105 * | |
106 * | ________B_________ _____C___________
107 * | | | | | |
108 *
109 * And an illustration of sample buffers read over this time frame:
110 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
111 *
112 * These nodes may hold samples for query A:
113 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
114 *
115 * These nodes may hold samples for query B:
116 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
117 *
118 * These nodes may hold samples for query C:
119 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
120 *
121 * The illustration assumes we have an even distribution of periodic
122 * samples so all nodes have the same size plotted against time:
123 *
124 * Note, to simplify code, the list is never empty.
125 *
126 * With overlapping queries we can see that periodic OA reports may
127 * relate to multiple queries and care needs to be taken to keep
128 * track of sample buffers until there are no queries that might
129 * depend on their contents.
130 *
131 * We use a node ref counting system where a reference ensures that a
132 * node and all following nodes can't be freed/recycled until the
133 * reference drops to zero.
134 *
135 * E.g. with a ref of one here:
136 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
137 *
138 * These nodes could be freed or recycled ("reaped"):
139 * [ 0 ][ 0 ]
140 *
141 * These must be preserved until the leading ref drops to zero:
142 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
143 *
144 * When a query starts we take a reference on the current tail of
145 * the list, knowing that no already-buffered samples can possibly
146 * relate to the newly-started query. A pointer to this node is
147 * also saved in the query object's ->oa.samples_head.
148 *
149 * E.g. starting query A while there are two nodes in .sample_buffers:
150 * ________________A________
151 * |
152 *
153 * [ 0 ][ 1 ]
154 * ^_______ Add a reference and store pointer to node in
155 * A->oa.samples_head
156 *
157 * Moving forward to when the B query starts with no new buffer nodes:
158 * (for reference, i915 perf reads() are only done when queries finish)
159 * ________________A_______
160 * | ________B___
161 * | |
162 *
163 * [ 0 ][ 2 ]
164 * ^_______ Add a reference and store pointer to
165 * node in B->oa.samples_head
166 *
167 * Once a query is finished, after an OA query has become 'Ready',
168 * once the End OA report has landed and after we have processed
169 * all the intermediate periodic samples then we drop the
170 * ->oa.samples_head reference we took at the start.
171 *
172 * So when the B query has finished we have:
173 * ________________A________
174 * | ______B___________
175 * | | |
176 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
177 * ^_______ Drop B->oa.samples_head reference
178 *
179 * We still can't free these due to the A->oa.samples_head ref:
180 * [ 1 ][ 0 ][ 0 ][ 0 ]
181 *
182 * When the A query finishes: (note there's a new ref for C's samples_head)
183 * ________________A_________________
184 * | |
185 * | _____C_________
186 * | | |
187 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
188 * ^_______ Drop A->oa.samples_head reference
189 *
190 * And we can now reap these nodes up to the C->oa.samples_head:
191 * [ X ][ X ][ X ][ X ]
192 * keeping -> [ 1 ][ 0 ][ 0 ]
193 *
194 * We reap old sample buffers each time we finish processing an OA
195 * query by iterating the sample_buffers list from the head until we
196 * find a referenced node and stop.
197 *
198 * Reaped buffers move to a perfquery.free_sample_buffers list and
199 * when we come to read() we first look to recycle a buffer from the
200 * free_sample_buffers list before allocating a new buffer.
201 */
202 struct brw_oa_sample_buf {
203 struct exec_node link;
204 int refcount;
205 int len;
206 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
207 uint32_t last_timestamp;
208 };
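/* An illustrative sketch (not additional driver logic) of the reference
 * counting described above; the code below does exactly this in
 * brw_begin_perf_query() and drop_from_unaccumulated_query_list():
 *
 *    // Begin: pin the current tail; no already-buffered samples can
 *    // relate to the newly started query...
 *    obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
 *    exec_node_data(struct brw_oa_sample_buf,
 *                   obj->oa.samples_head, link)->refcount++;
 *
 *    // Once the query's reports have been accumulated: unpin and let
 *    // reap_old_sample_buffers() recycle leading unreferenced buffers.
 *    exec_node_data(struct brw_oa_sample_buf,
 *                   obj->oa.samples_head, link)->refcount--;
 *    obj->oa.samples_head = NULL;
 *    reap_old_sample_buffers(brw);
 */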
209
210 /** Downcasting convenience macro. */
211 static inline struct brw_perf_query_object *
212 brw_perf_query(struct gl_perf_query_object *o)
213 {
214 return (struct brw_perf_query_object *) o;
215 }
216
217 #define MI_RPC_BO_SIZE 4096
218 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
219 #define MI_FREQ_START_OFFSET_BYTES (3072)
220 #define MI_FREQ_END_OFFSET_BYTES (3076)
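/* Resulting layout of the single 4096 byte MI_RPC BO implied by the offsets
 * above: the Begin OA report is written at offset 0, the End OA report at
 * offset 2048, and the 32-bit start/end frequency register snapshots at
 * offsets 3072 and 3076.
 */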
221
222 /******************************************************************************/
223
224 static bool
225 read_file_uint64(const char *file, uint64_t *val)
226 {
227 char buf[32];
228 int fd, n;
229
230 fd = open(file, 0);
231 if (fd < 0)
232 return false;
233 while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
234 errno == EINTR);
235 close(fd);
236 if (n < 0)
237 return false;
238
239 buf[n] = '\0';
240 *val = strtoull(buf, NULL, 0);
241
242 return true;
243 }
244
245 static bool
246 read_sysfs_drm_device_file_uint64(struct brw_context *brw,
247 const char *file,
248 uint64_t *value)
249 {
250 char buf[512];
251 int len;
252
253 len = snprintf(buf, sizeof(buf), "%s/%s",
254 brw->perfquery.sysfs_dev_dir, file);
255 if (len < 0 || len >= sizeof(buf)) {
256 DBG("Failed to concatenate sys filename to read u64 from\n");
257 return false;
258 }
259
260 return read_file_uint64(buf, value);
261 }
262
263 /******************************************************************************/
264
265 static bool
266 brw_is_perf_query_ready(struct gl_context *ctx,
267 struct gl_perf_query_object *o);
268
269 static uint64_t
270 brw_perf_query_get_metric_id(struct brw_context *brw,
271 const struct brw_perf_query_info *query)
272 {
273 /* These queries are known to never change; their config ID was loaded
274 * when the query was first created. No need to look them up again.
275 */
276 if (query->kind == OA_COUNTERS)
277 return query->oa_metrics_set_id;
278
279 assert(query->kind == OA_COUNTERS_RAW);
280
281 /* Raw queries can be reprogrammed by an external application/library.
282 * When a raw query is used for the first time its ID is set to a value !=
283 * 0. When it stops being used the ID returns to 0. No need to reload the
284 * ID when it's already loaded.
285 */
286 if (query->oa_metrics_set_id != 0) {
287 DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
288 query->name, query->guid, query->oa_metrics_set_id);
289 return query->oa_metrics_set_id;
290 }
291
292 char metric_id_file[280];
293 snprintf(metric_id_file, sizeof(metric_id_file),
294 "%s/metrics/%s/id", brw->perfquery.sysfs_dev_dir, query->guid);
295
296 struct brw_perf_query_info *raw_query = (struct brw_perf_query_info *)query;
297 if (!read_file_uint64(metric_id_file, &raw_query->oa_metrics_set_id)) {
298 DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
299 raw_query->oa_metrics_set_id = 1ULL;
300 } else {
301 DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
302 query->name, query->guid, query->oa_metrics_set_id);
303 }
304 return query->oa_metrics_set_id;
305 }
306
307 static void
308 dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
309 {
310 struct gl_context *ctx = brw_void;
311 struct gl_perf_query_object *o = query_void;
312 struct brw_perf_query_object *obj = query_void;
313
314 switch (obj->query->kind) {
315 case OA_COUNTERS:
316 case OA_COUNTERS_RAW:
317 DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
318 id,
319 o->Used ? "Dirty," : "New,",
320 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
321 obj->oa.bo ? "yes," : "no,",
322 brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
323 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
324 break;
325 case PIPELINE_STATS:
326 DBG("%4d: %-6s %-8s BO: %-4s\n",
327 id,
328 o->Used ? "Dirty," : "New,",
329 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
330 obj->pipeline_stats.bo ? "yes" : "no");
331 break;
332 default:
333 unreachable("Unknown query type");
334 break;
335 }
336 }
337
338 static void
339 dump_perf_queries(struct brw_context *brw)
340 {
341 struct gl_context *ctx = &brw->ctx;
342 DBG("Queries: (Open queries = %d, OA users = %d)\n",
343 brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
344 _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
345 }
346
347 /******************************************************************************/
348
349 static struct brw_oa_sample_buf *
350 get_free_sample_buf(struct brw_context *brw)
351 {
352 struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
353 struct brw_oa_sample_buf *buf;
354
355 if (node)
356 buf = exec_node_data(struct brw_oa_sample_buf, node, link);
357 else {
358 buf = ralloc_size(brw, sizeof(*buf));
359
360 exec_node_init(&buf->link);
361 buf->refcount = 0;
362 buf->len = 0;
363 }
364
365 return buf;
366 }
367
368 static void
369 reap_old_sample_buffers(struct brw_context *brw)
370 {
371 struct exec_node *tail_node =
372 exec_list_get_tail(&brw->perfquery.sample_buffers);
373 struct brw_oa_sample_buf *tail_buf =
374 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
375
376 /* Remove all old, unreferenced sample buffers walking forward from
377 * the head of the list, except always leave at least one node in
378 * the list so we always have a node to reference when we Begin
379 * a new query.
380 */
381 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
382 &brw->perfquery.sample_buffers)
383 {
384 if (buf->refcount == 0 && buf != tail_buf) {
385 exec_node_remove(&buf->link);
386 exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
387 } else
388 return;
389 }
390 }
391
392 static void
393 free_sample_bufs(struct brw_context *brw)
394 {
395 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
396 &brw->perfquery.free_sample_buffers)
397 ralloc_free(buf);
398
399 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
400 }
401
402 /******************************************************************************/
403
404 /**
405 * Driver hook for glGetPerfQueryInfoINTEL().
406 */
407 static void
408 brw_get_perf_query_info(struct gl_context *ctx,
409 unsigned query_index,
410 const char **name,
411 GLuint *data_size,
412 GLuint *n_counters,
413 GLuint *n_active)
414 {
415 struct brw_context *brw = brw_context(ctx);
416 const struct brw_perf_query_info *query =
417 &brw->perfquery.queries[query_index];
418
419 *name = query->name;
420 *data_size = query->data_size;
421 *n_counters = query->n_counters;
422
423 switch (query->kind) {
424 case OA_COUNTERS:
425 case OA_COUNTERS_RAW:
426 *n_active = brw->perfquery.n_active_oa_queries;
427 break;
428
429 case PIPELINE_STATS:
430 *n_active = brw->perfquery.n_active_pipeline_stats_queries;
431 break;
432
433 default:
434 unreachable("Unknown query type");
435 break;
436 }
437 }
438
439 /**
440 * Driver hook for glGetPerfCounterInfoINTEL().
441 */
442 static void
443 brw_get_perf_counter_info(struct gl_context *ctx,
444 unsigned query_index,
445 unsigned counter_index,
446 const char **name,
447 const char **desc,
448 GLuint *offset,
449 GLuint *data_size,
450 GLuint *type_enum,
451 GLuint *data_type_enum,
452 GLuint64 *raw_max)
453 {
454 struct brw_context *brw = brw_context(ctx);
455 const struct brw_perf_query_info *query =
456 &brw->perfquery.queries[query_index];
457 const struct brw_perf_query_counter *counter =
458 &query->counters[counter_index];
459
460 *name = counter->name;
461 *desc = counter->desc;
462 *offset = counter->offset;
463 *data_size = counter->size;
464 *type_enum = counter->type;
465 *data_type_enum = counter->data_type;
466 *raw_max = counter->raw_max;
467 }
468
469 /******************************************************************************/
470
471 /**
472 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
473 * pipeline statistics for the performance query object.
474 */
475 static void
476 snapshot_statistics_registers(struct brw_context *brw,
477 struct brw_perf_query_object *obj,
478 uint32_t offset_in_bytes)
479 {
480 const struct brw_perf_query_info *query = obj->query;
481 const int n_counters = query->n_counters;
482
483 for (int i = 0; i < n_counters; i++) {
484 const struct brw_perf_query_counter *counter = &query->counters[i];
485
486 assert(counter->data_type == GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL);
487
488 brw_store_register_mem64(brw, obj->pipeline_stats.bo,
489 counter->pipeline_stat.reg,
490 offset_in_bytes + i * sizeof(uint64_t));
491 }
492 }
493
494 /**
495 * Add a query to the global list of "unaccumulated queries."
496 *
497 * Queries are tracked here until all the associated OA reports have
498 * been accumulated via accumulate_oa_reports() after the end
499 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
500 */
501 static void
502 add_to_unaccumulated_query_list(struct brw_context *brw,
503 struct brw_perf_query_object *obj)
504 {
505 if (brw->perfquery.unaccumulated_elements >=
506 brw->perfquery.unaccumulated_array_size)
507 {
508 brw->perfquery.unaccumulated_array_size *= 1.5;
509 brw->perfquery.unaccumulated =
510 reralloc(brw, brw->perfquery.unaccumulated,
511 struct brw_perf_query_object *,
512 brw->perfquery.unaccumulated_array_size);
513 }
514
515 brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
516 }
517
518 /**
519 * Remove a query from the global list of unaccumulated queries once
520 * the OA reports associated with the query have been successfully
521 * accumulated in accumulate_oa_reports(), or when discarding unwanted
522 * query results.
523 */
524 static void
525 drop_from_unaccumulated_query_list(struct brw_context *brw,
526 struct brw_perf_query_object *obj)
527 {
528 for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
529 if (brw->perfquery.unaccumulated[i] == obj) {
530 int last_elt = --brw->perfquery.unaccumulated_elements;
531
532 if (i == last_elt)
533 brw->perfquery.unaccumulated[i] = NULL;
534 else {
535 brw->perfquery.unaccumulated[i] =
536 brw->perfquery.unaccumulated[last_elt];
537 }
538
539 break;
540 }
541 }
542
543 /* Drop our samples_head reference so that associated periodic
544 * sample data buffers can potentially be reaped if they aren't
545 * referenced by any other queries...
546 */
547
548 struct brw_oa_sample_buf *buf =
549 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
550
551 assert(buf->refcount > 0);
552 buf->refcount--;
553
554 obj->oa.samples_head = NULL;
555
556 reap_old_sample_buffers(brw);
557 }
558
559 /**
560 * Given pointers to starting and ending OA snapshots, add the deltas for each
561 * counter to the results.
562 */
563 static void
564 add_deltas(struct brw_context *brw,
565 struct brw_perf_query_object *obj,
566 const uint32_t *start,
567 const uint32_t *end)
568 {
569 const struct brw_perf_query_info *query = obj->query;
570 uint64_t *accumulator = obj->oa.accumulator;
571 int idx = 0;
572 int i;
573
574 obj->oa.reports_accumulated++;
575
576 switch (query->oa_format) {
577 case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
578 brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator + idx++); /* timestamp */
579 brw_perf_query_accumulate_uint32(start + 3, end + 3, accumulator + idx++); /* clock */
580
581 /* 32x 40bit A counters... */
582 for (i = 0; i < 32; i++)
583 brw_perf_query_accumulate_uint40(i, start, end, accumulator + idx++);
584
585 /* 4x 32bit A counters... */
586 for (i = 0; i < 4; i++)
587 brw_perf_query_accumulate_uint32(start + 36 + i, end + 36 + i,
588 accumulator + idx++);
589
590 /* 8x 32bit B counters + 8x 32bit C counters... */
591 for (i = 0; i < 16; i++)
592 brw_perf_query_accumulate_uint32(start + 48 + i, end + 48 + i,
593 accumulator + idx++);
594
595 break;
596 case I915_OA_FORMAT_A45_B8_C8:
597 brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */
598
599 for (i = 0; i < 61; i++)
600 brw_perf_query_accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);
601
602 break;
603 default:
604 unreachable("Can't accumulate OA counters in unknown format");
605 }
606 }
607
608 static bool
609 inc_n_oa_users(struct brw_context *brw)
610 {
611 if (brw->perfquery.n_oa_users == 0 &&
612 drmIoctl(brw->perfquery.oa_stream_fd,
613 I915_PERF_IOCTL_ENABLE, 0) < 0)
614 {
615 return false;
616 }
617 ++brw->perfquery.n_oa_users;
618
619 return true;
620 }
621
622 static void
623 dec_n_oa_users(struct brw_context *brw)
624 {
625 /* Disabling the i915 perf stream will effectively disable the OA
626 * counters. Note it's important to be sure there are no outstanding
627 * MI_RPC commands at this point since they could stall the CS
628 * indefinitely once OACONTROL is disabled.
629 */
630 --brw->perfquery.n_oa_users;
631 if (brw->perfquery.n_oa_users == 0 &&
632 drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
633 {
634 DBG("WARNING: Error disabling i915 perf stream: %m\n");
635 }
636 }
637
638 /* In general, if we see anything spurious while accumulating results
639 * we don't try to continue accumulating the current query hoping for
640 * the best; we scrap anything outstanding and then hope for the best
641 * with new queries.
642 */
643 static void
644 discard_all_queries(struct brw_context *brw)
645 {
646 while (brw->perfquery.unaccumulated_elements) {
647 struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];
648
649 obj->oa.results_accumulated = true;
650 drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);
651
652 dec_n_oa_users(brw);
653 }
654 }
655
656 enum OaReadStatus {
657 OA_READ_STATUS_ERROR,
658 OA_READ_STATUS_UNFINISHED,
659 OA_READ_STATUS_FINISHED,
660 };
661
662 static enum OaReadStatus
663 read_oa_samples_until(struct brw_context *brw,
664 uint32_t start_timestamp,
665 uint32_t end_timestamp)
666 {
667 struct exec_node *tail_node =
668 exec_list_get_tail(&brw->perfquery.sample_buffers);
669 struct brw_oa_sample_buf *tail_buf =
670 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
671 uint32_t last_timestamp = tail_buf->last_timestamp;
672
673 while (1) {
674 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
675 uint32_t offset;
676 int len;
677
678 while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
679 sizeof(buf->buf))) < 0 && errno == EINTR)
680 ;
681
682 if (len <= 0) {
683 exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);
684
685 if (len < 0) {
686 if (errno == EAGAIN)
687 return ((last_timestamp - start_timestamp) >=
688 (end_timestamp - start_timestamp)) ?
689 OA_READ_STATUS_FINISHED :
690 OA_READ_STATUS_UNFINISHED;
691 else {
692 DBG("Error reading i915 perf samples: %m\n");
693 }
694 } else
695 DBG("Spurious EOF reading i915 perf samples\n");
696
697 return OA_READ_STATUS_ERROR;
698 }
699
700 buf->len = len;
701 exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);
702
703 /* Go through the reports and update the last timestamp. */
704 offset = 0;
705 while (offset < buf->len) {
706 const struct drm_i915_perf_record_header *header =
707 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
708 uint32_t *report = (uint32_t *) (header + 1);
709
710 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
711 last_timestamp = report[1];
712
713 offset += header->size;
714 }
715
716 buf->last_timestamp = last_timestamp;
717 }
718
719 unreachable("not reached");
720 return OA_READ_STATUS_ERROR;
721 }
722
723 /**
724 * Try to read all the reports until either the delimiting timestamp
725 * or an error arises.
726 */
727 static bool
728 read_oa_samples_for_query(struct brw_context *brw,
729 struct brw_perf_query_object *obj)
730 {
731 uint32_t *start;
732 uint32_t *last;
733 uint32_t *end;
734
735 /* We need the MI_REPORT_PERF_COUNT to land before we can start
736 * accumulating. */
737 assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
738 !brw_bo_busy(obj->oa.bo));
739
740 /* Map the BO once here and let accumulate_oa_reports() unmap
741 * it. */
742 if (obj->oa.map == NULL)
743 obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);
744
745 start = last = obj->oa.map;
746 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
747
748 if (start[0] != obj->oa.begin_report_id) {
749 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
750 return true;
751 }
752 if (end[0] != (obj->oa.begin_report_id + 1)) {
753 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
754 return true;
755 }
756
757 /* Read the reports until the end timestamp. */
758 switch (read_oa_samples_until(brw, start[1], end[1])) {
759 case OA_READ_STATUS_ERROR:
760 /* Fallthrough and let accumulate_oa_reports() deal with the
761 * error. */
762 case OA_READ_STATUS_FINISHED:
763 return true;
764 case OA_READ_STATUS_UNFINISHED:
765 return false;
766 }
767
768 unreachable("invalid read status");
769 return false;
770 }
771
772 /**
773 * Accumulate raw OA counter values based on deltas between pairs of
774 * OA reports.
775 *
776 * Accumulation starts from the first report captured via
777 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
778 * last MI_RPC report requested by brw_end_perf_query(). Between these
779 * two reports there may also be some number of periodically sampled OA
780 * reports collected via the i915 perf interface - depending on the
781 * duration of the query.
782 *
783 * These periodic snapshots help to ensure we handle counter overflow
784 * correctly by being frequent enough that we don't miss multiple
785 * overflows of a counter between snapshots. For Gen8+ the i915 perf
786 * snapshots provide the extra context-switch reports that let us
787 * subtract out the progress of counters associated with other
788 * contexts running on the system.
789 */
790 static void
791 accumulate_oa_reports(struct brw_context *brw,
792 struct brw_perf_query_object *obj)
793 {
794 const struct gen_device_info *devinfo = &brw->screen->devinfo;
795 struct gl_perf_query_object *o = &obj->base;
796 uint32_t *start;
797 uint32_t *last;
798 uint32_t *end;
799 struct exec_node *first_samples_node;
800 bool in_ctx = true;
801 int out_duration = 0;
802
803 assert(o->Ready);
804 assert(obj->oa.map != NULL);
805
806 start = last = obj->oa.map;
807 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
808
809 if (start[0] != obj->oa.begin_report_id) {
810 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
811 goto error;
812 }
813 if (end[0] != (obj->oa.begin_report_id + 1)) {
814 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
815 goto error;
816 }
817
818 obj->oa.hw_id = start[2];
819
820 /* See if we have any periodic reports to accumulate too... */
821
822 /* N.B. The oa.samples_head was set when the query began and
823 * pointed to the tail of the brw->perfquery.sample_buffers list at
824 * the time the query started. Since the buffer existed before the
825 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
826 * that no data in this particular node's buffer can possibly be
827 * associated with the query - so skip ahead one...
828 */
829 first_samples_node = obj->oa.samples_head->next;
830
831 foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
832 &brw->perfquery.sample_buffers,
833 first_samples_node)
834 {
835 int offset = 0;
836
837 while (offset < buf->len) {
838 const struct drm_i915_perf_record_header *header =
839 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
840
841 assert(header->size != 0);
842 assert(header->size <= buf->len);
843
844 offset += header->size;
845
846 switch (header->type) {
847 case DRM_I915_PERF_RECORD_SAMPLE: {
848 uint32_t *report = (uint32_t *)(header + 1);
849 bool add = true;
850
851 /* Ignore reports that come before the start marker.
852 * (Note: takes care to allow overflow of 32bit timestamps)
853 */
854 if (brw_timebase_scale(brw, report[1] - start[1]) > 5000000000)
855 continue;
856
857 /* Ignore reports that come after the end marker.
858 * (Note: takes care to allow overflow of 32bit timestamps)
859 */
860 if (brw_timebase_scale(brw, report[1] - end[1]) <= 5000000000)
861 goto end;
862
863 /* For Gen8+ since the counters continue while other
864 * contexts are running we need to discount any unrelated
865 * deltas. The hardware automatically generates a report
866 * on context switch which gives us a new reference point
867 * to continue adding deltas from.
868 *
869 * For Haswell we can rely on the HW to stop the progress
870 * of OA counters while any other context is active.
871 */
872 if (devinfo->gen >= 8) {
873 if (in_ctx && report[2] != obj->oa.hw_id) {
874 DBG("i915 perf: Switch AWAY (observed by ID change)\n");
875 in_ctx = false;
876 out_duration = 0;
877 } else if (in_ctx == false && report[2] == obj->oa.hw_id) {
878 DBG("i915 perf: Switch TO\n");
879 in_ctx = true;
880
881 /* From experimentation in IGT, we found that the OA unit
882 * might label some report as "idle" (using an invalid
883 * context ID), right after a report for a given context.
884 * Deltas generated by those reports actually belong to the
885 * previous context, even though they're not labelled as
886 * such.
887 *
888 * We didn't *really* Switch AWAY in the case that we e.g.
889 * saw a single periodic report while idle...
890 */
891 if (out_duration >= 1)
892 add = false;
893 } else if (in_ctx) {
894 assert(report[2] == obj->oa.hw_id);
895 DBG("i915 perf: Continuation IN\n");
896 } else {
897 assert(report[2] != obj->oa.hw_id);
898 DBG("i915 perf: Continuation OUT\n");
899 add = false;
900 out_duration++;
901 }
902 }
903
904 if (add)
905 add_deltas(brw, obj, last, report);
906
907 last = report;
908
909 break;
910 }
911
912 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
913 DBG("i915 perf: OA error: all reports lost\n");
914 goto error;
915 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
916 DBG("i915 perf: OA report lost\n");
917 break;
918 }
919 }
920 }
921
922 end:
923
924 add_deltas(brw, obj, last, end);
925
926 DBG("Marking %d accumulated - results gathered\n", o->Id);
927
928 obj->oa.results_accumulated = true;
929 drop_from_unaccumulated_query_list(brw, obj);
930 dec_n_oa_users(brw);
931
932 return;
933
934 error:
935
936 discard_all_queries(brw);
937 }
938
939 /******************************************************************************/
940
941 static bool
942 open_i915_perf_oa_stream(struct brw_context *brw,
943 int metrics_set_id,
944 int report_format,
945 int period_exponent,
946 int drm_fd,
947 uint32_t ctx_id)
948 {
949 uint64_t properties[] = {
950 /* Single context sampling */
951 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
952
953 /* Include OA reports in samples */
954 DRM_I915_PERF_PROP_SAMPLE_OA, true,
955
956 /* OA unit configuration */
957 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
958 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
959 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
960 };
961 struct drm_i915_perf_open_param param = {
962 .flags = I915_PERF_FLAG_FD_CLOEXEC |
963 I915_PERF_FLAG_FD_NONBLOCK |
964 I915_PERF_FLAG_DISABLED,
965 .num_properties = ARRAY_SIZE(properties) / 2,
966 .properties_ptr = (uintptr_t) properties,
967 };
968 int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
969 if (fd == -1) {
970 DBG("Error opening i915 perf OA stream: %m\n");
971 return false;
972 }
973
974 brw->perfquery.oa_stream_fd = fd;
975
976 brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
977 brw->perfquery.current_oa_format = report_format;
978
979 return true;
980 }
981
982 static void
983 close_perf(struct brw_context *brw,
984 const struct brw_perf_query_info *query)
985 {
986 if (brw->perfquery.oa_stream_fd != -1) {
987 close(brw->perfquery.oa_stream_fd);
988 brw->perfquery.oa_stream_fd = -1;
989 }
990 if (query->kind == OA_COUNTERS_RAW) {
991 struct brw_perf_query_info *raw_query =
992 (struct brw_perf_query_info *) query;
993 raw_query->oa_metrics_set_id = 0;
994 }
995 }
996
997 static void
998 capture_frequency_stat_register(struct brw_context *brw,
999 struct brw_bo *bo,
1000 uint32_t bo_offset)
1001 {
1002 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1003
1004 if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
1005 !devinfo->is_baytrail && !devinfo->is_cherryview) {
1006 brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
1007 } else if (devinfo->gen >= 9) {
1008 brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
1009 }
1010 }
1011
1012 /**
1013 * Driver hook for glBeginPerfQueryINTEL().
1014 */
1015 static bool
1016 brw_begin_perf_query(struct gl_context *ctx,
1017 struct gl_perf_query_object *o)
1018 {
1019 struct brw_context *brw = brw_context(ctx);
1020 struct brw_perf_query_object *obj = brw_perf_query(o);
1021 const struct brw_perf_query_info *query = obj->query;
1022
1023 /* We can assume the frontend hides mistaken attempts to Begin a
1024 * query object multiple times before its End. Similarly if an
1025 * application reuses a query object before results have arrived
1026 * the frontend will wait for prior results so we don't need
1027 * to support abandoning in-flight results.
1028 */
1029 assert(!o->Active);
1030 assert(!o->Used || o->Ready); /* no in-flight query to worry about */
1031
1032 DBG("Begin(%d)\n", o->Id);
1033
1034 /* XXX: We have to consider that the command parser unit that parses batch
1035 * buffer commands and is used to capture begin/end counter snapshots isn't
1036 * implicitly synchronized with what's currently running across other GPU
1037 * units (such as the EUs running shaders) that the performance counters are
1038 * associated with.
1039 *
1040 * The intention of performance queries is to measure the work associated
1041 * with commands between the begin/end delimiters and so for that to be the
1042 * case we need to explicitly synchronize the parsing of commands to capture
1043 * Begin/End counter snapshots with what's running across other parts of the
1044 * GPU.
1045 *
1046 * When the command parser reaches a Begin marker it effectively needs to
1047 * drain everything currently running on the GPU until the hardware is idle
1048 * before capturing the first snapshot of counters - otherwise the results
1049 * would also be measuring the effects of earlier commands.
1050 *
1051 * When the command parser reaches an End marker it needs to stall until
1052 * everything currently running on the GPU has finished before capturing the
1053 * end snapshot - otherwise the results won't be a complete representation
1054 * of the work.
1055 *
1056 * Theoretically there could be opportunities to minimize how much of the
1057 * GPU pipeline is drained, or that we stall for, when we know what specific
1058 * units the performance counters being queried relate to but we don't
1059 * currently attempt to be clever here.
1060 *
1061 * Note: with our current simple approach here then for back-to-back queries
1062 * we will redundantly emit duplicate commands to synchronize the command
1063 * streamer with the rest of the GPU pipeline, but we assume that in HW the
1064 * second synchronization is effectively a NOOP.
1065 *
1066 * N.B. The final results are based on deltas of counters between (inside)
1067 * Begin/End markers so even though the total wall clock time of the
1068 * workload is stretched by larger pipeline bubbles the bubbles themselves
1069 * are generally invisible to the query results. Whether that's a good or a
1070 * bad thing depends on the use case. For a lower real-time impact while
1071 * capturing metrics, periodic sampling may be a better choice than
1072 * INTEL_performance_query.
1073 *
1074 *
1075 * This is our Begin synchronization point to drain current work on the
1076 * GPU before we capture our first counter snapshot...
1077 */
1078 brw_emit_mi_flush(brw);
1079
1080 switch (query->kind) {
1081 case OA_COUNTERS:
1082 case OA_COUNTERS_RAW: {
1083
1084 /* Opening an i915 perf stream implies exclusive access to the OA unit
1085 * which will generate counter reports for a specific counter set with a
1086 * specific layout/format so we can't begin any OA based queries that
1087 * require a different counter set or format unless we get an opportunity
1088 * to close the stream and open a new one...
1089 */
1090 uint64_t metric_id = brw_perf_query_get_metric_id(brw, query);
1091
1092 if (brw->perfquery.oa_stream_fd != -1 &&
1093 brw->perfquery.current_oa_metrics_set_id != metric_id) {
1094
1095 if (brw->perfquery.n_oa_users != 0) {
1096 DBG("WARNING: Begin(%d) failed: already using perf config=%i/%"PRIu64"\n",
1097 o->Id, brw->perfquery.current_oa_metrics_set_id, metric_id);
1098 return false;
1099 } else
1100 close_perf(brw, query);
1101 }
1102
1103 /* If the OA counters aren't already on, enable them. */
1104 if (brw->perfquery.oa_stream_fd == -1) {
1105 __DRIscreen *screen = brw->screen->driScrnPriv;
1106 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1107
1108 /* The period_exponent gives a sampling period as follows:
1109 * sample_period = timestamp_period * 2^(period_exponent + 1)
1110 *
1111 * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1112 * ~83ns (GEN8/9).
1113 *
1114 * The counter overflow period is derived from the EuActive counter
1115 * which reads a counter that increments by the number of clock
1116 * cycles multiplied by the number of EUs. It can be calculated as:
1117 *
1118 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1119 *
1120 * (E.g. 40 EUs @ 1GHz = ~53ms)
1121 *
1122 * We select a sampling period lower than that overflow period to
1123 * ensure we cannot see more than 1 counter overflow; otherwise we
1124 * could lose information.
1125 */
1126
1127 int a_counter_in_bits = 32;
1128 if (devinfo->gen >= 8)
1129 a_counter_in_bits = 40;
1130
1131 uint64_t overflow_period = pow(2, a_counter_in_bits) /
1132 (brw->perfquery.sys_vars.n_eus *
1133 /* drop 1GHz freq to have units in nanoseconds */
1134 2);
1135
1136 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1137 overflow_period, overflow_period / 1000000ul, brw->perfquery.sys_vars.n_eus);
1138
1139 int period_exponent = 0;
1140 uint64_t prev_sample_period, next_sample_period;
1141 for (int e = 0; e < 30; e++) {
1142 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1143 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1144
1145 /* Take the previous sampling period, lower than the overflow
1146 * period.
1147 */
1148 if (prev_sample_period < overflow_period &&
1149 next_sample_period > overflow_period)
1150 period_exponent = e + 1;
1151 }
1152
1153 if (period_exponent == 0) {
1154 DBG("WARNING: unable to find a sampling exponent\n");
1155 return false;
1156 }
1157
1158 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1159 prev_sample_period / 1000000ul);
1160
1161 if (!open_i915_perf_oa_stream(brw,
1162 metric_id,
1163 query->oa_format,
1164 period_exponent,
1165 screen->fd, /* drm fd */
1166 brw->hw_ctx))
1167 return false;
1168 } else {
1169 assert(brw->perfquery.current_oa_metrics_set_id == metric_id &&
1170 brw->perfquery.current_oa_format == query->oa_format);
1171 }
1172
1173 if (!inc_n_oa_users(brw)) {
1174 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1175 return false;
1176 }
1177
1178 if (obj->oa.bo) {
1179 brw_bo_unreference(obj->oa.bo);
1180 obj->oa.bo = NULL;
1181 }
1182
1183 obj->oa.bo =
1184 brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo", MI_RPC_BO_SIZE,
1185 BRW_MEMZONE_OTHER);
1186 #ifdef DEBUG
1187 /* Pre-filling the BO helps debug whether writes landed. */
1188 void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
1189 memset(map, 0x80, MI_RPC_BO_SIZE);
1190 brw_bo_unmap(obj->oa.bo);
1191 #endif
1192
1193 obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
1194 brw->perfquery.next_query_start_report_id += 2;
1195
1196 /* We flush the batchbuffer here to minimize the chances that MI_RPC
1197 * delimiting commands end up in different batchbuffers. If that's the
1198 * case, the measurement will include the time it takes for the kernel
1199 * scheduler to load a new request into the hardware. This is manifested in
1200 * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
1201 */
1202 intel_batchbuffer_flush(brw);
1203
1204 /* Take a starting OA counter snapshot. */
1205 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
1206 obj->oa.begin_report_id);
1207 capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_START_OFFSET_BYTES);
1208
1209 ++brw->perfquery.n_active_oa_queries;
1210
1211 /* No already-buffered samples can possibly be associated with this query
1212 * so create a marker within the list of sample buffers enabling us to
1213 * easily ignore earlier samples when processing this query after
1214 * completion.
1215 */
1216 assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
1217 obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
1218
1219 struct brw_oa_sample_buf *buf =
1220 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
1221
1222 /* This reference will ensure that future/following sample
1223 * buffers (that may relate to this query) can't be freed until
1224 * this drops to zero.
1225 */
1226 buf->refcount++;
1227
1228 obj->oa.hw_id = 0xffffffff;
1229 memset(obj->oa.accumulator, 0, sizeof(obj->oa.accumulator));
1230 obj->oa.results_accumulated = false;
1231
1232 add_to_unaccumulated_query_list(brw, obj);
1233 break;
1234 }
1235
1236 case PIPELINE_STATS:
1237 if (obj->pipeline_stats.bo) {
1238 brw_bo_unreference(obj->pipeline_stats.bo);
1239 obj->pipeline_stats.bo = NULL;
1240 }
1241
1242 obj->pipeline_stats.bo =
1243 brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
1244 STATS_BO_SIZE, BRW_MEMZONE_OTHER);
1245
1246 /* Take starting snapshots. */
1247 snapshot_statistics_registers(brw, obj, 0);
1248
1249 ++brw->perfquery.n_active_pipeline_stats_queries;
1250 break;
1251
1252 default:
1253 unreachable("Unknown query type");
1254 break;
1255 }
1256
1257 if (INTEL_DEBUG & DEBUG_PERFMON)
1258 dump_perf_queries(brw);
1259
1260 return true;
1261 }
1262
1263 /**
1264 * Driver hook for glEndPerfQueryINTEL().
1265 */
1266 static void
1267 brw_end_perf_query(struct gl_context *ctx,
1268 struct gl_perf_query_object *o)
1269 {
1270 struct brw_context *brw = brw_context(ctx);
1271 struct brw_perf_query_object *obj = brw_perf_query(o);
1272
1273 DBG("End(%d)\n", o->Id);
1274
1275 /* Ensure that the work associated with the queried commands will have
1276 * finished before taking our query end counter readings.
1277 *
1278 * For more details see comment in brw_begin_perf_query for
1279 * corresponding flush.
1280 */
1281 brw_emit_mi_flush(brw);
1282
1283 switch (obj->query->kind) {
1284 case OA_COUNTERS:
1285 case OA_COUNTERS_RAW:
1286
1287 /* NB: It's possible that the query will have already been marked
1288 * as 'accumulated' if an error was seen while reading samples
1289 * from perf. In this case we mustn't try and emit a closing
1290 * MI_RPC command in case the OA unit has already been disabled
1291 */
1292 if (!obj->oa.results_accumulated) {
1293 /* Take an ending OA counter snapshot. */
1294 capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_END_OFFSET_BYTES);
1295 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
1296 MI_RPC_BO_END_OFFSET_BYTES,
1297 obj->oa.begin_report_id + 1);
1298 }
1299
1300 --brw->perfquery.n_active_oa_queries;
1301
1302 /* NB: even though the query has now ended, it can't be accumulated
1303 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1304 * to query->oa.bo
1305 */
1306 break;
1307
1308 case PIPELINE_STATS:
1309 snapshot_statistics_registers(brw, obj,
1310 STATS_BO_END_OFFSET_BYTES);
1311 --brw->perfquery.n_active_pipeline_stats_queries;
1312 break;
1313
1314 default:
1315 unreachable("Unknown query type");
1316 break;
1317 }
1318 }
1319
1320 static void
1321 brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
1322 {
1323 struct brw_context *brw = brw_context(ctx);
1324 struct brw_perf_query_object *obj = brw_perf_query(o);
1325 struct brw_bo *bo = NULL;
1326
1327 assert(!o->Ready);
1328
1329 switch (obj->query->kind) {
1330 case OA_COUNTERS:
1331 case OA_COUNTERS_RAW:
1332 bo = obj->oa.bo;
1333 break;
1334
1335 case PIPELINE_STATS:
1336 bo = obj->pipeline_stats.bo;
1337 break;
1338
1339 default:
1340 unreachable("Unknown query type");
1341 break;
1342 }
1343
1344 if (bo == NULL)
1345 return;
1346
1347 /* If the current batch references our results bo then we need to
1348 * flush first...
1349 */
1350 if (brw_batch_references(&brw->batch, bo))
1351 intel_batchbuffer_flush(brw);
1352
1353 brw_bo_wait_rendering(bo);
1354
1355 /* Due to a race condition between the OA unit signaling report
1356 * availability and the report actually being written into memory,
1357 * we need to wait for all the reports to come in before we can
1358 * read them.
1359 */
1360 if (obj->query->kind == OA_COUNTERS ||
1361 obj->query->kind == OA_COUNTERS_RAW) {
1362 while (!read_oa_samples_for_query(brw, obj))
1363 ;
1364 }
1365 }
1366
1367 static bool
1368 brw_is_perf_query_ready(struct gl_context *ctx,
1369 struct gl_perf_query_object *o)
1370 {
1371 struct brw_context *brw = brw_context(ctx);
1372 struct brw_perf_query_object *obj = brw_perf_query(o);
1373
1374 if (o->Ready)
1375 return true;
1376
1377 switch (obj->query->kind) {
1378 case OA_COUNTERS:
1379 case OA_COUNTERS_RAW:
1380 return (obj->oa.results_accumulated ||
1381 (obj->oa.bo &&
1382 !brw_batch_references(&brw->batch, obj->oa.bo) &&
1383 !brw_bo_busy(obj->oa.bo) &&
1384 read_oa_samples_for_query(brw, obj)));
1385 case PIPELINE_STATS:
1386 return (obj->pipeline_stats.bo &&
1387 !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
1388 !brw_bo_busy(obj->pipeline_stats.bo));
1389
1390 default:
1391 unreachable("Unknown query type");
1392 break;
1393 }
1394
1395 return false;
1396 }
1397
1398 static void
1399 gen8_read_report_clock_ratios(const uint32_t *report,
1400 uint64_t *slice_freq_hz,
1401 uint64_t *unslice_freq_hz)
1402 {
1403 /* The lower 16 bits of the RPT_ID field of the OA reports contain a
1404 * snapshot of the bits coming from the RP_FREQ_NORMAL register and are
1405 * divided this way:
1406 *
1407 * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
1408 * RPT_ID[10:9]: RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
1409 * RPT_ID[8:0]: RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
1410 *
1411 * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
1412 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
1413 *
1414 * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
1415 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
1416 */
1417
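/* Worked example with an illustrative (made up) report word: for
 * report[0] = 0x2400001e the decode below yields
 * unslice_freq = 0x1e (30) -> 30 * 16666667 Hz ~= 500 MHz, and
 * slice_freq = 0x12 (18) -> 18 * 16666667 Hz ~= 300 MHz.
 */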
1418 uint32_t unslice_freq = report[0] & 0x1ff;
1419 uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
1420 uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
1421 uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);
1422
1423 *slice_freq_hz = slice_freq * 16666667ULL;
1424 *unslice_freq_hz = unslice_freq * 16666667ULL;
1425 }
1426
1427 static void
1428 read_slice_unslice_frequencies(struct brw_context *brw,
1429 struct brw_perf_query_object *obj)
1430 {
1431 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1432 uint32_t *begin_report, *end_report;
1433
1434 /* Slice/Unslice frequency is only available in the OA reports when the
1435 * "Disable OA reports due to clock ratio change" field in
1436 * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
1437 * global register (see drivers/gpu/drm/i915/i915_perf.c)
1438 *
1439 * Documentation says this should be available on Gen9+ but experimentation
1440 * shows that Gen8 reports similar values, so we enable it there too.
1441 */
1442 if (devinfo->gen < 8)
1443 return;
1444
1445 begin_report = obj->oa.map;
1446 end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
1447
1448 gen8_read_report_clock_ratios(begin_report,
1449 &obj->oa.slice_frequency[0],
1450 &obj->oa.unslice_frequency[0]);
1451 gen8_read_report_clock_ratios(end_report,
1452 &obj->oa.slice_frequency[1],
1453 &obj->oa.unslice_frequency[1]);
1454 }
1455
1456 static void
1457 read_gt_frequency(struct brw_context *brw,
1458 struct brw_perf_query_object *obj)
1459 {
1460 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1461 uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
1462 end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
1463
1464 switch (devinfo->gen) {
1465 case 7:
1466 case 8:
1467 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
1468 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
1469 break;
1470 case 9:
1471 case 10:
1472 case 11:
1473 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
1474 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
1475 break;
1476 default:
1477 unreachable("unexpected gen");
1478 }
1479
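/* For example (illustrative raw values): on Gen7/8 a RPSTAT field value of
 * 24 decodes to 24 * 50 = 1200, while Gen9+ reports the ratio in ~16.66 MHz
 * units so a value of 72 decodes to 72 * 50 / 3 = 1200; both end up as
 * 1.2 GHz after the conversion to Hz below.
 */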
1480 /* Put the numbers into Hz. */
1481 obj->oa.gt_frequency[0] *= 1000000ULL;
1482 obj->oa.gt_frequency[1] *= 1000000ULL;
1483 }
1484
1485 static int
1486 get_oa_counter_data(struct brw_context *brw,
1487 struct brw_perf_query_object *obj,
1488 size_t data_size,
1489 uint8_t *data)
1490 {
1491 const struct brw_perf_query_info *query = obj->query;
1492 int n_counters = query->n_counters;
1493 int written = 0;
1494
1495 for (int i = 0; i < n_counters; i++) {
1496 const struct brw_perf_query_counter *counter = &query->counters[i];
1497 uint64_t *out_uint64;
1498 float *out_float;
1499
1500 if (counter->size) {
1501 switch (counter->data_type) {
1502 case GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL:
1503 out_uint64 = (uint64_t *)(data + counter->offset);
1504 *out_uint64 = counter->oa_counter_read_uint64(brw, query,
1505 obj->oa.accumulator);
1506 break;
1507 case GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL:
1508 out_float = (float *)(data + counter->offset);
1509 *out_float = counter->oa_counter_read_float(brw, query,
1510 obj->oa.accumulator);
1511 break;
1512 default:
1513 /* So far we aren't using uint32, double or bool32... */
1514 unreachable("unexpected counter data type");
1515 }
1516 written = counter->offset + counter->size;
1517 }
1518 }
1519
1520 return written;
1521 }
1522
1523 static int
1524 get_pipeline_stats_data(struct brw_context *brw,
1525 struct brw_perf_query_object *obj,
1526 size_t data_size,
1527 uint8_t *data)
1528
1529 {
1530 const struct brw_perf_query_info *query = obj->query;
1531 int n_counters = obj->query->n_counters;
1532 uint8_t *p = data;
1533
1534 uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
1535 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
1536
1537 for (int i = 0; i < n_counters; i++) {
1538 const struct brw_perf_query_counter *counter = &query->counters[i];
1539 uint64_t value = end[i] - start[i];
1540
1541 if (counter->pipeline_stat.numerator !=
1542 counter->pipeline_stat.denominator) {
1543 value *= counter->pipeline_stat.numerator;
1544 value /= counter->pipeline_stat.denominator;
1545 }
1546
1547 *((uint64_t *)p) = value;
1548 p += 8;
1549 }
1550
1551 brw_bo_unmap(obj->pipeline_stats.bo);
1552
1553 return p - data;
1554 }
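/* Note on the numerator/denominator scaling above: for example,
 * init_pipeline_statistic_query_registers() below registers
 * PS_INVOCATION_COUNT on Haswell/Gen8 with a numerator of 1 and a
 * denominator of 4, so the raw register delta is divided by 4 before
 * being written to the application's buffer.
 */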
1555
1556 /**
1557 * Driver hook for glGetPerfQueryDataINTEL().
1558 */
1559 static void
1560 brw_get_perf_query_data(struct gl_context *ctx,
1561 struct gl_perf_query_object *o,
1562 GLsizei data_size,
1563 GLuint *data,
1564 GLuint *bytes_written)
1565 {
1566 struct brw_context *brw = brw_context(ctx);
1567 struct brw_perf_query_object *obj = brw_perf_query(o);
1568 int written = 0;
1569
1570 assert(brw_is_perf_query_ready(ctx, o));
1571
1572 DBG("GetData(%d)\n", o->Id);
1573
1574 if (INTEL_DEBUG & DEBUG_PERFMON)
1575 dump_perf_queries(brw);
1576
1577 /* We expect that the frontend only calls this hook when it knows
1578 * that results are available.
1579 */
1580 assert(o->Ready);
1581
1582 switch (obj->query->kind) {
1583 case OA_COUNTERS:
1584 case OA_COUNTERS_RAW:
1585 if (!obj->oa.results_accumulated) {
1586 read_gt_frequency(brw, obj);
1587 read_slice_unslice_frequencies(brw, obj);
1588 accumulate_oa_reports(brw, obj);
1589 assert(obj->oa.results_accumulated);
1590
1591 brw_bo_unmap(obj->oa.bo);
1592 obj->oa.map = NULL;
1593 }
1594 if (obj->query->kind == OA_COUNTERS)
1595 written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
1596 else
1597 written = brw_perf_query_get_mdapi_oa_data(brw, obj, data_size, (uint8_t *)data);
1598 break;
1599
1600 case PIPELINE_STATS:
1601 written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
1602 break;
1603
1604 default:
1605 unreachable("Unknown query type");
1606 break;
1607 }
1608
1609 if (bytes_written)
1610 *bytes_written = written;
1611 }
1612
1613 static struct gl_perf_query_object *
1614 brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
1615 {
1616 struct brw_context *brw = brw_context(ctx);
1617 const struct brw_perf_query_info *query =
1618 &brw->perfquery.queries[query_index];
1619 struct brw_perf_query_object *obj =
1620 calloc(1, sizeof(struct brw_perf_query_object));
1621
1622 if (!obj)
1623 return NULL;
1624
1625 obj->query = query;
1626
1627 brw->perfquery.n_query_instances++;
1628
1629 return &obj->base;
1630 }
1631
1632 /**
1633 * Driver hook for glDeletePerfQueryINTEL().
1634 */
1635 static void
1636 brw_delete_perf_query(struct gl_context *ctx,
1637 struct gl_perf_query_object *o)
1638 {
1639 struct brw_context *brw = brw_context(ctx);
1640 struct brw_perf_query_object *obj = brw_perf_query(o);
1641
1642 /* We can assume that the frontend waits for a query to complete
1643 * before ever calling into here, so we don't have to worry about
1644 * deleting an in-flight query object.
1645 */
1646 assert(!o->Active);
1647 assert(!o->Used || o->Ready);
1648
1649 DBG("Delete(%d)\n", o->Id);
1650
1651 switch (obj->query->kind) {
1652 case OA_COUNTERS:
1653 case OA_COUNTERS_RAW:
1654 if (obj->oa.bo) {
1655 if (!obj->oa.results_accumulated) {
1656 drop_from_unaccumulated_query_list(brw, obj);
1657 dec_n_oa_users(brw);
1658 }
1659
1660 brw_bo_unreference(obj->oa.bo);
1661 obj->oa.bo = NULL;
1662 }
1663
1664 obj->oa.results_accumulated = false;
1665 break;
1666
1667 case PIPELINE_STATS:
1668 if (obj->pipeline_stats.bo) {
1669 brw_bo_unreference(obj->pipeline_stats.bo);
1670 obj->pipeline_stats.bo = NULL;
1671 }
1672 break;
1673
1674 default:
1675 unreachable("Unknown query type");
1676 break;
1677 }
1678
1679 /* As an indication that the INTEL_performance_query extension is no
1680 * longer in use, it's a good time to free our cache of sample
1681 * buffers and close any current i915-perf stream.
1682 */
1683 if (--brw->perfquery.n_query_instances == 0) {
1684 free_sample_bufs(brw);
1685 close_perf(brw, obj->query);
1686 }
1687
1688 free(obj);
1689 }
1690
1691 /******************************************************************************/
1692
1693 static void
1694 init_pipeline_statistic_query_registers(struct brw_context *brw)
1695 {
1696 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1697 struct brw_perf_query_info *query = brw_perf_query_append_query_info(brw);
1698
1699 query->kind = PIPELINE_STATS;
1700 query->name = "Pipeline Statistics Registers";
1701 query->n_counters = 0;
1702 query->counters =
1703 rzalloc_array(brw, struct brw_perf_query_counter, MAX_STAT_COUNTERS);
1704
1705 brw_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
1706 "N vertices submitted");
1707 brw_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1708 "N primitives submitted");
1709 brw_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1710 "N vertex shader invocations");
1711
1712 if (devinfo->gen == 6) {
1713 brw_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
1714 "SO_PRIM_STORAGE_NEEDED",
1715 "N geometry shader stream-out primitives (total)");
1716 brw_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
1717 "SO_NUM_PRIMS_WRITTEN",
1718 "N geometry shader stream-out primitives (written)");
1719 } else {
1720 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
1721 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
1722 "N stream-out (stream 0) primitives (total)");
1723 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
1724 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
1725 "N stream-out (stream 1) primitives (total)");
1726 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
1727 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
1728 "N stream-out (stream 2) primitives (total)");
1729 brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
1730 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
1731 "N stream-out (stream 3) primitives (total)");
1732 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
1733 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
1734 "N stream-out (stream 0) primitives (written)");
1735 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
1736 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
1737 "N stream-out (stream 1) primitives (written)");
1738 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
1739 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
1740 "N stream-out (stream 2) primitives (written)");
1741 brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
1742 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
1743 "N stream-out (stream 3) primitives (written)");
1744 }
1745
1746 brw_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1747 "N TCS shader invocations");
1748 brw_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1749 "N TES shader invocations");
1750
1751 brw_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1752 "N geometry shader invocations");
1753 brw_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1754 "N geometry shader primitives emitted");
1755
1756 brw_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1757 "N primitives entering clipping");
1758 brw_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1759 "N primitives leaving clipping");
1760
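   /* Note: the 1/4 scale applied below is believed to correspond to the
    * WaDividePSInvocationCountBy4 workaround: on Haswell and Broadwell the
    * hardware reports PS_INVOCATION_COUNT at four times the actual number
    * of fragment shader invocations.
    */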
1761 if (devinfo->is_haswell || devinfo->gen == 8)
1762 brw_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1763 "N fragment shader invocations",
1764 "N fragment shader invocations");
1765 else
1766 brw_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1767 "N fragment shader invocations");
1768
1769 brw_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT, "N z-pass fragments");
1770
1771 if (devinfo->gen >= 7)
1772 brw_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1773 "N compute shader invocations");
1774
1775 query->data_size = sizeof(uint64_t) * query->n_counters;
1776 }
1777
1778 static void
1779 register_oa_config(struct brw_context *brw,
1780 const struct brw_perf_query_info *query,
1781 uint64_t config_id)
1782 {
1783    struct brw_perf_query_info *registered_query =
1784       brw_perf_query_append_query_info(brw);
1785
1786    *registered_query = *query;
1787    registered_query->oa_metrics_set_id = config_id;
1788    DBG("metric set registered: id = %" PRIu64 ", guid = %s\n",
1789        registered_query->oa_metrics_set_id, query->guid);
1790 }
1791
1792 static void
1793 enumerate_sysfs_metrics(struct brw_context *brw)
1794 {
1795 char buf[256];
1796 DIR *metricsdir = NULL;
1797 struct dirent *metric_entry;
1798 int len;
1799
1800 len = snprintf(buf, sizeof(buf), "%s/metrics", brw->perfquery.sysfs_dev_dir);
1801 if (len < 0 || len >= sizeof(buf)) {
1802 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
1803 return;
1804 }
1805
1806 metricsdir = opendir(buf);
1807 if (!metricsdir) {
1808 DBG("Failed to open %s: %m\n", buf);
1809 return;
1810 }
1811
1812 while ((metric_entry = readdir(metricsdir))) {
1813 struct hash_entry *entry;
1814
1815 if ((metric_entry->d_type != DT_DIR &&
1816 metric_entry->d_type != DT_LNK) ||
1817 metric_entry->d_name[0] == '.')
1818 continue;
1819
1820 DBG("metric set: %s\n", metric_entry->d_name);
1821 entry = _mesa_hash_table_search(brw->perfquery.oa_metrics_table,
1822 metric_entry->d_name);
1823 if (entry) {
1824 uint64_t id;
1825
1826 len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
1827 brw->perfquery.sysfs_dev_dir, metric_entry->d_name);
1828 if (len < 0 || len >= sizeof(buf)) {
1829 DBG("Failed to concatenate path to sysfs metric id file\n");
1830 continue;
1831 }
1832
1833 if (!read_file_uint64(buf, &id)) {
1834 DBG("Failed to read metric set id from %s: %m", buf);
1835 continue;
1836 }
1837
1838 register_oa_config(brw, (const struct brw_perf_query_info *)entry->data, id);
1839 } else
1840 DBG("metric set not known by mesa (skipping)\n");
1841 }
1842
1843 closedir(metricsdir);
1844 }
1845
1846 static bool
1847 kernel_has_dynamic_config_support(struct brw_context *brw)
1848 {
1849 __DRIscreen *screen = brw->screen->driScrnPriv;
1850 struct hash_entry *entry;
1851
1852 hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
1853 struct brw_perf_query_info *query = entry->data;
1854 char config_path[280];
1855 uint64_t config_id;
1856
1857 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
1858 brw->perfquery.sysfs_dev_dir, query->guid);
1859
1860 /* Look for the test config, which we know we can't replace. */
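      /* A kernel with dynamic config support knows about the remove ioctl
       * but refuses to delete this built-in config, failing with ENOENT;
       * older kernels reject the unknown ioctl with a different errno, so
       * ENOENT here is taken as evidence that configs can be added.
       */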
1861 if (read_file_uint64(config_path, &config_id) && config_id == 1) {
1862 return drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
1863 &config_id) < 0 && errno == ENOENT;
1864 }
1865 }
1866
1867 return false;
1868 }
1869
1870 static void
1871 init_oa_configs(struct brw_context *brw)
1872 {
1873 __DRIscreen *screen = brw->screen->driScrnPriv;
1874 struct hash_entry *entry;
1875
1876 hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
1877 const struct brw_perf_query_info *query = entry->data;
1878 struct drm_i915_perf_oa_config config;
1879 char config_path[280];
1880 uint64_t config_id;
1881 int ret;
1882
1883 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
1884 brw->perfquery.sysfs_dev_dir, query->guid);
1885
1886 /* Don't recreate already loaded configs. */
1887 if (read_file_uint64(config_path, &config_id)) {
1888 DBG("metric set: %s (already loaded)\n", query->guid);
1889 register_oa_config(brw, query, config_id);
1890 continue;
1891 }
1892
1893 memset(&config, 0, sizeof(config));
1894
1895 memcpy(config.uuid, query->guid, sizeof(config.uuid));
1896
1897 config.n_mux_regs = query->n_mux_regs;
1898 config.mux_regs_ptr = (uintptr_t) query->mux_regs;
1899
1900 config.n_boolean_regs = query->n_b_counter_regs;
1901 config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;
1902
1903 config.n_flex_regs = query->n_flex_regs;
1904 config.flex_regs_ptr = (uintptr_t) query->flex_regs;
1905
1906 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
1907 if (ret < 0) {
1908 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
1909 query->name, query->guid, strerror(errno));
1910 continue;
1911 }
1912
1913 register_oa_config(brw, query, ret);
1914 DBG("metric set: %s (added)\n", query->guid);
1915 }
1916 }
1917
1918 static bool
1919 query_topology(struct brw_context *brw)
1920 {
1921 __DRIscreen *screen = brw->screen->driScrnPriv;
1922 struct drm_i915_query_item item = {
1923 .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
1924 };
1925 struct drm_i915_query query = {
1926 .num_items = 1,
1927 .items_ptr = (uintptr_t) &item,
1928 };
1929
1930    if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query) ||
1931        item.length <= 0)
1932       return false;
1933
1934    struct drm_i915_query_topology_info *topo_info =
1935       (struct drm_i915_query_topology_info *) calloc(1, item.length);
1936    item.data_ptr = (uintptr_t) topo_info;
1937    if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query)) {
1938       free(topo_info);
1939       return false;
1940    }
1941 gen_device_info_update_from_topology(&brw->screen->devinfo,
1942 topo_info);
1943
1944 free(topo_info);
1945
1946 return true;
1947 }
1948
1949 static bool
1950 getparam_topology(struct brw_context *brw)
1951 {
1952 __DRIscreen *screen = brw->screen->driScrnPriv;
1953 drm_i915_getparam_t gp;
1954 int ret;
1955
1956 int slice_mask = 0;
1957 gp.param = I915_PARAM_SLICE_MASK;
1958 gp.value = &slice_mask;
1959 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1960 if (ret)
1961 return false;
1962
1963 int subslice_mask = 0;
1964 gp.param = I915_PARAM_SUBSLICE_MASK;
1965 gp.value = &subslice_mask;
1966 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1967 if (ret)
1968 return false;
1969
1970 gen_device_info_update_from_masks(&brw->screen->devinfo,
1971 slice_mask,
1972 subslice_mask,
1973 brw->screen->eu_total);
1974
1975 return true;
1976 }
1977
1978 static void
1979 compute_topology_builtins(struct brw_context *brw)
1980 {
1981 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1982
1983 brw->perfquery.sys_vars.slice_mask = devinfo->slice_masks;
1984 brw->perfquery.sys_vars.n_eu_slices = devinfo->num_slices;
1985
1986    for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
1987 brw->perfquery.sys_vars.n_eu_sub_slices +=
1988 _mesa_bitcount(devinfo->subslice_masks[i]);
1989 }
1990
1991 for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
1992 brw->perfquery.sys_vars.n_eus += _mesa_bitcount(devinfo->eu_masks[i]);
1993
1994 brw->perfquery.sys_vars.eu_threads_count =
1995 brw->perfquery.sys_vars.n_eus * devinfo->num_thread_per_eu;
1996
1997    /* At the moment the subslice mask builtin has groups of 3 bits for each
1998 * slice.
1999 *
2000 * Ideally equations would be updated to have a slice/subslice query
2001 * function/operator.
2002 */
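   /* For example, subslice ss of slice s lands at bit (s * 3 + ss), so
    * slice 0 occupies bits 0-2, slice 1 bits 3-5, and so on.
    */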
2003 brw->perfquery.sys_vars.subslice_mask = 0;
2004 for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
2005 for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
2006 if (gen_device_info_subslice_available(devinfo, s, ss))
2007 brw->perfquery.sys_vars.subslice_mask |= 1UL << (s * 3 + ss);
2008 }
2009 }
2010 }
2011
2012 static bool
2013 init_oa_sys_vars(struct brw_context *brw)
2014 {
2015 const struct gen_device_info *devinfo = &brw->screen->devinfo;
2016 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
2017 __DRIscreen *screen = brw->screen->driScrnPriv;
2018
2019 if (!read_sysfs_drm_device_file_uint64(brw, "gt_min_freq_mhz", &min_freq_mhz))
2020 return false;
2021
2022 if (!read_sysfs_drm_device_file_uint64(brw, "gt_max_freq_mhz", &max_freq_mhz))
2023 return false;
2024
2025 if (!query_topology(brw)) {
2026 /* We need the i915 query uAPI on CNL+ (kernel 4.17+). */
2027 if (devinfo->gen >= 10)
2028 return false;
2029
2030 if (!getparam_topology(brw)) {
2031 /* We need the SLICE_MASK/SUBSLICE_MASK on gen8+ (kernel 4.13+). */
2032 if (devinfo->gen >= 8)
2033 return false;
2034
2035 /* On Haswell, the values are already computed for us in
2036 * gen_device_info.
2037 */
2038 }
2039 }
2040
2041 memset(&brw->perfquery.sys_vars, 0, sizeof(brw->perfquery.sys_vars));
2042 brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
2043 brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;
2044 brw->perfquery.sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
2045 brw->perfquery.sys_vars.revision = intel_device_get_revision(screen->fd);
2046 compute_topology_builtins(brw);
2047
2048 return true;
2049 }
2050
2051 static bool
2052 get_sysfs_dev_dir(struct brw_context *brw)
2053 {
2054 __DRIscreen *screen = brw->screen->driScrnPriv;
2055 struct stat sb;
2056 int min, maj;
2057 DIR *drmdir;
2058 struct dirent *drm_entry;
2059 int len;
2060
2061 brw->perfquery.sysfs_dev_dir[0] = '\0';
2062
2063 if (fstat(screen->fd, &sb)) {
2064 DBG("Failed to stat DRM fd\n");
2065 return false;
2066 }
2067
2068 maj = major(sb.st_rdev);
2069 min = minor(sb.st_rdev);
2070
2071 if (!S_ISCHR(sb.st_mode)) {
2072 DBG("DRM fd is not a character device as expected\n");
2073 return false;
2074 }
2075
2076 len = snprintf(brw->perfquery.sysfs_dev_dir,
2077 sizeof(brw->perfquery.sysfs_dev_dir),
2078 "/sys/dev/char/%d:%d/device/drm", maj, min);
2079 if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir)) {
2080 DBG("Failed to concatenate sysfs path to drm device\n");
2081 return false;
2082 }
2083
2084 drmdir = opendir(brw->perfquery.sysfs_dev_dir);
2085 if (!drmdir) {
2086 DBG("Failed to open %s: %m\n", brw->perfquery.sysfs_dev_dir);
2087 return false;
2088 }
2089
2090 while ((drm_entry = readdir(drmdir))) {
2091 if ((drm_entry->d_type == DT_DIR ||
2092 drm_entry->d_type == DT_LNK) &&
2093 strncmp(drm_entry->d_name, "card", 4) == 0)
2094 {
2095 len = snprintf(brw->perfquery.sysfs_dev_dir,
2096 sizeof(brw->perfquery.sysfs_dev_dir),
2097 "/sys/dev/char/%d:%d/device/drm/%s",
2098 maj, min, drm_entry->d_name);
2099 closedir(drmdir);
2100 if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir))
2101 return false;
2102 else
2103 return true;
2104 }
2105 }
2106
2107 closedir(drmdir);
2108
2109 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
2110 maj, min);
2111
2112 return false;
2113 }
2114
2115 typedef void (*perf_register_oa_queries_t)(struct brw_context *);
2116
2117 static perf_register_oa_queries_t
2118 get_register_queries_function(const struct gen_device_info *devinfo)
2119 {
2120 if (devinfo->is_haswell)
2121 return brw_oa_register_queries_hsw;
2122 if (devinfo->is_cherryview)
2123 return brw_oa_register_queries_chv;
2124 if (devinfo->is_broadwell)
2125 return brw_oa_register_queries_bdw;
2126 if (devinfo->is_broxton)
2127 return brw_oa_register_queries_bxt;
2128 if (devinfo->is_skylake) {
2129 if (devinfo->gt == 2)
2130 return brw_oa_register_queries_sklgt2;
2131 if (devinfo->gt == 3)
2132 return brw_oa_register_queries_sklgt3;
2133 if (devinfo->gt == 4)
2134 return brw_oa_register_queries_sklgt4;
2135 }
2136 if (devinfo->is_kabylake) {
2137 if (devinfo->gt == 2)
2138 return brw_oa_register_queries_kblgt2;
2139 if (devinfo->gt == 3)
2140 return brw_oa_register_queries_kblgt3;
2141 }
2142 if (devinfo->is_geminilake)
2143 return brw_oa_register_queries_glk;
2144 if (devinfo->is_coffeelake) {
2145 if (devinfo->gt == 2)
2146 return brw_oa_register_queries_cflgt2;
2147 if (devinfo->gt == 3)
2148 return brw_oa_register_queries_cflgt3;
2149 }
2150 if (devinfo->is_cannonlake)
2151 return brw_oa_register_queries_cnl;
2152
2153 return NULL;
2154 }
2155
2156 static unsigned
2157 brw_init_perf_query_info(struct gl_context *ctx)
2158 {
2159 struct brw_context *brw = brw_context(ctx);
2160 const struct gen_device_info *devinfo = &brw->screen->devinfo;
2161 bool i915_perf_oa_available = false;
2162 struct stat sb;
2163 perf_register_oa_queries_t oa_register;
2164
2165 if (brw->perfquery.n_queries)
2166 return brw->perfquery.n_queries;
2167
2168 init_pipeline_statistic_query_registers(brw);
2169 brw_perf_query_register_mdapi_statistic_query(brw);
2170
2171 oa_register = get_register_queries_function(devinfo);
2172
2173 /* The existence of this sysctl parameter implies the kernel supports
2174 * the i915 perf interface.
2175 */
2176 if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {
2177
2178 /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
2179 * metrics unless running as root.
2180 */
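      /* (The default is paranoid == 1; a privileged user can relax it with
       * e.g. "sysctl dev.i915.perf_stream_paranoid=0", i.e. by writing 0 to
       * /proc/sys/dev/i915/perf_stream_paranoid.)
       */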
2181 if (devinfo->is_haswell)
2182 i915_perf_oa_available = true;
2183 else {
2184 uint64_t paranoid = 1;
2185
2186 read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);
2187
2188 if (paranoid == 0 || geteuid() == 0)
2189 i915_perf_oa_available = true;
2190 }
2191 }
2192
2193 if (i915_perf_oa_available &&
2194 oa_register &&
2195 get_sysfs_dev_dir(brw) &&
2196 init_oa_sys_vars(brw))
2197 {
2198 brw->perfquery.oa_metrics_table =
2199 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2200 _mesa_key_string_equal);
2201
2202 /* Index all the metric sets mesa knows about before looking to see what
2203 * the kernel is advertising.
2204 */
2205 oa_register(brw);
2206
2207 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
2208 kernel_has_dynamic_config_support(brw))
2209 init_oa_configs(brw);
2210 else
2211 enumerate_sysfs_metrics(brw);
2212
2213 brw_perf_query_register_mdapi_oa_query(brw);
2214 }
2215
2216 brw->perfquery.unaccumulated =
2217 ralloc_array(brw, struct brw_perf_query_object *, 2);
2218 brw->perfquery.unaccumulated_elements = 0;
2219 brw->perfquery.unaccumulated_array_size = 2;
2220
2221 exec_list_make_empty(&brw->perfquery.sample_buffers);
2222 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
2223
2224    /* It's convenient to guarantee that this linked list of sample
2225     * buffers is never empty, so we add an empty head buffer. That way,
2226     * when we begin an OA query, we can always take a reference on a
2227     * buffer in this list.
2228     */
2229 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
2230 exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);
2231
2232 brw->perfquery.oa_stream_fd = -1;
2233
2234 brw->perfquery.next_query_start_report_id = 1000;
2235
2236 return brw->perfquery.n_queries;
2237 }
2238
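/**
 * Wire up the GL_INTEL_performance_query driver hooks.
 *
 * For reference, a rough sketch of how an application exercises these hooks
 * through the extension's entry points (error handling and the query-info
 * enumeration loop are omitted; the actual counter layout comes from
 * glGetPerfCounterInfoINTEL and is query specific, so MAX_QUERY_DATA below
 * is only a placeholder for a size obtained from the query info):
 *
 *    GLuint query_id, handle, bytes = 0;
 *    char data[MAX_QUERY_DATA];
 *
 *    glGetPerfQueryIdByNameINTEL("Pipeline Statistics Registers", &query_id);
 *    glCreatePerfQueryINTEL(query_id, &handle);
 *
 *    glBeginPerfQueryINTEL(handle);
 *    // ... issue the GPU work to be measured ...
 *    glEndPerfQueryINTEL(handle);
 *
 *    glGetPerfQueryDataINTEL(handle, GL_PERFQUERY_WAIT_INTEL,
 *                            sizeof(data), data, &bytes);
 *    glDeletePerfQueryINTEL(handle);
 */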
2239 void
2240 brw_init_performance_queries(struct brw_context *brw)
2241 {
2242 struct gl_context *ctx = &brw->ctx;
2243
2244 ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
2245 ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
2246 ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
2247 ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
2248 ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
2249 ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
2250 ctx->Driver.EndPerfQuery = brw_end_perf_query;
2251 ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
2252 ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
2253 ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
2254 }