i965: perf: fix pointer to integer cast
[mesa.git] / src / mesa / drivers / dri / i965 / brw_performance_query.c
1 /*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file brw_performance_query.c
26 *
27 * Implementation of the GL_INTEL_performance_query extension.
28 *
29 * Currently there are two possible counter sources exposed here:
30 *
31 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
32 * that we can snapshot at the beginning and end of a query.
33 *
34 * On Gen7.5+ we have Observability Architecture counters which are
35 * covered in a separate document from the rest of the PRMs. It is available at:
36 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
37 * => 2013 Intel Core Processor Family => Observability Performance Counters
38 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
39 * though notably we currently only support OA counters for Haswell+)
40 */
41
42 #include <limits.h>
43 #include <dirent.h>
44
45 /* put before sys/types.h to silence glibc warnings */
46 #ifdef MAJOR_IN_MKDEV
47 #include <sys/mkdev.h>
48 #endif
49 #ifdef MAJOR_IN_SYSMACROS
50 #include <sys/sysmacros.h>
51 #endif
52 #include <sys/types.h>
53 #include <sys/stat.h>
54 #include <fcntl.h>
55 #include <sys/mman.h>
56 #include <sys/ioctl.h>
57
58 #include <xf86drm.h>
59 #include <i915_drm.h>
60
61 #include "main/hash.h"
62 #include "main/macros.h"
63 #include "main/mtypes.h"
64 #include "main/performance_query.h"
65
66 #include "util/bitset.h"
67 #include "util/ralloc.h"
68 #include "util/hash_table.h"
69 #include "util/list.h"
70
71 #include "brw_context.h"
72 #include "brw_defines.h"
73 #include "brw_performance_query.h"
74 #include "brw_oa_hsw.h"
75 #include "intel_batchbuffer.h"
76
77 #define FILE_DEBUG_FLAG DEBUG_PERFMON
78
79 /*
80 * The largest OA format we can use on Haswell includes:
81 * 1 timestamp, 45 A counters, 8 B counters and 8 C counters.
82 */
83 #define MAX_OA_REPORT_COUNTERS 62
84
85 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
86 256) /* OA counter report */
87
88 /**
89 * Periodic OA samples are read() into these buffer structures via the
90 * i915 perf kernel interface and appended to the
91 * brw->perfquery.sample_buffers linked list. When we process the
92 * results of an OA metrics query we need to consider all the periodic
93 * samples between the Begin and End MI_REPORT_PERF_COUNT command
94 * markers.
95 *
96 * 'Periodic' is a simplification as there are other automatic reports
97 * written by the hardware also buffered here.
98 *
99 * Considering three queries, A, B and C:
100 *
101 * Time ---->
102 * ________________A_________________
103 * | |
104 * | ________B_________ _____C___________
105 * | | | | | |
106 *
107 * And an illustration of sample buffers read over this time frame:
108 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
109 *
110 * These nodes may hold samples for query A:
111 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
112 *
113 * These nodes may hold samples for query B:
114 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
115 *
116 * These nodes may hold samples for query C:
117 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
118 *
119 * The illustration assumes we have an even distribution of periodic
120 * samples so all nodes have the same size plotted against time.
121 *
122 * Note, to simplify code, the list is never empty.
123 *
124 * With overlapping queries we can see that periodic OA reports may
125 * relate to multiple queries and care needs to be taken to keep
126 * track of sample buffers until there are no queries that might
127 * depend on their contents.
128 *
129 * We use a node ref counting system where a reference ensures that a
130 * node and all following nodes can't be freed/recycled until the
131 * reference drops to zero.
132 *
133 * E.g. with a ref of one here:
134 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
135 *
136 * These nodes could be freed or recycled ("reaped"):
137 * [ 0 ][ 0 ]
138 *
139 * These must be preserved until the leading ref drops to zero:
140 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
141 *
142 * When a query starts we take a reference on the current tail of
143 * the list, knowing that no already-buffered samples can possibly
144 * relate to the newly-started query. A pointer to this node is
145 * also saved in the query object's ->oa.samples_head.
146 *
147 * E.g. starting query A while there are two nodes in .sample_buffers:
148 * ________________A________
149 * |
150 *
151 * [ 0 ][ 1 ]
152 * ^_______ Add a reference and store pointer to node in
153 * A->oa.samples_head
154 *
155 * Moving forward to when the B query starts with no new buffer nodes:
156 * (for reference, i915 perf reads() are only done when queries finish)
157 * ________________A_______
158 * | ________B___
159 * | |
160 *
161 * [ 0 ][ 2 ]
162 * ^_______ Add a reference and store pointer to
163 * node in B->oa.samples_head
164 *
165 * Once an OA query has finished, i.e. once it has become 'Ready',
166 * the End OA report has landed and we have processed all the
167 * intermediate periodic samples, then we drop the
168 * ->oa.samples_head reference we took at the start.
169 *
170 * So when the B query has finished we have:
171 * ________________A________
172 * | ______B___________
173 * | | |
174 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
175 * ^_______ Drop B->oa.samples_head reference
176 *
177 * We still can't free these due to the A->oa.samples_head ref:
178 * [ 1 ][ 0 ][ 0 ][ 0 ]
179 *
180 * When the A query finishes: (note there's a new ref for C's samples_head)
181 * ________________A_________________
182 * | |
183 * | _____C_________
184 * | | |
185 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
186 * ^_______ Drop A->oa.samples_head reference
187 *
188 * And we can now reap these nodes up to the C->oa.samples_head:
189 * [ X ][ X ][ X ][ X ]
190 * keeping -> [ 1 ][ 0 ][ 0 ]
191 *
192 * We reap old sample buffers each time we finish processing an OA
193 * query by iterating the sample_buffers list from the head until we
194 * find a referenced node and stop.
195 *
196 * Reaped buffers move to a perfquery.free_sample_buffers list and
197 * when we come to read() we first look to recycle a buffer from the
198 * free_sample_buffers list before allocating a new buffer.
199 */
200 struct brw_oa_sample_buf {
201 struct exec_node link;
202 int refcount;
203 int len;
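/* Sized to hold up to 10 full i915 perf sample records per read() */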
204 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
205 };
206
207 /**
208 * i965 representation of a performance query object.
209 *
210 * NB: We want to keep this structure relatively lean considering that
211 * applications may expect to allocate enough objects to be able to
212 * query around all draw calls in a frame.
213 */
214 struct brw_perf_query_object
215 {
216 struct gl_perf_query_object base;
217
218 const struct brw_perf_query_info *query;
219
220 /* See query->kind to know which state below is in use... */
221 union {
222 struct {
223
224 /**
225 * BO containing OA counter snapshots at query Begin/End time.
226 */
227 struct brw_bo *bo;
228
229 /**
230 * The MI_REPORT_PERF_COUNT command lets us specify a unique
231 * ID that will be reflected in the resulting OA report
232 * that's written by the GPU. This is the ID we're expecting
233 * in the begin report, and the end report's ID should be
234 * @begin_report_id + 1.
235 */
236 int begin_report_id;
237
238 /**
239 * Reference the head of the brw->perfquery.sample_buffers
240 * list at the time that the query started (so we only need
241 * to look at nodes after this point when looking for samples
242 * related to this query)
243 *
244 * (See struct brw_oa_sample_buf description for more details)
245 */
246 struct exec_node *samples_head;
247
248 /**
249 * Storage for the final accumulated OA counters.
250 */
251 uint64_t accumulator[MAX_OA_REPORT_COUNTERS];
252
253 /**
254 * false while in the unaccumulated_elements list, and set to
255 * true when the final, end MI_RPC snapshot has been
256 * accumulated.
257 */
258 bool results_accumulated;
259
260 } oa;
261
262 struct {
263 /**
264 * BO containing starting and ending snapshots for the
265 * statistics counters.
266 */
267 struct brw_bo *bo;
268 } pipeline_stats;
269 };
270 };
271
272 /** Downcasting convenience macro. */
273 static inline struct brw_perf_query_object *
274 brw_perf_query(struct gl_perf_query_object *o)
275 {
276 return (struct brw_perf_query_object *) o;
277 }
278
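/* The pipeline statistics BO is split in half: the starting register
 * snapshots are written at offset zero and the ending snapshots at
 * STATS_BO_END_OFFSET_BYTES, one 64-bit value per counter.
 */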
279 #define STATS_BO_SIZE 4096
280 #define STATS_BO_END_OFFSET_BYTES (STATS_BO_SIZE / 2)
281 #define MAX_STAT_COUNTERS (STATS_BO_END_OFFSET_BYTES / 8)
282
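/* Similarly, the OA query BO holds the begin MI_RPC report at offset zero
 * and the end report at MI_RPC_BO_END_OFFSET_BYTES.
 */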
283 #define MI_RPC_BO_SIZE 4096
284 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
285
286 /******************************************************************************/
287
288 static bool
289 brw_is_perf_query_ready(struct gl_context *ctx,
290 struct gl_perf_query_object *o);
291
292 static void
293 dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
294 {
295 struct gl_context *ctx = brw_void;
296 struct gl_perf_query_object *o = query_void;
297 struct brw_perf_query_object *obj = query_void;
298
299 switch (obj->query->kind) {
300 case OA_COUNTERS:
301 DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
302 id,
303 o->Used ? "Dirty," : "New,",
304 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
305 obj->oa.bo ? "yes," : "no,",
306 brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
307 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
308 break;
309 case PIPELINE_STATS:
310 DBG("%4d: %-6s %-8s BO: %-4s\n",
311 id,
312 o->Used ? "Dirty," : "New,",
313 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
314 obj->pipeline_stats.bo ? "yes" : "no");
315 break;
316 }
317 }
318
319 static void
320 dump_perf_queries(struct brw_context *brw)
321 {
322 struct gl_context *ctx = &brw->ctx;
323 DBG("Queries: (Open queries = %d, OA users = %d)\n",
324 brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
325 _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
326 }
327
328 /******************************************************************************/
329
330 static struct brw_oa_sample_buf *
331 get_free_sample_buf(struct brw_context *brw)
332 {
333 struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
334 struct brw_oa_sample_buf *buf;
335
336 if (node)
337 buf = exec_node_data(struct brw_oa_sample_buf, node, link);
338 else {
339 buf = ralloc_size(brw, sizeof(*buf));
340
341 exec_node_init(&buf->link);
342 buf->refcount = 0;
343 buf->len = 0;
344 }
345
346 return buf;
347 }
348
349 static void
350 reap_old_sample_buffers(struct brw_context *brw)
351 {
352 struct exec_node *tail_node =
353 exec_list_get_tail(&brw->perfquery.sample_buffers);
354 struct brw_oa_sample_buf *tail_buf =
355 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
356
357 /* Remove all old, unreferenced sample buffers walking forward from
358 * the head of the list, except always leave at least one node in
359 * the list so we always have a node to reference when we Begin
360 * a new query.
361 */
362 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
363 &brw->perfquery.sample_buffers)
364 {
365 if (buf->refcount == 0 && buf != tail_buf) {
366 exec_node_remove(&buf->link);
367 exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
368 } else
369 return;
370 }
371 }
372
373 static void
374 free_sample_bufs(struct brw_context *brw)
375 {
376 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
377 &brw->perfquery.free_sample_buffers)
378 ralloc_free(buf);
379
380 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
381 }
382
383 /******************************************************************************/
384
385 /**
386 * Driver hook for glGetPerfQueryInfoINTEL().
387 */
388 static void
389 brw_get_perf_query_info(struct gl_context *ctx,
390 unsigned query_index,
391 const char **name,
392 GLuint *data_size,
393 GLuint *n_counters,
394 GLuint *n_active)
395 {
396 struct brw_context *brw = brw_context(ctx);
397 const struct brw_perf_query_info *query =
398 &brw->perfquery.queries[query_index];
399
400 *name = query->name;
401 *data_size = query->data_size;
402 *n_counters = query->n_counters;
403
404 switch (query->kind) {
405 case OA_COUNTERS:
406 *n_active = brw->perfquery.n_active_oa_queries;
407 break;
408
409 case PIPELINE_STATS:
410 *n_active = brw->perfquery.n_active_pipeline_stats_queries;
411 break;
412 }
413 }
414
415 /**
416 * Driver hook for glGetPerfCounterInfoINTEL().
417 */
418 static void
419 brw_get_perf_counter_info(struct gl_context *ctx,
420 unsigned query_index,
421 unsigned counter_index,
422 const char **name,
423 const char **desc,
424 GLuint *offset,
425 GLuint *data_size,
426 GLuint *type_enum,
427 GLuint *data_type_enum,
428 GLuint64 *raw_max)
429 {
430 struct brw_context *brw = brw_context(ctx);
431 const struct brw_perf_query_info *query =
432 &brw->perfquery.queries[query_index];
433 const struct brw_perf_query_counter *counter =
434 &query->counters[counter_index];
435
436 *name = counter->name;
437 *desc = counter->desc;
438 *offset = counter->offset;
439 *data_size = counter->size;
440 *type_enum = counter->type;
441 *data_type_enum = counter->data_type;
442 *raw_max = counter->raw_max;
443 }
444
445 /******************************************************************************/
446
447 /**
448 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
449 * pipeline statistics for the performance query object.
450 */
451 static void
452 snapshot_statistics_registers(struct brw_context *brw,
453 struct brw_perf_query_object *obj,
454 uint32_t offset_in_bytes)
455 {
456 const struct brw_perf_query_info *query = obj->query;
457 const int n_counters = query->n_counters;
458
459 for (int i = 0; i < n_counters; i++) {
460 const struct brw_perf_query_counter *counter = &query->counters[i];
461
462 assert(counter->data_type == GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL);
463
464 brw_store_register_mem64(brw, obj->pipeline_stats.bo,
465 counter->pipeline_stat.reg,
466 offset_in_bytes + i * sizeof(uint64_t));
467 }
468 }
469
470 /**
471 * Emit an MI_REPORT_PERF_COUNT command packet.
472 *
473 * This asks the GPU to write a report of the current OA counter
474 * values into @bo at the given offset and containing the given
475 * @report_id which we can cross-reference when parsing the report.
476 */
477 static void
478 emit_mi_report_perf_count(struct brw_context *brw,
479 struct brw_bo *bo,
480 uint32_t offset_in_bytes,
481 uint32_t report_id)
482 {
483 assert(offset_in_bytes % 64 == 0);
484
485 BEGIN_BATCH(3);
486 OUT_BATCH(GEN6_MI_REPORT_PERF_COUNT);
487 OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
488 offset_in_bytes);
489 OUT_BATCH(report_id);
490 ADVANCE_BATCH();
491 }
492
493 /**
494 * Add a query to the global list of "unaccumulated queries."
495 *
496 * Queries are tracked here until all the associated OA reports have
497 * been accumulated via accumulate_oa_reports() after the end
498 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
499 */
500 static void
501 add_to_unaccumulated_query_list(struct brw_context *brw,
502 struct brw_perf_query_object *obj)
503 {
504 if (brw->perfquery.unaccumulated_elements >=
505 brw->perfquery.unaccumulated_array_size)
506 {
507 brw->perfquery.unaccumulated_array_size *= 1.5;
508 brw->perfquery.unaccumulated =
509 reralloc(brw, brw->perfquery.unaccumulated,
510 struct brw_perf_query_object *,
511 brw->perfquery.unaccumulated_array_size);
512 }
513
514 brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
515 }
516
517 /**
518 * Remove a query from the global list of unaccumulated queries, either
519 * after successfully accumulating the OA reports associated with the
520 * query in accumulate_oa_reports() or when discarding unwanted query
521 * results.
522 */
523 static void
524 drop_from_unaccumulated_query_list(struct brw_context *brw,
525 struct brw_perf_query_object *obj)
526 {
527 for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
528 if (brw->perfquery.unaccumulated[i] == obj) {
529 int last_elt = --brw->perfquery.unaccumulated_elements;
530
531 if (i == last_elt)
532 brw->perfquery.unaccumulated[i] = NULL;
533 else {
534 brw->perfquery.unaccumulated[i] =
535 brw->perfquery.unaccumulated[last_elt];
536 }
537
538 break;
539 }
540 }
541
542 /* Drop our samples_head reference so that associated periodic
543 * sample data buffers can potentially be reaped if they aren't
544 * referenced by any other queries...
545 */
546
547 struct brw_oa_sample_buf *buf =
548 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
549
550 assert(buf->refcount > 0);
551 buf->refcount--;
552
553 obj->oa.samples_head = NULL;
554
555 reap_old_sample_buffers(brw);
556 }
557
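/* Scale a raw 32-bit GPU timestamp delta to nanoseconds using the
 * device's timestamp frequency.
 */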
558 static uint64_t
559 timebase_scale(struct brw_context *brw, uint32_t u32_time_delta)
560 {
561 uint64_t tmp = ((uint64_t)u32_time_delta) * 1000000000ull;
562
563 return tmp ? tmp / brw->perfquery.sys_vars.timestamp_frequency : 0;
564 }
565
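/* The delta is computed in 32 bits so that a single wrap of the 32-bit HW
 * counter between the two reports still yields the correct value to add to
 * the 64-bit accumulator.
 */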
566 static void
567 accumulate_uint32(const uint32_t *report0,
568 const uint32_t *report1,
569 uint64_t *accumulator)
570 {
571 *accumulator += (uint32_t)(*report1 - *report0);
572 }
573
574 /**
575 * Given pointers to starting and ending OA snapshots, add the deltas for each
576 * counter to the results.
577 */
578 static void
579 add_deltas(struct brw_context *brw,
580 struct brw_perf_query_object *obj,
581 const uint32_t *start,
582 const uint32_t *end)
583 {
584 const struct brw_perf_query_info *query = obj->query;
585 uint64_t *accumulator = obj->oa.accumulator;
586 int i;
587
588 switch (query->oa_format) {
589 case I915_OA_FORMAT_A45_B8_C8:
590 accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */
591
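/* The 45 A, 8 B and 8 C counters follow the timestamp, starting at
 * dword 3 of the raw report (dword 0 holds the report ID).
 */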
592 for (i = 0; i < 61; i++)
593 accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);
594
595 break;
596 default:
597 unreachable("Can't accumulate OA counters in unknown format");
598 }
599 }
600
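/* The i915 perf stream is opened in a disabled state
 * (I915_PERF_FLAG_DISABLED); the first OA query user enables it and
 * later users just bump the reference count.
 */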
601 static bool
602 inc_n_oa_users(struct brw_context *brw)
603 {
604 if (brw->perfquery.n_oa_users == 0 &&
605 drmIoctl(brw->perfquery.oa_stream_fd,
606 I915_PERF_IOCTL_ENABLE, 0) < 0)
607 {
608 return false;
609 }
610 ++brw->perfquery.n_oa_users;
611
612 return true;
613 }
614
615 static void
616 dec_n_oa_users(struct brw_context *brw)
617 {
618 /* Disabling the i915 perf stream will effectively disable the OA
619 * counters. Note it's important to be sure there are no outstanding
620 * MI_RPC commands at this point since they could stall the CS
621 * indefinitely once OACONTROL is disabled.
622 */
623 --brw->perfquery.n_oa_users;
624 if (brw->perfquery.n_oa_users == 0 &&
625 drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
626 {
627 DBG("WARNING: Error disabling i915 perf stream: %m\n");
628 }
629 }
630
631 /* In general, if we see anything spurious while accumulating results
632 * we don't try to continue accumulating the current query hoping for
633 * the best; instead we scrap everything outstanding and then hope for
634 * the best with new queries.
635 */
636 static void
637 discard_all_queries(struct brw_context *brw)
638 {
639 while (brw->perfquery.unaccumulated_elements) {
640 struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];
641
642 obj->oa.results_accumulated = true;
643 drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);
644
645 dec_n_oa_users(brw);
646 }
647 }
648
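/* Drain all the periodic OA reports currently buffered by the kernel into
 * our sample_buffers list. The stream fd is non-blocking, so we return true
 * once read() reports EAGAIN (no more data); false indicates a read error
 * or a spurious EOF.
 */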
649 static bool
650 read_oa_samples(struct brw_context *brw)
651 {
652 while (1) {
653 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
654 int len;
655
656 while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
657 sizeof(buf->buf))) < 0 && errno == EINTR)
658 ;
659
660 if (len <= 0) {
661 exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);
662
663 if (len < 0) {
664 if (errno == EAGAIN)
665 return true;
666 else {
667 DBG("Error reading i915 perf samples: %m\n");
668 return false;
669 }
670 } else {
671 DBG("Spurious EOF reading i915 perf samples\n");
672 return false;
673 }
674 }
675
676 buf->len = len;
677 exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);
678 }
679
680 unreachable("not reached");
681 return false;
682 }
683
684 /**
685 * Accumulate raw OA counter values based on deltas between pairs
686 * of OA reports.
687 *
688 * Accumulation starts from the first report captured via
689 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
690 * last MI_RPC report requested by brw_end_perf_query(). Between these
691 * two reports there may also be some number of periodically sampled OA
692 * reports collected via the i915 perf interface - depending on the
693 * duration of the query.
694 *
695 * These periodic snapshots help to ensure we handle counter overflow
696 * correctly by being frequent enough that we don't miss multiple
697 * overflows of a counter between snapshots.
698 */
699 static void
700 accumulate_oa_reports(struct brw_context *brw,
701 struct brw_perf_query_object *obj)
702 {
703 struct gl_perf_query_object *o = &obj->base;
704 uint32_t *query_buffer;
705 uint32_t *start;
706 uint32_t *last;
707 uint32_t *end;
708 struct exec_node *first_samples_node;
709
710 assert(o->Ready);
711
712 /* Collect the latest periodic OA reports from i915 perf */
713 if (!read_oa_samples(brw))
714 goto error;
715
716 brw_bo_map(brw, obj->oa.bo, false);
717 query_buffer = obj->oa.bo->virtual;
718
719 start = last = query_buffer;
720 end = query_buffer + (MI_RPC_BO_END_OFFSET_BYTES / sizeof(uint32_t));
721
722 if (start[0] != obj->oa.begin_report_id) {
723 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
724 goto error;
725 }
726 if (end[0] != (obj->oa.begin_report_id + 1)) {
727 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
728 goto error;
729 }
730
731 /* See if we have any periodic reports to accumulate too... */
732
733 /* N.B. The oa.samples_head was set when the query began and
734 * pointed to the tail of the brw->perfquery.sample_buffers list at
735 * the time the query started. Since the buffer existed before the
736 * first MI_REPORT_PERF_COUNT command was emitted, we know
737 * that no data in this particular node's buffer can possibly be
738 * associated with the query - so skip ahead one...
739 */
740 first_samples_node = obj->oa.samples_head->next;
741
742 foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
743 &brw->perfquery.sample_buffers,
744 first_samples_node)
745 {
746 int offset = 0;
747
748 while (offset < buf->len) {
749 const struct drm_i915_perf_record_header *header =
750 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
751
752 assert(header->size != 0);
753 assert(header->size <= buf->len);
754
755 offset += header->size;
756
757 switch (header->type) {
758 case DRM_I915_PERF_RECORD_SAMPLE: {
759 uint32_t *report = (uint32_t *)(header + 1);
760
761 /* Ignore reports that come before the start marker.
762 * (Note: takes care to allow overflow of 32bit timestamps)
763 */
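/* (A scaled delta larger than 5 seconds is taken to be a wrapped,
 * i.e. negative, 32-bit delta: the report was written before the
 * start snapshot.)
 */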
764 if (timebase_scale(brw, report[1] - start[1]) > 5000000000)
765 continue;
766
767 /* Ignore reports that come after the end marker.
768 * (Note: takes care to allow overflow of 32bit timestamps)
769 */
770 if (timebase_scale(brw, report[1] - end[1]) <= 5000000000)
771 goto end;
772
773 add_deltas(brw, obj, last, report);
774
775 last = report;
776
777 break;
778 }
779
780 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
781 DBG("i915 perf: OA error: all reports lost\n");
782 goto error;
783 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
784 DBG("i915 perf: OA report lost\n");
785 break;
786 }
787 }
788 }
789
790 end:
791
792 add_deltas(brw, obj, last, end);
793
794 DBG("Marking %d accumulated - results gathered\n", o->Id);
795
796 brw_bo_unmap(obj->oa.bo);
797 obj->oa.results_accumulated = true;
798 drop_from_unaccumulated_query_list(brw, obj);
799 dec_n_oa_users(brw);
800
801 return;
802
803 error:
804
805 brw_bo_unmap(obj->oa.bo);
806 discard_all_queries(brw);
807 }
808
809 /******************************************************************************/
810
811 static bool
812 open_i915_perf_oa_stream(struct brw_context *brw,
813 int metrics_set_id,
814 int report_format,
815 int period_exponent,
816 int drm_fd,
817 uint32_t ctx_id)
818 {
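/* i915 perf properties are passed as a flattened array of (key, value)
 * uint64_t pairs, which is why num_properties below is
 * ARRAY_SIZE(properties) / 2.
 */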
819 uint64_t properties[] = {
820 /* Single context sampling */
821 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
822
823 /* Include OA reports in samples */
824 DRM_I915_PERF_PROP_SAMPLE_OA, true,
825
826 /* OA unit configuration */
827 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
828 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
829 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
830 };
831 struct drm_i915_perf_open_param param = {
832 .flags = I915_PERF_FLAG_FD_CLOEXEC |
833 I915_PERF_FLAG_FD_NONBLOCK |
834 I915_PERF_FLAG_DISABLED,
835 .num_properties = ARRAY_SIZE(properties) / 2,
836 .properties_ptr = (uintptr_t) properties,
837 };
838 int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
839 if (fd == -1) {
840 DBG("Error opening i915 perf OA stream: %m\n");
841 return false;
842 }
843
844 brw->perfquery.oa_stream_fd = fd;
845
846 brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
847 brw->perfquery.current_oa_format = report_format;
848
849 return true;
850 }
851
852 static void
853 close_perf(struct brw_context *brw)
854 {
855 if (brw->perfquery.oa_stream_fd != -1) {
856 close(brw->perfquery.oa_stream_fd);
857 brw->perfquery.oa_stream_fd = -1;
858 }
859 }
860
861 /**
862 * Driver hook for glBeginPerfQueryINTEL().
863 */
864 static bool
865 brw_begin_perf_query(struct gl_context *ctx,
866 struct gl_perf_query_object *o)
867 {
868 struct brw_context *brw = brw_context(ctx);
869 struct brw_perf_query_object *obj = brw_perf_query(o);
870 const struct brw_perf_query_info *query = obj->query;
871
872 /* We can assume the frontend hides mistaken attempts to Begin a
873 * query object multiple times before its End. Similarly if an
874 * application reuses a query object before results have arrived
875 * the frontend will wait for prior results so we don't need
876 * to support abandoning in-flight results.
877 */
878 assert(!o->Active);
879 assert(!o->Used || o->Ready); /* no in-flight query to worry about */
880
881 DBG("Begin(%d)\n", o->Id);
882
883 /* XXX: We have to consider that the command parser unit that parses batch
884 * buffer commands and is used to capture begin/end counter snapshots isn't
885 * implicitly synchronized with what's currently running across other GPU
886 * units (such as the EUs running shaders) that the performance counters are
887 * associated with.
888 *
889 * The intention of performance queries is to measure the work associated
890 * with commands between the begin/end delimiters and so for that to be the
891 * case we need to explicitly synchronize the parsing of commands to capture
892 * Begin/End counter snapshots with what's running across other parts of the
893 * GPU.
894 *
895 * When the command parser reaches a Begin marker it effectively needs to
896 * drain everything currently running on the GPU until the hardware is idle
897 * before capturing the first snapshot of counters - otherwise the results
898 * would also be measuring the effects of earlier commands.
899 *
900 * When the command parser reaches an End marker it needs to stall until
901 * everything currently running on the GPU has finished before capturing the
902 * end snapshot - otherwise the results won't be a complete representation
903 * of the work.
904 *
905 * Theoretically there could be opportunities to minimize how much of the
906 * GPU pipeline is drained, or that we stall for, when we know what specific
907 * units the performance counters being queried relate to but we don't
908 * currently attempt to be clever here.
909 *
910 * Note: with our current simple approach, for back-to-back queries
911 * we will redundantly emit duplicate commands to synchronize the command
912 * streamer with the rest of the GPU pipeline, but we assume that in HW the
913 * second synchronization is effectively a NOOP.
914 *
915 * N.B. The final results are based on deltas of counters between (inside)
916 * Begin/End markers so even though the total wall clock time of the
917 * workload is stretched by larger pipeline bubbles the bubbles themselves
918 * are generally invisible to the query results. Whether that's a good or a
919 * bad thing depends on the use case. For a lower real-time impact while
920 * capturing metrics, periodic sampling may be a better choice than
921 * INTEL_performance_query.
922 *
923 *
924 * This is our Begin synchronization point to drain current work on the
925 * GPU before we capture our first counter snapshot...
926 */
927 brw_emit_mi_flush(brw);
928
929 switch (query->kind) {
930 case OA_COUNTERS:
931
932 /* Opening an i915 perf stream implies exclusive access to the OA unit
933 * which will generate counter reports for a specific counter set with a
934 * specific layout/format so we can't begin any OA based queries that
935 * require a different counter set or format unless we get an opportunity
936 * to close the stream and open a new one...
937 */
938 if (brw->perfquery.oa_stream_fd != -1 &&
939 brw->perfquery.current_oa_metrics_set_id !=
940 query->oa_metrics_set_id) {
941
942 if (brw->perfquery.n_oa_users != 0)
943 return false;
944 else
945 close_perf(brw);
946 }
947
948 /* If the OA counters aren't already on, enable them. */
949 if (brw->perfquery.oa_stream_fd == -1) {
950 __DRIscreen *screen = brw->screen->driScrnPriv;
951 int period_exponent;
952
953 /* The timestamp for HSW+ increments every 80ns
954 *
955 * The period_exponent gives a sampling period as follows:
956 * sample_period = 80ns * 2^(period_exponent + 1)
957 *
958 * The overflow period for Haswell can be calculated as:
959 *
960 * 2^32 / (n_eus * max_gen_freq * 2)
961 * (E.g. 40 EUs @ 1GHz = ~53ms)
962 *
963 * We currently sample every 42 milliseconds...
964 */
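/* 80ns * 2^(18 + 1) = ~41.9ms between periodic OA reports */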
965 period_exponent = 18;
966
967 if (!open_i915_perf_oa_stream(brw,
968 query->oa_metrics_set_id,
969 query->oa_format,
970 period_exponent,
971 screen->fd, /* drm fd */
972 brw->hw_ctx))
973 return false;
974 } else {
975 assert(brw->perfquery.current_oa_metrics_set_id ==
976 query->oa_metrics_set_id &&
977 brw->perfquery.current_oa_format ==
978 query->oa_format);
979 }
980
981 if (!inc_n_oa_users(brw)) {
982 DBG("WARNING: Error enabling i915 perf stream: %m\n");
983 return false;
984 }
985
986 if (obj->oa.bo) {
987 brw_bo_unreference(obj->oa.bo);
988 obj->oa.bo = NULL;
989 }
990
991 obj->oa.bo =
992 brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
993 MI_RPC_BO_SIZE, 64);
994 #ifdef DEBUG
995 /* Pre-filling the BO helps debug whether writes landed. */
996 brw_bo_map(brw, obj->oa.bo, true);
997 memset((char *) obj->oa.bo->virtual, 0x80, MI_RPC_BO_SIZE);
998 brw_bo_unmap(obj->oa.bo);
999 #endif
1000
1001 obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
1002 brw->perfquery.next_query_start_report_id += 2;
1003
1004 /* Take a starting OA counter snapshot. */
1005 emit_mi_report_perf_count(brw, obj->oa.bo, 0,
1006 obj->oa.begin_report_id);
1007 ++brw->perfquery.n_active_oa_queries;
1008
1009 /* No already-buffered samples can possibly be associated with this query
1010 * so create a marker within the list of sample buffers enabling us to
1011 * easily ignore earlier samples when processing this query after
1012 * completion.
1013 */
1014 assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
1015 obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
1016
1017 struct brw_oa_sample_buf *buf =
1018 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
1019
1020 /* This reference will ensure that future/following sample
1021 * buffers (that may relate to this query) can't be freed until
1022 * this drops to zero.
1023 */
1024 buf->refcount++;
1025
1026 memset(obj->oa.accumulator, 0, sizeof(obj->oa.accumulator));
1027 obj->oa.results_accumulated = false;
1028
1029 add_to_unaccumulated_query_list(brw, obj);
1030 break;
1031
1032 case PIPELINE_STATS:
1033 if (obj->pipeline_stats.bo) {
1034 brw_bo_unreference(obj->pipeline_stats.bo);
1035 obj->pipeline_stats.bo = NULL;
1036 }
1037
1038 obj->pipeline_stats.bo =
1039 brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
1040 STATS_BO_SIZE, 64);
1041
1042 /* Take starting snapshots. */
1043 snapshot_statistics_registers(brw, obj, 0);
1044
1045 ++brw->perfquery.n_active_pipeline_stats_queries;
1046 break;
1047 }
1048
1049 if (INTEL_DEBUG & DEBUG_PERFMON)
1050 dump_perf_queries(brw);
1051
1052 return true;
1053 }
1054
1055 /**
1056 * Driver hook for glEndPerfQueryINTEL().
1057 */
1058 static void
1059 brw_end_perf_query(struct gl_context *ctx,
1060 struct gl_perf_query_object *o)
1061 {
1062 struct brw_context *brw = brw_context(ctx);
1063 struct brw_perf_query_object *obj = brw_perf_query(o);
1064
1065 DBG("End(%d)\n", o->Id);
1066
1067 /* Ensure that the work associated with the queried commands will have
1068 * finished before taking our query end counter readings.
1069 *
1070 * For more details see comment in brw_begin_perf_query for
1071 * corresponding flush.
1072 */
1073 brw_emit_mi_flush(brw);
1074
1075 switch (obj->query->kind) {
1076 case OA_COUNTERS:
1077
1078 /* NB: It's possible that the query will have already been marked
1079 * as 'accumulated' if an error was seen while reading samples
1080 * from perf. In this case we mustn't try to emit a closing
1081 * MI_RPC command in case the OA unit has already been disabled.
1082 */
1083 if (!obj->oa.results_accumulated) {
1084 /* Take an ending OA counter snapshot. */
1085 emit_mi_report_perf_count(brw, obj->oa.bo,
1086 MI_RPC_BO_END_OFFSET_BYTES,
1087 obj->oa.begin_report_id + 1);
1088 }
1089
1090 --brw->perfquery.n_active_oa_queries;
1091
1092 /* NB: even though the query has now ended, it can't be accumulated
1093 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1094 * to query->oa.bo
1095 */
1096 break;
1097
1098 case PIPELINE_STATS:
1099 snapshot_statistics_registers(brw, obj,
1100 STATS_BO_END_OFFSET_BYTES);
1101 --brw->perfquery.n_active_pipeline_stats_queries;
1102 break;
1103 }
1104 }
1105
1106 static void
1107 brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
1108 {
1109 struct brw_context *brw = brw_context(ctx);
1110 struct brw_perf_query_object *obj = brw_perf_query(o);
1111 struct brw_bo *bo = NULL;
1112
1113 assert(!o->Ready);
1114
1115 switch (obj->query->kind) {
1116 case OA_COUNTERS:
1117 bo = obj->oa.bo;
1118 break;
1119
1120 case PIPELINE_STATS:
1121 bo = obj->pipeline_stats.bo;
1122 break;
1123 }
1124
1125 if (bo == NULL)
1126 return;
1127
1128 /* If the current batch references our results bo then we need to
1129 * flush first...
1130 */
1131 if (brw_batch_references(&brw->batch, bo))
1132 intel_batchbuffer_flush(brw);
1133
1134 brw_bo_wait_rendering(brw, bo);
1135 }
1136
1137 static bool
1138 brw_is_perf_query_ready(struct gl_context *ctx,
1139 struct gl_perf_query_object *o)
1140 {
1141 struct brw_context *brw = brw_context(ctx);
1142 struct brw_perf_query_object *obj = brw_perf_query(o);
1143
1144 if (o->Ready)
1145 return true;
1146
1147 switch (obj->query->kind) {
1148 case OA_COUNTERS:
1149 return (obj->oa.results_accumulated ||
1150 (obj->oa.bo &&
1151 !brw_batch_references(&brw->batch, obj->oa.bo) &&
1152 !brw_bo_busy(obj->oa.bo)));
1153
1154 case PIPELINE_STATS:
1155 return (obj->pipeline_stats.bo &&
1156 !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
1157 !brw_bo_busy(obj->pipeline_stats.bo));
1158 }
1159
1160 unreachable("missing ready check for unknown query kind");
1161 return false;
1162 }
1163
1164 static int
1165 get_oa_counter_data(struct brw_context *brw,
1166 struct brw_perf_query_object *obj,
1167 size_t data_size,
1168 uint8_t *data)
1169 {
1170 const struct brw_perf_query_info *query = obj->query;
1171 int n_counters = query->n_counters;
1172 int written = 0;
1173
1174 if (!obj->oa.results_accumulated) {
1175 accumulate_oa_reports(brw, obj);
1176 assert(obj->oa.results_accumulated);
1177 }
1178
1179 for (int i = 0; i < n_counters; i++) {
1180 const struct brw_perf_query_counter *counter = &query->counters[i];
1181 uint64_t *out_uint64;
1182 float *out_float;
1183
1184 if (counter->size) {
1185 switch (counter->data_type) {
1186 case GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL:
1187 out_uint64 = (uint64_t *)(data + counter->offset);
1188 *out_uint64 = counter->oa_counter_read_uint64(brw, query,
1189 obj->oa.accumulator);
1190 break;
1191 case GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL:
1192 out_float = (float *)(data + counter->offset);
1193 *out_float = counter->oa_counter_read_float(brw, query,
1194 obj->oa.accumulator);
1195 break;
1196 default:
1197 /* So far we aren't using uint32, double or bool32... */
1198 unreachable("unexpected counter data type");
1199 }
1200 written = counter->offset + counter->size;
1201 }
1202 }
1203
1204 return written;
1205 }
1206
1207 static int
1208 get_pipeline_stats_data(struct brw_context *brw,
1209 struct brw_perf_query_object *obj,
1210 size_t data_size,
1211 uint8_t *data)
1212
1213 {
1214 const struct brw_perf_query_info *query = obj->query;
1215 int n_counters = obj->query->n_counters;
1216 uint8_t *p = data;
1217
1218 brw_bo_map(brw, obj->pipeline_stats.bo, false);
1219 uint64_t *start = obj->pipeline_stats.bo->virtual;
1220 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
1221
1222 for (int i = 0; i < n_counters; i++) {
1223 const struct brw_perf_query_counter *counter = &query->counters[i];
1224 uint64_t value = end[i] - start[i];
1225
1226 if (counter->pipeline_stat.numerator !=
1227 counter->pipeline_stat.denominator) {
1228 value *= counter->pipeline_stat.numerator;
1229 value /= counter->pipeline_stat.denominator;
1230 }
1231
1232 *((uint64_t *)p) = value;
1233 p += 8;
1234 }
1235
1236 brw_bo_unmap(obj->pipeline_stats.bo);
1237
1238 return p - data;
1239 }
1240
1241 /**
1242 * Driver hook for glGetPerfQueryDataINTEL().
1243 */
1244 static void
1245 brw_get_perf_query_data(struct gl_context *ctx,
1246 struct gl_perf_query_object *o,
1247 GLsizei data_size,
1248 GLuint *data,
1249 GLuint *bytes_written)
1250 {
1251 struct brw_context *brw = brw_context(ctx);
1252 struct brw_perf_query_object *obj = brw_perf_query(o);
1253 int written = 0;
1254
1255 assert(brw_is_perf_query_ready(ctx, o));
1256
1257 DBG("GetData(%d)\n", o->Id);
1258
1259 if (INTEL_DEBUG & DEBUG_PERFMON)
1260 dump_perf_queries(brw);
1261
1262 /* We expect that the frontend only calls this hook when it knows
1263 * that results are available.
1264 */
1265 assert(o->Ready);
1266
1267 switch (obj->query->kind) {
1268 case OA_COUNTERS:
1269 written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
1270 break;
1271
1272 case PIPELINE_STATS:
1273 written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
1274 break;
1275 }
1276
1277 if (bytes_written)
1278 *bytes_written = written;
1279 }
1280
1281 static struct gl_perf_query_object *
1282 brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
1283 {
1284 struct brw_context *brw = brw_context(ctx);
1285 const struct brw_perf_query_info *query =
1286 &brw->perfquery.queries[query_index];
1287 struct brw_perf_query_object *obj =
1288 calloc(1, sizeof(struct brw_perf_query_object));
1289
1290 if (!obj)
1291 return NULL;
1292
1293 obj->query = query;
1294
1295 brw->perfquery.n_query_instances++;
1296
1297 return &obj->base;
1298 }
1299
1300 /**
1301 * Driver hook for glDeletePerfQueryINTEL().
1302 */
1303 static void
1304 brw_delete_perf_query(struct gl_context *ctx,
1305 struct gl_perf_query_object *o)
1306 {
1307 struct brw_context *brw = brw_context(ctx);
1308 struct brw_perf_query_object *obj = brw_perf_query(o);
1309
1310 /* We can assume that the frontend waits for a query to complete
1311 * before ever calling into here, so we don't have to worry about
1312 * deleting an in-flight query object.
1313 */
1314 assert(!o->Active);
1315 assert(!o->Used || o->Ready);
1316
1317 DBG("Delete(%d)\n", o->Id);
1318
1319 switch (obj->query->kind) {
1320 case OA_COUNTERS:
1321 if (obj->oa.bo) {
1322 if (!obj->oa.results_accumulated) {
1323 drop_from_unaccumulated_query_list(brw, obj);
1324 dec_n_oa_users(brw);
1325 }
1326
1327 brw_bo_unreference(obj->oa.bo);
1328 obj->oa.bo = NULL;
1329 }
1330
1331 obj->oa.results_accumulated = false;
1332 break;
1333
1334 case PIPELINE_STATS:
1335 if (obj->pipeline_stats.bo) {
1336 brw_bo_unreference(obj->pipeline_stats.bo);
1337 obj->pipeline_stats.bo = NULL;
1338 }
1339 break;
1340 }
1341
1342 free(obj);
1343
1344 /* Deleting the last query object indicates the INTEL_performance_query
1345 * extension is likely no longer in use, so it's a good time to free our
1346 * cache of sample buffers and close any current i915-perf stream.
1347 */
1348 if (--brw->perfquery.n_query_instances == 0) {
1349 free_sample_bufs(brw);
1350 close_perf(brw);
1351 }
1352 }
1353
1354 /******************************************************************************/
1355
1356 static struct brw_perf_query_info *
1357 append_query_info(struct brw_context *brw)
1358 {
1359 brw->perfquery.queries =
1360 reralloc(brw, brw->perfquery.queries,
1361 struct brw_perf_query_info, ++brw->perfquery.n_queries);
1362
1363 return &brw->perfquery.queries[brw->perfquery.n_queries - 1];
1364 }
1365
1366 static void
1367 add_stat_reg(struct brw_perf_query_info *query,
1368 uint32_t reg,
1369 uint32_t numerator,
1370 uint32_t denominator,
1371 const char *name,
1372 const char *description)
1373 {
1374 struct brw_perf_query_counter *counter;
1375
1376 assert(query->n_counters < MAX_STAT_COUNTERS);
1377
1378 counter = &query->counters[query->n_counters];
1379 counter->name = name;
1380 counter->desc = description;
1381 counter->type = GL_PERFQUERY_COUNTER_RAW_INTEL;
1382 counter->data_type = GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
1383 counter->size = sizeof(uint64_t);
1384 counter->offset = sizeof(uint64_t) * query->n_counters;
1385 counter->pipeline_stat.reg = reg;
1386 counter->pipeline_stat.numerator = numerator;
1387 counter->pipeline_stat.denominator = denominator;
1388
1389 query->n_counters++;
1390 }
1391
1392 static void
1393 add_basic_stat_reg(struct brw_perf_query_info *query,
1394 uint32_t reg, const char *name)
1395 {
1396 add_stat_reg(query, reg, 1, 1, name, name);
1397 }
1398
1399 static void
1400 init_pipeline_statistic_query_registers(struct brw_context *brw)
1401 {
1402 struct brw_perf_query_info *query = append_query_info(brw);
1403
1404 query->kind = PIPELINE_STATS;
1405 query->name = "Pipeline Statistics Registers";
1406 query->n_counters = 0;
1407 query->counters =
1408 rzalloc_array(brw, struct brw_perf_query_counter, MAX_STAT_COUNTERS);
1409
1410 add_basic_stat_reg(query, IA_VERTICES_COUNT,
1411 "N vertices submitted");
1412 add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1413 "N primitives submitted");
1414 add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1415 "N vertex shader invocations");
1416
1417 if (brw->gen == 6) {
1418 add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
1419 "SO_PRIM_STORAGE_NEEDED",
1420 "N geometry shader stream-out primitives (total)");
1421 add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
1422 "SO_NUM_PRIMS_WRITTEN",
1423 "N geometry shader stream-out primitives (written)");
1424 } else {
1425 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
1426 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
1427 "N stream-out (stream 0) primitives (total)");
1428 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
1429 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
1430 "N stream-out (stream 1) primitives (total)");
1431 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
1432 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
1433 "N stream-out (stream 2) primitives (total)");
1434 add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
1435 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
1436 "N stream-out (stream 3) primitives (total)");
1437 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
1438 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
1439 "N stream-out (stream 0) primitives (written)");
1440 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
1441 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
1442 "N stream-out (stream 1) primitives (written)");
1443 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
1444 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
1445 "N stream-out (stream 2) primitives (written)");
1446 add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
1447 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
1448 "N stream-out (stream 3) primitives (written)");
1449 }
1450
1451 add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1452 "N TCS shader invocations");
1453 add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1454 "N TES shader invocations");
1455
1456 add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1457 "N geometry shader invocations");
1458 add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1459 "N geometry shader primitives emitted");
1460
1461 add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1462 "N primitives entering clipping");
1463 add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1464 "N primitives leaving clipping");
1465
1466 if (brw->is_haswell || brw->gen == 8)
1467 add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1468 "N fragment shader invocations",
1469 "N fragment shader invocations");
1470 else
1471 add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1472 "N fragment shader invocations");
1473
1474 add_basic_stat_reg(query, PS_DEPTH_COUNT, "N z-pass fragments");
1475
1476 if (brw->gen >= 7)
1477 add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1478 "N compute shader invocations");
1479
1480 query->data_size = sizeof(uint64_t) * query->n_counters;
1481 }
1482
1483 static bool
1484 read_file_uint64(const char *file, uint64_t *val)
1485 {
1486 char buf[32];
1487 int fd, n;
1488
1489 fd = open(file, 0);
1490 if (fd < 0)
1491 return false;
1492 n = read(fd, buf, sizeof (buf) - 1);
1493 close(fd);
1494 if (n < 0)
1495 return false;
1496
1497 buf[n] = '\0';
1498 *val = strtoull(buf, NULL, 0);
1499
1500 return true;
1501 }
1502
1503 static void
1504 enumerate_sysfs_metrics(struct brw_context *brw, const char *sysfs_dev_dir)
1505 {
1506 char buf[256];
1507 DIR *metricsdir = NULL;
1508 struct dirent *metric_entry;
1509 int len;
1510
1511 len = snprintf(buf, sizeof(buf), "%s/metrics", sysfs_dev_dir);
1512 if (len < 0 || len >= sizeof(buf)) {
1513 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
1514 return;
1515 }
1516
1517 metricsdir = opendir(buf);
1518 if (!metricsdir) {
1519 DBG("Failed to open %s: %m\n", buf);
1520 return;
1521 }
1522
1523 while ((metric_entry = readdir(metricsdir))) {
1524 struct hash_entry *entry;
1525
1526 if ((metric_entry->d_type != DT_DIR &&
1527 metric_entry->d_type != DT_LNK) ||
1528 metric_entry->d_name[0] == '.')
1529 continue;
1530
1531 DBG("metric set: %s\n", metric_entry->d_name);
1532 entry = _mesa_hash_table_search(brw->perfquery.oa_metrics_table,
1533 metric_entry->d_name);
1534 if (entry) {
1535 struct brw_perf_query_info *query;
1536 uint64_t id;
1537
1538 len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
1539 sysfs_dev_dir, metric_entry->d_name);
1540 if (len < 0 || len >= sizeof(buf)) {
1541 DBG("Failed to concatenate path to sysfs metric id file\n");
1542 continue;
1543 }
1544
1545 if (!read_file_uint64(buf, &id)) {
1546 DBG("Failed to read metric set id from %s: %m", buf);
1547 continue;
1548 }
1549
1550 query = append_query_info(brw);
1551 *query = *(struct brw_perf_query_info *)entry->data;
1552 query->oa_metrics_set_id = id;
1553
1554 DBG("metric set known by mesa: id = %" PRIu64"\n",
1555 query->oa_metrics_set_id);
1556 } else
1557 DBG("metric set not known by mesa (skipping)\n");
1558 }
1559
1560 closedir(metricsdir);
1561 }
1562
1563 static bool
1564 read_sysfs_drm_device_file_uint64(struct brw_context *brw,
1565 const char *sysfs_dev_dir,
1566 const char *file,
1567 uint64_t *value)
1568 {
1569 char buf[512];
1570 int len;
1571
1572 len = snprintf(buf, sizeof(buf), "%s/%s", sysfs_dev_dir, file);
1573 if (len < 0 || len >= sizeof(buf)) {
1574 DBG("Failed to concatenate sys filename to read u64 from\n");
1575 return false;
1576 }
1577
1578 return read_file_uint64(buf, value);
1579 }
1580
1581 static bool
1582 init_oa_sys_vars(struct brw_context *brw, const char *sysfs_dev_dir)
1583 {
1584 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
1585
1586 if (!read_sysfs_drm_device_file_uint64(brw, sysfs_dev_dir,
1587 "gt_min_freq_mhz",
1588 &min_freq_mhz))
1589 return false;
1590
1591 if (!read_sysfs_drm_device_file_uint64(brw, sysfs_dev_dir,
1592 "gt_max_freq_mhz",
1593 &max_freq_mhz))
1594 return false;
1595
1596 brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
1597 brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;
1598
1599 if (brw->is_haswell) {
1600 const struct gen_device_info *info = &brw->screen->devinfo;
1601
1602 brw->perfquery.sys_vars.timestamp_frequency = 12500000;
1603
1604 if (info->gt == 1) {
1605 brw->perfquery.sys_vars.n_eus = 10;
1606 brw->perfquery.sys_vars.n_eu_slices = 1;
1607 brw->perfquery.sys_vars.subslice_mask = 0x1;
1608 } else if (info->gt == 2) {
1609 brw->perfquery.sys_vars.n_eus = 20;
1610 brw->perfquery.sys_vars.n_eu_slices = 1;
1611 brw->perfquery.sys_vars.subslice_mask = 0x3;
1612 } else if (info->gt == 3) {
1613 brw->perfquery.sys_vars.n_eus = 40;
1614 brw->perfquery.sys_vars.n_eu_slices = 2;
1615 brw->perfquery.sys_vars.subslice_mask = 0xf;
1616 } else
1617 unreachable("not reached");
1618
1619 return true;
1620 } else
1621 return false;
1622 }
1623
1624 static bool
1625 get_sysfs_dev_dir(struct brw_context *brw,
1626 char *path_buf,
1627 int path_buf_len)
1628 {
1629 __DRIscreen *screen = brw->screen->driScrnPriv;
1630 struct stat sb;
1631 int min, maj;
1632 DIR *drmdir;
1633 struct dirent *drm_entry;
1634 int len;
1635
1636 assert(path_buf);
1637 assert(path_buf_len);
1638 path_buf[0] = '\0';
1639
1640 if (fstat(screen->fd, &sb)) {
1641 DBG("Failed to stat DRM fd\n");
1642 return false;
1643 }
1644
1645 maj = major(sb.st_rdev);
1646 min = minor(sb.st_rdev);
1647
1648 if (!S_ISCHR(sb.st_mode)) {
1649 DBG("DRM fd is not a character device as expected\n");
1650 return false;
1651 }
1652
1653 len = snprintf(path_buf, path_buf_len,
1654 "/sys/dev/char/%d:%d/device/drm", maj, min);
1655 if (len < 0 || len >= path_buf_len) {
1656 DBG("Failed to concatenate sysfs path to drm device\n");
1657 return false;
1658 }
1659
1660 drmdir = opendir(path_buf);
1661 if (!drmdir) {
1662 DBG("Failed to open %s: %m\n", path_buf);
1663 return false;
1664 }
1665
1666 while ((drm_entry = readdir(drmdir))) {
1667 if ((drm_entry->d_type == DT_DIR ||
1668 drm_entry->d_type == DT_LNK) &&
1669 strncmp(drm_entry->d_name, "card", 4) == 0)
1670 {
1671 len = snprintf(path_buf, path_buf_len,
1672 "/sys/dev/char/%d:%d/device/drm/%s",
1673 maj, min, drm_entry->d_name);
1674 closedir(drmdir);
1675 if (len < 0 || len >= path_buf_len)
1676 return false;
1677 else
1678 return true;
1679 }
1680 }
1681
1682 closedir(drmdir);
1683
1684 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
1685 maj, min);
1686
1687 return false;
1688 }
1689
1690 static unsigned
1691 brw_init_perf_query_info(struct gl_context *ctx)
1692 {
1693 struct brw_context *brw = brw_context(ctx);
1694 struct stat sb;
1695 char sysfs_dev_dir[128];
1696
1697 if (brw->perfquery.n_queries)
1698 return brw->perfquery.n_queries;
1699
1700 init_pipeline_statistic_query_registers(brw);
1701
1702 /* The existence of this sysctl parameter implies the kernel supports
1703 * the i915 perf interface.
1704 */
1705 if (brw->is_haswell &&
1706 stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0 &&
1707 get_sysfs_dev_dir(brw, sysfs_dev_dir, sizeof(sysfs_dev_dir)) &&
1708 init_oa_sys_vars(brw, sysfs_dev_dir))
1709 {
1710 brw->perfquery.oa_metrics_table =
1711 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
1712 _mesa_key_string_equal);
1713
1714 /* Index all the metric sets mesa knows about before looking to
1715 * see what the kernel is advertising.
1716 */
1717 brw_oa_register_queries_hsw(brw);
1718
1719 enumerate_sysfs_metrics(brw, sysfs_dev_dir);
1720 }
1721
1722 brw->perfquery.unaccumulated =
1723 ralloc_array(brw, struct brw_perf_query_object *, 2);
1724 brw->perfquery.unaccumulated_elements = 0;
1725 brw->perfquery.unaccumulated_array_size = 2;
1726
1727 exec_list_make_empty(&brw->perfquery.sample_buffers);
1728 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
1729
1730 /* It's convenient to guarantee that this linked list of sample
1731 * buffers is never empty, so we add an empty head node; that way when we
1732 * Begin an OA query we can always take a reference on a buffer
1733 * in this list.
1734 */
1735 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
1736 exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);
1737
1738 brw->perfquery.oa_stream_fd = -1;
1739
1740 brw->perfquery.next_query_start_report_id = 1000;
1741
1742 return brw->perfquery.n_queries;
1743 }
1744
1745 void
1746 brw_init_performance_queries(struct brw_context *brw)
1747 {
1748 struct gl_context *ctx = &brw->ctx;
1749
1750 ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
1751 ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
1752 ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
1753 ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
1754 ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
1755 ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
1756 ctx->Driver.EndPerfQuery = brw_end_perf_query;
1757 ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
1758 ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
1759 ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
1760 }