2 * Copyright © 2013 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * \file brw_performance_query.c
27 * Implementation of the GL_INTEL_performance_query extension.
29 * Currently there are two possible counter sources exposed here:
31 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
32 * that we can snapshot at the beginning and end of a query.
34 * On Gen7.5+ we have Observability Architecture counters which are
35 * covered in separate document from the rest of the PRMs. It is available at:
36 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
37 * => 2013 Intel Core Processor Family => Observability Performance Counters
38 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
39 * though notably we currently only support OA counters for Haswell+)
45 /* put before sys/types.h to silence glibc warnings */
47 #include <sys/mkdev.h>
49 #ifdef MAJOR_IN_SYSMACROS
50 #include <sys/sysmacros.h>
52 #include <sys/types.h>
56 #include <sys/ioctl.h>
61 #include "main/hash.h"
62 #include "main/macros.h"
63 #include "main/mtypes.h"
64 #include "main/performance_query.h"
66 #include "util/bitset.h"
67 #include "util/ralloc.h"
68 #include "util/hash_table.h"
69 #include "util/list.h"
71 #include "brw_context.h"
72 #include "brw_defines.h"
73 #include "brw_performance_query.h"
74 #include "brw_oa_hsw.h"
75 #include "intel_batchbuffer.h"
77 #define FILE_DEBUG_FLAG DEBUG_PERFMON
80 * The largest OA format we can use on Haswell includes:
81 * 1 timestamp, 45 A counters, 8 B counters and 8 C counters.
83 #define MAX_OA_REPORT_COUNTERS 62
85 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
86 256) /* OA counter report */
89 * Periodic OA samples are read() into these buffer structures via the
90 * i915 perf kernel interface and appended to the
91 * brw->perfquery.sample_buffers linked list. When we process the
92 * results of an OA metrics query we need to consider all the periodic
93 * samples between the Begin and End MI_REPORT_PERF_COUNT command
96 * 'Periodic' is a simplification as there are other automatic reports
97 * written by the hardware also buffered here.
99 * Considering three queries, A, B and C:
102 * ________________A_________________
104 * | ________B_________ _____C___________
107 * And an illustration of sample buffers read over this time frame:
108 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
110 * These nodes may hold samples for query A:
111 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
113 * These nodes may hold samples for query B:
114 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
116 * These nodes may hold samples for query C:
117 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
119 * The illustration assumes we have an even distribution of periodic
120 * samples so all nodes have the same size plotted against time:
122 * Note, to simplify code, the list is never empty.
124 * With overlapping queries we can see that periodic OA reports may
125 * relate to multiple queries and care needs to be taken to keep
126 * track of sample buffers until there are no queries that might
127 * depend on their contents.
129 * We use a node ref counting system where a reference ensures that a
130 * node and all following nodes can't be freed/recycled until the
131 * reference drops to zero.
133 * E.g. with a ref of one here:
134 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
136 * These nodes could be freed or recycled ("reaped"):
139 * These must be preserved until the leading ref drops to zero:
140 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
142 * When a query starts we take a reference on the current tail of
143 * the list, knowing that no already-buffered samples can possibly
144 * relate to the newly-started query. A pointer to this node is
145 * also saved in the query object's ->oa.samples_head.
147 * E.g. starting query A while there are two nodes in .sample_buffers:
148 * ________________A________
152 * ^_______ Add a reference and store pointer to node in
155 * Moving forward to when the B query starts with no new buffer nodes:
156 * (for reference, i915 perf reads() are only done when queries finish)
157 * ________________A_______
162 * ^_______ Add a reference and store pointer to
163 * node in B->oa.samples_head
165 * Once a query is finished, after an OA query has become 'Ready',
166 * once the End OA report has landed and after we have processed
167 * all the intermediate periodic samples then we drop the
168 * ->oa.samples_head reference we took at the start.
170 * So when the B query has finished we have:
171 * ________________A________
172 * | ______B___________
174 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
175 * ^_______ Drop B->oa.samples_head reference
177 * We still can't free these due to the A->oa.samples_head ref:
178 * [ 1 ][ 0 ][ 0 ][ 0 ]
180 * When the A query finishes: (note there's a new ref for C's samples_head)
181 * ________________A_________________
185 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
186 * ^_______ Drop A->oa.samples_head reference
188 * And we can now reap these nodes up to the C->oa.samples_head:
189 * [ X ][ X ][ X ][ X ]
190 * keeping -> [ 1 ][ 0 ][ 0 ]
192 * We reap old sample buffers each time we finish processing an OA
193 * query by iterating the sample_buffers list from the head until we
194 * find a referenced node and stop.
196 * Reaped buffers move to a perfquery.free_sample_buffers list and
197 * when we come to read() we first look to recycle a buffer from the
198 * free_sample_buffers list before allocating a new buffer.
/* A recyclable buffer of OA sample records read() from the i915 perf
 * stream; lives on brw->perfquery.sample_buffers (see the ref-counting
 * scheme described in the long comment above).
 *
 * NOTE(review): later code dereferences buf->refcount and buf->len, so
 * this extraction appears to be missing those member declarations
 * (gap in the embedded numbering, 202..203) — confirm against the
 * unmangled file.
 */
200 struct brw_oa_sample_buf
{
/* Links the buffer into .sample_buffers or .free_sample_buffers */
201 struct exec_node link
;
/* Raw bytes read() from the perf fd; sized to hold ten records */
204 uint8_t buf
[I915_PERF_OA_SAMPLE_SIZE
* 10];
208 * i965 representation of a performance query object.
210 * NB: We want to keep this structure relatively lean considering that
211 * applications may expect to allocate enough objects to be able to
212 * query around all draw calls in a frame.
/* NOTE(review): the embedded numbering has gaps here; the `kind`
 * discriminated union wrapper, the oa.bo / begin_report_id and
 * pipeline_stats.bo members referenced elsewhere in this file appear
 * to have been dropped by the extraction — tokens below kept verbatim.
 */
214 struct brw_perf_query_object
216 struct gl_perf_query_object base
;
/* Immutable description (kind, counters, format) of the query begun */
218 const struct brw_perf_query_info
*query
;
220 /* See query->kind to know which state below is in use... */
225 * BO containing OA counter snapshots at query Begin/End time.
230 * The MI_REPORT_PERF_COUNT command lets us specify a unique
231 * ID that will be reflected in the resulting OA report
232 * that's written by the GPU. This is the ID we're expecting
233 * in the begin report and the end report should be
234 * @begin_report_id + 1.
239 * Reference the head of the brw->perfquery.sample_buffers
240 * list at the time that the query started (so we only need
241 * to look at nodes after this point when looking for samples
242 * related to this query)
244 * (See struct brw_oa_sample_buf description for more details)
246 struct exec_node
*samples_head
;
249 * Storage for the final accumulated OA counters.
251 uint64_t accumulator
[MAX_OA_REPORT_COUNTERS
];
254 * false while in the unaccumulated_elements list, and set to
255 * true when the final, end MI_RPC snapshot has been
258 bool results_accumulated
;
264 * BO containing starting and ending snapshots for the
265 * statistics counters.
/**
 * Downcasting convenience helper: every gl_perf_query_object handed to
 * us by the frontend is embedded as the first member (base) of a
 * struct brw_perf_query_object, so the cast is always valid.
 *
 * (Restores the brace tokens dropped by the garbled extraction; all
 * other tokens were visible in the original.)
 */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
   return (struct brw_perf_query_object *) o;
}
/* Pipeline-statistics query BO layout: begin snapshots in the first
 * half, end snapshots in the second half, 8 bytes (one u64 MMIO
 * snapshot) per counter.
 */
279 #define STATS_BO_SIZE 4096
280 #define STATS_BO_END_OFFSET_BYTES (STATS_BO_SIZE / 2)
281 #define MAX_STAT_COUNTERS (STATS_BO_END_OFFSET_BYTES / 8)
/* OA query BO layout: begin MI_RPC report at offset 0, end report at
 * the half-way offset.
 */
283 #define MI_RPC_BO_SIZE 4096
284 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
286 /******************************************************************************/
/* Forward declaration (needed by dump_perf_query_callback below).
 * NOTE(review): the return-type line ("static bool", orig. 288) was
 * dropped by the extraction.
 */
289 brw_is_perf_query_ready(struct gl_context
*ctx
,
290 struct gl_perf_query_object
*o
);
/* _mesa_HashWalk() callback: prints one query object's state under
 * DEBUG_PERFMON.  NOTE(review): gaps in the embedded numbering show
 * the extraction dropped the "static void" line, the switch case
 * labels and the break/closing-brace lines; tokens kept verbatim.
 */
293 dump_perf_query_callback(GLuint id
, void *query_void
, void *brw_void
)
295 struct gl_context
*ctx
= brw_void
;
296 struct gl_perf_query_object
*o
= query_void
;
297 struct brw_perf_query_object
*obj
= query_void
;
299 switch (obj
->query
->kind
) {
/* OA-counter query: also report whether the MI_RPC data has landed */
301 DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
303 o
->Used
? "Dirty," : "New,",
304 o
->Active
? "Active," : (o
->Ready
? "Ready," : "Pending,"),
305 obj
->oa
.bo
? "yes," : "no,",
306 brw_is_perf_query_ready(ctx
, o
) ? "ready," : "not ready,",
307 obj
->oa
.results_accumulated
? "accumulated" : "not accumulated");
/* Pipeline-statistics query: only the BO presence matters here */
310 DBG("%4d: %-6s %-8s BO: %-4s\n",
312 o
->Used
? "Dirty," : "New,",
313 o
->Active
? "Active," : (o
->Ready
? "Ready," : "Pending,"),
314 obj
->pipeline_stats
.bo
? "yes" : "no");
320 dump_perf_queries(struct brw_context
*brw
)
322 struct gl_context
*ctx
= &brw
->ctx
;
323 DBG("Queries: (Open queries = %d, OA users = %d)\n",
324 brw
->perfquery
.n_active_oa_queries
, brw
->perfquery
.n_oa_users
);
325 _mesa_HashWalk(ctx
->PerfQuery
.Objects
, dump_perf_query_callback
, brw
);
328 /******************************************************************************/
/* Return a sample buffer to read() perf records into: recycle one from
 * the free_sample_buffers list if possible, otherwise ralloc a new one.
 * NOTE(review): the extraction dropped the "if (node) ... else { ... }"
 * structure and the refcount/len re-initialisation and return lines
 * (gaps at orig. 335..336, 338, 342..347); tokens kept verbatim.
 */
330 static struct brw_oa_sample_buf
*
331 get_free_sample_buf(struct brw_context
*brw
)
333 struct exec_node
*node
= exec_list_pop_head(&brw
->perfquery
.free_sample_buffers
);
334 struct brw_oa_sample_buf
*buf
;
/* Recycled path: recover the containing buffer from its list node */
337 buf
= exec_node_data(struct brw_oa_sample_buf
, node
, link
);
/* Fresh path: allocate against the brw ralloc context */
339 buf
= ralloc_size(brw
, sizeof(*buf
));
341 exec_node_init(&buf
->link
);
/* Move unreferenced sample buffers at the head of .sample_buffers to
 * the free list.  NOTE(review): gaps in the embedded numbering show
 * the extraction dropped the "static void" line, braces, and the
 * "else return;" (stop at first referenced node) path — tokens below
 * kept verbatim.
 */
350 reap_old_sample_buffers(struct brw_context
*brw
)
352 struct exec_node
*tail_node
=
353 exec_list_get_tail(&brw
->perfquery
.sample_buffers
);
354 struct brw_oa_sample_buf
*tail_buf
=
355 exec_node_data(struct brw_oa_sample_buf
, tail_node
, link
);
357 /* Remove all old, unreferenced sample buffers walking forward from
358 * the head of the list, except always leave at least one node in
359 * the list so we always have a node to reference when we Begin
362 foreach_list_typed_safe(struct brw_oa_sample_buf
, buf
, link
,
363 &brw
->perfquery
.sample_buffers
)
/* Only unreferenced nodes that aren't the guaranteed-kept tail */
365 if (buf
->refcount
== 0 && buf
!= tail_buf
) {
366 exec_node_remove(&buf
->link
);
367 exec_list_push_head(&brw
->perfquery
.free_sample_buffers
, &buf
->link
);
/* Free every buffer on the free_sample_buffers list and empty it
 * (teardown path).  NOTE(review): the loop body that frees each
 * buffer (orig. 378..379) was dropped by the extraction; tokens kept
 * verbatim.
 */
374 free_sample_bufs(struct brw_context
*brw
)
376 foreach_list_typed_safe(struct brw_oa_sample_buf
, buf
, link
,
377 &brw
->perfquery
.free_sample_buffers
)
380 exec_list_make_empty(&brw
->perfquery
.free_sample_buffers
);
383 /******************************************************************************/
386 * Driver hook for glGetPerfQueryInfoINTEL().
/* Reports the query's metadata (data size, counter count) and the
 * number of currently-active queries of its kind.  NOTE(review): the
 * extraction dropped several parameter declarations (orig. 391..394)
 * and the switch case labels/breaks; tokens kept verbatim.
 */
389 brw_get_perf_query_info(struct gl_context
*ctx
,
390 unsigned query_index
,
396 struct brw_context
*brw
= brw_context(ctx
);
397 const struct brw_perf_query_info
*query
=
398 &brw
->perfquery
.queries
[query_index
];
401 *data_size
= query
->data_size
;
402 *n_counters
= query
->n_counters
;
404 switch (query
->kind
) {
/* OA-counter query kind */
406 *n_active
= brw
->perfquery
.n_active_oa_queries
;
/* Pipeline-statistics query kind */
410 *n_active
= brw
->perfquery
.n_active_pipeline_stats_queries
;
416 * Driver hook for glGetPerfCounterInfoINTEL().
/* Copies one counter's static description (name, desc, layout offset,
 * size, type enums, max value) out to the frontend.  NOTE(review):
 * several out-parameter declarations (orig. 422..426, 428..429) were
 * dropped by the extraction; tokens kept verbatim.
 */
419 brw_get_perf_counter_info(struct gl_context
*ctx
,
420 unsigned query_index
,
421 unsigned counter_index
,
427 GLuint
*data_type_enum
,
430 struct brw_context
*brw
= brw_context(ctx
);
431 const struct brw_perf_query_info
*query
=
432 &brw
->perfquery
.queries
[query_index
];
433 const struct brw_perf_query_counter
*counter
=
434 &query
->counters
[counter_index
];
436 *name
= counter
->name
;
437 *desc
= counter
->desc
;
438 *offset
= counter
->offset
;
439 *data_size
= counter
->size
;
440 *type_enum
= counter
->type
;
441 *data_type_enum
= counter
->data_type
;
442 *raw_max
= counter
->raw_max
;
445 /******************************************************************************/
448 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
449 * pipeline statistics for the performance query object.
452 snapshot_statistics_registers(struct brw_context
*brw
,
453 struct brw_perf_query_object
*obj
,
454 uint32_t offset_in_bytes
)
456 const struct brw_perf_query_info
*query
= obj
->query
;
457 const int n_counters
= query
->n_counters
;
459 for (int i
= 0; i
< n_counters
; i
++) {
460 const struct brw_perf_query_counter
*counter
= &query
->counters
[i
];
462 assert(counter
->data_type
== GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL
);
464 brw_store_register_mem64(brw
, obj
->pipeline_stats
.bo
,
465 counter
->pipeline_stat
.reg
,
466 offset_in_bytes
+ i
* sizeof(uint64_t));
471 * Emit an MI_REPORT_PERF_COUNT command packet.
473 * This asks the GPU to write a report of the current OA counter
474 * values into @bo at the given offset and containing the given
475 * @report_id which we can cross-reference when parsing the report.
/* NOTE(review): the extraction dropped the bo/report_id parameter
 * lines and the BEGIN_BATCH/ADVANCE_BATCH bracketing and OUT_RELOC
 * offset argument (gaps at orig. 479, 481..482, 485, 488, 490);
 * tokens kept verbatim.
 */
478 emit_mi_report_perf_count(struct brw_context
*brw
,
480 uint32_t offset_in_bytes
,
/* MI_RPC destinations must be 64-byte aligned */
483 assert(offset_in_bytes
% 64 == 0);
486 OUT_BATCH(GEN6_MI_REPORT_PERF_COUNT
);
487 OUT_RELOC(bo
, I915_GEM_DOMAIN_INSTRUCTION
, I915_GEM_DOMAIN_INSTRUCTION
,
489 OUT_BATCH(report_id
);
494 * Add a query to the global list of "unaccumulated queries."
496 * Queries are tracked here until all the associated OA reports have
497 * been accumulated via accumulate_oa_reports() after the end
498 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
501 add_to_unaccumulated_query_list(struct brw_context
*brw
,
502 struct brw_perf_query_object
*obj
)
504 if (brw
->perfquery
.unaccumulated_elements
>=
505 brw
->perfquery
.unaccumulated_array_size
)
507 brw
->perfquery
.unaccumulated_array_size
*= 1.5;
508 brw
->perfquery
.unaccumulated
=
509 reralloc(brw
, brw
->perfquery
.unaccumulated
,
510 struct brw_perf_query_object
*,
511 brw
->perfquery
.unaccumulated_array_size
);
514 brw
->perfquery
.unaccumulated
[brw
->perfquery
.unaccumulated_elements
++] = obj
;
518 * Remove a query from the global list of unaccumulated queries once
519 * after successfully accumulating the OA reports associated with the
520 * query in accumulate_oa_reports() or when discarding unwanted query
/* Swap-remove the object from the unaccumulated array, then release
 * its samples_head reference so old sample buffers can be reaped.
 * NOTE(review): the extraction dropped the "if (i == last_elt)/else"
 * structure, the break, and the "buf->refcount--" decrement implied
 * by the assert below (gaps at orig. 530..531, 533, 536..541, 551);
 * tokens kept verbatim.
 */
524 drop_from_unaccumulated_query_list(struct brw_context
*brw
,
525 struct brw_perf_query_object
*obj
)
527 for (int i
= 0; i
< brw
->perfquery
.unaccumulated_elements
; i
++) {
528 if (brw
->perfquery
.unaccumulated
[i
] == obj
) {
529 int last_elt
= --brw
->perfquery
.unaccumulated_elements
;
532 brw
->perfquery
.unaccumulated
[i
] = NULL
;
534 brw
->perfquery
.unaccumulated
[i
] =
535 brw
->perfquery
.unaccumulated
[last_elt
];
542 /* Drop our samples_head reference so that associated periodic
543 * sample data buffers can potentially be reaped if they aren't
544 * referenced by any other queries...
547 struct brw_oa_sample_buf
*buf
=
548 exec_node_data(struct brw_oa_sample_buf
, obj
->oa
.samples_head
, link
);
550 assert(buf
->refcount
> 0);
553 obj
->oa
.samples_head
= NULL
;
555 reap_old_sample_buffers(brw
);
559 timebase_scale(struct brw_context
*brw
, uint32_t u32_time_delta
)
561 uint64_t tmp
= ((uint64_t)u32_time_delta
) * 1000000000ull;
563 return tmp
? tmp
/ brw
->perfquery
.sys_vars
.timestamp_frequency
: 0;
/**
 * Add the delta between two 32-bit counter snapshots into a 64-bit
 * accumulator.
 *
 * The (uint32_t) cast of the subtraction makes a single wrap of the
 * 32-bit counter between snapshots produce the correct small delta
 * (modular arithmetic), which is why periodic samples must be frequent
 * enough to avoid *multiple* wraps.
 *
 * (Restores the return-type line and braces dropped by the garbled
 * extraction; all statement tokens were visible in the original.)
 */
static void
accumulate_uint32(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += (uint32_t)(*report1 - *report0);
}
575 * Given pointers to starting and ending OA snapshots, add the deltas for each
576 * counter to the results.
/* NOTE(review): the extraction dropped the "const uint32_t *end"
 * parameter line, the "int i" declaration, braces and case breaks
 * (gaps at orig. 582..583, 586..587, 591, 594..596); tokens kept
 * verbatim.
 */
579 add_deltas(struct brw_context
*brw
,
580 struct brw_perf_query_object
*obj
,
581 const uint32_t *start
,
584 const struct brw_perf_query_info
*query
= obj
->query
;
585 uint64_t *accumulator
= obj
->oa
.accumulator
;
588 switch (query
->oa_format
) {
589 case I915_OA_FORMAT_A45_B8_C8
:
590 accumulate_uint32(start
+ 1, end
+ 1, accumulator
); /* timestamp */
/* 61 = 45 A + 8 B + 8 C counters, starting 3 dwords into the report */
592 for (i
= 0; i
< 61; i
++)
593 accumulate_uint32(start
+ 3 + i
, end
+ 3 + i
, accumulator
+ 1 + i
);
597 unreachable("Can't accumulate OA counters in unknown format");
/* Enable the i915 perf stream (and thus the OA unit) when the first
 * user appears; bump the user count on success.  NOTE(review): the
 * "static bool" line, the error DBG/return-false path and the final
 * "return true" (gaps at orig. 601, 607..609, 611..612) were dropped
 * by the extraction; tokens kept verbatim.
 */
602 inc_n_oa_users(struct brw_context
*brw
)
/* Only the 0 -> 1 transition issues the ENABLE ioctl */
604 if (brw
->perfquery
.n_oa_users
== 0 &&
605 drmIoctl(brw
->perfquery
.oa_stream_fd
,
606 I915_PERF_IOCTL_ENABLE
, 0) < 0)
610 ++brw
->perfquery
.n_oa_users
;
616 dec_n_oa_users(struct brw_context
*brw
)
618 /* Disabling the i915 perf stream will effectively disable the OA
619 * counters. Note it's important to be sure there are no outstanding
620 * MI_RPC commands at this point since they could stall the CS
621 * indefinitely once OACONTROL is disabled.
623 --brw
->perfquery
.n_oa_users
;
624 if (brw
->perfquery
.n_oa_users
== 0 &&
625 drmIoctl(brw
->perfquery
.oa_stream_fd
, I915_PERF_IOCTL_DISABLE
, 0) < 0)
627 DBG("WARNING: Error disabling i915 perf stream: %m\n");
631 /* In general if we see anything spurious while accumulating results,
632 * we don't try and continue accumulating the current query, hoping
633 * for the best, we scrap anything outstanding, and then hope for the
634 * best with new queries.
637 discard_all_queries(struct brw_context
*brw
)
639 while (brw
->perfquery
.unaccumulated_elements
) {
640 struct brw_perf_query_object
*obj
= brw
->perfquery
.unaccumulated
[0];
642 obj
->oa
.results_accumulated
= true;
643 drop_from_unaccumulated_query_list(brw
, brw
->perfquery
.unaccumulated
[0]);
/* Drain pending periodic OA records from the non-blocking i915 perf fd
 * into fresh sample buffers appended to .sample_buffers, retrying on
 * EINTR.  NOTE(review): the extraction dropped the "static bool" line,
 * the outer while(1), the "int len" declaration, and the branch
 * structure deciding between EAGAIN-done / error / EOF / got-data
 * (gaps at orig. 651..655, 658..660, 662..666, 668..676, 678..679);
 * tokens kept verbatim.
 */
650 read_oa_samples(struct brw_context
*brw
)
653 struct brw_oa_sample_buf
*buf
= get_free_sample_buf(brw
);
656 while ((len
= read(brw
->perfquery
.oa_stream_fd
, buf
->buf
,
657 sizeof(buf
->buf
))) < 0 && errno
== EINTR
)
/* Nothing read: recycle the unused buffer onto the free list */
661 exec_list_push_tail(&brw
->perfquery
.free_sample_buffers
, &buf
->link
);
667 DBG("Error reading i915 perf samples: %m\n");
671 DBG("Spurious EOF reading i915 perf samples\n");
/* Got data: keep the buffer on the main sample list */
677 exec_list_push_tail(&brw
->perfquery
.sample_buffers
, &buf
->link
);
680 unreachable("not reached");
685 * Accumulate raw OA counter values based on deltas between pairs
688 * Accumulation starts from the first report captured via
689 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
690 * last MI_RPC report requested by brw_end_perf_query(). Between these
691 * two reports there may also some number of periodically sampled OA
692 * reports collected via the i915 perf interface - depending on the
693 * duration of the query.
695 * These periodic snapshots help to ensure we handle counter overflow
696 * correctly by being frequent enough to ensure we don't miss multiple
697 * overflows of a counter between snapshots.
/* NOTE(review): the extraction dropped the "static void" line, the
 * start/end/last pointer declarations, the goto-error unwinding,
 * continue/break statements and several braces (many gaps in the
 * embedded numbering); tokens below are kept verbatim.
 */
700 accumulate_oa_reports(struct brw_context
*brw
,
701 struct brw_perf_query_object
*obj
)
703 struct gl_perf_query_object
*o
= &obj
->base
;
704 uint32_t *query_buffer
;
708 struct exec_node
*first_samples_node
;
712 /* Collect the latest periodic OA reports from i915 perf */
713 if (!read_oa_samples(brw
))
716 brw_bo_map(brw
, obj
->oa
.bo
, false);
717 query_buffer
= obj
->oa
.bo
->virtual;
719 start
= last
= query_buffer
;
720 end
= query_buffer
+ (MI_RPC_BO_END_OFFSET_BYTES
/ sizeof(uint32_t));
/* Validate the report IDs written back by the GPU for Begin/End */
722 if (start
[0] != obj
->oa
.begin_report_id
) {
723 DBG("Spurious start report id=%"PRIu32
"\n", start
[0]);
726 if (end
[0] != (obj
->oa
.begin_report_id
+ 1)) {
727 DBG("Spurious end report id=%"PRIu32
"\n", end
[0]);
731 /* See if we have any periodic reports to accumulate too... */
733 /* N.B. The oa.samples_head was set when the query began and
734 * pointed to the tail of the brw->perfquery.sample_buffers list at
735 * the time the query started. Since the buffer existed before the
736 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
737 * that no data in this particular node's buffer can possibly be
738 * associated with the query - so skip ahead one...
740 first_samples_node
= obj
->oa
.samples_head
->next
;
742 foreach_list_typed_from(struct brw_oa_sample_buf
, buf
, link
,
743 &brw
->perfquery
.sample_buffers
,
/* Walk the variable-size records packed into this buffer */
748 while (offset
< buf
->len
) {
749 const struct drm_i915_perf_record_header
*header
=
750 (const struct drm_i915_perf_record_header
*)(buf
->buf
+ offset
);
752 assert(header
->size
!= 0);
753 assert(header
->size
<= buf
->len
);
755 offset
+= header
->size
;
757 switch (header
->type
) {
758 case DRM_I915_PERF_RECORD_SAMPLE
: {
759 uint32_t *report
= (uint32_t *)(header
+ 1);
761 /* Ignore reports that come before the start marker.
762 * (Note: takes care to allow overflow of 32bit timestamps)
764 if (timebase_scale(brw
, report
[1] - start
[1]) > 5000000000)
767 /* Ignore reports that come after the end marker.
768 * (Note: takes care to allow overflow of 32bit timestamps)
770 if (timebase_scale(brw
, report
[1] - end
[1]) <= 5000000000)
773 add_deltas(brw
, obj
, last
, report
);
780 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST
:
781 DBG("i915 perf: OA error: all reports lost\n");
783 case DRM_I915_PERF_RECORD_OA_REPORT_LOST
:
784 DBG("i915 perf: OA report lost\n");
/* Final delta: from the last periodic report up to the End MI_RPC */
792 add_deltas(brw
, obj
, last
, end
);
794 DBG("Marking %d accumulated - results gathered\n", o
->Id
);
796 brw_bo_unmap(obj
->oa
.bo
);
797 obj
->oa
.results_accumulated
= true;
798 drop_from_unaccumulated_query_list(brw
, obj
);
/* Error path (spurious report IDs): unmap and scrap everything */
805 brw_bo_unmap(obj
->oa
.bo
);
806 discard_all_queries(brw
);
809 /******************************************************************************/
/* Open an i915 perf stream configured for single-context OA sampling
 * with the requested metrics set, report format and sampling period,
 * recording the fd and current configuration on brw->perfquery.
 * NOTE(review): the extraction dropped the remaining parameter lines
 * (metrics_set_id, report_format, period_exponent, drm_fd, ctx_id),
 * the "if (fd == -1)" error branch and the return statements (gaps at
 * orig. 813..818, 839, 841..843, 848..850); tokens kept verbatim.
 */
812 open_i915_perf_oa_stream(struct brw_context
*brw
,
/* Property list: flat (key, value) pairs consumed by the kernel */
819 uint64_t properties
[] = {
820 /* Single context sampling */
821 DRM_I915_PERF_PROP_CTX_HANDLE
, ctx_id
,
823 /* Include OA reports in samples */
824 DRM_I915_PERF_PROP_SAMPLE_OA
, true,
826 /* OA unit configuration */
827 DRM_I915_PERF_PROP_OA_METRICS_SET
, metrics_set_id
,
828 DRM_I915_PERF_PROP_OA_FORMAT
, report_format
,
829 DRM_I915_PERF_PROP_OA_EXPONENT
, period_exponent
,
831 struct drm_i915_perf_open_param param
= {
832 .flags
= I915_PERF_FLAG_FD_CLOEXEC
|
833 I915_PERF_FLAG_FD_NONBLOCK
|
834 I915_PERF_FLAG_DISABLED
,
/* Two array entries per (key, value) property pair */
835 .num_properties
= ARRAY_SIZE(properties
) / 2,
836 .properties_ptr
= (uintptr_t) properties
,
/* NOTE(review): "¶m" below is an encoding artifact of "&param" */
838 int fd
= drmIoctl(drm_fd
, DRM_IOCTL_I915_PERF_OPEN
, ¶m
);
840 DBG("Error opening i915 perf OA stream: %m\n");
844 brw
->perfquery
.oa_stream_fd
= fd
;
846 brw
->perfquery
.current_oa_metrics_set_id
= metrics_set_id
;
847 brw
->perfquery
.current_oa_format
= report_format
;
853 close_perf(struct brw_context
*brw
)
855 if (brw
->perfquery
.oa_stream_fd
!= -1) {
856 close(brw
->perfquery
.oa_stream_fd
);
857 brw
->perfquery
.oa_stream_fd
= -1;
862 * Driver hook for glBeginPerfQueryINTEL().
/* Flushes the GPU, then either (OA kind) opens/enables the i915 perf
 * stream, allocates the MI_RPC BO, emits the Begin snapshot and
 * registers the query as unaccumulated, or (stats kind) allocates the
 * stats BO and snapshots the statistics registers.  NOTE(review):
 * this extraction has many gaps (return statements, case labels,
 * close_perf()/BO-size arguments, buf->refcount++, break/closing
 * braces); tokens below are kept verbatim.
 */
865 brw_begin_perf_query(struct gl_context
*ctx
,
866 struct gl_perf_query_object
*o
)
868 struct brw_context
*brw
= brw_context(ctx
);
869 struct brw_perf_query_object
*obj
= brw_perf_query(o
);
870 const struct brw_perf_query_info
*query
= obj
->query
;
872 /* We can assume the frontend hides mistaken attempts to Begin a
873 * query object multiple times before its End. Similarly if an
874 * application reuses a query object before results have arrived
875 * the frontend will wait for prior results so we don't need
876 * to support abandoning in-flight results.
879 assert(!o
->Used
|| o
->Ready
); /* no in-flight query to worry about */
881 DBG("Begin(%d)\n", o
->Id
);
883 /* XXX: We have to consider that the command parser unit that parses batch
884 * buffer commands and is used to capture begin/end counter snapshots isn't
885 * implicitly synchronized with what's currently running across other GPU
886 * units (such as the EUs running shaders) that the performance counters are
889 * The intention of performance queries is to measure the work associated
890 * with commands between the begin/end delimiters and so for that to be the
891 * case we need to explicitly synchronize the parsing of commands to capture
892 * Begin/End counter snapshots with what's running across other parts of the
895 * When the command parser reaches a Begin marker it effectively needs to
896 * drain everything currently running on the GPU until the hardware is idle
897 * before capturing the first snapshot of counters - otherwise the results
898 * would also be measuring the effects of earlier commands.
900 * When the command parser reaches an End marker it needs to stall until
901 * everything currently running on the GPU has finished before capturing the
902 * end snapshot - otherwise the results won't be a complete representation
905 * Theoretically there could be opportunities to minimize how much of the
906 * GPU pipeline is drained, or that we stall for, when we know what specific
907 * units the performance counters being queried relate to but we don't
908 * currently attempt to be clever here.
910 * Note: with our current simple approach here then for back-to-back queries
911 * we will redundantly emit duplicate commands to synchronize the command
912 * streamer with the rest of the GPU pipeline, but we assume that in HW the
913 * second synchronization is effectively a NOOP.
915 * N.B. The final results are based on deltas of counters between (inside)
916 * Begin/End markers so even though the total wall clock time of the
917 * workload is stretched by larger pipeline bubbles the bubbles themselves
918 * are generally invisible to the query results. Whether that's a good or a
919 * bad thing depends on the use case. For a lower real-time impact while
920 * capturing metrics then periodic sampling may be a better choice than
921 * INTEL_performance_query.
924 * This is our Begin synchronization point to drain current work on the
925 * GPU before we capture our first counter snapshot...
927 brw_emit_mi_flush(brw
);
929 switch (query
->kind
) {
932 /* Opening an i915 perf stream implies exclusive access to the OA unit
933 * which will generate counter reports for a specific counter set with a
934 * specific layout/format so we can't begin any OA based queries that
935 * require a different counter set or format unless we get an opportunity
936 * to close the stream and open a new one...
938 if (brw
->perfquery
.oa_stream_fd
!= -1 &&
939 brw
->perfquery
.current_oa_metrics_set_id
!=
940 query
->oa_metrics_set_id
) {
/* Can't reconfigure the stream while other queries still use it */
942 if (brw
->perfquery
.n_oa_users
!= 0)
948 /* If the OA counters aren't already on, enable them. */
949 if (brw
->perfquery
.oa_stream_fd
== -1) {
950 __DRIscreen
*screen
= brw
->screen
->driScrnPriv
;
953 /* The timestamp for HSW+ increments every 80ns
955 * The period_exponent gives a sampling period as follows:
956 * sample_period = 80ns * 2^(period_exponent + 1)
958 * The overflow period for Haswell can be calculated as:
960 * 2^32 / (n_eus * max_gen_freq * 2)
961 * (E.g. 40 EUs @ 1GHz = ~53ms)
963 * We currently sample every 42 milliseconds...
965 period_exponent
= 18;
967 if (!open_i915_perf_oa_stream(brw
,
968 query
->oa_metrics_set_id
,
971 screen
->fd
, /* drm fd */
/* An already-open stream must match this query's configuration */
975 assert(brw
->perfquery
.current_oa_metrics_set_id
==
976 query
->oa_metrics_set_id
&&
977 brw
->perfquery
.current_oa_format
==
981 if (!inc_n_oa_users(brw
)) {
982 DBG("WARNING: Error enabling i915 perf stream: %m\n");
/* Release any BO left over from a previous use of this object */
987 brw_bo_unreference(obj
->oa
.bo
);
992 brw_bo_alloc(brw
->bufmgr
, "perf. query OA MI_RPC bo",
995 /* Pre-filling the BO helps debug whether writes landed. */
996 brw_bo_map(brw
, obj
->oa
.bo
, true);
997 memset((char *) obj
->oa
.bo
->virtual, 0x80, MI_RPC_BO_SIZE
);
998 brw_bo_unmap(obj
->oa
.bo
);
/* Begin report gets this ID, the End report gets ID + 1 */
1001 obj
->oa
.begin_report_id
= brw
->perfquery
.next_query_start_report_id
;
1002 brw
->perfquery
.next_query_start_report_id
+= 2;
1004 /* Take a starting OA counter snapshot. */
1005 emit_mi_report_perf_count(brw
, obj
->oa
.bo
, 0,
1006 obj
->oa
.begin_report_id
);
1007 ++brw
->perfquery
.n_active_oa_queries
;
1009 /* No already-buffered samples can possibly be associated with this query
1010 * so create a marker within the list of sample buffers enabling us to
1011 * easily ignore earlier samples when processing this query after
1014 assert(!exec_list_is_empty(&brw
->perfquery
.sample_buffers
));
1015 obj
->oa
.samples_head
= exec_list_get_tail(&brw
->perfquery
.sample_buffers
);
1017 struct brw_oa_sample_buf
*buf
=
1018 exec_node_data(struct brw_oa_sample_buf
, obj
->oa
.samples_head
, link
);
1020 /* This reference will ensure that future/following sample
1021 * buffers (that may relate to this query) can't be freed until
1022 * this drops to zero.
1026 memset(obj
->oa
.accumulator
, 0, sizeof(obj
->oa
.accumulator
));
1027 obj
->oa
.results_accumulated
= false;
1029 add_to_unaccumulated_query_list(brw
, obj
);
1032 case PIPELINE_STATS
:
1033 if (obj
->pipeline_stats
.bo
) {
1034 brw_bo_unreference(obj
->pipeline_stats
.bo
);
1035 obj
->pipeline_stats
.bo
= NULL
;
1038 obj
->pipeline_stats
.bo
=
1039 brw_bo_alloc(brw
->bufmgr
, "perf. query pipeline stats bo",
1042 /* Take starting snapshots. */
1043 snapshot_statistics_registers(brw
, obj
, 0);
1045 ++brw
->perfquery
.n_active_pipeline_stats_queries
;
1049 if (INTEL_DEBUG
& DEBUG_PERFMON
)
1050 dump_perf_queries(brw
);
1056 * Driver hook for glEndPerfQueryINTEL().
/* Flushes the GPU then takes the End snapshot for the query's kind and
 * decrements the active-query count.  NOTE(review): the extraction
 * dropped the OA_COUNTERS case label, break statements and closing
 * braces (gaps in the embedded numbering); tokens kept verbatim.
 */
1059 brw_end_perf_query(struct gl_context
*ctx
,
1060 struct gl_perf_query_object
*o
)
1062 struct brw_context
*brw
= brw_context(ctx
);
1063 struct brw_perf_query_object
*obj
= brw_perf_query(o
);
1065 DBG("End(%d)\n", o
->Id
);
1067 /* Ensure that the work associated with the queried commands will have
1068 * finished before taking our query end counter readings.
1070 * For more details see comment in brw_begin_perf_query for
1071 * corresponding flush.
1073 brw_emit_mi_flush(brw
);
1075 switch (obj
->query
->kind
) {
1078 /* NB: It's possible that the query will have already been marked
1079 * as 'accumulated' if an error was seen while reading samples
1080 * from perf. In this case we mustn't try and emit a closing
1081 * MI_RPC command in case the OA unit has already been disabled
1083 if (!obj
->oa
.results_accumulated
) {
1084 /* Take an ending OA counter snapshot. */
1085 emit_mi_report_perf_count(brw
, obj
->oa
.bo
,
1086 MI_RPC_BO_END_OFFSET_BYTES
,
1087 obj
->oa
.begin_report_id
+ 1);
1090 --brw
->perfquery
.n_active_oa_queries
;
1092 /* NB: even though the query has now ended, it can't be accumulated
1093 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1098 case PIPELINE_STATS
:
1099 snapshot_statistics_registers(brw
, obj
,
1100 STATS_BO_END_OFFSET_BYTES
);
1101 --brw
->perfquery
.n_active_pipeline_stats_queries
;
/* Driver hook: block until this query's results BO is no longer busy,
 * flushing the current batch first if it still references the BO.
 * NOTE(review): the extraction dropped the "static void" line, the
 * OA_COUNTERS case (bo = obj->oa.bo), the bo != NULL assert and case
 * breaks (gaps in the embedded numbering); tokens kept verbatim.
 */
1107 brw_wait_perf_query(struct gl_context
*ctx
, struct gl_perf_query_object
*o
)
1109 struct brw_context
*brw
= brw_context(ctx
);
1110 struct brw_perf_query_object
*obj
= brw_perf_query(o
);
1111 struct brw_bo
*bo
= NULL
;
1115 switch (obj
->query
->kind
) {
1120 case PIPELINE_STATS
:
1121 bo
= obj
->pipeline_stats
.bo
;
1128 /* If the current batch references our results bo then we need to
/* Unflushed commands would otherwise never signal the BO idle */
1131 if (brw_batch_references(&brw
->batch
, bo
))
1132 intel_batchbuffer_flush(brw
);
1134 brw_bo_wait_rendering(brw
, bo
);
/* Driver hook: report whether the query's results can be read without
 * blocking (accumulated already, or BO unreferenced by the batch and
 * idle).  NOTE(review): the extraction dropped the "static bool" line,
 * an early-out on o->Ready, and the OA case label (gaps in the
 * embedded numbering); tokens kept verbatim.
 */
1138 brw_is_perf_query_ready(struct gl_context
*ctx
,
1139 struct gl_perf_query_object
*o
)
1141 struct brw_context
*brw
= brw_context(ctx
);
1142 struct brw_perf_query_object
*obj
= brw_perf_query(o
);
1147 switch (obj
->query
->kind
) {
1149 return (obj
->oa
.results_accumulated
||
1151 !brw_batch_references(&brw
->batch
, obj
->oa
.bo
) &&
1152 !brw_bo_busy(obj
->oa
.bo
)));
1154 case PIPELINE_STATS
:
1155 return (obj
->pipeline_stats
.bo
&&
1156 !brw_batch_references(&brw
->batch
, obj
->pipeline_stats
.bo
) &&
1157 !brw_bo_busy(obj
->pipeline_stats
.bo
));
1160 unreachable("missing ready check for unknown query kind");
/* Evaluate every OA counter from the accumulated deltas and write the
 * typed results into the caller's data buffer at each counter's
 * layout offset (accumulating first if not yet done).  NOTE(review):
 * the extraction dropped the return-type line, the data/data_size
 * parameters, the "written" and out_float declarations, and the case
 * breaks/closing braces (gaps in the embedded numbering); tokens kept
 * verbatim.
 */
1165 get_oa_counter_data(struct brw_context
*brw
,
1166 struct brw_perf_query_object
*obj
,
1170 const struct brw_perf_query_info
*query
= obj
->query
;
1171 int n_counters
= query
->n_counters
;
1174 if (!obj
->oa
.results_accumulated
) {
1175 accumulate_oa_reports(brw
, obj
);
1176 assert(obj
->oa
.results_accumulated
);
1179 for (int i
= 0; i
< n_counters
; i
++) {
1180 const struct brw_perf_query_counter
*counter
= &query
->counters
[i
];
1181 uint64_t *out_uint64
;
/* size == 0 means the counter isn't part of the output layout */
1184 if (counter
->size
) {
1185 switch (counter
->data_type
) {
1186 case GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL
:
1187 out_uint64
= (uint64_t *)(data
+ counter
->offset
);
1188 *out_uint64
= counter
->oa_counter_read_uint64(brw
, query
,
1189 obj
->oa
.accumulator
);
1191 case GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL
:
1192 out_float
= (float *)(data
+ counter
->offset
);
1193 *out_float
= counter
->oa_counter_read_float(brw
, query
,
1194 obj
->oa
.accumulator
);
1197 /* So far we aren't using uint32, double or bool32... */
1198 unreachable("unexpected counter data type");
/* Track the high-water mark of bytes written for the caller */
1200 written
= counter
->offset
+ counter
->size
;
/* Copy this pipeline-statistics query's results into the user buffer.
 *
 * The results BO holds two arrays of 64-bit register snapshots (begin at
 * offset 0, end at STATS_BO_END_OFFSET_BYTES); each counter's value is the
 * end - begin delta, optionally rescaled by the counter's fixed
 * numerator/denominator ratio.
 */
1208 get_pipeline_stats_data(struct brw_context
*brw
,
1209 struct brw_perf_query_object
*obj
,
1214 const struct brw_perf_query_info
*query
= obj
->query
;
1215 int n_counters
= obj
->query
->n_counters
;
/* Map the BO for CPU reads; 'start'/'end' point at the two snapshots. */
1218 brw_bo_map(brw
, obj
->pipeline_stats
.bo
, false);
1219 uint64_t *start
= obj
->pipeline_stats
.bo
->virtual;
1220 uint64_t *end
= start
+ (STATS_BO_END_OFFSET_BYTES
/ sizeof(uint64_t));
1222 for (int i
= 0; i
< n_counters
; i
++) {
1223 const struct brw_perf_query_counter
*counter
= &query
->counters
[i
];
1224 uint64_t value
= end
[i
] - start
[i
];
/* Apply the counter's fixed scaling; skipped when it would be a no-op
 * (numerator == denominator).  Multiply first to preserve precision.
 */
1226 if (counter
->pipeline_stat
.numerator
!=
1227 counter
->pipeline_stat
.denominator
) {
1228 value
*= counter
->pipeline_stat
.numerator
;
1229 value
/= counter
->pipeline_stat
.denominator
;
1232 *((uint64_t *)p
) = value
;
1236 brw_bo_unmap(obj
->pipeline_stats
.bo
);
/* Dispatch result readback to the kind-specific helper and report how many
 * bytes were written into the caller's buffer.
 */
1242 * Driver hook for glGetPerfQueryDataINTEL().
1245 brw_get_perf_query_data(struct gl_context
*ctx
,
1246 struct gl_perf_query_object
*o
,
1249 GLuint
*bytes_written
)
1251 struct brw_context
*brw
= brw_context(ctx
);
1252 struct brw_perf_query_object
*obj
= brw_perf_query(o
);
/* The frontend must only call this once the query is ready (see below). */
1255 assert(brw_is_perf_query_ready(ctx
, o
));
1257 DBG("GetData(%d)\n", o
->Id
);
1259 if (INTEL_DEBUG
& DEBUG_PERFMON
)
1260 dump_perf_queries(brw
);
1262 /* We expect that the frontend only calls this hook when it knows
1263 * that results are available.
1267 switch (obj
->query
->kind
) {
1269 written
= get_oa_counter_data(brw
, obj
, data_size
, (uint8_t *)data
);
1272 case PIPELINE_STATS
:
1273 written
= get_pipeline_stats_data(brw
, obj
, data_size
, (uint8_t *)data
);
1278 *bytes_written
= written
;
/* Driver hook for glCreatePerfQueryINTEL(): allocate a zeroed query object
 * bound to the query info at @query_index.
 *
 * n_query_instances counts live objects so the backend can tear down
 * shared perf state (sample buffers / i915-perf stream) when the last one
 * is deleted — see brw_delete_perf_query().
 */
1281 static struct gl_perf_query_object
*
1282 brw_new_perf_query_object(struct gl_context
*ctx
, unsigned query_index
)
1284 struct brw_context
*brw
= brw_context(ctx
);
1285 const struct brw_perf_query_info
*query
=
1286 &brw
->perfquery
.queries
[query_index
];
/* calloc so all per-kind state (BOs, flags, accumulators) starts zeroed. */
1287 struct brw_perf_query_object
*obj
=
1288 calloc(1, sizeof(struct brw_perf_query_object
));
1295 brw
->perfquery
.n_query_instances
++;
/* Release a query object's per-kind resources and, when it is the last
 * live instance, the backend's shared perf-query state.
 */
1301 * Driver hook for glDeletePerfQueryINTEL().
1304 brw_delete_perf_query(struct gl_context
*ctx
,
1305 struct gl_perf_query_object
*o
)
1307 struct brw_context
*brw
= brw_context(ctx
);
1308 struct brw_perf_query_object
*obj
= brw_perf_query(o
);
1310 /* We can assume that the frontend waits for a query to complete
1311 * before ever calling into here, so we don't have to worry about
1312 * deleting an in-flight query object.
1315 assert(!o
->Used
|| o
->Ready
);
1317 DBG("Delete(%d)\n", o
->Id
);
1319 switch (obj
->query
->kind
) {
/* An OA query deleted before its results were accumulated must be pulled
 * off the unaccumulated list and drop its reference on the OA stream.
 */
1322 if (!obj
->oa
.results_accumulated
) {
1323 drop_from_unaccumulated_query_list(brw
, obj
);
1324 dec_n_oa_users(brw
);
1327 brw_bo_unreference(obj
->oa
.bo
);
1331 obj
->oa
.results_accumulated
= false;
1334 case PIPELINE_STATS
:
1335 if (obj
->pipeline_stats
.bo
) {
1336 brw_bo_unreference(obj
->pipeline_stats
.bo
);
1337 obj
->pipeline_stats
.bo
= NULL
;
1344 /* As an indication that the INTEL_performance_query extension is no
1345 * longer in use, it's a good time to free our cache of sample
1346 * buffers and close any current i915-perf stream.
1348 if (--brw
->perfquery
.n_query_instances
== 0) {
1349 free_sample_bufs(brw
);
1354 /******************************************************************************/
1356 static struct brw_perf_query_info
*
1357 append_query_info(struct brw_context
*brw
)
1359 brw
->perfquery
.queries
=
1360 reralloc(brw
, brw
->perfquery
.queries
,
1361 struct brw_perf_query_info
, ++brw
->perfquery
.n_queries
);
1363 return &brw
->perfquery
.queries
[brw
->perfquery
.n_queries
- 1];
1367 add_stat_reg(struct brw_perf_query_info
*query
,
1370 uint32_t denominator
,
1372 const char *description
)
1374 struct brw_perf_query_counter
*counter
;
1376 assert(query
->n_counters
< MAX_STAT_COUNTERS
);
1378 counter
= &query
->counters
[query
->n_counters
];
1379 counter
->name
= name
;
1380 counter
->desc
= description
;
1381 counter
->type
= GL_PERFQUERY_COUNTER_RAW_INTEL
;
1382 counter
->data_type
= GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL
;
1383 counter
->size
= sizeof(uint64_t);
1384 counter
->offset
= sizeof(uint64_t) * query
->n_counters
;
1385 counter
->pipeline_stat
.reg
= reg
;
1386 counter
->pipeline_stat
.numerator
= numerator
;
1387 counter
->pipeline_stat
.denominator
= denominator
;
1389 query
->n_counters
++;
/* Convenience wrapper around add_stat_reg() for unscaled counters
 * (numerator == denominator == 1) whose description is just the name.
 */
static void
add_basic_stat_reg(struct brw_perf_query_info *query,
                   uint32_t reg, const char *name)
{
   add_stat_reg(query, reg, 1, 1, name, name);
}
/* Build the PIPELINE_STATS query description: one uint64 counter per
 * pipeline statistics register relevant to this generation.
 */
1400 init_pipeline_statistic_query_registers(struct brw_context
*brw
)
1402 struct brw_perf_query_info
*query
= append_query_info(brw
);
1404 query
->kind
= PIPELINE_STATS
;
1405 query
->name
= "Pipeline Statistics Registers";
1406 query
->n_counters
= 0;
1408 rzalloc_array(brw
, struct brw_perf_query_counter
, MAX_STAT_COUNTERS
);
1410 add_basic_stat_reg(query
, IA_VERTICES_COUNT
,
1411 "N vertices submitted");
1412 add_basic_stat_reg(query
, IA_PRIMITIVES_COUNT
,
1413 "N primitives submitted");
1414 add_basic_stat_reg(query
, VS_INVOCATION_COUNT
,
1415 "N vertex shader invocations");
/* Gen6 has single combined SO registers; Gen7+ exposes them per stream. */
1417 if (brw
->gen
== 6) {
1418 add_stat_reg(query
, GEN6_SO_PRIM_STORAGE_NEEDED
, 1, 1,
1419 "SO_PRIM_STORAGE_NEEDED",
1420 "N geometry shader stream-out primitives (total)");
1421 add_stat_reg(query
, GEN6_SO_NUM_PRIMS_WRITTEN
, 1, 1,
1422 "SO_NUM_PRIMS_WRITTEN",
1423 "N geometry shader stream-out primitives (written)");
1425 add_stat_reg(query
, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
1426 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
1427 "N stream-out (stream 0) primitives (total)");
1428 add_stat_reg(query
, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
1429 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
1430 "N stream-out (stream 1) primitives (total)");
1431 add_stat_reg(query
, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
1432 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
1433 "N stream-out (stream 2) primitives (total)");
1434 add_stat_reg(query
, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
1435 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
1436 "N stream-out (stream 3) primitives (total)");
1437 add_stat_reg(query
, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
1438 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
1439 "N stream-out (stream 0) primitives (written)");
1440 add_stat_reg(query
, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
1441 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
1442 "N stream-out (stream 1) primitives (written)");
1443 add_stat_reg(query
, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
1444 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
1445 "N stream-out (stream 2) primitives (written)");
1446 add_stat_reg(query
, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
1447 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
1448 "N stream-out (stream 3) primitives (written)");
1451 add_basic_stat_reg(query
, HS_INVOCATION_COUNT
,
1452 "N TCS shader invocations");
1453 add_basic_stat_reg(query
, DS_INVOCATION_COUNT
,
1454 "N TES shader invocations");
1456 add_basic_stat_reg(query
, GS_INVOCATION_COUNT
,
1457 "N geometry shader invocations");
1458 add_basic_stat_reg(query
, GS_PRIMITIVES_COUNT
,
1459 "N geometry shader primitives emitted");
1461 add_basic_stat_reg(query
, CL_INVOCATION_COUNT
,
1462 "N primitives entering clipping");
1463 add_basic_stat_reg(query
, CL_PRIMITIVES_COUNT
,
1464 "N primitives leaving clipping");
/* On HSW and Gen8 the raw PS invocation count is divided by 4 at readback
 * (denominator 4) — presumably the HW counter over-counts on those gens;
 * TODO(review): confirm the hardware rationale against the PRMs.
 */
1466 if (brw
->is_haswell
|| brw
->gen
== 8)
1467 add_stat_reg(query
, PS_INVOCATION_COUNT
, 1, 4,
1468 "N fragment shader invocations",
1469 "N fragment shader invocations");
1471 add_basic_stat_reg(query
, PS_INVOCATION_COUNT
,
1472 "N fragment shader invocations");
1474 add_basic_stat_reg(query
, PS_DEPTH_COUNT
, "N z-pass fragments");
1477 add_basic_stat_reg(query
, CS_INVOCATION_COUNT
,
1478 "N compute shader invocations");
/* Result buffer is one packed uint64 per counter (see add_stat_reg). */
1480 query
->data_size
= sizeof(uint64_t) * query
->n_counters
;
/* Read an unsigned 64-bit value (any strtoull base, auto-detected) from a
 * small text file such as a sysfs attribute.  The surrounding open/close
 * and error-handling lines are not visible in this chunk; presumably it
 * returns false on open/read failure — confirm against the full file.
 */
1484 read_file_uint64(const char *file
, uint64_t *val
)
/* Leave room for the NUL terminator appended before parsing. */
1492 n
= read(fd
, buf
, sizeof (buf
) - 1);
1498 *val
= strtoull(buf
, NULL
, 0);
/* Walk <sysfs_dev_dir>/metrics/ and register every kernel-advertised OA
 * metric set that Mesa also knows about (i.e. whose name is present in
 * perfquery.oa_metrics_table), recording the kernel's metric-set id.
 */
1504 enumerate_sysfs_metrics(struct brw_context
*brw
, const char *sysfs_dev_dir
)
1507 DIR *metricsdir
= NULL
;
1508 struct dirent
*metric_entry
;
1511 len
= snprintf(buf
, sizeof(buf
), "%s/metrics", sysfs_dev_dir
);
1512 if (len
< 0 || len
>= sizeof(buf
)) {
1513 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
1517 metricsdir
= opendir(buf
);
1519 DBG("Failed to open %s: %m\n", buf
);
1523 while ((metric_entry
= readdir(metricsdir
))) {
1524 struct hash_entry
*entry
;
/* Each metric set is a directory (or symlink); skip dot entries. */
1526 if ((metric_entry
->d_type
!= DT_DIR
&&
1527 metric_entry
->d_type
!= DT_LNK
) ||
1528 metric_entry
->d_name
[0] == '.')
1531 DBG("metric set: %s\n", metric_entry
->d_name
);
/* Only metric sets Mesa has pre-registered are exposed to applications. */
1532 entry
= _mesa_hash_table_search(brw
->perfquery
.oa_metrics_table
,
1533 metric_entry
->d_name
);
1535 struct brw_perf_query_info
*query
;
1538 len
= snprintf(buf
, sizeof(buf
), "%s/metrics/%s/id",
1539 sysfs_dev_dir
, metric_entry
->d_name
);
1540 if (len
< 0 || len
>= sizeof(buf
)) {
1541 DBG("Failed to concatenate path to sysfs metric id file\n");
1545 if (!read_file_uint64(buf
, &id
)) {
1546 DBG("Failed to read metric set id from %s: %m", buf
);
/* Copy Mesa's static description and attach the kernel-assigned id. */
1550 query
= append_query_info(brw
);
1551 *query
= *(struct brw_perf_query_info
*)entry
->data
;
1552 query
->oa_metrics_set_id
= id
;
1554 DBG("metric set known by mesa: id = %" PRIu64
"\n",
1555 query
->oa_metrics_set_id
);
1557 DBG("metric set not known by mesa (skipping)\n");
1560 closedir(metricsdir
);
/* Read a u64 from <sysfs_dev_dir>/<file>, returning false if the path
 * does not fit in the local buffer or the file cannot be parsed.
 */
1564 read_sysfs_drm_device_file_uint64(struct brw_context
*brw
,
1565 const char *sysfs_dev_dir
,
1572 len
= snprintf(buf
, sizeof(buf
), "%s/%s", sysfs_dev_dir
, file
);
1573 if (len
< 0 || len
>= sizeof(buf
)) {
1574 DBG("Failed to concatenate sys filename to read u64 from\n");
1578 return read_file_uint64(buf
, value
);
/* Populate perfquery.sys_vars (GT frequencies, EU topology, timestamp
 * frequency) used when normalizing OA counters.  Frequencies are read
 * from sysfs; the Haswell EU/slice topology is hard-coded per GT SKU.
 */
1582 init_oa_sys_vars(struct brw_context
*brw
, const char *sysfs_dev_dir
)
1584 uint64_t min_freq_mhz
= 0, max_freq_mhz
= 0;
1586 if (!read_sysfs_drm_device_file_uint64(brw
, sysfs_dev_dir
,
1591 if (!read_sysfs_drm_device_file_uint64(brw
, sysfs_dev_dir
,
/* sysfs reports MHz; sys_vars stores Hz. */
1596 brw
->perfquery
.sys_vars
.gt_min_freq
= min_freq_mhz
* 1000000;
1597 brw
->perfquery
.sys_vars
.gt_max_freq
= max_freq_mhz
* 1000000;
1599 if (brw
->is_haswell
) {
1600 const struct gen_device_info
*info
= &brw
->screen
->devinfo
;
/* 12.5 MHz command-streamer timestamp frequency on Haswell. */
1602 brw
->perfquery
.sys_vars
.timestamp_frequency
= 12500000;
/* Hard-coded HSW topology: GT1 = 10 EU / 1 slice, GT2 = 20 EU / 1 slice,
 * GT3 = 40 EU / 2 slices; subslice_mask is a bitmask of subslices.
 */
1604 if (info
->gt
== 1) {
1605 brw
->perfquery
.sys_vars
.n_eus
= 10;
1606 brw
->perfquery
.sys_vars
.n_eu_slices
= 1;
1607 brw
->perfquery
.sys_vars
.subslice_mask
= 0x1;
1608 } else if (info
->gt
== 2) {
1609 brw
->perfquery
.sys_vars
.n_eus
= 20;
1610 brw
->perfquery
.sys_vars
.n_eu_slices
= 1;
1611 brw
->perfquery
.sys_vars
.subslice_mask
= 0x3;
1612 } else if (info
->gt
== 3) {
1613 brw
->perfquery
.sys_vars
.n_eus
= 40;
1614 brw
->perfquery
.sys_vars
.n_eu_slices
= 2;
1615 brw
->perfquery
.sys_vars
.subslice_mask
= 0xf;
1617 unreachable("not reached");
/* Resolve the sysfs directory for the DRM device backing our fd, i.e.
 * /sys/dev/char/<maj>:<min>/device/drm/card<N>, writing the result into
 * @path_buf.  Returns false (with a debug message) on any failure.
 */
1625 get_sysfs_dev_dir(struct brw_context
*brw
,
1629 __DRIscreen
*screen
= brw
->screen
->driScrnPriv
;
1633 struct dirent
*drm_entry
;
1637 assert(path_buf_len
);
/* Identify the device via the char-device major/minor of the DRM fd. */
1640 if (fstat(screen
->fd
, &sb
)) {
1641 DBG("Failed to stat DRM fd\n");
1645 maj
= major(sb
.st_rdev
);
1646 min
= minor(sb
.st_rdev
);
1648 if (!S_ISCHR(sb
.st_mode
)) {
1649 DBG("DRM fd is not a character device as expected\n");
1653 len
= snprintf(path_buf
, path_buf_len
,
1654 "/sys/dev/char/%d:%d/device/drm", maj
, min
);
1655 if (len
< 0 || len
>= path_buf_len
) {
1656 DBG("Failed to concatenate sysfs path to drm device\n");
1660 drmdir
= opendir(path_buf
);
1662 DBG("Failed to open %s: %m\n", path_buf
);
/* Scan the drm/ directory for the card<N> entry and build the full path. */
1666 while ((drm_entry
= readdir(drmdir
))) {
1667 if ((drm_entry
->d_type
== DT_DIR
||
1668 drm_entry
->d_type
== DT_LNK
) &&
1669 strncmp(drm_entry
->d_name
, "card", 4) == 0)
1671 len
= snprintf(path_buf
, path_buf_len
,
1672 "/sys/dev/char/%d:%d/device/drm/%s",
1673 maj
, min
, drm_entry
->d_name
);
1675 if (len
< 0 || len
>= path_buf_len
)
1684 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
/* Driver hook for glGetFirstPerfQueryIdINTEL(): lazily build the list of
 * available queries (pipeline statistics always; OA metric sets when the
 * kernel's i915-perf interface is present) and initialize the OA
 * bookkeeping state.  Returns the number of queries; idempotent.
 */
1691 brw_init_perf_query_info(struct gl_context
*ctx
)
1693 struct brw_context
*brw
= brw_context(ctx
);
1695 char sysfs_dev_dir
[128];
/* Already initialized on a previous call — nothing to do. */
1697 if (brw
->perfquery
.n_queries
)
1698 return brw
->perfquery
.n_queries
;
1700 init_pipeline_statistic_query_registers(brw
);
1702 /* The existence of this sysctl parameter implies the kernel supports
1703 * the i915 perf interface.
1705 if (brw
->is_haswell
&&
1706 stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb
) == 0 &&
1707 get_sysfs_dev_dir(brw
, sysfs_dev_dir
, sizeof(sysfs_dev_dir
)) &&
1708 init_oa_sys_vars(brw
, sysfs_dev_dir
))
/* Keyed by metric-set name; entries added by brw_oa_register_queries_hsw
 * and matched against sysfs in enumerate_sysfs_metrics().
 */
1710 brw
->perfquery
.oa_metrics_table
=
1711 _mesa_hash_table_create(NULL
, _mesa_key_hash_string
,
1712 _mesa_key_string_equal
);
1714 /* Index all the metric sets mesa knows about before looking to
1715 * see what the kernel is advertising.
1717 brw_oa_register_queries_hsw(brw
);
1719 enumerate_sysfs_metrics(brw
, sysfs_dev_dir
);
/* Growable array of OA queries awaiting accumulation (starts empty). */
1722 brw
->perfquery
.unaccumulated
=
1723 ralloc_array(brw
, struct brw_perf_query_object
*, 2);
1724 brw
->perfquery
.unaccumulated_elements
= 0;
1725 brw
->perfquery
.unaccumulated_array_size
= 2;
1727 exec_list_make_empty(&brw
->perfquery
.sample_buffers
);
1728 exec_list_make_empty(&brw
->perfquery
.free_sample_buffers
);
1730 /* It's convenient to guarantee that this linked list of sample
1731 * buffers is never empty so we add an empty head so when we
1732 * Begin an OA query we can always take a reference on a buffer
1735 struct brw_oa_sample_buf
*buf
= get_free_sample_buf(brw
);
1736 exec_list_push_head(&brw
->perfquery
.sample_buffers
, &buf
->link
);
/* -1 marks "no i915-perf stream currently open". */
1738 brw
->perfquery
.oa_stream_fd
= -1;
1740 brw
->perfquery
.next_query_start_report_id
= 1000;
1742 return brw
->perfquery
.n_queries
;
1746 brw_init_performance_queries(struct brw_context
*brw
)
1748 struct gl_context
*ctx
= &brw
->ctx
;
1750 ctx
->Driver
.InitPerfQueryInfo
= brw_init_perf_query_info
;
1751 ctx
->Driver
.GetPerfQueryInfo
= brw_get_perf_query_info
;
1752 ctx
->Driver
.GetPerfCounterInfo
= brw_get_perf_counter_info
;
1753 ctx
->Driver
.NewPerfQueryObject
= brw_new_perf_query_object
;
1754 ctx
->Driver
.DeletePerfQuery
= brw_delete_perf_query
;
1755 ctx
->Driver
.BeginPerfQuery
= brw_begin_perf_query
;
1756 ctx
->Driver
.EndPerfQuery
= brw_end_perf_query
;
1757 ctx
->Driver
.WaitPerfQuery
= brw_wait_perf_query
;
1758 ctx
->Driver
.IsPerfQueryReady
= brw_is_perf_query_ready
;
1759 ctx
->Driver
.GetPerfQueryData
= brw_get_perf_query_data
;