/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file brw_performance_query.c
 *
 * Implementation of the GL_INTEL_performance_query extension.
 *
 * Currently there are two possible counter sources exposed here:
 *
 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
 * that we can snapshot at the beginning and end of a query.
 *
 * On Gen7.5+ we have Observability Architecture counters which are
 * covered in a separate document from the rest of the PRMs. It is available
 * at: https://01.org/linuxgraphics/documentation/driver-documentation-prms
 * => 2013 Intel Core Processor Family => Observability Performance Counters
 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
 * though notably we currently only support OA counters for Haswell+)
 */
#include <limits.h>
#include <dirent.h>

/* put before sys/types.h to silence glibc warnings */
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>

#include <xf86drm.h>
#include <i915_drm.h>
#include "main/hash.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/performance_query.h"

#include "util/bitset.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "util/list.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_performance_query.h"
#include "brw_oa_hsw.h"
#include "intel_batchbuffer.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON
/*
 * The largest OA format we can use on Haswell includes:
 * 1 timestamp, 45 A counters, 8 B counters and 8 C counters.
 */
#define MAX_OA_REPORT_COUNTERS 62

#define I915_PERF_OA_SAMPLE_SIZE (8 +   /* drm_i915_perf_record_header */ \
                                  256)  /* OA counter report */
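
/* Checking the arithmetic above (an illustrative note): 1 timestamp + 45 A
 * counters + 8 B counters + 8 C counters = 62 counter fields per report, and
 * each sample read from i915 perf is an 8 byte record header followed by the
 * 256 byte raw OA report itself.
 */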

/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * brw->perfquery.sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                |
 *                | ________B_________ _____C___________
 *                | |                | |           |   |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * (The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.)
 *
 * Note: to simplify the code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [  0  ][  0  ]
 *
 * These must be preserved until the leading ref drops to zero:
 *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [  0  ][  1  ]
 *         ^_______ Add a reference and store pointer to node in
 *                  A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [  0  ][  2  ]
 *         ^_______ Add a reference and store pointer to
 *                  node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [  0  ][  1  ][  0  ][  0  ][  0  ]
 *         ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [  1  ][  0  ][  0  ][  0  ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                |
 *                |                    ____C___________
 *                |                    |              |
 * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
 *         ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [  X  ][  X  ][  X  ][  X  ]
 *                  keeping -> [  1  ][  0  ][  0  ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct brw_oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
};
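
/*
 * A minimal usage sketch (illustration only, mirroring the driver code
 * further below) of the reference discipline described above:
 *
 *    // Begin: pin the current list tail so any buffers read() later,
 *    // which might hold samples for this query, can't be recycled...
 *    obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
 *    struct brw_oa_sample_buf *buf =
 *       exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
 *    buf->refcount++;
 *
 *    // Finish: drop the pin and let reap_old_sample_buffers() recycle
 *    // leading, unreferenced nodes.
 *    buf->refcount--;
 *    obj->oa.samples_head = NULL;
 *    reap_old_sample_buffers(brw);
 */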

/**
 * i965 representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct brw_perf_query_object
{
   struct gl_perf_query_object base;

   const struct brw_perf_query_info *query;

   /* See query->kind to know which state below is in use... */
   union {
      struct {

         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         struct brw_bo *bo;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the brw->perfquery.sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query).
          *
          * (See struct brw_oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * Storage for the final accumulated OA counters.
          */
         uint64_t accumulator[MAX_OA_REPORT_COUNTERS];

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         struct brw_bo *bo;
      } pipeline_stats;
   };
};

/** Downcasting convenience helper. */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
   return (struct brw_perf_query_object *) o;
}

#define STATS_BO_SIZE               4096
#define STATS_BO_END_OFFSET_BYTES   (STATS_BO_SIZE / 2)
#define MAX_STAT_COUNTERS           (STATS_BO_END_OFFSET_BYTES / 8)

#define MI_RPC_BO_SIZE              4096
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
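
/* Layout note (an illustrative summary): both kinds of snapshot BO hold the
 * Begin snapshot at offset 0 and the End snapshot starting at the half-way
 * offset:
 *
 *    [ Begin snapshot ............ ][ End snapshot .............. ]
 *    0                              *_BO_SIZE / 2                 *_BO_SIZE
 *
 * which is also why MAX_STAT_COUNTERS works out as
 * (4096 / 2) / 8 = 256 64-bit counter slots per snapshot.
 */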

/******************************************************************************/

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o);

static void
dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
{
   struct gl_context *ctx = brw_void;
   struct gl_perf_query_object *o = query_void;
   struct brw_perf_query_object *obj = query_void;

   switch (obj->query->kind) {
   case OA_COUNTERS:
      DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->oa.bo ? "yes," : "no,",
          brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case PIPELINE_STATS:
      DBG("%4d: %-6s %-8s BO: %-4s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   }
}

static void
dump_perf_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
   _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}

/******************************************************************************/

static struct brw_oa_sample_buf *
get_free_sample_buf(struct brw_context *brw)
{
   struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
   struct brw_oa_sample_buf *buf;

   if (node)
      buf = exec_node_data(struct brw_oa_sample_buf, node, link);
   else {
      buf = ralloc_size(brw, sizeof(*buf));

      exec_node_init(&buf->link);
      buf->refcount = 0;
      buf->len = 0;
   }

   return buf;
}

static void
reap_old_sample_buffers(struct brw_context *brw)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&brw->perfquery.sample_buffers);
   struct brw_oa_sample_buf *tail_buf =
      exec_node_data(struct brw_oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
                           &brw->perfquery.sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
      } else
         return;
   }
}

static void
free_sample_bufs(struct brw_context *brw)
{
   foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
                           &brw->perfquery.free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&brw->perfquery.free_sample_buffers);
}

/******************************************************************************/

/**
 * Driver hook for glGetPerfQueryInfoINTEL().
 */
static void
brw_get_perf_query_info(struct gl_context *ctx,
                        unsigned query_index,
                        const char **name,
                        GLuint *data_size,
                        GLuint *n_counters,
                        GLuint *n_active)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_perf_query_info *query =
      &brw->perfquery.queries[query_index];

   *name = query->name;
   *data_size = query->data_size;
   *n_counters = query->n_counters;

   switch (query->kind) {
   case OA_COUNTERS:
      *n_active = brw->perfquery.n_active_oa_queries;
      break;

   case PIPELINE_STATS:
      *n_active = brw->perfquery.n_active_pipeline_stats_queries;
      break;
   }
}

/**
 * Driver hook for glGetPerfCounterInfoINTEL().
 */
static void
brw_get_perf_counter_info(struct gl_context *ctx,
                          unsigned query_index,
                          unsigned counter_index,
                          const char **name,
                          const char **desc,
                          GLuint *offset,
                          GLuint *data_size,
                          GLuint *type_enum,
                          GLuint *data_type_enum,
                          GLuint64 *raw_max)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_perf_query_info *query =
      &brw->perfquery.queries[query_index];
   const struct brw_perf_query_counter *counter =
      &query->counters[counter_index];

   *name = counter->name;
   *desc = counter->desc;
   *offset = counter->offset;
   *data_size = counter->size;
   *type_enum = counter->type;
   *data_type_enum = counter->data_type;
   *raw_max = counter->raw_max;
}

/******************************************************************************/

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(struct brw_context *brw,
                              struct brw_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
   const struct brw_perf_query_info *query = obj->query;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct brw_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL);

      brw_store_register_mem64(brw, obj->pipeline_stats.bo,
                               counter->pipeline_stat.reg,
                               offset_in_bytes + i * sizeof(uint64_t));
   }
}

/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct brw_context *brw,
                                struct brw_perf_query_object *obj)
{
   if (brw->perfquery.unaccumulated_elements >=
       brw->perfquery.unaccumulated_array_size)
   {
      brw->perfquery.unaccumulated_array_size *= 1.5;
      brw->perfquery.unaccumulated =
         reralloc(brw, brw->perfquery.unaccumulated,
                  struct brw_perf_query_object *,
                  brw->perfquery.unaccumulated_array_size);
   }

   brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
}

/**
 * Remove a query from the global list of unaccumulated queries, either
 * after successfully accumulating the OA reports associated with the
 * query in accumulate_oa_reports() or when discarding unwanted query
 * results.
 */
static void
drop_from_unaccumulated_query_list(struct brw_context *brw,
                                   struct brw_perf_query_object *obj)
{
   for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
      if (brw->perfquery.unaccumulated[i] == obj) {
         int last_elt = --brw->perfquery.unaccumulated_elements;

         if (i == last_elt)
            brw->perfquery.unaccumulated[i] = NULL;
         else {
            brw->perfquery.unaccumulated[i] =
               brw->perfquery.unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */

   struct brw_oa_sample_buf *buf =
      exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   obj->oa.samples_head = NULL;

   reap_old_sample_buffers(brw);
}

static uint64_t
timebase_scale(struct brw_context *brw, uint32_t u32_time_delta)
{
   uint64_t tmp = ((uint64_t)u32_time_delta) * 1000000000ull;

   return tmp ? tmp / brw->perfquery.sys_vars.timestamp_frequency : 0;
}
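
/* For example (illustration only): on Haswell sys_vars.timestamp_frequency
 * is initialized below to 12500000 (12.5MHz), i.e. one timestamp tick is
 * 80ns, so a raw delta of 125 ticks scales to
 * 125 * 1000000000 / 12500000 = 10000 nanoseconds.
 */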

static void
accumulate_uint32(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += (uint32_t)(*report1 - *report0);
}
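
/* The (uint32_t) cast of the difference is what makes a single counter
 * wrap between two reports harmless: e.g. report0 = 0xfffffff0 and
 * report1 = 0x00000010 gives (uint32_t)(0x00000010 - 0xfffffff0) = 0x20,
 * the true delta, which is then widened into the 64-bit accumulator.
 */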

/**
 * Given pointers to starting and ending OA snapshots, add the deltas for each
 * counter to the results.
 */
static void
add_deltas(struct brw_context *brw,
           struct brw_perf_query_object *obj,
           const uint32_t *start,
           const uint32_t *end)
{
   const struct brw_perf_query_info *query = obj->query;
   uint64_t *accumulator = obj->oa.accumulator;
   int i;

   switch (query->oa_format) {
   case I915_OA_FORMAT_A45_B8_C8:
      accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */

      for (i = 0; i < 61; i++)
         accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);

      break;
   default:
      unreachable("Can't accumulate OA counters in unknown format");
   }
}

static bool
inc_n_oa_users(struct brw_context *brw)
{
   if (brw->perfquery.n_oa_users == 0 &&
       drmIoctl(brw->perfquery.oa_stream_fd,
                I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++brw->perfquery.n_oa_users;

   return true;
}

static void
dec_n_oa_users(struct brw_context *brw)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters. Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --brw->perfquery.n_oa_users;
   if (brw->perfquery.n_oa_users == 0 &&
       drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling i915 perf stream: %m\n");
   }
}

/* In general, if we see anything spurious while accumulating results we
 * don't try to continue accumulating the current query, hoping for the
 * best; we scrap anything outstanding and then hope for the best with new
 * queries.
 */
static void
discard_all_queries(struct brw_context *brw)
{
   while (brw->perfquery.unaccumulated_elements) {
      struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];

      obj->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);

      dec_n_oa_users(brw);
   }
}

static bool
read_oa_samples(struct brw_context *brw)
{
   while (1) {
      struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
      int len;

      while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);

         if (len < 0) {
            if (errno == EAGAIN)
               return true; /* non-blocking fd: no more data for now */
            else {
               DBG("Error reading i915 perf samples: %m\n");
               return false;
            }
         } else {
            DBG("Spurious EOF reading i915 perf samples\n");
            return false;
         }
      }

      buf->len = len;
      exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);
   }

   unreachable("not reached");
   return false;
}

/**
 * Accumulate raw OA counter values based on deltas between pairs
 * of OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots.
 */
static void
accumulate_oa_reports(struct brw_context *brw,
                      struct brw_perf_query_object *obj)
{
   struct gl_perf_query_object *o = &obj->base;
   uint32_t *query_buffer;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;

   /* Collect the latest periodic OA reports from i915 perf */
   if (!read_oa_samples(brw))
      goto error;

   query_buffer = brw_bo_map(brw, obj->oa.bo, MAP_READ);

   start = last = query_buffer;
   end = query_buffer + (MI_RPC_BO_END_OFFSET_BYTES / sizeof(uint32_t));

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the brw->perfquery.sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = obj->oa.samples_head->next;

   foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
                           &brw->perfquery.sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);

            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (timebase_scale(brw, report[1] - start[1]) > 5000000000)
               continue;

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (timebase_scale(brw, report[1] - end[1]) <= 5000000000)
               goto end;

            add_deltas(brw, obj, last, report);

            last = report;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:

   add_deltas(brw, obj, last, end);

   DBG("Marking %d accumulated - results gathered\n", o->Id);

   brw_bo_unmap(obj->oa.bo);
   obj->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(brw, obj);
   dec_n_oa_users(brw);

   return;

error:

   brw_bo_unmap(obj->oa.bo);
   discard_all_queries(brw);
}

/******************************************************************************/

static bool
open_i915_perf_oa_stream(struct brw_context *brw,
                         int metrics_set_id,
                         int report_format,
                         int period_exponent,
                         int drm_fd,
                         uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = ARRAY_SIZE(properties) / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening i915 perf OA stream: %m\n");
      return false;
   }

   brw->perfquery.oa_stream_fd = fd;

   brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
   brw->perfquery.current_oa_format = report_format;

   return true;
}
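
/* Note on the open ioctl above: the properties array is a flat list of
 * (key, value) u64 pairs, which is why num_properties is
 * ARRAY_SIZE(properties) / 2 - ten array elements describe five properties.
 */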

static void
close_perf(struct brw_context *brw)
{
   if (brw->perfquery.oa_stream_fd != -1) {
      close(brw->perfquery.oa_stream_fd);
      brw->perfquery.oa_stream_fd = -1;
   }
}

/**
 * Driver hook for glBeginPerfQueryINTEL().
 */
static bool
brw_begin_perf_query(struct gl_context *ctx,
                     struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   const struct brw_perf_query_info *query = obj->query;

   /* We can assume the frontend hides mistaken attempts to Begin a
    * query object multiple times before its End. Similarly if an
    * application reuses a query object before results have arrived
    * the frontend will wait for prior results so we don't need
    * to support abandoning in-flight results.
    */
   assert(!o->Used || o->Ready); /* no in-flight query to worry about */

   DBG("Begin(%d)\n", o->Id);

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * Theoretically there could be opportunities to minimize how much of the
    * GPU pipeline is drained, or that we stall for, when we know what specific
    * units the performance counters being queried relate to but we don't
    * currently attempt to be clever here.
    *
    * Note: with our current simple approach here then for back-to-back queries
    * we will redundantly emit duplicate commands to synchronize the command
    * streamer with the rest of the GPU pipeline, but we assume that in HW the
    * second synchronization is effectively a NOOP.
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics then periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   brw_emit_mi_flush(brw);

   switch (query->kind) {
   case OA_COUNTERS:

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      if (brw->perfquery.oa_stream_fd != -1 &&
          brw->perfquery.current_oa_metrics_set_id !=
          query->oa_metrics_set_id) {

         if (brw->perfquery.n_oa_users != 0)
            return false;
         else
            close_perf(brw);
      }

      /* If the OA counters aren't already on, enable them. */
      if (brw->perfquery.oa_stream_fd == -1) {
         __DRIscreen *screen = brw->screen->driScrnPriv;
         int period_exponent;

         /* The timestamp for HSW+ increments every 80ns
          *
          * The period_exponent gives a sampling period as follows:
          *   sample_period = 80ns * 2^(period_exponent + 1)
          *
          * The overflow period for Haswell can be calculated as:
          *
          *   2^32 / (n_eus * max_gen_freq * 2)
          *   (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We currently sample every 42 milliseconds...
          */
         period_exponent = 18;
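
         /* Working that arithmetic out (illustration): 80ns * 2^(18 + 1)
          * = 80 * 524288 ns = 41943040 ns ~= 42ms, safely below the ~53ms
          * worst-case overflow period estimated above.
          */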

         if (!open_i915_perf_oa_stream(brw,
                                       query->oa_metrics_set_id,
                                       query->oa_format,
                                       period_exponent,
                                       screen->fd, /* drm fd */
                                       brw->hw_ctx))
            return false;
      } else {
         assert(brw->perfquery.current_oa_metrics_set_id ==
                query->oa_metrics_set_id &&
                brw->perfquery.current_oa_format ==
                query->oa_format);
      }

      if (!inc_n_oa_users(brw)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (obj->oa.bo) {
         brw_bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.bo =
         brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
                      MI_RPC_BO_SIZE, 64);

      /* Pre-filling the BO helps debug whether writes landed. */
      void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
      memset(map, 0x80, MI_RPC_BO_SIZE);
      brw_bo_unmap(obj->oa.bo);

      obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
      brw->perfquery.next_query_start_report_id += 2;

      /* Take a starting OA counter snapshot. */
      brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
                                          obj->oa.begin_report_id);
      ++brw->perfquery.n_active_oa_queries;

      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
      obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);

      struct brw_oa_sample_buf *buf =
         exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      memset(obj->oa.accumulator, 0, sizeof(obj->oa.accumulator));
      obj->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(brw, obj);
      break;

   case PIPELINE_STATS:
      if (obj->pipeline_stats.bo) {
         brw_bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }

      obj->pipeline_stats.bo =
         brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
                      STATS_BO_SIZE, 64);

      /* Take starting snapshots. */
      snapshot_statistics_registers(brw, obj, 0);

      ++brw->perfquery.n_active_pipeline_stats_queries;
      break;
   }

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   return true;
}

/**
 * Driver hook for glEndPerfQueryINTEL().
 */
static void
brw_end_perf_query(struct gl_context *ctx,
                   struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);

   DBG("End(%d)\n", o->Id);

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in brw_begin_perf_query for
    * corresponding flush.
    */
   brw_emit_mi_flush(brw);

   switch (obj->query->kind) {
   case OA_COUNTERS:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled
       */
      if (!obj->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
                                             MI_RPC_BO_END_OFFSET_BYTES,
                                             obj->oa.begin_report_id + 1);
      }

      --brw->perfquery.n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written
       * to query->oa.bo
       */
      break;

   case PIPELINE_STATS:
      snapshot_statistics_registers(brw, obj,
                                    STATS_BO_END_OFFSET_BYTES);
      --brw->perfquery.n_active_pipeline_stats_queries;
      break;
   }
}

static void
brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   struct brw_bo *bo = NULL;

   assert(!o->Ready);

   switch (obj->query->kind) {
   case OA_COUNTERS:
      bo = obj->oa.bo;
      break;

   case PIPELINE_STATS:
      bo = obj->pipeline_stats.bo;
      break;
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (brw_batch_references(&brw->batch, bo))
      intel_batchbuffer_flush(brw);

   brw_bo_wait_rendering(brw, bo);
}

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);

   if (o->Ready)
      return true;

   switch (obj->query->kind) {
   case OA_COUNTERS:
      return (obj->oa.results_accumulated ||
              (obj->oa.bo &&
               !brw_batch_references(&brw->batch, obj->oa.bo) &&
               !brw_bo_busy(obj->oa.bo)));

   case PIPELINE_STATS:
      return (obj->pipeline_stats.bo &&
              !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
              !brw_bo_busy(obj->pipeline_stats.bo));
   }

   unreachable("missing ready check for unknown query kind");

   return false;
}

static int
get_oa_counter_data(struct brw_context *brw,
                    struct brw_perf_query_object *obj,
                    size_t data_size,
                    uint8_t *data)
{
   const struct brw_perf_query_info *query = obj->query;
   int n_counters = query->n_counters;
   int written = 0;

   if (!obj->oa.results_accumulated) {
      accumulate_oa_reports(brw, obj);
      assert(obj->oa.results_accumulated);
   }

   for (int i = 0; i < n_counters; i++) {
      const struct brw_perf_query_counter *counter = &query->counters[i];
      uint64_t *out_uint64;
      float *out_float;

      if (counter->size) {
         switch (counter->data_type) {
         case GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 = counter->oa_counter_read_uint64(brw, query,
                                                          obj->oa.accumulator);
            break;
         case GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL:
            out_float = (float *)(data + counter->offset);
            *out_float = counter->oa_counter_read_float(brw, query,
                                                        obj->oa.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter->size;
      }
   }

   return written;
}

static int
get_pipeline_stats_data(struct brw_context *brw,
                        struct brw_perf_query_object *obj,
                        size_t data_size,
                        uint8_t *data)
{
   const struct brw_perf_query_info *query = obj->query;
   int n_counters = obj->query->n_counters;
   uint8_t *p = data;

   uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct brw_perf_query_counter *counter = &query->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   brw_bo_unmap(obj->pipeline_stats.bo);

   return p - data;
}

/**
 * Driver hook for glGetPerfQueryDataINTEL().
 */
static void
brw_get_perf_query_data(struct gl_context *ctx,
                        struct gl_perf_query_object *o,
                        GLsizei data_size,
                        GLuint *data,
                        GLuint *bytes_written)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   int written = 0;

   assert(brw_is_perf_query_ready(ctx, o));

   DBG("GetData(%d)\n", o->Id);

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   /* We expect that the frontend only calls this hook when it knows
    * that results are available.
    */
   assert(o->Ready);

   switch (obj->query->kind) {
   case OA_COUNTERS:
      written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
      break;

   case PIPELINE_STATS:
      written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}

static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_perf_query_info *query =
      &brw->perfquery.queries[query_index];
   struct brw_perf_query_object *obj =
      calloc(1, sizeof(struct brw_perf_query_object));

   if (!obj)
      return NULL;

   obj->query = query;

   brw->perfquery.n_query_instances++;

   return &obj->base;
}

/**
 * Driver hook for glDeletePerfQueryINTEL().
 */
static void
brw_delete_perf_query(struct gl_context *ctx,
                      struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   assert(!o->Used || o->Ready);

   DBG("Delete(%d)\n", o->Id);

   switch (obj->query->kind) {
   case OA_COUNTERS:
      if (obj->oa.bo) {
         if (!obj->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(brw, obj);
            dec_n_oa_users(brw);
         }

         brw_bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.results_accumulated = false;
      break;

   case PIPELINE_STATS:
      if (obj->pipeline_stats.bo) {
         brw_bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }
      break;
   }

   free(obj);

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--brw->perfquery.n_query_instances == 0) {
      free_sample_bufs(brw);
      close_perf(brw);
   }
}

/******************************************************************************/

static struct brw_perf_query_info *
append_query_info(struct brw_context *brw)
{
   brw->perfquery.queries =
      reralloc(brw, brw->perfquery.queries,
               struct brw_perf_query_info, ++brw->perfquery.n_queries);

   return &brw->perfquery.queries[brw->perfquery.n_queries - 1];
}

static void
add_stat_reg(struct brw_perf_query_info *query,
             uint32_t reg,
             uint32_t numerator,
             uint32_t denominator,
             const char *name,
             const char *description)
{
   struct brw_perf_query_counter *counter;

   assert(query->n_counters < MAX_STAT_COUNTERS);

   counter = &query->counters[query->n_counters];
   counter->name = name;
   counter->desc = description;
   counter->type = GL_PERFQUERY_COUNTER_RAW_INTEL;
   counter->data_type = GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
   counter->size = sizeof(uint64_t);
   counter->offset = sizeof(uint64_t) * query->n_counters;
   counter->pipeline_stat.reg = reg;
   counter->pipeline_stat.numerator = numerator;
   counter->pipeline_stat.denominator = denominator;

   query->n_counters++;
}
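
/* The numerator/denominator pair lets a raw register delta be rescaled when
 * it's read back in get_pipeline_stats_data(). For example,
 * PS_INVOCATION_COUNT is registered below with a 1/4 ratio on Haswell and
 * Gen8, so a raw delta of 4096 would be reported as 4096 * 1 / 4 = 1024
 * fragment shader invocations.
 */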

static void
add_basic_stat_reg(struct brw_perf_query_info *query,
                   uint32_t reg, const char *name)
{
   add_stat_reg(query, reg, 1, 1, name, name);
}

static void
init_pipeline_statistic_query_registers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_perf_query_info *query = append_query_info(brw);

   query->kind = PIPELINE_STATS;
   query->name = "Pipeline Statistics Registers";
   query->n_counters = 0;
   query->counters =
      rzalloc_array(brw, struct brw_perf_query_counter, MAX_STAT_COUNTERS);

   add_basic_stat_reg(query, IA_VERTICES_COUNT,
                      "N vertices submitted");
   add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                      "N primitives submitted");
   add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                      "N vertex shader invocations");

   if (devinfo->gen == 6) {
      add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                   "SO_PRIM_STORAGE_NEEDED",
                   "N geometry shader stream-out primitives (total)");
      add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                   "SO_NUM_PRIMS_WRITTEN",
                   "N geometry shader stream-out primitives (written)");
   } else {
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                   "N stream-out (stream 0) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                   "N stream-out (stream 1) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                   "N stream-out (stream 2) primitives (total)");
      add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                   "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                   "N stream-out (stream 3) primitives (total)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                   "N stream-out (stream 0) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                   "N stream-out (stream 1) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                   "N stream-out (stream 2) primitives (written)");
      add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                   "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                   "N stream-out (stream 3) primitives (written)");
   }

   add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                      "N TCS shader invocations");
   add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                      "N TES shader invocations");

   add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                      "N geometry shader invocations");
   add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                      "N geometry shader primitives emitted");

   add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                      "N primitives entering clipping");
   add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                      "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8)
      add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                   "N fragment shader invocations",
                   "N fragment shader invocations");
   else
      add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                         "N fragment shader invocations");

   add_basic_stat_reg(query, PS_DEPTH_COUNT, "N z-pass fragments");

   if (devinfo->gen >= 7)
      add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                         "N compute shader invocations");

   query->data_size = sizeof(uint64_t) * query->n_counters;
}

static bool
read_file_uint64(const char *file, uint64_t *val)
{
   char buf[32];
   int fd, n;

   fd = open(file, 0);
   if (fd < 0)
      return false;

   n = read(fd, buf, sizeof (buf) - 1);
   close(fd);
   if (n < 0)
      return false;

   buf[n] = '\0';
   *val = strtoull(buf, NULL, 0);

   return true;
}

static void
enumerate_sysfs_metrics(struct brw_context *brw, const char *sysfs_dev_dir)
{
   char buf[256];
   DIR *metricsdir = NULL;
   struct dirent *metric_entry;
   int len;

   len = snprintf(buf, sizeof(buf), "%s/metrics", sysfs_dev_dir);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate path to sysfs metrics/ directory\n");
      return;
   }

   metricsdir = opendir(buf);
   if (!metricsdir) {
      DBG("Failed to open %s: %m\n", buf);
      return;
   }

   while ((metric_entry = readdir(metricsdir))) {
      struct hash_entry *entry;

      if ((metric_entry->d_type != DT_DIR &&
           metric_entry->d_type != DT_LNK) ||
          metric_entry->d_name[0] == '.')
         continue;

      DBG("metric set: %s\n", metric_entry->d_name);
      entry = _mesa_hash_table_search(brw->perfquery.oa_metrics_table,
                                      metric_entry->d_name);
      if (entry) {
         struct brw_perf_query_info *query;
         uint64_t id;

         len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
                        sysfs_dev_dir, metric_entry->d_name);
         if (len < 0 || len >= sizeof(buf)) {
            DBG("Failed to concatenate path to sysfs metric id file\n");
            continue;
         }

         if (!read_file_uint64(buf, &id)) {
            DBG("Failed to read metric set id from %s: %m", buf);
            continue;
         }

         query = append_query_info(brw);
         *query = *(struct brw_perf_query_info *)entry->data;
         query->oa_metrics_set_id = id;

         DBG("metric set known by mesa: id = %" PRIu64"\n",
             query->oa_metrics_set_id);
      } else
         DBG("metric set not known by mesa (skipping)\n");
   }

   closedir(metricsdir);
}

static bool
read_sysfs_drm_device_file_uint64(struct brw_context *brw,
                                  const char *sysfs_dev_dir,
                                  const char *file,
                                  uint64_t *value)
{
   char buf[512];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/%s", sysfs_dev_dir, file);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate sys filename to read u64 from\n");
      return false;
   }

   return read_file_uint64(buf, value);
}

static bool
init_oa_sys_vars(struct brw_context *brw, const char *sysfs_dev_dir)
{
   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;

   if (!read_sysfs_drm_device_file_uint64(brw, sysfs_dev_dir,
                                          "gt_min_freq_mhz",
                                          &min_freq_mhz))
      return false;

   if (!read_sysfs_drm_device_file_uint64(brw, sysfs_dev_dir,
                                          "gt_max_freq_mhz",
                                          &max_freq_mhz))
      return false;

   brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
   brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;

   if (brw->is_haswell) {
      const struct gen_device_info *info = &brw->screen->devinfo;

      brw->perfquery.sys_vars.timestamp_frequency = 12500000;

      if (info->gt == 1) {
         brw->perfquery.sys_vars.n_eus = 10;
         brw->perfquery.sys_vars.n_eu_slices = 1;
         brw->perfquery.sys_vars.subslice_mask = 0x1;
      } else if (info->gt == 2) {
         brw->perfquery.sys_vars.n_eus = 20;
         brw->perfquery.sys_vars.n_eu_slices = 1;
         brw->perfquery.sys_vars.subslice_mask = 0x3;
      } else if (info->gt == 3) {
         brw->perfquery.sys_vars.n_eus = 40;
         brw->perfquery.sys_vars.n_eu_slices = 2;
         brw->perfquery.sys_vars.subslice_mask = 0xf;
      } else
         unreachable("not reached");

      return true;
   } else
      return false;
}

static bool
get_sysfs_dev_dir(struct brw_context *brw,
                  char *path_buf,
                  int path_buf_len)
{
   __DRIscreen *screen = brw->screen->driScrnPriv;
   struct stat sb;
   int maj, min;
   DIR *drmdir;
   struct dirent *drm_entry;
   int len;

   assert(path_buf_len);

   if (fstat(screen->fd, &sb)) {
      DBG("Failed to stat DRM fd\n");
      return false;
   }

   maj = major(sb.st_rdev);
   min = minor(sb.st_rdev);

   if (!S_ISCHR(sb.st_mode)) {
      DBG("DRM fd is not a character device as expected\n");
      return false;
   }

   len = snprintf(path_buf, path_buf_len,
                  "/sys/dev/char/%d:%d/device/drm", maj, min);
   if (len < 0 || len >= path_buf_len) {
      DBG("Failed to concatenate sysfs path to drm device\n");
      return false;
   }

   drmdir = opendir(path_buf);
   if (!drmdir) {
      DBG("Failed to open %s: %m\n", path_buf);
      return false;
   }

   while ((drm_entry = readdir(drmdir))) {
      if ((drm_entry->d_type == DT_DIR ||
           drm_entry->d_type == DT_LNK) &&
          strncmp(drm_entry->d_name, "card", 4) == 0)
      {
         len = snprintf(path_buf, path_buf_len,
                        "/sys/dev/char/%d:%d/device/drm/%s",
                        maj, min, drm_entry->d_name);
         closedir(drmdir);
         if (len < 0 || len >= path_buf_len)
            return false;
         else
            return true;
      }
   }

   closedir(drmdir);

   DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
       maj, min);

   return false;
}
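
/* For example (illustrative values): if the screen's DRM fd refers to
 * /dev/dri/card0, fstat() typically reports character device 226:0 (226
 * being the registered DRM char device major on Linux), so the search above
 * starts at /sys/dev/char/226:0/device/drm and resolves to
 * /sys/dev/char/226:0/device/drm/card0.
 */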

static unsigned
brw_init_perf_query_info(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   struct stat sb;
   char sysfs_dev_dir[128];

   if (brw->perfquery.n_queries)
      return brw->perfquery.n_queries;

   init_pipeline_statistic_query_registers(brw);

   /* The existence of this sysctl parameter implies the kernel supports
    * the i915 perf interface.
    */
   if (brw->is_haswell &&
       stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0 &&
       get_sysfs_dev_dir(brw, sysfs_dev_dir, sizeof(sysfs_dev_dir)) &&
       init_oa_sys_vars(brw, sysfs_dev_dir))
   {
      brw->perfquery.oa_metrics_table =
         _mesa_hash_table_create(NULL, _mesa_key_hash_string,
                                 _mesa_key_string_equal);

      /* Index all the metric sets mesa knows about before looking to
       * see what the kernel is advertising.
       */
      brw_oa_register_queries_hsw(brw);

      enumerate_sysfs_metrics(brw, sysfs_dev_dir);
   }

   brw->perfquery.unaccumulated =
      ralloc_array(brw, struct brw_perf_query_object *, 2);
   brw->perfquery.unaccumulated_elements = 0;
   brw->perfquery.unaccumulated_array_size = 2;

   exec_list_make_empty(&brw->perfquery.sample_buffers);
   exec_list_make_empty(&brw->perfquery.free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
   exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);

   brw->perfquery.oa_stream_fd = -1;

   brw->perfquery.next_query_start_report_id = 1000;

   return brw->perfquery.n_queries;
}

void
brw_init_performance_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
   ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
   ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
   ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
   ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
   ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
   ctx->Driver.EndPerfQuery = brw_end_perf_query;
   ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
   ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
   ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
}