/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file brw_performance_query.c
 *
 * Implementation of the GL_INTEL_performance_query extension.
 *
 * Currently there are two possible counter sources exposed here:
 *
 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
 * that we can snapshot at the beginning and end of a query.
 *
 * On Gen7.5+ we have Observability Architecture counters which are
 * covered in a separate document from the rest of the PRMs. It is available
 * at: https://01.org/linuxgraphics/documentation/driver-documentation-prms
 * => 2013 Intel Core Processor Family => Observability Performance Counters
 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
 * though notably we currently only support OA counters for Haswell+.)
 */
#include <limits.h>
#include <dirent.h>

/* put before sys/types.h to silence glibc warnings */
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include "drm-uapi/i915_drm.h"

#include "main/hash.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/performance_query.h"

#include "util/bitset.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/u_math.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_performance_query.h"
#include "brw_oa_metrics.h"
#include "intel_batchbuffer.h"
#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_TRIGGER1       (1<<1)
#define OAREPORT_REASON_TRIGGER2       (1<<2)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_GO_TRANSITION  (1<<4)

#define I915_PERF_OA_SAMPLE_SIZE (8 +   /* drm_i915_perf_record_header */ \
                                  256)  /* OA counter report */
/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * brw->perfquery.sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                 |
 *                | ________B_________ _____C___________
 *                | |                | |           |   |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [  0  ][  0  ]
 *
 * These must be preserved until the leading ref drops to zero:
 *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [  0  ][  1  ]
 *          ^_______ Add a reference and store pointer to node in
 *                   A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [  0  ][  2  ]
 *          ^_______ Add a reference and store pointer to
 *                   node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [  0  ][  1  ][  0  ][  0  ][  0  ]
 *          ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [  1  ][  0  ][  0  ][  0  ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                 |
 * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
 *          ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [  X  ][  X  ][  X  ][  X  ]
 *                   keeping -> [  1  ][  0  ][  0  ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct brw_oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};
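
/* Each buffer node holds up to 10 raw i915 perf samples per read()
 * (I915_PERF_OA_SAMPLE_SIZE * 10 bytes). buf->len records how many bytes
 * the last read() actually filled and last_timestamp caches the newest
 * report timestamp seen, so readers can cheaply check how far the
 * buffered samples have progressed.
 */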
/** Downcasting convenience helper. */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
   return (struct brw_perf_query_object *) o;
}
#define MI_RPC_BO_SIZE              4096
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
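
/* The offsets above define the layout of the MI_RPC scratch BO: the Begin
 * report is written at byte 0, the End report at half the BO
 * (MI_RPC_BO_END_OFFSET_BYTES = 2048), and the 32-bit start/end RPSTAT
 * frequency snapshots land back-to-back at bytes 3072 and 3076.
 */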
/******************************************************************************/

static bool
read_file_uint64(const char *file, uint64_t *val)
{
    char buf[32];
    int fd, n;

    fd = open(file, 0);
    if (fd < 0)
       return false;
    while ((n = read(fd, buf, sizeof (buf) - 1)) < 0 &&
           errno == EINTR);
    close(fd);
    if (n < 0)
       return false;

    buf[n] = '\0';
    *val = strtoull(buf, NULL, 0);

    return true;
}
static bool
read_sysfs_drm_device_file_uint64(struct brw_context *brw,
                                  const char *file,
                                  uint64_t *value)
{
   char buf[512];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/%s",
                  brw->perfquery.sysfs_dev_dir, file);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate sys filename to read u64 from\n");
      return false;
   }

   return read_file_uint64(buf, value);
}
/******************************************************************************/

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o);
static uint64_t
brw_perf_query_get_metric_id(struct brw_context *brw,
                             const struct brw_perf_query_info *query)
{
   /* These queries are known never to change; their config ID was
    * loaded upon the first query creation. No need to look them up again.
    */
   if (query->kind == OA_COUNTERS)
      return query->oa_metrics_set_id;

   assert(query->kind == OA_COUNTERS_RAW);

   /* Raw queries can be reprogrammed by an external application/library.
    * When a raw query is used for the first time its ID is set to a value !=
    * 0. When it stops being used the ID returns to 0. No need to reload the
    * ID when it's already loaded.
    */
   if (query->oa_metrics_set_id != 0) {
      DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
      return query->oa_metrics_set_id;
   }

   char metric_id_file[280];
   snprintf(metric_id_file, sizeof(metric_id_file),
            "%s/metrics/%s/id", brw->perfquery.sysfs_dev_dir, query->guid);

   struct brw_perf_query_info *raw_query = (struct brw_perf_query_info *)query;
   if (!read_file_uint64(metric_id_file, &raw_query->oa_metrics_set_id)) {
      DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
      raw_query->oa_metrics_set_id = 1ULL;
   } else {
      DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
   }
   return query->oa_metrics_set_id;
}
static void
dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
{
   struct gl_context *ctx = brw_void;
   struct gl_perf_query_object *o = query_void;
   struct brw_perf_query_object *obj = query_void;

   switch (obj->query->kind) {
   case OA_COUNTERS:
   case OA_COUNTERS_RAW:
      DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->oa.bo ? "yes," : "no,",
          brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case PIPELINE_STATS:
      DBG("%4d: %-6s %-8s BO: %-4s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}
static void
dump_perf_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
   _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}
/******************************************************************************/

static struct brw_oa_sample_buf *
get_free_sample_buf(struct brw_context *brw)
{
   struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
   struct brw_oa_sample_buf *buf;

   if (node)
      buf = exec_node_data(struct brw_oa_sample_buf, node, link);
   else {
      buf = ralloc_size(brw, sizeof(*buf));

      exec_node_init(&buf->link);
      buf->refcount = 0;
      buf->len = 0;
   }

   return buf;
}
static void
reap_old_sample_buffers(struct brw_context *brw)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&brw->perfquery.sample_buffers);
   struct brw_oa_sample_buf *tail_buf =
      exec_node_data(struct brw_oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
                           &brw->perfquery.sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
      } else
         return;
   }
}
static void
free_sample_bufs(struct brw_context *brw)
{
   foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
                           &brw->perfquery.free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&brw->perfquery.free_sample_buffers);
}
/******************************************************************************/

/**
 * Driver hook for glGetPerfQueryInfoINTEL().
 */
static void
brw_get_perf_query_info(struct gl_context *ctx,
                        unsigned query_index,
                        const char **name,
                        GLuint *data_size,
                        GLuint *n_counters,
                        GLuint *n_active)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_perf_query_info *query =
      &brw->perfquery.queries[query_index];

   *name = query->name;
   *data_size = query->data_size;
   *n_counters = query->n_counters;

   switch (query->kind) {
   case OA_COUNTERS:
   case OA_COUNTERS_RAW:
      *n_active = brw->perfquery.n_active_oa_queries;
      break;

   case PIPELINE_STATS:
      *n_active = brw->perfquery.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
/**
 * Driver hook for glGetPerfCounterInfoINTEL().
 */
static void
brw_get_perf_counter_info(struct gl_context *ctx,
                          unsigned query_index,
                          unsigned counter_index,
                          const char **name,
                          const char **desc,
                          GLuint *offset,
                          GLuint *data_size,
                          GLuint *type_enum,
                          GLuint *data_type_enum,
                          GLuint64 *raw_max)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_perf_query_info *query =
      &brw->perfquery.queries[query_index];
   const struct brw_perf_query_counter *counter =
      &query->counters[counter_index];

   *name = counter->name;
   *desc = counter->desc;
   *offset = counter->offset;
   *data_size = counter->size;
   *type_enum = counter->type;
   *data_type_enum = counter->data_type;
   *raw_max = counter->raw_max;
}
/******************************************************************************/

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(struct brw_context *brw,
                              struct brw_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
   const struct brw_perf_query_info *query = obj->query;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct brw_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL);

      brw_store_register_mem64(brw, obj->pipeline_stats.bo,
                               counter->pipeline_stat.reg,
                               offset_in_bytes + i * sizeof(uint64_t));
   }
}
/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct brw_context *brw,
                                struct brw_perf_query_object *obj)
{
   if (brw->perfquery.unaccumulated_elements >=
       brw->perfquery.unaccumulated_array_size)
   {
      brw->perfquery.unaccumulated_array_size *= 1.5;
      brw->perfquery.unaccumulated =
         reralloc(brw, brw->perfquery.unaccumulated,
                  struct brw_perf_query_object *,
                  brw->perfquery.unaccumulated_array_size);
   }

   brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
}
/**
 * Remove a query from the global list of unaccumulated queries once
 * the OA reports associated with the query have been accumulated in
 * accumulate_oa_reports(), or when discarding unwanted query results.
 */
static void
drop_from_unaccumulated_query_list(struct brw_context *brw,
                                   struct brw_perf_query_object *obj)
{
   for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
      if (brw->perfquery.unaccumulated[i] == obj) {
         int last_elt = --brw->perfquery.unaccumulated_elements;

         if (i == last_elt)
            brw->perfquery.unaccumulated[i] = NULL;
         else {
            brw->perfquery.unaccumulated[i] =
               brw->perfquery.unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */

   struct brw_oa_sample_buf *buf =
      exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   obj->oa.samples_head = NULL;

   reap_old_sample_buffers(brw);
}
/**
 * Given pointers to starting and ending OA snapshots, add the deltas for each
 * counter to the results.
 */
static void
add_deltas(struct brw_context *brw,
           struct brw_perf_query_object *obj,
           const uint32_t *start,
           const uint32_t *end)
{
   const struct brw_perf_query_info *query = obj->query;
   uint64_t *accumulator = obj->oa.accumulator;
   int idx = 0;
   int i;

   obj->oa.reports_accumulated++;

   switch (query->oa_format) {
   case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
      brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator + idx++); /* timestamp */
      brw_perf_query_accumulate_uint32(start + 3, end + 3, accumulator + idx++); /* clock */

      /* 32x 40bit A counters... */
      for (i = 0; i < 32; i++)
         brw_perf_query_accumulate_uint40(i, start, end, accumulator + idx++);

      /* 4x 32bit A counters... */
      for (i = 0; i < 4; i++)
         brw_perf_query_accumulate_uint32(start + 36 + i, end + 36 + i,
                                          accumulator + idx++);

      /* 8x 32bit B counters + 8x 32bit C counters... */
      for (i = 0; i < 16; i++)
         brw_perf_query_accumulate_uint32(start + 48 + i, end + 48 + i,
                                          accumulator + idx++);
      break;

   case I915_OA_FORMAT_A45_B8_C8:
      brw_perf_query_accumulate_uint32(start + 1, end + 1, accumulator); /* timestamp */

      for (i = 0; i < 61; i++)
         brw_perf_query_accumulate_uint32(start + 3 + i, end + 3 + i, accumulator + 1 + i);
      break;

   default:
      unreachable("Can't accumulate OA counters in unknown format");
   }
}
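
/* A note on the dword offsets used in add_deltas(): dword 1 of each report
 * is the GPU timestamp and, in the A32u40_A4u32_B8_C8 format, dword 3 is
 * the GPU clock count. The 40-bit A counters are split between 32-bit low
 * dwords (4..35) and high bytes further into the report, which is why they
 * go through brw_perf_query_accumulate_uint40() rather than the plain
 * 32-bit helper.
 */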
static bool
inc_n_oa_users(struct brw_context *brw)
{
   if (brw->perfquery.n_oa_users == 0 &&
       drmIoctl(brw->perfquery.oa_stream_fd,
                I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++brw->perfquery.n_oa_users;

   return true;
}
static void
dec_n_oa_users(struct brw_context *brw)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters.  Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --brw->perfquery.n_oa_users;
   if (brw->perfquery.n_oa_users == 0 &&
       drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling i915 perf stream: %m\n");
   }
}
/* In general if we see anything spurious while accumulating results,
 * we don't try to continue accumulating the current query; we scrap
 * anything outstanding and then hope for the best with new queries.
 */
static void
discard_all_queries(struct brw_context *brw)
{
   while (brw->perfquery.unaccumulated_elements) {
      struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];

      obj->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);

      dec_n_oa_users(brw);
   }
}
enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};

static enum OaReadStatus
read_oa_samples_until(struct brw_context *brw,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&brw->perfquery.sample_buffers);
   struct brw_oa_sample_buf *tail_buf =
      exec_node_data(struct brw_oa_sample_buf, tail_node, link);
   uint32_t last_timestamp = tail_buf->last_timestamp;

   while (1) {
      struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
      uint32_t offset;
      int len;

      while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);

         if (len < 0) {
            if (errno == EAGAIN)
               return ((last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
            else {
               DBG("Error reading i915 perf samples: %m\n");
               return OA_READ_STATUS_ERROR;
            }
         } else {
            DBG("Spurious EOF reading i915 perf samples\n");
            return OA_READ_STATUS_ERROR;
         }
      }

      buf->len = len;
      exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);

      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}
/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct brw_context *brw,
                          struct brw_perf_query_object *obj)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulating the results.
    */
   assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
          !brw_bo_busy(obj->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it. */
   if (obj->oa.map == NULL)
      obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(brw, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error. */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}
/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots. For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
static void
accumulate_oa_reports(struct brw_context *brw,
                      struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_perf_query_object *o = &obj->base;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool in_ctx = true;
   int out_duration = 0;

   assert(obj->oa.map != NULL);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }

   obj->oa.hw_id = start[2];

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the brw->perfquery.sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = obj->oa.samples_head->next;

   foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
                           &brw->perfquery.sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool add = true;

            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (brw_timebase_scale(brw, report[1] - start[1]) > 5000000000)
               continue;

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (brw_timebase_scale(brw, report[1] - end[1]) <= 5000000000)
               goto end;

            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas. The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               if (in_ctx && report[2] != obj->oa.hw_id) {
                  DBG("i915 perf: Switch AWAY (observed by ID change)\n");
                  in_ctx = false;
                  out_duration = 0;
               } else if (in_ctx == false && report[2] == obj->oa.hw_id) {
                  DBG("i915 perf: Switch TO\n");
                  in_ctx = true;

                  /* From experimentation in IGT, we found that the OA unit
                   * might label some report as "idle" (using an invalid
                   * context ID), right after a report for a given context.
                   * Deltas generated by those reports actually belong to the
                   * previous context, even though they're not labelled as
                   * such.
                   *
                   * We didn't *really* Switch AWAY in the case that we e.g.
                   * saw a single periodic report while idle...
                   */
                  if (out_duration >= 1)
                     add = false;
               } else if (in_ctx) {
                  assert(report[2] == obj->oa.hw_id);
                  DBG("i915 perf: Continuation IN\n");
               } else {
                  assert(report[2] != obj->oa.hw_id);
                  DBG("i915 perf: Continuation OUT\n");
                  add = false;
                  out_duration++;
               }
            }

            if (add)
               add_deltas(brw, obj, last, report);

            last = report;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:

   add_deltas(brw, obj, last, end);

   DBG("Marking %d accumulated - results gathered\n", o->Id);

   obj->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(brw, obj);
   dec_n_oa_users(brw);

   return;

error:

   discard_all_queries(brw);
}
/******************************************************************************/

static bool
open_i915_perf_oa_stream(struct brw_context *brw,
                         int metrics_set_id,
                         int report_format,
                         int period_exponent,
                         int drm_fd,
                         uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = ARRAY_SIZE(properties) / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening i915 perf OA stream: %m\n");
      return false;
   }

   brw->perfquery.oa_stream_fd = fd;

   brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
   brw->perfquery.current_oa_format = report_format;

   return true;
}
static void
close_perf(struct brw_context *brw,
           const struct brw_perf_query_info *query)
{
   if (brw->perfquery.oa_stream_fd != -1) {
      close(brw->perfquery.oa_stream_fd);
      brw->perfquery.oa_stream_fd = -1;
   }
   if (query->kind == OA_COUNTERS_RAW) {
      struct brw_perf_query_info *raw_query =
         (struct brw_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}
static void
capture_frequency_stat_register(struct brw_context *brw,
                                struct brw_bo *bo,
                                uint32_t bo_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
       !devinfo->is_baytrail && !devinfo->is_cherryview) {
      brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
   } else if (devinfo->gen >= 9) {
      brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
   }
}
/**
 * Driver hook for glBeginPerfQueryINTEL().
 */
static bool
brw_begin_perf_query(struct gl_context *ctx,
                     struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   const struct brw_perf_query_info *query = obj->query;

   /* We can assume the frontend hides mistaken attempts to Begin a
    * query object multiple times before its End. Similarly if an
    * application reuses a query object before results have arrived
    * the frontend will wait for prior results so we don't need
    * to support abandoning in-flight results.
    */
   assert(!o->Used || o->Ready); /* no in-flight query to worry about */

   DBG("Begin(%d)\n", o->Id);

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * Theoretically there could be opportunities to minimize how much of the
    * GPU pipeline is drained, or that we stall for, when we know what specific
    * units the performance counters being queried relate to but we don't
    * currently attempt to be clever here.
    *
    * Note: with our current simple approach here then for back-to-back queries
    * we will redundantly emit duplicate commands to synchronize the command
    * streamer with the rest of the GPU pipeline, but we assume that in HW the
    * second synchronization is effectively a NOOP.
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics then periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   brw_emit_mi_flush(brw);
   switch (query->kind) {
   case OA_COUNTERS:
   case OA_COUNTERS_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = brw_perf_query_get_metric_id(brw, query);

      if (brw->perfquery.oa_stream_fd != -1 &&
          brw->perfquery.current_oa_metrics_set_id != metric_id) {

         if (brw->perfquery.n_oa_users != 0) {
            DBG("WARNING: Begin(%d) failed already using perf config=%i/%"PRIu64"\n",
                o->Id, brw->perfquery.current_oa_metrics_set_id, metric_id);
            return false;
         } else
            close_perf(brw, query);
      }
      /* If the OA counters aren't already on, enable them. */
      if (brw->perfquery.oa_stream_fd == -1) {
         __DRIscreen *screen = brw->screen->driScrnPriv;
         const struct gen_device_info *devinfo = &brw->screen->devinfo;

         /* The period_exponent gives a sampling period as follows:
          *   sample_period = timestamp_period * 2^(period_exponent + 1)
          *
          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
          * ~83ns (GEN8/9).
          *
          * The counter overflow period is derived from the EuActive counter
          * which reads a counter that increments by the number of clock
          * cycles multiplied by the number of EUs. It can be calculated as:
          *
          * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
          *
          * (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We select a sampling period shorter than that overflow period to
          * ensure we cannot see more than 1 counter overflow, otherwise we
          * could lose information.
          */

         int a_counter_in_bits = 32;
         if (devinfo->gen >= 8)
            a_counter_in_bits = 40;

         uint64_t overflow_period = pow(2, a_counter_in_bits) /
            (brw->perfquery.sys_vars.n_eus *
             /* drop 1GHz freq to have units in nanoseconds */
             2);

         DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
             overflow_period, overflow_period / 1000000ul, brw->perfquery.sys_vars.n_eus);

         int period_exponent = 0;
         uint64_t prev_sample_period, next_sample_period;
         for (int e = 0; e < 30; e++) {
            prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
            next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;

            /* Take the previous sampling period, lower than the overflow
             * period.
             */
            if (prev_sample_period < overflow_period &&
                next_sample_period > overflow_period)
               period_exponent = e + 1;
         }

         if (period_exponent == 0) {
            DBG("WARNING: unable to find a sampling exponent\n");
            return false;
         }

         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
             prev_sample_period / 1000000ul);
         if (!open_i915_perf_oa_stream(brw,
                                       metric_id,
                                       query->oa_format,
                                       period_exponent,
                                       screen->fd, /* drm fd */
                                       brw->hw_ctx))
            return false;
      } else {
         assert(brw->perfquery.current_oa_metrics_set_id == metric_id &&
                brw->perfquery.current_oa_format == query->oa_format);
      }

      if (!inc_n_oa_users(brw)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }
      if (obj->oa.bo) {
         brw_bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.bo =
         brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo", MI_RPC_BO_SIZE,
                      BRW_MEMZONE_OTHER);
#ifdef DEBUG
      /* Pre-filling the BO helps debug whether writes landed. */
      void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
      memset(map, 0x80, MI_RPC_BO_SIZE);
      brw_bo_unmap(obj->oa.bo);
#endif
      obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
      brw->perfquery.next_query_start_report_id += 2;

      /* We flush the batchbuffer here to minimize the chances that MI_RPC
       * delimiting commands end up in different batchbuffers. If that's the
       * case, the measurement will include the time it takes for the kernel
       * scheduler to load a new request into the hardware. This is manifested
       * in tools like frameretrace by spikes in the "GPU Core Clocks"
       * counter.
       */
      intel_batchbuffer_flush(brw);

      /* Take a starting OA counter snapshot. */
      brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
                                          obj->oa.begin_report_id);
      capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_START_OFFSET_BYTES);

      ++brw->perfquery.n_active_oa_queries;

      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
      obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);

      struct brw_oa_sample_buf *buf =
         exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      obj->oa.hw_id = 0xffffffff;
      memset(obj->oa.accumulator, 0, sizeof(obj->oa.accumulator));
      obj->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(brw, obj);
      break;
   }
   case PIPELINE_STATS:
      if (obj->pipeline_stats.bo) {
         brw_bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }

      obj->pipeline_stats.bo =
         brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
                      STATS_BO_SIZE, BRW_MEMZONE_OTHER);

      /* Take starting snapshots. */
      snapshot_statistics_registers(brw, obj, 0);

      ++brw->perfquery.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   return true;
}
/**
 * Driver hook for glEndPerfQueryINTEL().
 */
static void
brw_end_perf_query(struct gl_context *ctx,
                   struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);

   DBG("End(%d)\n", o->Id);

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in brw_begin_perf_query for
    * corresponding flush.
    */
   brw_emit_mi_flush(brw);

   switch (obj->query->kind) {
   case OA_COUNTERS:
   case OA_COUNTERS_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled.
       */
      if (!obj->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_END_OFFSET_BYTES);
         brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
                                             MI_RPC_BO_END_OFFSET_BYTES,
                                             obj->oa.begin_report_id + 1);
      }

      --brw->perfquery.n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written
       * to query->oa.bo.
       */
      break;

   case PIPELINE_STATS:
      snapshot_statistics_registers(brw, obj,
                                    STATS_BO_END_OFFSET_BYTES);
      --brw->perfquery.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
static void
brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   struct brw_bo *bo = NULL;

   assert(!o->Ready);

   switch (obj->query->kind) {
   case OA_COUNTERS:
   case OA_COUNTERS_RAW:
      bo = obj->oa.bo;
      break;

   case PIPELINE_STATS:
      bo = obj->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (brw_batch_references(&brw->batch, bo))
      intel_batchbuffer_flush(brw);

   brw_bo_wait_rendering(bo);

   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (obj->query->kind == OA_COUNTERS ||
       obj->query->kind == OA_COUNTERS_RAW) {
      while (!read_oa_samples_for_query(brw, obj))
         ;
   }
}
static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);

   if (o->Ready)
      return true;

   switch (obj->query->kind) {
   case OA_COUNTERS:
   case OA_COUNTERS_RAW:
      return (obj->oa.results_accumulated ||
              (obj->oa.bo &&
               !brw_batch_references(&brw->batch, obj->oa.bo) &&
               !brw_bo_busy(obj->oa.bo) &&
               read_oa_samples_for_query(brw, obj)));
   case PIPELINE_STATS:
      return (obj->pipeline_stats.bo &&
              !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
              !brw_bo_busy(obj->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}
static void
gen8_read_report_clock_ratios(const uint32_t *report,
                              uint64_t *slice_freq_hz,
                              uint64_t *unslice_freq_hz)
{
   /* The lower 16bits of the RPT_ID field of the OA reports contains a
    * snapshot of the bits coming from the RP_FREQ_NORMAL register and is
    * divided this way:
    *
    * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
    * RPT_ID[10:9]:  RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
    * RPT_ID[8:0]:   RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
    *
    * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    *
    * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    */

   uint32_t unslice_freq = report[0] & 0x1ff;
   uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
   uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
   uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);

   *slice_freq_hz = slice_freq * 16666667ULL;
   *unslice_freq_hz = unslice_freq * 16666667ULL;
}
static void
read_slice_unslice_frequencies(struct brw_context *brw,
                               struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *begin_report, *end_report;

   /* Slice/Unslice frequency is only available in the OA reports when the
    * "Disable OA reports due to clock ratio change" field in
    * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
    * global register (see drivers/gpu/drm/i915/i915_perf.c)
    *
    * Documentation says this should be available on Gen9+ but experimentation
    * shows that Gen8 reports similar values, so we enable it there too.
    */
   if (devinfo->gen < 8)
      return;

   begin_report = obj->oa.map;
   end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   gen8_read_report_clock_ratios(begin_report,
                                 &obj->oa.slice_frequency[0],
                                 &obj->oa.unslice_frequency[0]);
   gen8_read_report_clock_ratios(end_report,
                                 &obj->oa.slice_frequency[1],
                                 &obj->oa.unslice_frequency[1]);
}
static void
read_gt_frequency(struct brw_context *brw,
                  struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
      end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));

   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}
static int
get_oa_counter_data(struct brw_context *brw,
                    struct brw_perf_query_object *obj,
                    size_t data_size,
                    uint8_t *data)
{
   const struct brw_perf_query_info *query = obj->query;
   int n_counters = query->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct brw_perf_query_counter *counter = &query->counters[i];
      uint64_t *out_uint64;
      float *out_float;

      if (counter->size) {
         switch (counter->data_type) {
         case GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 = counter->oa_counter_read_uint64(brw, query,
                                                          obj->oa.accumulator);
            break;
         case GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL:
            out_float = (float *)(data + counter->offset);
            *out_float = counter->oa_counter_read_float(brw, query,
                                                        obj->oa.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter->size;
      }
   }

   return written;
}
static int
get_pipeline_stats_data(struct brw_context *brw,
                        struct brw_perf_query_object *obj,
                        size_t data_size,
                        uint8_t *data)
{
   const struct brw_perf_query_info *query = obj->query;
   int n_counters = obj->query->n_counters;
   uint8_t *p = data;

   uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct brw_perf_query_counter *counter = &query->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   brw_bo_unmap(obj->pipeline_stats.bo);

   return p - data;
}
/**
 * Driver hook for glGetPerfQueryDataINTEL().
 */
static void
brw_get_perf_query_data(struct gl_context *ctx,
                        struct gl_perf_query_object *o,
                        GLsizei data_size,
                        GLuint *data,
                        GLuint *bytes_written)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   int written = 0;

   assert(brw_is_perf_query_ready(ctx, o));

   DBG("GetData(%d)\n", o->Id);

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   /* We expect that the frontend only calls this hook when it knows
    * that results are available.
    */
   assert(o->Ready);

   switch (obj->query->kind) {
   case OA_COUNTERS:
   case OA_COUNTERS_RAW:
      if (!obj->oa.results_accumulated) {
         read_gt_frequency(brw, obj);
         read_slice_unslice_frequencies(brw, obj);
         accumulate_oa_reports(brw, obj);
         assert(obj->oa.results_accumulated);

         brw_bo_unmap(obj->oa.bo);
         obj->oa.map = NULL;
      }
      if (obj->query->kind == OA_COUNTERS)
         written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
      else
         written = brw_perf_query_get_mdapi_oa_data(brw, obj, data_size, (uint8_t *)data);
      break;

   case PIPELINE_STATS:
      written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   *bytes_written = written;
}
static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_perf_query_info *query =
      &brw->perfquery.queries[query_index];
   struct brw_perf_query_object *obj =
      calloc(1, sizeof(struct brw_perf_query_object));

   if (!obj)
      return NULL;

   obj->query = query;

   brw->perfquery.n_query_instances++;

   return &obj->base;
}
/**
 * Driver hook for glDeletePerfQueryINTEL().
 */
static void
brw_delete_perf_query(struct gl_context *ctx,
                      struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   assert(!o->Used || o->Ready);

   DBG("Delete(%d)\n", o->Id);

   switch (obj->query->kind) {
   case OA_COUNTERS:
   case OA_COUNTERS_RAW:
      if (obj->oa.bo) {
         if (!obj->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(brw, obj);
            dec_n_oa_users(brw);
         }

         brw_bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.results_accumulated = false;
      break;

   case PIPELINE_STATS:
      if (obj->pipeline_stats.bo) {
         brw_bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--brw->perfquery.n_query_instances == 0) {
      free_sample_bufs(brw);
      close_perf(brw, obj->query);
   }

   free(obj);
}
/******************************************************************************/

static void
init_pipeline_statistic_query_registers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_perf_query_info *query = brw_perf_query_append_query_info(brw);

   query->kind = PIPELINE_STATS;
   query->name = "Pipeline Statistics Registers";
   query->n_counters = 0;
   query->counters =
      rzalloc_array(brw, struct brw_perf_query_counter, MAX_STAT_COUNTERS);

   brw_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
                                          "N vertices submitted");
   brw_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                                          "N primitives submitted");
   brw_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                                          "N vertex shader invocations");

   if (devinfo->gen == 6) {
      brw_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED",
                                       "N geometry shader stream-out primitives (total)");
      brw_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN",
                                       "N geometry shader stream-out primitives (written)");
   } else {
      brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                                       "N stream-out (stream 0) primitives (total)");
      brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                                       "N stream-out (stream 1) primitives (total)");
      brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                                       "N stream-out (stream 2) primitives (total)");
      brw_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                                       "N stream-out (stream 3) primitives (total)");
      brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                                       "N stream-out (stream 0) primitives (written)");
      brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                                       "N stream-out (stream 1) primitives (written)");
      brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                                       "N stream-out (stream 2) primitives (written)");
      brw_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                                       "N stream-out (stream 3) primitives (written)");
   }

   brw_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                                          "N TCS shader invocations");
   brw_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                                          "N TES shader invocations");

   brw_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                                          "N geometry shader invocations");
   brw_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                                          "N geometry shader primitives emitted");

   brw_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                                          "N primitives entering clipping");
   brw_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                                          "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8) {
      brw_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                                       "N fragment shader invocations",
                                       "N fragment shader invocations");
   } else {
      brw_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                                             "N fragment shader invocations");
   }

   brw_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT,
                                          "N z-pass fragments");

   if (devinfo->gen >= 7) {
      brw_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                                             "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}
static void
register_oa_config(struct brw_context *brw,
                   const struct brw_perf_query_info *query,
                   uint64_t config_id)
{
   struct brw_perf_query_info *registered_query =
      brw_perf_query_append_query_info(brw);

   *registered_query = *query;
   registered_query->oa_metrics_set_id = config_id;
   DBG("metric set registered: id = %" PRIu64", guid = %s\n",
       registered_query->oa_metrics_set_id, query->guid);
}
static void
enumerate_sysfs_metrics(struct brw_context *brw)
{
   char buf[256];
   DIR *metricsdir = NULL;
   struct dirent *metric_entry;
   int len;

   len = snprintf(buf, sizeof(buf), "%s/metrics", brw->perfquery.sysfs_dev_dir);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate path to sysfs metrics/ directory\n");
      return;
   }

   metricsdir = opendir(buf);
   if (!metricsdir) {
      DBG("Failed to open %s: %m\n", buf);
      return;
   }

   while ((metric_entry = readdir(metricsdir))) {
      struct hash_entry *entry;

      if ((metric_entry->d_type != DT_DIR &&
           metric_entry->d_type != DT_LNK) ||
          metric_entry->d_name[0] == '.')
         continue;

      DBG("metric set: %s\n", metric_entry->d_name);
      entry = _mesa_hash_table_search(brw->perfquery.oa_metrics_table,
                                      metric_entry->d_name);
      if (entry) {
         uint64_t id;

         len = snprintf(buf, sizeof(buf), "%s/metrics/%s/id",
                        brw->perfquery.sysfs_dev_dir, metric_entry->d_name);
         if (len < 0 || len >= sizeof(buf)) {
            DBG("Failed to concatenate path to sysfs metric id file\n");
            continue;
         }

         if (!read_file_uint64(buf, &id)) {
            DBG("Failed to read metric set id from %s: %m", buf);
            continue;
         }

         register_oa_config(brw, (const struct brw_perf_query_info *)entry->data, id);
      } else
         DBG("metric set not known by mesa (skipping)\n");
   }

   closedir(metricsdir);
}
static bool
kernel_has_dynamic_config_support(struct brw_context *brw)
{
   __DRIscreen *screen = brw->screen->driScrnPriv;

   hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
      struct brw_perf_query_info *query = entry->data;
      char config_path[280];
      uint64_t config_id;

      snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
               brw->perfquery.sysfs_dev_dir, query->guid);

      /* Look for the test config, which we know we can't replace. */
      if (read_file_uint64(config_path, &config_id) && config_id == 1) {
         return drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
                         &config_id) < 0 && errno == ENOENT;
      }
   }

   return false;
}
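
/* How the probe above distinguishes kernels (our reading of the check):
 * removing the immutable test config (id 1) is expected to fail either
 * way, but a kernel that implements DRM_IOCTL_I915_PERF_REMOVE_CONFIG
 * fails with ENOENT, while an older kernel rejects the unknown ioctl with
 * a different errno.
 */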
static void
init_oa_configs(struct brw_context *brw)
{
   __DRIscreen *screen = brw->screen->driScrnPriv;

   hash_table_foreach(brw->perfquery.oa_metrics_table, entry) {
      const struct brw_perf_query_info *query = entry->data;
      struct drm_i915_perf_oa_config config;
      char config_path[280];
      uint64_t config_id;
      int ret;

      snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
               brw->perfquery.sysfs_dev_dir, query->guid);

      /* Don't recreate already loaded configs. */
      if (read_file_uint64(config_path, &config_id)) {
         DBG("metric set: %s (already loaded)\n", query->guid);
         register_oa_config(brw, query, config_id);
         continue;
      }

      memset(&config, 0, sizeof(config));

      memcpy(config.uuid, query->guid, sizeof(config.uuid));

      config.n_mux_regs = query->n_mux_regs;
      config.mux_regs_ptr = (uintptr_t) query->mux_regs;

      config.n_boolean_regs = query->n_b_counter_regs;
      config.boolean_regs_ptr = (uintptr_t) query->b_counter_regs;

      config.n_flex_regs = query->n_flex_regs;
      config.flex_regs_ptr = (uintptr_t) query->flex_regs;

      ret = drmIoctl(screen->fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
      if (ret < 0) {
         DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
             query->name, query->guid, strerror(errno));
         continue;
      }

      register_oa_config(brw, query, ret);
      DBG("metric set: %s (added)\n", query->guid);
   }
}
static bool
query_topology(struct brw_context *brw)
{
   __DRIscreen *screen = brw->screen->driScrnPriv;
   struct drm_i915_query_item item = {
      .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
   };
   struct drm_i915_query query = {
      .num_items = 1,
      .items_ptr = (uintptr_t) &item,
   };

   if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query))
      return false;

   struct drm_i915_query_topology_info *topo_info =
      (struct drm_i915_query_topology_info *) calloc(1, item.length);
   item.data_ptr = (uintptr_t) topo_info;

   if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query) ||
       item.length <= 0) {
      free(topo_info);
      return false;
   }

   gen_device_info_update_from_topology(&brw->screen->devinfo,
                                        topo_info);

   free(topo_info);

   return true;
}
static bool
getparam_topology(struct brw_context *brw)
{
   __DRIscreen *screen = brw->screen->driScrnPriv;
   drm_i915_getparam_t gp;
   int ret;

   int slice_mask = 0;
   gp.param = I915_PARAM_SLICE_MASK;
   gp.value = &slice_mask;
   ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret)
      return false;

   int subslice_mask = 0;
   gp.param = I915_PARAM_SUBSLICE_MASK;
   gp.value = &subslice_mask;
   ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret)
      return false;

   gen_device_info_update_from_masks(&brw->screen->devinfo,
                                     slice_mask,
                                     subslice_mask,
                                     brw->screen->eu_total);

   return true;
}
static void
compute_topology_builtins(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   brw->perfquery.sys_vars.slice_mask = devinfo->slice_masks;
   brw->perfquery.sys_vars.n_eu_slices = devinfo->num_slices;

   /* Walk every byte of the subslice mask array (the bound mirrors the
    * eu_masks loop below).
    */
   for (int i = 0; i < sizeof(devinfo->subslice_masks); i++) {
      brw->perfquery.sys_vars.n_eu_sub_slices +=
         util_bitcount(devinfo->subslice_masks[i]);
   }

   for (int i = 0; i < sizeof(devinfo->eu_masks); i++)
      brw->perfquery.sys_vars.n_eus += util_bitcount(devinfo->eu_masks[i]);

   brw->perfquery.sys_vars.eu_threads_count =
      brw->perfquery.sys_vars.n_eus * devinfo->num_thread_per_eu;

   /* The subslice mask builtin contains bits for all slices. Prior to Gen11
    * it had groups of 3 bits for each slice, on Gen11 it's 8 bits for each
    * slice.
    *
    * Ideally equations would be updated to have a slice/subslice query
    * function/operator.
    */
   brw->perfquery.sys_vars.subslice_mask = 0;

   int bits_per_subslice = devinfo->gen == 11 ? 8 : 3;

   for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
      for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
         if (gen_device_info_subslice_available(devinfo, s, ss))
            brw->perfquery.sys_vars.subslice_mask |=
               1UL << (s * bits_per_subslice + ss);
      }
   }
}
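
/* Worked example of the packing above (hypothetical topology): with two
 * slices each exposing subslices 0 and 1, bits_per_subslice is 3 before
 * Gen11, so the builtin becomes
 *
 *    (0b011 << 0) | (0b011 << 3) = 0x1b
 *
 * whereas the same topology on Gen11 (8 bits per slice) yields
 * (0b011 << 0) | (0b011 << 8) = 0x303.
 */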
static bool
init_oa_sys_vars(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;

   if (!read_sysfs_drm_device_file_uint64(brw, "gt_min_freq_mhz", &min_freq_mhz))
      return false;

   if (!read_sysfs_drm_device_file_uint64(brw, "gt_max_freq_mhz", &max_freq_mhz))
      return false;

   if (!query_topology(brw)) {
      /* We need the i915 query uAPI on CNL+ (kernel 4.17+). */
      if (devinfo->gen >= 10)
         return false;

      if (!getparam_topology(brw)) {
         /* We need the SLICE_MASK/SUBSLICE_MASK on gen8+ (kernel 4.13+). */
         if (devinfo->gen >= 8)
            return false;

         /* On Haswell, the values are already computed for us in
          * gen_device_info.
          */
      }
   }

   memset(&brw->perfquery.sys_vars, 0, sizeof(brw->perfquery.sys_vars));
   brw->perfquery.sys_vars.gt_min_freq = min_freq_mhz * 1000000;
   brw->perfquery.sys_vars.gt_max_freq = max_freq_mhz * 1000000;
   brw->perfquery.sys_vars.timestamp_frequency = devinfo->timestamp_frequency;
   brw->perfquery.sys_vars.revision = devinfo->revision;
   compute_topology_builtins(brw);

   return true;
}
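
/* The sysfs frequencies are expressed in MHz while the OA equations
 * consume Hz, hence the * 1000000 scaling above: e.g. a gt_max_freq_mhz
 * of 1150 is stored as 1150000000 in sys_vars.gt_max_freq.
 */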
static bool
get_sysfs_dev_dir(struct brw_context *brw)
{
   __DRIscreen *screen = brw->screen->driScrnPriv;
   struct stat sb;
   int min, maj;
   DIR *drmdir;
   struct dirent *drm_entry;
   int len;

   brw->perfquery.sysfs_dev_dir[0] = '\0';

   if (fstat(screen->fd, &sb)) {
      DBG("Failed to stat DRM fd\n");
      return false;
   }

   maj = major(sb.st_rdev);
   min = minor(sb.st_rdev);

   if (!S_ISCHR(sb.st_mode)) {
      DBG("DRM fd is not a character device as expected\n");
      return false;
   }

   len = snprintf(brw->perfquery.sysfs_dev_dir,
                  sizeof(brw->perfquery.sysfs_dev_dir),
                  "/sys/dev/char/%d:%d/device/drm", maj, min);
   if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir)) {
      DBG("Failed to concatenate sysfs path to drm device\n");
      return false;
   }

   drmdir = opendir(brw->perfquery.sysfs_dev_dir);
   if (!drmdir) {
      DBG("Failed to open %s: %m\n", brw->perfquery.sysfs_dev_dir);
      return false;
   }

   while ((drm_entry = readdir(drmdir))) {
      if ((drm_entry->d_type == DT_DIR ||
           drm_entry->d_type == DT_LNK) &&
          strncmp(drm_entry->d_name, "card", 4) == 0)
      {
         len = snprintf(brw->perfquery.sysfs_dev_dir,
                        sizeof(brw->perfquery.sysfs_dev_dir),
                        "/sys/dev/char/%d:%d/device/drm/%s",
                        maj, min, drm_entry->d_name);
         closedir(drmdir);
         if (len < 0 || len >= sizeof(brw->perfquery.sysfs_dev_dir))
            return false;
         else
            return true;
      }
   }

   closedir(drmdir);

   DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
       maj, min);

   return false;
}
typedef void (*perf_register_oa_queries_t)(struct brw_context *);

static perf_register_oa_queries_t
get_register_queries_function(const struct gen_device_info *devinfo)
{
   if (devinfo->is_haswell)
      return brw_oa_register_queries_hsw;
   if (devinfo->is_cherryview)
      return brw_oa_register_queries_chv;
   if (devinfo->is_broadwell)
      return brw_oa_register_queries_bdw;
   if (devinfo->is_broxton)
      return brw_oa_register_queries_bxt;
   if (devinfo->is_skylake) {
      if (devinfo->gt == 2)
         return brw_oa_register_queries_sklgt2;
      if (devinfo->gt == 3)
         return brw_oa_register_queries_sklgt3;
      if (devinfo->gt == 4)
         return brw_oa_register_queries_sklgt4;
   }
   if (devinfo->is_kabylake) {
      if (devinfo->gt == 2)
         return brw_oa_register_queries_kblgt2;
      if (devinfo->gt == 3)
         return brw_oa_register_queries_kblgt3;
   }
   if (devinfo->is_geminilake)
      return brw_oa_register_queries_glk;
   if (devinfo->is_coffeelake) {
      if (devinfo->gt == 2)
         return brw_oa_register_queries_cflgt2;
      if (devinfo->gt == 3)
         return brw_oa_register_queries_cflgt3;
   }
   if (devinfo->is_cannonlake)
      return brw_oa_register_queries_cnl;
   if (devinfo->gen == 11)
      return brw_oa_register_queries_icl;

   /* No OA metrics for this platform. */
   return NULL;
}
static int
brw_init_perf_query_info(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   bool i915_perf_oa_available = false;
   struct stat sb;
   perf_register_oa_queries_t oa_register;

   if (brw->perfquery.n_queries)
      return brw->perfquery.n_queries;

   init_pipeline_statistic_query_registers(brw);
   brw_perf_query_register_mdapi_statistic_query(brw);

   oa_register = get_register_queries_function(devinfo);

   /* The existence of this sysctl parameter implies the kernel supports
    * the i915 perf interface.
    */
   if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {

      /* If _paranoid == 1 then on Gen8+ we won't be able to access OA
       * metrics unless running as root.
       */
      if (devinfo->is_haswell)
         i915_perf_oa_available = true;
      else {
         uint64_t paranoid = 1;

         read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid",
                          &paranoid);

         if (paranoid == 0 || geteuid() == 0)
            i915_perf_oa_available = true;
      }
   }

   if (i915_perf_oa_available &&
       oa_register &&
       get_sysfs_dev_dir(brw) &&
       init_oa_sys_vars(brw))
   {
      brw->perfquery.oa_metrics_table =
         _mesa_hash_table_create(NULL, _mesa_key_hash_string,
                                 _mesa_key_string_equal);

      /* Index all the metric sets mesa knows about before looking to see
       * what the kernel is advertising.
       */
      oa_register(brw);

      if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
          kernel_has_dynamic_config_support(brw))
         init_oa_configs(brw);
      else
         enumerate_sysfs_metrics(brw);

      brw_perf_query_register_mdapi_oa_query(brw);
   }

   brw->perfquery.unaccumulated =
      ralloc_array(brw, struct brw_perf_query_object *, 2);
   brw->perfquery.unaccumulated_elements = 0;
   brw->perfquery.unaccumulated_array_size = 2;

   exec_list_make_empty(&brw->perfquery.sample_buffers);
   exec_list_make_empty(&brw->perfquery.free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
   exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);

   brw->perfquery.oa_stream_fd = -1;

   brw->perfquery.next_query_start_report_id = 1000;

   return brw->perfquery.n_queries;
}
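
/* On Gen8+ a system administrator can expose OA metrics to unprivileged
 * processes by clearing the paranoid setting checked above, e.g.:
 *
 *    # echo 0 > /proc/sys/dev/i915/perf_stream_paranoid
 */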
void
brw_init_performance_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
   ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
   ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
   ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
   ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
   ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
   ctx->Driver.EndPerfQuery = brw_end_perf_query;
   ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
   ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
   ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
}
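
/* For reference, applications exercise these hooks through the
 * GL_INTEL_performance_query entry points; a minimal sketch (error
 * handling and counter parsing omitted):
 *
 *    GLuint query_id, handle;
 *    glGetFirstPerfQueryIdINTEL(&query_id);
 *    glCreatePerfQueryINTEL(query_id, &handle);
 *    glBeginPerfQueryINTEL(handle);
 *    ... issue GL work ...
 *    glEndPerfQueryINTEL(handle);
 *    GLuint bytes_written = 0;
 *    char data[4096];
 *    glGetPerfQueryDataINTEL(handle, GL_PERFQUERY_WAIT_INTEL,
 *                            sizeof(data), data, &bytes_written);
 */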