/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file brw_performance_query.c
 *
 * Implementation of the GL_INTEL_performance_query extension.
 *
 * Currently there are two possible counter sources exposed here:
 *
 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
 * that we can snapshot at the beginning and end of a query.
 *
 * On Gen7.5+ we have Observability Architecture counters which are
 * covered in a separate document from the rest of the PRMs. It is available
 * at: https://01.org/linuxgraphics/documentation/driver-documentation-prms
 * => 2013 Intel Core Processor Family => Observability Performance Counters
 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
 * though notably we currently only support OA counters for Haswell+)
 */
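/*
 * As a rough sketch, a client drives the hooks implemented in this file
 * through the INTEL_performance_query API along these lines (error handling
 * omitted; data_size/query_data stand in for a caller-allocated buffer):
 *
 *    GLuint query_id, query_handle, bytes_written;
 *    glGetFirstPerfQueryIdINTEL(&query_id);
 *    glCreatePerfQueryINTEL(query_id, &query_handle);
 *    glBeginPerfQueryINTEL(query_handle);
 *    ... issue the GL commands to be measured ...
 *    glEndPerfQueryINTEL(query_handle);
 *    glGetPerfQueryDataINTEL(query_handle, GL_PERFQUERY_WAIT_INTEL,
 *                            data_size, query_data, &bytes_written);
 */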
/* put before sys/types.h to silence glibc warnings */
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <math.h>
#include <inttypes.h>

#include <xf86drm.h>
#include "drm-uapi/i915_drm.h"

#include "main/hash.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/performance_query.h"

#include "util/bitset.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/u_math.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_performance_query.h"
#include "intel_batchbuffer.h"

#include "perf/gen_perf.h"
#include "perf/gen_perf_mdapi.h"
#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_TRIGGER1       (1<<1)
#define OAREPORT_REASON_TRIGGER2       (1<<2)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_GO_TRANSITION  (1<<4)
#define I915_PERF_OA_SAMPLE_SIZE (8 +   /* drm_i915_perf_record_header */ \
                                  256)  /* OA counter report */
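/* i.e. a single sample is an 8 byte drm_i915_perf_record_header immediately
 * followed by a raw OA counter report of up to 256 bytes; each
 * brw_oa_sample_buf below leaves room for 10 such samples per read().
 */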
/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * brw->perfquery.sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                 |
 *                | ________B_________ _____C___________
 *                | |                | |           |    |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * (The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.)
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [ 0 ][ 0 ]
 *
 * These must be preserved until the leading ref drops to zero:
 *           [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [ 0 ][ 1 ]
 *        ^_______ Add a reference and store pointer to node in
 *                 A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [ 0 ][ 2 ]
 *        ^_______ Add a reference and store pointer to
 *                 node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
 *        ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [ 1 ][ 0 ][ 0 ][ 0 ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                 |
 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
 *        ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [ X ][ X ][ X ][ X ]
 *                     keeping -> [ 1 ][ 0 ][ 0 ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
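/* In code: brw_begin_perf_query() takes the reference (buf->refcount++ on
 * the current tail node) and drop_from_unaccumulated_query_list() drops it
 * before calling reap_old_sample_buffers().
 */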
struct brw_oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};
/** Downcasting convenience macro. */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
   return (struct brw_perf_query_object *) o;
}
#define MI_RPC_BO_SIZE              4096
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
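/* Resulting layout of the MI_RPC buffer object (sizes assume the maximum
 * 256 byte OA report format):
 *
 *   [0,    256)  Begin MI_REPORT_PERF_COUNT snapshot
 *   [2048, 2304) End MI_REPORT_PERF_COUNT snapshot
 *   [3072, 3076) Begin RPSTAT frequency snapshot
 *   [3076, 3080) End RPSTAT frequency snapshot
 */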
/******************************************************************************/
static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o);
static uint64_t
brw_perf_query_get_metric_id(struct brw_context *brw,
                             const struct gen_perf_query_info *query)
{
   /* These queries are known never to change: their config ID has been
    * loaded upon the first query creation. No need to look them up again.
    */
   if (query->kind == GEN_PERF_QUERY_TYPE_OA)
      return query->oa_metrics_set_id;

   assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);

   /* Raw queries can be reprogrammed by an external application/library.
    * When a raw query is used for the first time its id is set to a value !=
    * 0. When it stops being used the id returns to 0. No need to reload the
    * ID when it's already loaded.
    */
   if (query->oa_metrics_set_id != 0) {
      DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
      return query->oa_metrics_set_id;
   }

   struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
   if (!gen_perf_load_metric_id(brw->perfquery.perf, query->guid,
                                &raw_query->oa_metrics_set_id)) {
      DBG("Unable to read query guid=%s ID, falling back to test config\n",
          query->guid);
      raw_query->oa_metrics_set_id = 1ULL;
   } else {
      DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
   }
   return query->oa_metrics_set_id;
}
static void
dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
{
   struct gl_context *ctx = brw_void;
   struct gl_perf_query_object *o = query_void;
   struct brw_perf_query_object *obj = query_void;

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->oa.bo ? "yes," : "no,",
          brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("%4d: %-6s %-8s BO: %-4s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}
static void
dump_perf_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
   _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}

/******************************************************************************/
static struct brw_oa_sample_buf *
get_free_sample_buf(struct brw_context *brw)
{
   struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
   struct brw_oa_sample_buf *buf;

   if (node)
      buf = exec_node_data(struct brw_oa_sample_buf, node, link);
   else {
      buf = ralloc_size(brw, sizeof(*buf));

      exec_node_init(&buf->link);
      buf->refcount = 0;
      buf->len = 0;
   }

   return buf;
}
static void
reap_old_sample_buffers(struct brw_context *brw)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&brw->perfquery.sample_buffers);
   struct brw_oa_sample_buf *tail_buf =
      exec_node_data(struct brw_oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
                           &brw->perfquery.sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
      } else
         return;
   }
}
static void
free_sample_bufs(struct brw_context *brw)
{
   foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
                           &brw->perfquery.free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&brw->perfquery.free_sample_buffers);
}
/******************************************************************************/

/**
 * Driver hook for glGetPerfQueryInfoINTEL().
 */
static void
brw_get_perf_query_info(struct gl_context *ctx,
                        unsigned query_index,
                        const char **name,
                        GLuint *data_size,
                        GLuint *n_counters,
                        GLuint *n_active)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perfquery.perf->queries[query_index];

   *name = query->name;
   *data_size = query->data_size;
   *n_counters = query->n_counters;

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      *n_active = brw->perfquery.n_active_oa_queries;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      *n_active = brw->perfquery.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
static GLuint
gen_counter_type_enum_to_gl_type(enum gen_perf_counter_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_TYPE_EVENT: return GL_PERFQUERY_COUNTER_EVENT_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_NORM: return GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_RAW: return GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_THROUGHPUT: return GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL;
   case GEN_PERF_COUNTER_TYPE_RAW: return GL_PERFQUERY_COUNTER_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_TIMESTAMP: return GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL;
   default:
      unreachable("Unknown counter type");
   }
}
static GLuint
gen_counter_data_type_to_gl_type(enum gen_perf_counter_data_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32: return GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32: return GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64: return GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT: return GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE: return GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL;
   default:
      unreachable("Unknown counter data type");
   }
}
/**
 * Driver hook for glGetPerfCounterInfoINTEL().
 */
static void
brw_get_perf_counter_info(struct gl_context *ctx,
                          unsigned query_index,
                          unsigned counter_index,
                          const char **name,
                          const char **desc,
                          GLuint *offset,
                          GLuint *data_size,
                          GLuint *type_enum,
                          GLuint *data_type_enum,
                          GLuint64 *raw_max)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perfquery.perf->queries[query_index];
   const struct gen_perf_query_counter *counter =
      &query->counters[counter_index];

   *name = counter->name;
   *desc = counter->desc;
   *offset = counter->offset;
   *data_size = gen_perf_query_counter_get_size(counter);
   *type_enum = gen_counter_type_enum_to_gl_type(counter->type);
   *data_type_enum = gen_counter_data_type_to_gl_type(counter->data_type);
   *raw_max = counter->raw_max;
}
/******************************************************************************/

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(struct brw_context *brw,
                              struct brw_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
   const struct gen_perf_query_info *query = obj->query;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);

      brw_store_register_mem64(brw, obj->pipeline_stats.bo,
                               counter->pipeline_stat.reg,
                               offset_in_bytes + i * sizeof(uint64_t));
   }
}
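/* Begin snapshots land at offset 0 and End snapshots at
 * STATS_BO_END_OFFSET_BYTES, one uint64_t slot per counter, which is what
 * lets get_pipeline_stats_data() later compute each result as a simple
 * end[i] - start[i] delta.
 */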
/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct brw_context *brw,
                                struct brw_perf_query_object *obj)
{
   if (brw->perfquery.unaccumulated_elements >=
       brw->perfquery.unaccumulated_array_size)
   {
      brw->perfquery.unaccumulated_array_size *= 1.5;
      brw->perfquery.unaccumulated =
         reralloc(brw, brw->perfquery.unaccumulated,
                  struct brw_perf_query_object *,
                  brw->perfquery.unaccumulated_array_size);
   }

   brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
}
/**
 * Remove a query from the global list of unaccumulated queries once
 * the OA reports associated with the query have been accumulated in
 * accumulate_oa_reports(), or when discarding unwanted query results.
 */
static void
drop_from_unaccumulated_query_list(struct brw_context *brw,
                                   struct brw_perf_query_object *obj)
{
   for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
      if (brw->perfquery.unaccumulated[i] == obj) {
         int last_elt = --brw->perfquery.unaccumulated_elements;

         if (i == last_elt)
            brw->perfquery.unaccumulated[i] = NULL;
         else {
            brw->perfquery.unaccumulated[i] =
               brw->perfquery.unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */

   struct brw_oa_sample_buf *buf =
      exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   obj->oa.samples_head = NULL;

   reap_old_sample_buffers(brw);
}
static bool
inc_n_oa_users(struct brw_context *brw)
{
   if (brw->perfquery.n_oa_users == 0 &&
       drmIoctl(brw->perfquery.oa_stream_fd,
                I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++brw->perfquery.n_oa_users;

   return true;
}
static void
dec_n_oa_users(struct brw_context *brw)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters. Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --brw->perfquery.n_oa_users;
   if (brw->perfquery.n_oa_users == 0 &&
       drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling i915 perf stream: %m\n");
   }
}
/* In general, if we see anything spurious while accumulating results
 * we don't try to continue accumulating the current query, hoping for
 * the best; we scrap anything outstanding and then hope for the best
 * with new queries.
 */
static void
discard_all_queries(struct brw_context *brw)
{
   while (brw->perfquery.unaccumulated_elements) {
      struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];

      obj->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);

      dec_n_oa_users(brw);
   }
}
enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};
static enum OaReadStatus
read_oa_samples_until(struct brw_context *brw,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&brw->perfquery.sample_buffers);
   struct brw_oa_sample_buf *tail_buf =
      exec_node_data(struct brw_oa_sample_buf, tail_node, link);
   uint32_t last_timestamp = tail_buf->last_timestamp;

   while (1) {
      struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
      uint32_t offset;
      int len;

      while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);

         if (len < 0) {
            if (errno == EAGAIN) {
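               /* An assumption worth spelling out: this comparison relies
                * on unsigned 32 bit wraparound, so even if last_timestamp
                * has wrapped past 2^32 the subtraction still yields the
                * number of ticks elapsed since start_timestamp, keeping
                * the window test below valid across an overflow.
                */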
               return ((last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
            } else
               DBG("Error reading i915 perf samples: %m\n");
         } else
            DBG("Spurious EOF reading i915 perf samples\n");

         return OA_READ_STATUS_ERROR;
      }

      buf->len = len;
      exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);

      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}
/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct brw_context *brw,
                          struct brw_perf_query_object *obj)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start to
    * accumulate.
    */
   assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
          !brw_bo_busy(obj->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it.
    */
   if (obj->oa.map == NULL)
      obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(brw, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error.
       */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}
/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots. For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
static void
accumulate_oa_reports(struct brw_context *brw,
                      struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_perf_query_object *o = &obj->base;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool in_ctx = true;
   int out_duration = 0;

   assert(o->Ready);
   assert(obj->oa.map != NULL);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the brw->perfquery.sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = obj->oa.samples_head->next;

   foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
                           &brw->perfquery.sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool add = true;

            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
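            /* As an illustrative note (an inference from the types, not
             * from the PRMs): report[1] and start[1] are raw 32 bit
             * timestamps, so a report written just before the Begin
             * snapshot makes report[1] - start[1] underflow to a huge
             * unsigned delta that scales to well over 5 seconds, which is
             * what lets the check below reject it even across a wrap.
             */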
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - start[1]) > 5000000000) {
               continue;
            }

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - end[1]) <= 5000000000) {
               add = false;
            }

            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas. The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               if (in_ctx && report[2] != obj->oa.result.hw_id) {
                  DBG("i915 perf: Switch AWAY (observed by ID change)\n");
                  in_ctx = false;
                  out_duration = 0;
               } else if (in_ctx == false && report[2] == obj->oa.result.hw_id) {
                  DBG("i915 perf: Switch TO\n");
                  in_ctx = true;

                  /* From experimentation in IGT, we found that the OA unit
                   * might label some report as "idle" (using an invalid
                   * context ID), right after a report for a given context.
                   * Deltas generated by those reports actually belong to the
                   * previous context, even though they're not labelled as
                   * such.
                   *
                   * We didn't *really* Switch AWAY in the case that we e.g.
                   * saw a single periodic report while idle...
                   */
                  if (out_duration >= 1)
                     add = false;
               } else if (in_ctx) {
                  assert(report[2] == obj->oa.result.hw_id);
                  DBG("i915 perf: Continuation IN\n");
               } else {
                  assert(report[2] != obj->oa.result.hw_id);
                  DBG("i915 perf: Continuation OUT\n");
                  add = false;
                  out_duration++;
               }
            }

            if (add) {
               gen_perf_query_result_accumulate(&obj->oa.result, obj->query,
                                                last, report);
            }

            last = report;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

   gen_perf_query_result_accumulate(&obj->oa.result, obj->query,
                                    last, end);

   DBG("Marking %d accumulated - results gathered\n", o->Id);

   obj->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(brw, obj);
   dec_n_oa_users(brw);

   return;

error:
   discard_all_queries(brw);
}
/******************************************************************************/

static bool
open_i915_perf_oa_stream(struct brw_context *brw,
                         int metrics_set_id,
                         int report_format,
                         int period_exponent,
                         int drm_fd,
                         uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = ARRAY_SIZE(properties) / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening i915 perf OA stream: %m\n");
      return false;
   }

   brw->perfquery.oa_stream_fd = fd;

   brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
   brw->perfquery.current_oa_format = report_format;

   return true;
}
static void
close_perf(struct brw_context *brw,
           const struct gen_perf_query_info *query)
{
   if (brw->perfquery.oa_stream_fd != -1) {
      close(brw->perfquery.oa_stream_fd);
      brw->perfquery.oa_stream_fd = -1;
   }
   if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      struct gen_perf_query_info *raw_query =
         (struct gen_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}
static void
capture_frequency_stat_register(struct brw_context *brw,
                                struct brw_bo *bo,
                                uint32_t bo_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
       !devinfo->is_baytrail && !devinfo->is_cherryview) {
      brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
   } else if (devinfo->gen >= 9) {
      brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
   }
}
/**
 * Driver hook for glBeginPerfQueryINTEL().
 */
static bool
brw_begin_perf_query(struct gl_context *ctx,
                     struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   const struct gen_perf_query_info *query = obj->query;

   /* We can assume the frontend hides mistaken attempts to Begin a
    * query object multiple times before its End. Similarly if an
    * application reuses a query object before results have arrived
    * the frontend will wait for prior results so we don't need
    * to support abandoning in-flight results.
    */
   assert(!o->Used || o->Ready); /* no in-flight query to worry about */

   DBG("Begin(%d)\n", o->Id);

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * Theoretically there could be opportunities to minimize how much of the
    * GPU pipeline is drained, or that we stall for, when we know what specific
    * units the performance counters being queried relate to, but we don't
    * currently attempt to be clever here.
    *
    * Note: with our current simple approach here then for back-to-back queries
    * we will redundantly emit duplicate commands to synchronize the command
    * streamer with the rest of the GPU pipeline, but we assume that in HW the
    * second synchronization is effectively a NOOP.
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics then periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   brw_emit_mi_flush(brw);

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = brw_perf_query_get_metric_id(brw, query);

      if (brw->perfquery.oa_stream_fd != -1 &&
          brw->perfquery.current_oa_metrics_set_id != metric_id) {

         if (brw->perfquery.n_oa_users != 0) {
            DBG("WARNING: Begin(%d) failed: already using perf config=%i/%"PRIu64"\n",
                o->Id, brw->perfquery.current_oa_metrics_set_id, metric_id);
            return false;
         } else
            close_perf(brw, query);
      }

      /* If the OA counters aren't already on, enable them. */
      if (brw->perfquery.oa_stream_fd == -1) {
         __DRIscreen *screen = brw->screen->driScrnPriv;
         const struct gen_device_info *devinfo = &brw->screen->devinfo;

         /* The period_exponent gives a sampling period as follows:
          *
          *   sample_period = timestamp_period * 2^(period_exponent + 1)
          *
          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
          * ~83ns (GEN8/9).
          *
          * The counter overflow period is derived from the EuActive counter
          * which reads a counter that increments by the number of clock
          * cycles multiplied by the number of EUs. It can be calculated as:
          *
          *   2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
          *
          * (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We select a sampling period lower than that overflow period to
          * ensure we cannot see more than 1 counter overflow, otherwise we
          * could lose information.
          */
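         /* A worked instance of the formula above (figures are purely
          * illustrative): with 32 bit A counters, 40 EUs and a 1GHz max
          * frequency (i.e. 1 cycle per nanosecond), the counter can
          * overflow after
          *
          *   2^32 / (40 * 2) ~= 53,687,091ns ~= 53ms
          *
          * which is where the ~53ms example quoted above comes from.
          */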
         int a_counter_in_bits = 32;
         if (devinfo->gen >= 8)
            a_counter_in_bits = 40;

         uint64_t overflow_period = pow(2, a_counter_in_bits) /
            (brw->perfquery.perf->sys_vars.n_eus *
             /* drop 1GHz freq to have units in nanoseconds */
             2);

         DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
             overflow_period, overflow_period / 1000000ul,
             brw->perfquery.perf->sys_vars.n_eus);

         int period_exponent = 0;
         uint64_t prev_sample_period, next_sample_period;
         for (int e = 0; e < 30; e++) {
            prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
            next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;

            /* Take the previous sampling period, lower than the overflow
             * period.
             */
            if (prev_sample_period < overflow_period &&
                next_sample_period > overflow_period)
               period_exponent = e + 1;
         }

         if (period_exponent == 0) {
            DBG("WARNING: unable to find a sampling exponent\n");
            return false;
         }

         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
             prev_sample_period / 1000000ul);

         if (!open_i915_perf_oa_stream(brw,
                                       metric_id,
                                       query->oa_format,
                                       period_exponent,
                                       screen->fd, /* drm fd */
                                       brw->hw_ctx))
            return false;
      } else {
         assert(brw->perfquery.current_oa_metrics_set_id == metric_id &&
                brw->perfquery.current_oa_format == query->oa_format);
      }

      if (!inc_n_oa_users(brw)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (obj->oa.bo) {
         brw->perfquery.perf->vtbl.bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.bo =
         brw->perfquery.perf->vtbl.bo_alloc(brw->bufmgr,
                                            "perf. query OA MI_RPC bo",
                                            MI_RPC_BO_SIZE);
#ifdef DEBUG
      /* Pre-filling the BO helps debug whether writes landed. */
      void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
      memset(map, 0x80, MI_RPC_BO_SIZE);
      brw_bo_unmap(obj->oa.bo);
#endif

      obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
      brw->perfquery.next_query_start_report_id += 2;

      /* We flush the batchbuffer here to minimize the chances that MI_RPC
       * delimiting commands end up in different batchbuffers. If that's the
       * case, the measurement will include the time it takes for the kernel
       * scheduler to load a new request into the hardware. This is manifested
       * in tools like frameretrace by spikes in the "GPU Core Clocks"
       * counter.
       */
      intel_batchbuffer_flush(brw);

      /* Take a starting OA counter snapshot. */
      brw->perfquery.perf->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
                                                          obj->oa.begin_report_id);
      capture_frequency_stat_register(brw, obj->oa.bo,
                                      MI_FREQ_START_OFFSET_BYTES);

      ++brw->perfquery.n_active_oa_queries;

      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
      obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);

      struct brw_oa_sample_buf *buf =
         exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      gen_perf_query_result_clear(&obj->oa.result);
      obj->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(brw, obj);
      break;
   }

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (obj->pipeline_stats.bo) {
         brw->perfquery.perf->vtbl.bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }

      obj->pipeline_stats.bo =
         brw->perfquery.perf->vtbl.bo_alloc(brw->bufmgr,
                                            "perf. query pipeline stats bo",
                                            STATS_BO_SIZE);

      /* Take starting snapshots. */
      snapshot_statistics_registers(brw, obj, 0);

      ++brw->perfquery.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   return true;
}
/**
 * Driver hook for glEndPerfQueryINTEL().
 */
static void
brw_end_perf_query(struct gl_context *ctx,
                   struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);

   DBG("End(%d)\n", o->Id);

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in brw_begin_perf_query for
    * corresponding flush.
    */
   brw_emit_mi_flush(brw);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled
       */
      if (!obj->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         capture_frequency_stat_register(brw, obj->oa.bo,
                                         MI_FREQ_END_OFFSET_BYTES);
         brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
                                             MI_RPC_BO_END_OFFSET_BYTES,
                                             obj->oa.begin_report_id + 1);
      }

      --brw->perfquery.n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written
       * to query->oa.bo
       */
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      snapshot_statistics_registers(brw, obj,
                                    STATS_BO_END_OFFSET_BYTES);
      --brw->perfquery.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
static void
brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   struct brw_bo *bo = NULL;

   assert(!o->Ready);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = obj->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = obj->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (brw_batch_references(&brw->batch, bo))
      intel_batchbuffer_flush(brw);

   brw_bo_wait_rendering(bo);

   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (obj->query->kind == GEN_PERF_QUERY_TYPE_OA ||
       obj->query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      while (!read_oa_samples_for_query(brw, obj))
         ;
   }
}
static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);

   if (o->Ready)
      return true;

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (obj->oa.results_accumulated ||
              (obj->oa.bo &&
               !brw_batch_references(&brw->batch, obj->oa.bo) &&
               !brw_bo_busy(obj->oa.bo) &&
               read_oa_samples_for_query(brw, obj)));
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (obj->pipeline_stats.bo &&
              !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
              !brw_bo_busy(obj->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}
static void
read_slice_unslice_frequencies(struct brw_context *brw,
                               struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *begin_report = obj->oa.map,
      *end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   gen_perf_query_result_read_frequencies(&obj->oa.result,
                                          devinfo, begin_report, end_report);
}
static void
read_gt_frequency(struct brw_context *brw,
                  struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
      end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));

   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}
static int
get_oa_counter_data(struct brw_context *brw,
                    struct brw_perf_query_object *obj,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf = brw->perfquery.perf;
   const struct gen_perf_query_info *query = obj->query;
   int n_counters = query->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);

      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf, query,
                                               obj->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf, query,
                                              obj->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter_size;
      }
   }

   return written;
}
static int
get_pipeline_stats_data(struct brw_context *brw,
                        struct brw_perf_query_object *obj,
                        size_t data_size,
                        uint8_t *data)
{
   const struct gen_perf_query_info *query = obj->query;
   int n_counters = obj->query->n_counters;
   uint8_t *p = data;

   uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   brw_bo_unmap(obj->pipeline_stats.bo);

   return p - data;
}
/**
 * Driver hook for glGetPerfQueryDataINTEL().
 */
static void
brw_get_perf_query_data(struct gl_context *ctx,
                        struct gl_perf_query_object *o,
                        GLsizei data_size,
                        GLuint *data,
                        GLuint *bytes_written)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   int written = 0;

   assert(brw_is_perf_query_ready(ctx, o));

   DBG("GetData(%d)\n", o->Id);

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   /* We expect that the frontend only calls this hook when it knows
    * that results are available.
    */
   assert(o->Ready);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!obj->oa.results_accumulated) {
         read_gt_frequency(brw, obj);
         read_slice_unslice_frequencies(brw, obj);
         accumulate_oa_reports(brw, obj);
         assert(obj->oa.results_accumulated);

         brw_bo_unmap(obj->oa.bo);
         obj->oa.map = NULL;
      }
      if (obj->query->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = &brw->screen->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &obj->oa.result,
                                                     obj->oa.gt_frequency[0],
                                                     obj->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}
static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perfquery.perf->queries[query_index];
   struct brw_perf_query_object *obj =
      calloc(1, sizeof(struct brw_perf_query_object));

   if (!obj)
      return NULL;

   obj->query = query;

   brw->perfquery.n_query_instances++;

   return &obj->base;
}
/**
 * Driver hook for glDeletePerfQueryINTEL().
 */
static void
brw_delete_perf_query(struct gl_context *ctx,
                      struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   struct gen_perf_config *perf_cfg = brw->perfquery.perf;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   assert(!o->Used || o->Ready);

   DBG("Delete(%d)\n", o->Id);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (obj->oa.bo) {
         if (!obj->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(brw, obj);
            dec_n_oa_users(brw);
         }

         perf_cfg->vtbl.bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.results_accumulated = false;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (obj->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--brw->perfquery.n_query_instances == 0) {
      free_sample_bufs(brw);
      close_perf(brw, obj->query);
   }

   free(obj);
}
/******************************************************************************/

static void
init_pipeline_statistic_query_registers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gen_perf_config *perf = brw->perfquery.perf;
   struct gen_perf_query_info *query =
      gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
                                          "N vertices submitted");
   gen_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                                          "N primitives submitted");
   gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                                          "N vertex shader invocations");

   if (devinfo->gen == 6) {
      gen_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED",
                                       "N geometry shader stream-out primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN",
                                       "N geometry shader stream-out primitives (written)");
   } else {
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                                       "N stream-out (stream 0) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                                       "N stream-out (stream 1) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                                       "N stream-out (stream 2) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                                       "N stream-out (stream 3) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                                       "N stream-out (stream 0) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                                       "N stream-out (stream 1) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                                       "N stream-out (stream 2) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                                       "N stream-out (stream 3) primitives (written)");
   }

   gen_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                                          "N TCS shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                                          "N TES shader invocations");

   gen_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                                          "N geometry shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                                          "N geometry shader primitives emitted");

   gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                                          "N primitives entering clipping");
   gen_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                                          "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8) {
      gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                                       "N fragment shader invocations",
                                       "N fragment shader invocations");
   } else {
      gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                                             "N fragment shader invocations");
   }

   gen_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT,
                                          "N z-pass fragments");

   if (devinfo->gen >= 7) {
      gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                                             "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}
/* gen_device_info will have incorrect default topology values for
 * unsupported kernels. Verify kernel support to ensure OA metrics are
 * accurate.
 */
static bool
oa_metrics_kernel_support(int fd, const struct gen_device_info *devinfo)
{
   if (devinfo->gen >= 10) {
      /* topology uAPI required for CNL+ (kernel 4.17+); make a call to the
       * api to verify support
       */
      struct drm_i915_query_item item = {
         .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
      };
      struct drm_i915_query query = {
         .num_items = 1,
         .items_ptr = (uintptr_t) &item,
      };

      /* kernel 4.17+ supports the query */
      return drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0;
   }

   if (devinfo->gen >= 8) {
      /* 4.13+ api required for gen8 - gen9 */
      int mask;
      struct drm_i915_getparam gp = {
         .param = I915_PARAM_SLICE_MASK,
         .value = &mask,
      };
      /* kernel 4.13+ supports this parameter */
      return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0;
   }

   if (devinfo->gen == 7)
      /* default topology values are correct for HSW */
      return true;

   /* oa not supported before gen 7 */
   return false;
}
static void *
brw_oa_bo_alloc(void *bufmgr, const char *name, uint64_t size)
{
   return brw_bo_alloc(bufmgr, name, size, BRW_MEMZONE_OTHER);
}
static void
brw_oa_emit_mi_report_perf_count(void *c,
                                 void *bo,
                                 uint32_t offset_in_bytes,
                                 uint32_t report_id)
{
   struct brw_context *ctx = c;
   ctx->vtbl.emit_mi_report_perf_count(ctx,
                                       bo,
                                       offset_in_bytes,
                                       report_id);
}
typedef void (*bo_unreference_t)(void *);
typedef void (*emit_mi_report_t)(void *, void *, uint32_t, uint32_t);
static unsigned
brw_init_perf_query_info(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   __DRIscreen *screen = brw->screen->driScrnPriv;

   if (brw->perfquery.perf)
      return brw->perfquery.perf->n_queries;

   brw->perfquery.perf = gen_perf_new(brw);

   struct gen_perf_config *perf_cfg = brw->perfquery.perf;
   perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc;
   perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference;
   perf_cfg->vtbl.emit_mi_report_perf_count =
      (emit_mi_report_t)brw_oa_emit_mi_report_perf_count;

   init_pipeline_statistic_query_registers(brw);
   brw_perf_query_register_mdapi_statistic_query(brw);

   if ((oa_metrics_kernel_support(screen->fd, devinfo)) &&
       (gen_perf_load_oa_metrics(brw->perfquery.perf, screen->fd, devinfo)))
      brw_perf_query_register_mdapi_oa_query(brw);

   brw->perfquery.unaccumulated =
      ralloc_array(brw, struct brw_perf_query_object *, 2);
   brw->perfquery.unaccumulated_elements = 0;
   brw->perfquery.unaccumulated_array_size = 2;

   exec_list_make_empty(&brw->perfquery.sample_buffers);
   exec_list_make_empty(&brw->perfquery.free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
   exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);

   brw->perfquery.oa_stream_fd = -1;

   brw->perfquery.next_query_start_report_id = 1000;

   return brw->perfquery.perf->n_queries;
}
void
brw_init_performance_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
   ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
   ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
   ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
   ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
   ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
   ctx->Driver.EndPerfQuery = brw_end_perf_query;
   ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
   ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
   ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
}