/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file brw_performance_query.c
 *
 * Implementation of the GL_INTEL_performance_query extension.
 *
 * Currently there are two possible counter sources exposed here:
 *
 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
 * that we can snapshot at the beginning and end of a query.
 *
 * On Gen7.5+ we have Observability Architecture counters which are
 * covered in a separate document from the rest of the PRMs. It is
 * available at:
 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
 * => 2013 Intel Core Processor Family => Observability Performance Counters
 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
 * though notably we currently only support OA counters for Haswell+.)
 */
/* put before sys/types.h to silence glibc warnings */
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <sys/ioctl.h>

#include <xf86drm.h>
#include "drm-uapi/i915_drm.h"

#include "main/hash.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/performance_query.h"

#include "util/bitset.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/u_math.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"

#include "perf/gen_perf.h"
#include "perf/gen_perf_mdapi.h"
#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_TRIGGER1       (1<<1)
#define OAREPORT_REASON_TRIGGER2       (1<<2)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_GO_TRANSITION  (1<<4)
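
/* For illustration only: on Gen8+ the report reason lives in bits
 * [19, 25) of OA report dword 0, so it can be decoded with the
 * mask/shift above. This helper is a sketch and is not used elsewhere
 * in this file; the name is ours.
 */
static inline uint32_t
oa_report_reason(const uint32_t *report)
{
   return (report[0] >> OAREPORT_REASON_SHIFT) & OAREPORT_REASON_MASK;
}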
struct brw_perf_query_object
{
   struct gl_perf_query_object base;
   struct gen_perf_query_object *query;
};
/** Downcasting convenience macro. */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
   return (struct brw_perf_query_object *) o;
}
#define MI_RPC_BO_SIZE              4096
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
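
/* A sketch of the resulting MI_RPC BO layout, as implied by the offsets
 * above and the emit calls below (report sizes depend on the OA report
 * format in use):
 *
 *   0x0000: begin OA report (written by MI_REPORT_PERF_COUNT)
 *   0x0800: end OA report   (written by MI_REPORT_PERF_COUNT)
 *   0x0c00: begin RPSTAT frequency register snapshot (32 bits)
 *   0x0c04: end RPSTAT frequency register snapshot (32 bits)
 */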
/******************************************************************************/
static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o);
static void
dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
{
   struct gl_context *ctx = brw_void;
   struct gl_perf_query_object *o = query_void;
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;

   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->oa.bo ? "yes," : "no,",
          brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("%4d: %-6s %-8s BO: %-4s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}
static void
dump_perf_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       brw->perf_ctx.n_active_oa_queries, brw->perf_ctx.n_oa_users);
   _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}
/**
 * Driver hook for glGetPerfQueryInfoINTEL().
 */
static void
brw_get_perf_query_info(struct gl_context *ctx,
                        unsigned query_index,
                        const char **name,
                        GLuint *data_size,
                        GLuint *n_counters,
                        GLuint *n_active)
{
   struct brw_context *brw = brw_context(ctx);
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   const struct gen_perf_query_info *query =
      &perf_ctx->perf->queries[query_index];

   *name = query->name;
   *data_size = query->data_size;
   *n_counters = query->n_counters;

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      *n_active = perf_ctx->n_active_oa_queries;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      *n_active = perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
static GLuint
gen_counter_type_enum_to_gl_type(enum gen_perf_counter_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_TYPE_EVENT: return GL_PERFQUERY_COUNTER_EVENT_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_NORM: return GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_RAW: return GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_THROUGHPUT: return GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL;
   case GEN_PERF_COUNTER_TYPE_RAW: return GL_PERFQUERY_COUNTER_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_TIMESTAMP: return GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL;
   default:
      unreachable("Unknown counter type");
   }
}
static GLuint
gen_counter_data_type_to_gl_type(enum gen_perf_counter_data_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32: return GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32: return GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64: return GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT: return GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE: return GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL;
   default:
      unreachable("Unknown counter data type");
   }
}
/**
 * Driver hook for glGetPerfCounterInfoINTEL().
 */
static void
brw_get_perf_counter_info(struct gl_context *ctx,
                          unsigned query_index,
                          unsigned counter_index,
                          const char **name,
                          const char **desc,
                          GLuint *offset,
                          GLuint *data_size,
                          GLuint *type_enum,
                          GLuint *data_type_enum,
                          GLuint64 *raw_max)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perf_ctx.perf->queries[query_index];
   const struct gen_perf_query_counter *counter =
      &query->counters[counter_index];

   *name = counter->name;
   *desc = counter->desc;
   *offset = counter->offset;
   *data_size = gen_perf_query_counter_get_size(counter);
   *type_enum = gen_counter_type_enum_to_gl_type(counter->type);
   *data_type_enum = gen_counter_data_type_to_gl_type(counter->data_type);
   *raw_max = counter->raw_max;
}
/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct brw_context *brw,
                                struct gen_perf_query_object *obj)
{
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   if (perf_ctx->unaccumulated_elements >=
       perf_ctx->unaccumulated_array_size)
   {
      perf_ctx->unaccumulated_array_size *= 1.5;
      perf_ctx->unaccumulated =
         reralloc(brw, perf_ctx->unaccumulated,
                  struct gen_perf_query_object *,
                  perf_ctx->unaccumulated_array_size);
   }

   perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
}
/**
 * Remove a query from the global list of unaccumulated queries once
 * the OA reports associated with the query have been accumulated in
 * accumulate_oa_reports(), or when discarding unwanted query results.
 */
static void
drop_from_unaccumulated_query_list(struct brw_context *brw,
                                   struct gen_perf_query_object *obj)
{
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
      if (perf_ctx->unaccumulated[i] == obj) {
         int last_elt = --perf_ctx->unaccumulated_elements;

         if (i == last_elt)
            perf_ctx->unaccumulated[i] = NULL;
         else {
            perf_ctx->unaccumulated[i] =
               perf_ctx->unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */

   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   obj->oa.samples_head = NULL;

   gen_perf_reap_old_sample_buffers(&brw->perf_ctx);
}
static bool
inc_n_oa_users(struct brw_context *brw)
{
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   if (perf_ctx->n_oa_users == 0 &&
       drmIoctl(perf_ctx->oa_stream_fd,
                I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++perf_ctx->n_oa_users;

   return true;
}
static void
dec_n_oa_users(struct brw_context *brw)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters.  Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   --perf_ctx->n_oa_users;
   if (perf_ctx->n_oa_users == 0 &&
       drmIoctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling i915 perf stream: %m\n");
   }
}
/* In general, if we see anything spurious while accumulating results
 * we don't try to continue accumulating the current query: we scrap
 * anything outstanding and then hope for the best with new queries.
 */
static void
discard_all_queries(struct brw_context *brw)
{
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   while (perf_ctx->unaccumulated_elements) {
      struct gen_perf_query_object *obj = perf_ctx->unaccumulated[0];

      obj->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(brw, perf_ctx->unaccumulated[0]);

      dec_n_oa_users(brw);
   }
}
enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};
static enum OaReadStatus
read_oa_samples_until(struct brw_context *brw,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);
   uint32_t last_timestamp = tail_buf->last_timestamp;

   while (1) {
      struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(perf_ctx);
      uint32_t offset;
      int len;

      while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);

         if (len < 0) {
            if (errno == EAGAIN)
               return ((last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
            else {
               DBG("Error reading i915 perf samples: %m\n");
            }
         } else
            DBG("Spurious EOF reading i915 perf samples\n");

         return OA_READ_STATUS_ERROR;
      }

      buf->len = len;
      exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);

      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}
/**
 * Try to read all the reports until either the delimiting timestamp
 * is reached or an error arises.
 */
static bool
read_oa_samples_for_query(struct brw_context *brw,
                          struct gen_perf_query_object *obj)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulating. */
   assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
          !brw_bo_busy(obj->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it. */
   if (obj->oa.map == NULL)
      obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(brw, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error. */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}
/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query().  Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots.  For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
static void
accumulate_oa_reports(struct brw_context *brw,
                      struct brw_perf_query_object *brw_query)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gen_perf_query_object *obj = brw_query->query;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool in_ctx = true;
   int out_duration = 0;

   assert(brw_query->base.Ready);
   assert(obj->oa.map != NULL);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the perf_ctx->sample_buffers list at
    * the time the query started.  Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = obj->oa.samples_head->next;
   foreach_list_typed_from(struct oa_sample_buf, buf, link,
                           &brw->perf_ctx.sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool add = true;
            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - start[1]) > 5000000000) {
               continue;
            }

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - end[1]) <= 5000000000) {
               goto end;
            }
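
            /* An explanatory aside on the checks above: report[1] is an
             * unsigned 32-bit timestamp, so the subtraction wraps around
             * and always yields the forward distance from the marker. A
             * report taken before the marker therefore shows up as a
             * huge distance; scaling that to nanoseconds and comparing
             * against 5 seconds (far longer than any plausible query) is
             * what classifies a report as out of range.
             */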
            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas.  The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               if (in_ctx && report[2] != obj->oa.result.hw_id) {
                  DBG("i915 perf: Switch AWAY (observed by ID change)\n");
                  in_ctx = false;
                  out_duration = 0;
               } else if (in_ctx == false && report[2] == obj->oa.result.hw_id) {
                  DBG("i915 perf: Switch TO\n");
                  in_ctx = true;

                  /* From experimentation in IGT, we found that the OA unit
                   * might label some report as "idle" (using an invalid
                   * context ID), right after a report for a given context.
                   * Deltas generated by those reports actually belong to the
                   * previous context, even though they're not labelled as
                   * such.
                   *
                   * We didn't *really* Switch AWAY in the case that we e.g.
                   * saw a single periodic report while idle...
                   */
                  if (out_duration >= 1)
                     add = false;
               } else if (in_ctx) {
                  assert(report[2] == obj->oa.result.hw_id);
                  DBG("i915 perf: Continuation IN\n");
               } else {
                  assert(report[2] != obj->oa.result.hw_id);
                  DBG("i915 perf: Continuation OUT\n");
                  add = false;
                  out_duration++;
               }
            }

            if (add) {
               gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo,
                                                last, report);
            }

            last = report;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:

   gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo,
                                    last, end);

   DBG("Marking %d accumulated - results gathered\n", brw_query->base.Id);

   obj->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(brw, obj);
   dec_n_oa_users(brw);

   return;

error:

   discard_all_queries(brw);
}
/******************************************************************************/
static bool
open_i915_perf_oa_stream(struct brw_context *brw,
                         int metrics_set_id,
                         int report_format,
                         int period_exponent,
                         int drm_fd,
                         uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = ARRAY_SIZE(properties) / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening i915 perf OA stream: %m\n");
      return false;
   }

   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   perf_ctx->oa_stream_fd = fd;

   perf_ctx->current_oa_metrics_set_id = metrics_set_id;
   perf_ctx->current_oa_format = report_format;

   return true;
}
static void
close_perf(struct brw_context *brw,
           const struct gen_perf_query_info *query)
{
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   if (perf_ctx->oa_stream_fd != -1) {
      close(perf_ctx->oa_stream_fd);
      perf_ctx->oa_stream_fd = -1;
   }
   if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      struct gen_perf_query_info *raw_query =
         (struct gen_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}
static void
capture_frequency_stat_register(struct brw_context *brw,
                                struct brw_bo *bo,
                                uint32_t bo_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
       !devinfo->is_baytrail && !devinfo->is_cherryview) {
      brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
   } else if (devinfo->gen >= 9) {
      brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
   }
}
/**
 * Driver hook for glBeginPerfQueryINTEL().
 */
static bool
brw_begin_perf_query(struct gl_context *ctx,
                     struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   const struct gen_perf_query_info *query = obj->queryinfo;
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We can assume the frontend hides mistaken attempts to Begin a
    * query object multiple times before its End.  Similarly if an
    * application reuses a query object before results have arrived
    * the frontend will wait for prior results so we don't need
    * to support abandoning in-flight results.
    */
   assert(!o->Used || o->Ready); /* no in-flight query to worry about */

   DBG("Begin(%d)\n", o->Id);
   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters
    * relate to.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * Theoretically there could be opportunities to minimize how much of the
    * GPU pipeline is drained, or that we stall for, when we know what specific
    * units the performance counters being queried relate to, but we don't
    * currently attempt to be clever here.
    *
    * Note: with our current simple approach here then for back-to-back queries
    * we will redundantly emit duplicate commands to synchronize the command
    * streamer with the rest of the GPU pipeline, but we assume that in HW the
    * second synchronization is effectively a NOOP.
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results.  Whether that's a good or a
    * bad thing depends on the use case.  For a lower real-time impact while
    * capturing metrics then periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   brw_emit_mi_flush(brw);
   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = gen_perf_query_get_metric_id(perf_ctx->perf, query);

      if (perf_ctx->oa_stream_fd != -1 &&
          perf_ctx->current_oa_metrics_set_id != metric_id) {

         if (perf_ctx->n_oa_users != 0) {
            DBG("WARNING: Begin(%d) failed already using perf config=%i/%"PRIu64"\n",
                o->Id, perf_ctx->current_oa_metrics_set_id, metric_id);
            return false;
         } else
            close_perf(brw, query);
      }
      /* If the OA counters aren't already on, enable them. */
      if (perf_ctx->oa_stream_fd == -1) {
         __DRIscreen *screen = brw->screen->driScrnPriv;
         const struct gen_device_info *devinfo = &brw->screen->devinfo;
         /* The period_exponent gives a sampling period as follows:
          *
          *   sample_period = timestamp_period * 2^(period_exponent + 1)
          *
          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
          * ~83ns (GEN8/9).
          *
          * The counter overflow period is derived from the EuActive counter
          * which reads a counter that increments by the number of clock
          * cycles multiplied by the number of EUs. It can be calculated as:
          *
          *   2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
          *
          * (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We select a sampling period shorter than that overflow period to
          * ensure we cannot see more than 1 counter overflow, otherwise we
          * could lose information.
          */

         int a_counter_in_bits = 32;
         if (devinfo->gen >= 8)
            a_counter_in_bits = 40;
         uint64_t overflow_period = pow(2, a_counter_in_bits) /
            (perf_cfg->sys_vars.n_eus *
             /* drop 1GHz freq to have units in nanoseconds */
             2);

         DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
             overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);
         int period_exponent = 0;
         uint64_t prev_sample_period, next_sample_period;
         for (int e = 0; e < 30; e++) {
            prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
            next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;

            /* Take the previous sampling period, lower than the overflow
             * period.
             */
            if (prev_sample_period < overflow_period &&
                next_sample_period > overflow_period)
               period_exponent = e + 1;
         }

         if (period_exponent == 0) {
            DBG("WARNING: unable to find a sampling exponent\n");
            return false;
         }

         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
             prev_sample_period / 1000000ul);
         if (!open_i915_perf_oa_stream(brw,
                                       metric_id,
                                       query->oa_format,
                                       period_exponent,
                                       screen->fd, /* drm fd */
                                       brw->hw_ctx))
            return false;
      } else {
         assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
                perf_ctx->current_oa_format == query->oa_format);
      }
      if (!inc_n_oa_users(brw)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (obj->oa.bo) {
         perf_cfg->vtbl.bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }
->perf_ctx
.perf
->vtbl
.bo_alloc(brw
->bufmgr
,
906 "perf. query OA MI_RPC bo",
909 /* Pre-filling the BO helps debug whether writes landed. */
910 void *map
= brw_bo_map(brw
, obj
->oa
.bo
, MAP_WRITE
);
911 memset(map
, 0x80, MI_RPC_BO_SIZE
);
912 brw_bo_unmap(obj
->oa
.bo
);
      obj->oa.begin_report_id = perf_ctx->next_query_start_report_id;
      perf_ctx->next_query_start_report_id += 2;
      /* We flush the batchbuffer here to minimize the chances that MI_RPC
       * delimiting commands end up in different batchbuffers. If that's the
       * case, the measurement will include the time it takes for the kernel
       * scheduler to load a new request into the hardware. This is manifested
       * in tools like frameretrace by spikes in the "GPU Core Clocks" counter.
       */
      perf_cfg->vtbl.batchbuffer_flush(brw, __FILE__, __LINE__);

      /* Take a starting OA counter snapshot. */
      perf_cfg->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
                                               obj->oa.begin_report_id);
      perf_cfg->vtbl.capture_frequency_stat_register(brw, obj->oa.bo,
                                                     MI_FREQ_START_OFFSET_BYTES);
      ++perf_ctx->n_active_oa_queries;

      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
      obj->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);

      struct oa_sample_buf *buf =
         exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link);
      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      gen_perf_query_result_clear(&obj->oa.result);
      obj->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(brw, obj);
      break;
   }
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (obj->pipeline_stats.bo) {
         brw->perf_ctx.perf->vtbl.bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }

      obj->pipeline_stats.bo =
         brw->perf_ctx.perf->vtbl.bo_alloc(brw->bufmgr,
                                           "perf. query pipeline stats bo",
                                           STATS_BO_SIZE);

      /* Take starting snapshots. */
      gen_perf_snapshot_statistics_registers(brw, perf_cfg, obj, 0);

      ++perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   return true;
}
/**
 * Driver hook for glEndPerfQueryINTEL().
 */
static void
brw_end_perf_query(struct gl_context *ctx,
                   struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;

   DBG("End(%d)\n", o->Id);

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in brw_begin_perf_query for
    * corresponding flush.
    */
   brw_emit_mi_flush(brw);

   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf.  In this case we mustn't try to emit a closing
       * MI_RPC command in case the OA unit has already been disabled.
       */
      if (!obj->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         perf_cfg->vtbl.capture_frequency_stat_register(brw, obj->oa.bo,
                                                        MI_FREQ_END_OFFSET_BYTES);
         brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
                                             MI_RPC_BO_END_OFFSET_BYTES,
                                             obj->oa.begin_report_id + 1);
      }

      --perf_ctx->n_active_oa_queries;
      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written
       * to query->oa.bo.
       */
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      gen_perf_snapshot_statistics_registers(brw, perf_cfg, obj,
                                             STATS_BO_END_OFFSET_BYTES);
      --perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
static void
brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct brw_bo *bo = NULL;
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   assert(!o->Ready);

   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = obj->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = obj->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bo == NULL)
      return;
   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (brw_batch_references(&brw->batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(brw, __FILE__, __LINE__);

   brw_bo_wait_rendering(bo);

   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
       obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
      while (!read_oa_samples_for_query(brw, obj))
         ;
   }
}
static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;

   if (o->Ready)
      return true;

   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (obj->oa.results_accumulated ||
              (obj->oa.bo &&
               !brw_batch_references(&brw->batch, obj->oa.bo) &&
               !brw_bo_busy(obj->oa.bo) &&
               read_oa_samples_for_query(brw, obj)));
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (obj->pipeline_stats.bo &&
              !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
              !brw_bo_busy(obj->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}
static void
read_slice_unslice_frequencies(struct brw_context *brw,
                               struct gen_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *begin_report = obj->oa.map,
            *end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   gen_perf_query_result_read_frequencies(&obj->oa.result,
                                          devinfo, begin_report, end_report);
}
static void
read_gt_frequency(struct brw_context *brw,
                  struct gen_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
            end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));

   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}
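
/* A quick sanity check on the units above (illustrative): on Gen7/8 the
 * RPSTAT frequency field is in 50MHz units, so a raw field value of 24
 * decodes as 24 * 50 = 1200, i.e. 1.2GHz once scaled to Hz; on Gen9+ the
 * field is in 50/3MHz units instead, hence the extra division by 3.
 */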
static int
get_oa_counter_data(struct brw_context *brw,
                    struct gen_perf_query_object *obj,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf = brw->perf_ctx.perf;
   const struct gen_perf_query_info *query = obj->queryinfo;
   int n_counters = query->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);
      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf, query,
                                               obj->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf, query,
                                              obj->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter_size;
      }
   }

   return written;
}
static int
get_pipeline_stats_data(struct brw_context *brw,
                        struct gen_perf_query_object *obj,
                        size_t data_size,
                        uint8_t *data)
{
   const struct gen_perf_query_info *query = obj->queryinfo;
   int n_counters = obj->queryinfo->n_counters;
   uint8_t *p = data;

   uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];
      uint64_t value = end[i] - start[i];
      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }
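
      /* For example (per the registration in
       * init_pipeline_statistic_query_registers() below): on Haswell
       * and Gen8, PS_INVOCATION_COUNT is added with numerator 1 and
       * denominator 4, so its raw delta gets divided by 4 here.
       */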
      *((uint64_t *)p) = value;
      p += 8;
   }

   brw_bo_unmap(obj->pipeline_stats.bo);

   return p - data;
}
/**
 * Driver hook for glGetPerfQueryDataINTEL().
 */
static void
brw_get_perf_query_data(struct gl_context *ctx,
                        struct gl_perf_query_object *o,
                        GLsizei data_size,
                        GLuint *data,
                        GLuint *bytes_written)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   int written = 0;

   assert(brw_is_perf_query_ready(ctx, o));

   DBG("GetData(%d)\n", o->Id);

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   /* We expect that the frontend only calls this hook when it knows
    * that results are available.
    */
   assert(o->Ready);
   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!obj->oa.results_accumulated) {
         read_gt_frequency(brw, obj);
         read_slice_unslice_frequencies(brw, obj);
         accumulate_oa_reports(brw, brw_query);
         assert(obj->oa.results_accumulated);

         brw_bo_unmap(obj->oa.bo);
         obj->oa.map = NULL;
      }
      if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = &brw->screen->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &obj->oa.result,
                                                     obj->oa.gt_frequency[0],
                                                     obj->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
   if (written)
      *bytes_written = written;
}
static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
   struct brw_context *brw = brw_context(ctx);
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   const struct gen_perf_query_info *queryinfo =
      &perf_ctx->perf->queries[query_index];
   struct gen_perf_query_object *obj =
      calloc(1, sizeof(struct gen_perf_query_object));

   if (unlikely(!obj))
      return NULL;

   obj->queryinfo = queryinfo;

   perf_ctx->n_query_instances++;

   struct brw_perf_query_object *brw_query =
      calloc(1, sizeof(struct brw_perf_query_object));
   if (unlikely(!brw_query))
      return NULL;
   brw_query->query = obj;
   return &brw_query->base;
}
/**
 * Driver hook for glDeletePerfQueryINTEL().
 */
static void
brw_delete_perf_query(struct gl_context *ctx,
                      struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   assert(!o->Used || o->Ready);

   DBG("Delete(%d)\n", o->Id);
   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (obj->oa.bo) {
         if (!obj->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(brw, obj);
            dec_n_oa_users(brw);
         }

         perf_cfg->vtbl.bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.results_accumulated = false;
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (obj->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--perf_ctx->n_query_instances == 0) {
      gen_perf_free_sample_bufs(perf_ctx);
      close_perf(brw, obj->queryinfo);
   }

   free(obj);
   free(brw_query);
}
/******************************************************************************/
static void
init_pipeline_statistic_query_registers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gen_perf_config *perf = brw->perf_ctx.perf;
   struct gen_perf_query_info *query =
      gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
                                          "N vertices submitted");
   gen_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                                          "N primitives submitted");
   gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                                          "N vertex shader invocations");

   if (devinfo->gen == 6) {
      gen_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED",
                                       "N geometry shader stream-out primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN",
                                       "N geometry shader stream-out primitives (written)");
   } else {
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                                       "N stream-out (stream 0) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                                       "N stream-out (stream 1) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                                       "N stream-out (stream 2) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                                       "N stream-out (stream 3) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                                       "N stream-out (stream 0) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                                       "N stream-out (stream 1) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                                       "N stream-out (stream 2) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                                       "N stream-out (stream 3) primitives (written)");
   }

   gen_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                                          "N TCS shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                                          "N TES shader invocations");

   gen_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                                          "N geometry shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                                          "N geometry shader primitives emitted");

   gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                                          "N primitives entering clipping");
   gen_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                                          "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8) {
      gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                                       "N fragment shader invocations",
                                       "N fragment shader invocations");
   } else {
      gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                                             "N fragment shader invocations");
   }

   gen_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT,
                                          "N z-pass fragments");

   if (devinfo->gen >= 7) {
      gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                                             "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}
/* gen_device_info will have incorrect default topology values for
 * unsupported kernels, so verify kernel support to ensure OA metrics
 * are accurate.
 */
static bool
oa_metrics_kernel_support(int fd, const struct gen_device_info *devinfo)
{
   if (devinfo->gen >= 10) {
      /* topology uAPI required for CNL+ (kernel 4.17+): make a call to the
       * api to verify support
       */
      struct drm_i915_query_item item = {
         .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
      };
      struct drm_i915_query query = {
         .num_items = 1,
         .items_ptr = (uintptr_t) &item,
      };

      /* kernel 4.17+ supports the query */
      return drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0;
   }

   if (devinfo->gen >= 8) {
      /* 4.13+ api required for gen8 - gen9 */
      int mask;
      struct drm_i915_getparam gp = {
         .param = I915_PARAM_SLICE_MASK,
         .value = &mask,
      };
      /* kernel 4.13+ supports this parameter */
      return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0;
   }

   if (devinfo->gen == 7)
      /* default topology values are correct for HSW */
      return true;

   /* OA not supported before gen 7 */
   return false;
}
static void *
brw_oa_bo_alloc(void *bufmgr, const char *name, uint64_t size)
{
   return brw_bo_alloc(bufmgr, name, size, BRW_MEMZONE_OTHER);
}
static void
brw_oa_emit_mi_report_perf_count(void *c,
                                 void *bo,
                                 uint32_t offset_in_bytes,
                                 uint32_t report_id)
{
   struct brw_context *ctx = c;
   ctx->vtbl.emit_mi_report_perf_count(ctx,
                                       bo,
                                       offset_in_bytes,
                                       report_id);
}
)(void *);
1544 typedef void (* emit_mi_report_t
)(void *, void *, uint32_t, uint32_t);
static void
brw_oa_batchbuffer_flush(void *c, const char *file, int line)
{
   struct brw_context *ctx = c;
   _intel_batchbuffer_flush_fence(ctx, -1, NULL, file, line);
}
typedef void (*capture_frequency_stat_register_t)(void *, void *, uint32_t);
typedef void (*store_register_mem64_t)(void *ctx, void *bo,
                                       uint32_t reg, uint32_t offset);
static unsigned
brw_init_perf_query_info(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   __DRIscreen *screen = brw->screen->driScrnPriv;

   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   if (perf_ctx->perf)
      return perf_ctx->perf->n_queries;
   perf_ctx->perf = gen_perf_new(brw);
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc;
   perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference;
   perf_cfg->vtbl.emit_mi_report_perf_count =
      (emit_mi_report_t)brw_oa_emit_mi_report_perf_count;
   perf_cfg->vtbl.batchbuffer_flush = brw_oa_batchbuffer_flush;
   perf_cfg->vtbl.capture_frequency_stat_register =
      (capture_frequency_stat_register_t) capture_frequency_stat_register;
   perf_cfg->vtbl.store_register_mem64 =
      (store_register_mem64_t) brw_store_register_mem64;

   init_pipeline_statistic_query_registers(brw);
   gen_perf_query_register_mdapi_statistic_query(&brw->screen->devinfo,
                                                 brw->perf_ctx.perf);
   if ((oa_metrics_kernel_support(screen->fd, devinfo)) &&
       (gen_perf_load_oa_metrics(perf_cfg, screen->fd, devinfo)))
      gen_perf_query_register_mdapi_oa_query(devinfo, perf_cfg);

   perf_ctx->unaccumulated =
      ralloc_array(brw, struct gen_perf_query_object *, 2);
   perf_ctx->unaccumulated_elements = 0;
   perf_ctx->unaccumulated_array_size = 2;

   exec_list_make_empty(&perf_ctx->sample_buffers);
   exec_list_make_empty(&perf_ctx->free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty, so we add an empty head, so that when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(&brw->perf_ctx);
   exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);

   perf_ctx->oa_stream_fd = -1;

   perf_ctx->next_query_start_report_id = 1000;

   return perf_cfg->n_queries;
}
void
brw_init_performance_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
   ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
   ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
   ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
   ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
   ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
   ctx->Driver.EndPerfQuery = brw_end_perf_query;
   ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
   ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
   ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
}