/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file brw_performance_query.c
 *
 * Implementation of the GL_INTEL_performance_query extension.
 *
 * Currently there are two possible counter sources exposed here:
 *
 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
 * that we can snapshot at the beginning and end of a query.
 *
 * On Gen7.5+ we have Observability Architecture counters which are
 * covered in a separate document from the rest of the PRMs. It is available
 * at: https://01.org/linuxgraphics/documentation/driver-documentation-prms
 * => 2013 Intel Core Processor Family => Observability Performance Counters
 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
 * though notably we currently only support OA counters for Haswell+.)
 */
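
/* For orientation, an application typically drives this implementation
 * through the extension's entry points roughly as follows (a minimal
 * sketch; counter enumeration and error handling omitted):
 *
 *    GLuint query_id, handle;
 *    glGetFirstPerfQueryIdINTEL(&query_id);
 *    glCreatePerfQueryINTEL(query_id, &handle);
 *
 *    glBeginPerfQueryINTEL(handle);
 *    ... draw calls to be measured ...
 *    glEndPerfQueryINTEL(handle);
 *
 *    GLuint bytes_written;
 *    char data[MAX_QUERY_DATA_SIZE];  // sized via glGetPerfQueryInfoINTEL()
 *    glGetPerfQueryDataINTEL(handle, GL_PERFQUERY_WAIT_INTEL,
 *                            sizeof(data), data, &bytes_written);
 *
 *    glDeletePerfQueryINTEL(handle);
 *
 * The driver hooks below (brw_begin_perf_query() etc.) back each of these
 * entry points. MAX_QUERY_DATA_SIZE is just a placeholder here for the
 * data_size reported by glGetPerfQueryInfoINTEL().
 */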

/* put before sys/types.h to silence glibc warnings */
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/types.h>
#include <sys/ioctl.h>

#include "drm-uapi/i915_drm.h"

#include "main/hash.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/performance_query.h"

#include "util/bitset.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/u_math.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_performance_query.h"
#include "intel_batchbuffer.h"

#include "perf/gen_perf.h"
#include "perf/gen_perf_mdapi.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_TRIGGER1       (1<<1)
#define OAREPORT_REASON_TRIGGER2       (1<<2)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_GO_TRANSITION  (1<<4)
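
/* As an illustration of the definitions above (not used by the driver
 * paths below): the generation reason of an OA report sits in bits
 * 19..24 of the report's first dword, so it could be decoded as:
 *
 *    uint32_t reason = (report[0] >> OAREPORT_REASON_SHIFT) &
 *                      OAREPORT_REASON_MASK;
 *    bool is_timer_report = reason & OAREPORT_REASON_TIMER;
 */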

/** Downcasting convenience macro. */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
   return (struct brw_perf_query_object *) o;
}

#define MI_RPC_BO_SIZE              4096
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
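
/* Layout of the single 4KB buffer backing an OA query (derived from the
 * offsets above and their uses below): the begin MI_RPC report is written
 * at offset 0, the end MI_RPC report at 2048, and the begin/end RPSTAT
 * frequency snapshots at 3072 and 3076 respectively. */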

/******************************************************************************/

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o);

static void
dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
{
   struct gl_context *ctx = brw_void;
   struct gl_perf_query_object *o = query_void;
   struct brw_perf_query_object *obj = query_void;

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->oa.bo ? "yes," : "no,",
          brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("%4d: %-6s %-8s BO: %-4s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}

static void
dump_perf_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       brw->perf_ctx.n_active_oa_queries, brw->perf_ctx.n_oa_users);
   _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}

/******************************************************************************/

static void
reap_old_sample_buffers(struct brw_context *brw)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&brw->perf_ctx.sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &brw->perf_ctx.sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&brw->perf_ctx.free_sample_buffers, &buf->link);
      } else
         return;
   }
}

static void
free_sample_bufs(struct brw_context *brw)
{
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &brw->perf_ctx.free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&brw->perf_ctx.free_sample_buffers);
}

/******************************************************************************/

/**
 * Driver hook for glGetPerfQueryInfoINTEL().
 */
static void
brw_get_perf_query_info(struct gl_context *ctx,
                        unsigned query_index,
                        const char **name,
                        GLuint *data_size,
                        GLuint *n_counters,
                        GLuint *n_active)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perf_ctx.perf->queries[query_index];

   *name = query->name;
   *data_size = query->data_size;
   *n_counters = query->n_counters;

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      *n_active = brw->perf_ctx.n_active_oa_queries;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      *n_active = brw->perf_ctx.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}

static GLuint
gen_counter_type_enum_to_gl_type(enum gen_perf_counter_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_TYPE_EVENT: return GL_PERFQUERY_COUNTER_EVENT_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_NORM: return GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_RAW: return GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_THROUGHPUT: return GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL;
   case GEN_PERF_COUNTER_TYPE_RAW: return GL_PERFQUERY_COUNTER_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_TIMESTAMP: return GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL;
   default:
      unreachable("Unknown counter type");
   }
}

static GLuint
gen_counter_data_type_to_gl_type(enum gen_perf_counter_data_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32: return GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32: return GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64: return GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT: return GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE: return GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL;
   default:
      unreachable("Unknown counter data type");
   }
}

/**
 * Driver hook for glGetPerfCounterInfoINTEL().
 */
static void
brw_get_perf_counter_info(struct gl_context *ctx,
                          unsigned query_index,
                          unsigned counter_index,
                          const char **name,
                          const char **desc,
                          GLuint *offset,
                          GLuint *data_size,
                          GLuint *type_enum,
                          GLuint *data_type_enum,
                          GLuint64 *raw_max)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perf_ctx.perf->queries[query_index];
   const struct gen_perf_query_counter *counter =
      &query->counters[counter_index];

   *name = counter->name;
   *desc = counter->desc;
   *offset = counter->offset;
   *data_size = gen_perf_query_counter_get_size(counter);
   *type_enum = gen_counter_type_enum_to_gl_type(counter->type);
   *data_type_enum = gen_counter_data_type_to_gl_type(counter->data_type);
   *raw_max = counter->raw_max;
}

/******************************************************************************/

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(struct brw_context *brw,
                              struct brw_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
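   /* Called with offset_in_bytes == 0 by brw_begin_perf_query() and with
    * STATS_BO_END_OFFSET_BYTES by brw_end_perf_query(), so each counter
    * register ends up with a begin/end pair of 64-bit snapshots in the
    * pipeline_stats BO. */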
   const struct gen_perf_query_info *query = obj->query;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);

      brw_store_register_mem64(brw, obj->pipeline_stats.bo,
                               counter->pipeline_stat.reg,
                               offset_in_bytes + i * sizeof(uint64_t));
   }
}

/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct brw_context *brw,
                                struct brw_perf_query_object *obj)
{
   if (brw->perf_ctx.unaccumulated_elements >=
       brw->perf_ctx.unaccumulated_array_size)
   {
      brw->perf_ctx.unaccumulated_array_size *= 1.5;
      brw->perf_ctx.unaccumulated =
         reralloc(brw, brw->perf_ctx.unaccumulated,
                  struct brw_perf_query_object *,
                  brw->perf_ctx.unaccumulated_array_size);
   }

   brw->perf_ctx.unaccumulated[brw->perf_ctx.unaccumulated_elements++] = obj;
}

/**
 * Remove a query from the global list of unaccumulated queries, either
 * after successfully accumulating the OA reports associated with the
 * query in accumulate_oa_reports() or when discarding unwanted query
 * results.
 */
static void
drop_from_unaccumulated_query_list(struct brw_context *brw,
                                   struct brw_perf_query_object *obj)
{
   for (int i = 0; i < brw->perf_ctx.unaccumulated_elements; i++) {
      if (brw->perf_ctx.unaccumulated[i] == obj) {
         int last_elt = --brw->perf_ctx.unaccumulated_elements;

         if (i == last_elt)
            brw->perf_ctx.unaccumulated[i] = NULL;
         else {
            brw->perf_ctx.unaccumulated[i] =
               brw->perf_ctx.unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */
   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   obj->oa.samples_head = NULL;

   reap_old_sample_buffers(brw);
}

static bool
inc_n_oa_users(struct brw_context *brw)
{
   if (brw->perf_ctx.n_oa_users == 0 &&
       drmIoctl(brw->perf_ctx.oa_stream_fd,
                I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++brw->perf_ctx.n_oa_users;

   return true;
}

static void
dec_n_oa_users(struct brw_context *brw)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters. Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --brw->perf_ctx.n_oa_users;
   if (brw->perf_ctx.n_oa_users == 0 &&
       drmIoctl(brw->perf_ctx.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling i915 perf stream: %m\n");
   }
}

/* In general, if we see anything spurious while accumulating results
 * we don't try to continue accumulating the current query, hoping for
 * the best; we scrap anything outstanding and then hope for the best
 * with new queries.
 */
static void
discard_all_queries(struct brw_context *brw)
{
   while (brw->perf_ctx.unaccumulated_elements) {
      struct brw_perf_query_object *obj = brw->perf_ctx.unaccumulated[0];

      obj->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(brw, brw->perf_ctx.unaccumulated[0]);

      dec_n_oa_users(brw);
   }
}

enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};

static enum OaReadStatus
read_oa_samples_until(struct brw_context *brw,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&brw->perf_ctx.sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);
   uint32_t last_timestamp = tail_buf->last_timestamp;

   while (1) {
      struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(&brw->perf_ctx);
      uint32_t offset;
      int len;

      while ((len = read(brw->perf_ctx.oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&brw->perf_ctx.free_sample_buffers, &buf->link);
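
         /* The stream was opened with I915_PERF_FLAG_FD_NONBLOCK, so
          * EAGAIN simply means no more data is currently available.
          * Whether we are actually done is decided by comparing
          * timestamps; the subtractions below are done in uint32_t so
          * the comparison stays correct across 32-bit timestamp
          * wraparound. */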
         if (len < 0) {
            if (errno == EAGAIN)
               return ((last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
            else
               DBG("Error reading i915 perf samples: %m\n");
         } else
            DBG("Spurious EOF reading i915 perf samples\n");

         return OA_READ_STATUS_ERROR;
      }

      buf->len = len;
      exec_list_push_tail(&brw->perf_ctx.sample_buffers, &buf->link);

      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}

/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct brw_context *brw,
                          struct brw_perf_query_object *obj)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulate. */
   assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
          !brw_bo_busy(obj->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it. */
   if (obj->oa.map == NULL)
      obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(brw, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error. */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}

/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots. For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
static void
accumulate_oa_reports(struct brw_context *brw,
                      struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_perf_query_object *o = &obj->base;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool in_ctx = true;
   int out_duration = 0;

   assert(o->Ready);
   assert(obj->oa.map != NULL);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the brw->perf_ctx.sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = obj->oa.samples_head->next;

   foreach_list_typed_from(struct oa_sample_buf, buf, link,
                           &brw->perf_ctx.sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool add = true;

            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - start[1]) > 5000000000) {
               continue;
            }

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - end[1]) <= 5000000000) {
               goto end;
            }

            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas. The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               if (in_ctx && report[2] != obj->oa.result.hw_id) {
                  DBG("i915 perf: Switch AWAY (observed by ID change)\n");
                  in_ctx = false;
                  out_duration = 0;
               } else if (in_ctx == false && report[2] == obj->oa.result.hw_id) {
                  DBG("i915 perf: Switch TO\n");
                  in_ctx = true;

                  /* From experimentation in IGT, we found that the OA unit
                   * might label some report as "idle" (using an invalid
                   * context ID), right after a report for a given context.
                   * Deltas generated by those reports actually belong to the
                   * previous context, even though they're not labelled as
                   * such.
                   *
                   * We didn't *really* Switch AWAY in the case that we e.g.
                   * saw a single periodic report while idle...
                   */
                  if (out_duration >= 1)
                     add = false;
               } else if (in_ctx) {
                  assert(report[2] == obj->oa.result.hw_id);
                  DBG("i915 perf: Continuation IN\n");
               } else {
                  assert(report[2] != obj->oa.result.hw_id);
                  DBG("i915 perf: Continuation OUT\n");
                  add = false;
                  out_duration++;
               }
            }

            if (add) {
               gen_perf_query_result_accumulate(&obj->oa.result, obj->query,
                                                last, report);
            }

            last = report;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:

   gen_perf_query_result_accumulate(&obj->oa.result, obj->query,
                                    last, end);

   DBG("Marking %d accumulated - results gathered\n", o->Id);

   obj->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(brw, obj);
   dec_n_oa_users(brw);

   return;

error:
   discard_all_queries(brw);
}

/******************************************************************************/

static bool
open_i915_perf_oa_stream(struct brw_context *brw,
                         int metrics_set_id,
                         int report_format,
                         int period_exponent,
                         int drm_fd,
                         uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
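
   /* Note: the properties array is a flattened list of key/value pairs,
    * which is why num_properties below is ARRAY_SIZE(properties) / 2. */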
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = ARRAY_SIZE(properties) / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening i915 perf OA stream: %m\n");
      return false;
   }

   brw->perf_ctx.oa_stream_fd = fd;

   brw->perf_ctx.current_oa_metrics_set_id = metrics_set_id;
   brw->perf_ctx.current_oa_format = report_format;

   return true;
}

static void
close_perf(struct brw_context *brw,
           const struct gen_perf_query_info *query)
{
   if (brw->perf_ctx.oa_stream_fd != -1) {
      close(brw->perf_ctx.oa_stream_fd);
      brw->perf_ctx.oa_stream_fd = -1;
   }
   if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      struct gen_perf_query_info *raw_query =
         (struct gen_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}

static void
capture_frequency_stat_register(struct brw_context *brw,
                                struct brw_bo *bo,
                                uint32_t bo_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
       !devinfo->is_baytrail && !devinfo->is_cherryview) {
      brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
   } else if (devinfo->gen >= 9) {
      brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
   }
}

/**
 * Driver hook for glBeginPerfQueryINTEL().
 */
static bool
brw_begin_perf_query(struct gl_context *ctx,
                     struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   const struct gen_perf_query_info *query = obj->query;
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   /* We can assume the frontend hides mistaken attempts to Begin a
    * query object multiple times before its End. Similarly if an
    * application reuses a query object before results have arrived
    * the frontend will wait for prior results so we don't need
    * to support abandoning in-flight results.
    */
   assert(!o->Used || o->Ready); /* no in-flight query to worry about */

   DBG("Begin(%d)\n", o->Id);

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * Theoretically there could be opportunities to minimize how much of the
    * GPU pipeline is drained, or that we stall for, when we know what specific
    * units the performance counters being queried relate to but we don't
    * currently attempt to be clever here.
    *
    * Note: with our current simple approach here then for back-to-back queries
    * we will redundantly emit duplicate commands to synchronize the command
    * streamer with the rest of the GPU pipeline, but we assume that in HW the
    * second synchronization is effectively a NOOP.
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics then periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   brw_emit_mi_flush(brw);

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = gen_perf_query_get_metric_id(brw->perf_ctx.perf, query);

      if (brw->perf_ctx.oa_stream_fd != -1 &&
          brw->perf_ctx.current_oa_metrics_set_id != metric_id) {

         if (brw->perf_ctx.n_oa_users != 0) {
            DBG("WARNING: Begin(%d) failed already using perf config=%i/%"PRIu64"\n",
                o->Id, brw->perf_ctx.current_oa_metrics_set_id, metric_id);
            return false;
         } else
            close_perf(brw, query);
      }

      /* If the OA counters aren't already on, enable them. */
      if (brw->perf_ctx.oa_stream_fd == -1) {
         __DRIscreen *screen = brw->screen->driScrnPriv;
         const struct gen_device_info *devinfo = &brw->screen->devinfo;

         /* The period_exponent gives a sampling period as follows:
          *   sample_period = timestamp_period * 2^(period_exponent + 1)
          *
          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
          * ~83ns (GEN8/9).
          *
          * The counter overflow period is derived from the EuActive counter
          * which reads a counter that increments by the number of clock
          * cycles multiplied by the number of EUs. It can be calculated as:
          *
          * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
          *
          * (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We select a sampling period inferior to that overflow period to
          * ensure we cannot see more than 1 counter overflow, otherwise we
          * could lose information.
          */
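         /* For example, with the 80ns timestamp period of HSW, a
          * period_exponent of 16 would give a sampling period of
          * 80ns * 2^17 ~= 10.5ms, comfortably below a ~53ms overflow
          * period (illustrative numbers only).
          */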

         int a_counter_in_bits = 32;
         if (devinfo->gen >= 8)
            a_counter_in_bits = 40;

         uint64_t overflow_period = pow(2, a_counter_in_bits) /
            (brw->perf_ctx.perf->sys_vars.n_eus *
             /* drop 1GHz freq to have units in nanoseconds */
             2);

         DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
             overflow_period, overflow_period / 1000000ul,
             brw->perf_ctx.perf->sys_vars.n_eus);

         int period_exponent = 0;
         uint64_t prev_sample_period, next_sample_period;
         for (int e = 0; e < 30; e++) {
            prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
            next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;

            /* Take the previous sampling period, lower than the overflow
             * period.
             */
            if (prev_sample_period < overflow_period &&
                next_sample_period > overflow_period)
               period_exponent = e + 1;
         }

         if (period_exponent == 0) {
            DBG("WARNING: unable to find a sampling exponent\n");
            return false;
         }

         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
             prev_sample_period / 1000000ul);

         if (!open_i915_perf_oa_stream(brw,
                                       metric_id,
                                       query->oa_format,
                                       period_exponent,
                                       screen->fd, /* drm fd */
                                       brw->hw_ctx))
            return false;
      } else {
         assert(brw->perf_ctx.current_oa_metrics_set_id == metric_id &&
                brw->perf_ctx.current_oa_format == query->oa_format);
      }

      if (!inc_n_oa_users(brw)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (obj->oa.bo) {
         brw->perf_ctx.perf->vtbl.bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.bo =
         brw->perf_ctx.perf->vtbl.bo_alloc(brw->bufmgr,
                                           "perf. query OA MI_RPC bo",
                                           MI_RPC_BO_SIZE);
956 void *map
= brw_bo_map(brw
, obj
->oa
.bo
, MAP_WRITE
);
957 memset(map
, 0x80, MI_RPC_BO_SIZE
);
958 brw_bo_unmap(obj
->oa
.bo
);

      obj->oa.begin_report_id = brw->perf_ctx.next_query_start_report_id;
      brw->perf_ctx.next_query_start_report_id += 2;

      /* We flush the batchbuffer here to minimize the chances that MI_RPC
       * delimiting commands end up in different batchbuffers. If that's the
       * case, the measurement will include the time it takes for the kernel
       * scheduler to load a new request into the hardware. This is manifested
       * in tools like frameretrace by spikes in the "GPU Core Clocks"
       * counter.
       */
      perf_cfg->vtbl.batchbuffer_flush(brw, __FILE__, __LINE__);

      /* Take a starting OA counter snapshot. */
      brw->perf_ctx.perf->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
                                                         obj->oa.begin_report_id);
      perf_cfg->vtbl.capture_frequency_stat_register(brw, obj->oa.bo,
                                                     MI_FREQ_START_OFFSET_BYTES);

      ++brw->perf_ctx.n_active_oa_queries;

      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&brw->perf_ctx.sample_buffers));
      obj->oa.samples_head = exec_list_get_tail(&brw->perf_ctx.sample_buffers);

      struct oa_sample_buf *buf =
         exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      gen_perf_query_result_clear(&obj->oa.result);
      obj->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(brw, obj);
      break;
   }

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (obj->pipeline_stats.bo) {
         brw->perf_ctx.perf->vtbl.bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }

      obj->pipeline_stats.bo =
         brw->perf_ctx.perf->vtbl.bo_alloc(brw->bufmgr,
                                           "perf. query pipeline stats bo",
                                           STATS_BO_SIZE);

      /* Take starting snapshots. */
      snapshot_statistics_registers(brw, obj, 0);

      ++brw->perf_ctx.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   return true;
}

/**
 * Driver hook for glEndPerfQueryINTEL().
 */
static void
brw_end_perf_query(struct gl_context *ctx,
                   struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   DBG("End(%d)\n", o->Id);

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in brw_begin_perf_query for
    * corresponding flush.
    */
   brw_emit_mi_flush(brw);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled.
       */
      if (!obj->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         perf_cfg->vtbl.capture_frequency_stat_register(brw, obj->oa.bo,
                                                        MI_FREQ_END_OFFSET_BYTES);
         brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
                                             MI_RPC_BO_END_OFFSET_BYTES,
                                             obj->oa.begin_report_id + 1);
      }

      --brw->perf_ctx.n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written
       * to query->oa.bo
       */
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      snapshot_statistics_registers(brw, obj,
                                    STATS_BO_END_OFFSET_BYTES);
      --brw->perf_ctx.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}

static void
brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   struct brw_bo *bo = NULL;
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   assert(!o->Ready);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = obj->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = obj->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (brw_batch_references(&brw->batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(brw, __FILE__, __LINE__);

   brw_bo_wait_rendering(bo);

   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (obj->query->kind == GEN_PERF_QUERY_TYPE_OA ||
       obj->query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      while (!read_oa_samples_for_query(brw, obj))
         ;
   }
}

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);

   if (o->Ready)
      return true;

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (obj->oa.results_accumulated ||
              (obj->oa.bo &&
               !brw_batch_references(&brw->batch, obj->oa.bo) &&
               !brw_bo_busy(obj->oa.bo) &&
               read_oa_samples_for_query(brw, obj)));
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (obj->pipeline_stats.bo &&
              !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
              !brw_bo_busy(obj->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}

static void
read_slice_unslice_frequencies(struct brw_context *brw,
                               struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *begin_report = obj->oa.map,
            *end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   gen_perf_query_result_read_frequencies(&obj->oa.result,
                                          devinfo, begin_report, end_report);
}

static void
read_gt_frequency(struct brw_context *brw,
                  struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
            end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
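
   /* The RPSTAT snapshots captured above encode the current GT frequency
    * as a register field; the scaling below reflects the field's units:
    * 50 MHz steps on gen7/8 and 50/3 MHz steps on gen9+, converted to Hz
    * at the end. */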
   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}

static int
get_oa_counter_data(struct brw_context *brw,
                    struct brw_perf_query_object *obj,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf = brw->perf_ctx.perf;
   const struct gen_perf_query_info *query = obj->query;
   int n_counters = query->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);

      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf, query,
                                               obj->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf, query,
                                              obj->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter_size;
      }
   }

   return written;
}

static int
get_pipeline_stats_data(struct brw_context *brw,
                        struct brw_perf_query_object *obj,
                        size_t data_size,
                        uint8_t *data)
{
   const struct gen_perf_query_info *query = obj->query;
   int n_counters = obj->query->n_counters;
   uint8_t *p = data;

   uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];
      uint64_t value = end[i] - start[i];
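
      /* Apply any numerator/denominator fixup registered for this
       * counter, e.g. the 1:4 scaling used for PS_INVOCATION_COUNT on
       * Haswell/Gen8 (see init_pipeline_statistic_query_registers()). */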
      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   brw_bo_unmap(obj->pipeline_stats.bo);

   return p - data;
}

/**
 * Driver hook for glGetPerfQueryDataINTEL().
 */
static void
brw_get_perf_query_data(struct gl_context *ctx,
                        struct gl_perf_query_object *o,
                        GLsizei data_size,
                        GLuint *data,
                        GLuint *bytes_written)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   int written = 0;

   assert(brw_is_perf_query_ready(ctx, o));

   DBG("GetData(%d)\n", o->Id);

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   /* We expect that the frontend only calls this hook when it knows
    * that results are available.
    */
   assert(o->Ready);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!obj->oa.results_accumulated) {
         read_gt_frequency(brw, obj);
         read_slice_unslice_frequencies(brw, obj);
         accumulate_oa_reports(brw, obj);
         assert(obj->oa.results_accumulated);

         brw_bo_unmap(obj->oa.bo);
         obj->oa.map = NULL;
      }
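
      /* OA queries return data in the per-counter layout advertised via
       * glGetPerfCounterInfoINTEL(); RAW queries instead return the
       * MDAPI-formatted blob expected by the Intel Metrics Discovery
       * framework. */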
      if (obj->query->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = &brw->screen->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &obj->oa.result,
                                                     obj->oa.gt_frequency[0],
                                                     obj->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}

static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perf_ctx.perf->queries[query_index];
   struct brw_perf_query_object *obj =
      calloc(1, sizeof(struct brw_perf_query_object));

   if (!obj)
      return NULL;

   obj->query = query;

   brw->perf_ctx.n_query_instances++;

   return &obj->base;
}

/**
 * Driver hook for glDeletePerfQueryINTEL().
 */
static void
brw_delete_perf_query(struct gl_context *ctx,
                      struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   assert(!o->Used || o->Ready);

   DBG("Delete(%d)\n", o->Id);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (obj->oa.bo) {
         if (!obj->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(brw, obj);
            dec_n_oa_users(brw);
         }

         perf_cfg->vtbl.bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.results_accumulated = false;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (obj->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--brw->perf_ctx.n_query_instances == 0) {
      free_sample_bufs(brw);
      close_perf(brw, obj->query);
   }

   free(obj);
}

/******************************************************************************/

static void
init_pipeline_statistic_query_registers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gen_perf_config *perf = brw->perf_ctx.perf;
   struct gen_perf_query_info *query =
      gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
                                          "N vertices submitted");
   gen_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                                          "N primitives submitted");
   gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                                          "N vertex shader invocations");

   if (devinfo->gen == 6) {
      gen_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED",
                                       "N geometry shader stream-out primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN",
                                       "N geometry shader stream-out primitives (written)");
   } else {
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                                       "N stream-out (stream 0) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                                       "N stream-out (stream 1) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                                       "N stream-out (stream 2) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                                       "N stream-out (stream 3) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                                       "N stream-out (stream 0) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                                       "N stream-out (stream 1) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                                       "N stream-out (stream 2) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                                       "N stream-out (stream 3) primitives (written)");
   }

   gen_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                                          "N TCS shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                                          "N TES shader invocations");

   gen_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                                          "N geometry shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                                          "N geometry shader primitives emitted");

   gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                                          "N primitives entering clipping");
   gen_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                                          "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8) {
      gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                                       "N fragment shader invocations",
                                       "N fragment shader invocations");
   } else {
      gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                                             "N fragment shader invocations");
   }

   gen_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT,
                                          "N z-pass fragments");

   if (devinfo->gen >= 7) {
      gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                                             "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}

/* gen_device_info will have incorrect default topology values for
 * unsupported kernels. Verify kernel support to ensure OA metrics are
 * accurate.
 */
static bool
oa_metrics_kernel_support(int fd, const struct gen_device_info *devinfo)
{
   if (devinfo->gen >= 10) {
      /* topology uAPI required for CNL+ (kernel 4.17+); make a call to the
       * api to verify support.
       */
      struct drm_i915_query_item item = {
         .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
      };
      struct drm_i915_query query = {
         .num_items = 1,
         .items_ptr = (uintptr_t) &item,
      };

      /* kernel 4.17+ supports the query */
      return drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0;
   }

   if (devinfo->gen >= 8) {
      /* 4.13+ api required for gen8 - gen9 */
      int mask;
      struct drm_i915_getparam gp = {
         .param = I915_PARAM_SLICE_MASK,
         .value = &mask,
      };
      /* kernel 4.13+ supports this parameter */
      return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0;
   }

   if (devinfo->gen == 7)
      /* default topology values are correct for HSW */
      return true;

   /* oa not supported before gen 7 */
   return false;
}

static void *
brw_oa_bo_alloc(void *bufmgr, const char *name, uint64_t size)
{
   return brw_bo_alloc(bufmgr, name, size, BRW_MEMZONE_OTHER);
}

static void
brw_oa_emit_mi_report_perf_count(void *c,
                                 void *bo,
                                 uint32_t offset_in_bytes,
                                 uint32_t report_id)
{
   struct brw_context *ctx = c;
   ctx->vtbl.emit_mi_report_perf_count(ctx, bo, offset_in_bytes, report_id);
}

typedef void (*bo_unreference_t)(void *);
typedef void (*emit_mi_report_t)(void *, void *, uint32_t, uint32_t);

static void
brw_oa_batchbuffer_flush(void *c, const char *file, int line)
{
   struct brw_context *ctx = c;
   _intel_batchbuffer_flush_fence(ctx, -1, NULL, file, line);
}

typedef void (*capture_frequency_stat_register_t)(void *, void *, uint32_t);

static unsigned
brw_init_perf_query_info(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   __DRIscreen *screen = brw->screen->driScrnPriv;

   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
   if (perf_cfg)
      return perf_cfg->n_queries;

   perf_cfg = gen_perf_new(brw);
   brw->perf_ctx.perf = perf_cfg;
   perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc;
   perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference;
   perf_cfg->vtbl.emit_mi_report_perf_count =
      (emit_mi_report_t)brw_oa_emit_mi_report_perf_count;
   perf_cfg->vtbl.batchbuffer_flush = brw_oa_batchbuffer_flush;
   perf_cfg->vtbl.capture_frequency_stat_register =
      (capture_frequency_stat_register_t) capture_frequency_stat_register;

   init_pipeline_statistic_query_registers(brw);
   gen_perf_query_register_mdapi_statistic_query(&brw->screen->devinfo,
                                                 brw->perf_ctx.perf);

   if ((oa_metrics_kernel_support(screen->fd, devinfo)) &&
       (gen_perf_load_oa_metrics(perf_cfg, screen->fd, devinfo)))
      gen_perf_query_register_mdapi_oa_query(&brw->screen->devinfo,
                                             brw->perf_ctx.perf);

   brw->perf_ctx.unaccumulated =
      ralloc_array(brw, struct brw_perf_query_object *, 2);
   brw->perf_ctx.unaccumulated_elements = 0;
   brw->perf_ctx.unaccumulated_array_size = 2;

   exec_list_make_empty(&brw->perf_ctx.sample_buffers);
   exec_list_make_empty(&brw->perf_ctx.free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(&brw->perf_ctx);
   exec_list_push_head(&brw->perf_ctx.sample_buffers, &buf->link);

   brw->perf_ctx.oa_stream_fd = -1;

   brw->perf_ctx.next_query_start_report_id = 1000;

   return perf_cfg->n_queries;
}

void
brw_init_performance_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
   ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
   ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
   ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
   ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
   ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
   ctx->Driver.EndPerfQuery = brw_end_perf_query;
   ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
   ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
   ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
}