/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file brw_performance_query.c
 *
 * Implementation of the GL_INTEL_performance_query extension.
 *
 * Currently there are two possible counter sources exposed here:
 *
 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
 * that we can snapshot at the beginning and end of a query.
 *
 * On Gen7.5+ we have Observability Architecture counters which are
 * covered in a separate document from the rest of the PRMs.  It is available
 * at: https://01.org/linuxgraphics/documentation/driver-documentation-prms
 * => 2013 Intel Core Processor Family => Observability Performance Counters
 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
 * though notably we currently only support OA counters for Haswell+)
 */
/* put before sys/types.h to silence glibc warnings */
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/types.h>
#include <sys/ioctl.h>
58 #include "drm-uapi/i915_drm.h"
60 #include "main/hash.h"
61 #include "main/macros.h"
62 #include "main/mtypes.h"
63 #include "main/performance_query.h"
65 #include "util/bitset.h"
66 #include "util/ralloc.h"
67 #include "util/hash_table.h"
68 #include "util/list.h"
69 #include "util/u_math.h"
71 #include "brw_context.h"
72 #include "brw_defines.h"
73 #include "brw_performance_query.h"
74 #include "intel_batchbuffer.h"
76 #include "perf/gen_perf.h"
77 #include "perf/gen_perf_mdapi.h"
#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_TRIGGER1       (1<<1)
#define OAREPORT_REASON_TRIGGER2       (1<<2)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_GO_TRANSITION  (1<<4)
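
/* An illustrative sketch (not part of the original flow below), assuming
 * the Gen8+ raw OA report layout where dword 0 carries the generation
 * reason in the bit range that the shift/mask above describe:
 */
static inline uint32_t
oa_report_reason(const uint32_t *report)
{
   return (report[0] >> OAREPORT_REASON_SHIFT) & OAREPORT_REASON_MASK;
}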
/** Downcasting convenience macro. */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
   return (struct brw_perf_query_object *) o;
}
#define MI_RPC_BO_SIZE              4096
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
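
/* A sketch of the MI_RPC BO layout implied by the offsets above (an OA
 * report is at most 256 bytes, so each half leaves ample room):
 *
 *    0x0000: begin OA report, written by brw_begin_perf_query()
 *    0x0800: end OA report, written by brw_end_perf_query()
 *    0x0c00: 32-bit RPSTAT frequency snapshot taken at Begin
 *    0x0c04: 32-bit RPSTAT frequency snapshot taken at End
 */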
/******************************************************************************/

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o);
static void
dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
{
   struct gl_context *ctx = brw_void;
   struct gl_perf_query_object *o = query_void;
   struct brw_perf_query_object *obj = query_void;

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->oa.bo ? "yes," : "no,",
          brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("%4d: %-6s %-8s BO: %-4s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}
static void
dump_perf_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       brw->perf_ctx.n_active_oa_queries, brw->perf_ctx.n_oa_users);
   _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}
/**
 * Driver hook for glGetPerfQueryInfoINTEL().
 */
static void
brw_get_perf_query_info(struct gl_context *ctx,
                        unsigned query_index,
                        const char **name,
                        GLuint *data_size,
                        GLuint *n_counters,
                        GLuint *n_active)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perf_ctx.perf->queries[query_index];

   *name = query->name;
   *data_size = query->data_size;
   *n_counters = query->n_counters;

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      *n_active = brw->perf_ctx.n_active_oa_queries;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      *n_active = brw->perf_ctx.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
static GLuint
gen_counter_type_enum_to_gl_type(enum gen_perf_counter_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_TYPE_EVENT: return GL_PERFQUERY_COUNTER_EVENT_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_NORM: return GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_RAW: return GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_THROUGHPUT: return GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL;
   case GEN_PERF_COUNTER_TYPE_RAW: return GL_PERFQUERY_COUNTER_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_TIMESTAMP: return GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL;
   default:
      unreachable("Unknown counter type");
      return 0;
   }
}
static GLuint
gen_counter_data_type_to_gl_type(enum gen_perf_counter_data_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32: return GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32: return GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64: return GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT: return GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE: return GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL;
   default:
      unreachable("Unknown counter data type");
      return 0;
   }
}
/**
 * Driver hook for glGetPerfCounterInfoINTEL().
 */
static void
brw_get_perf_counter_info(struct gl_context *ctx,
                          unsigned query_index,
                          unsigned counter_index,
                          const char **name,
                          const char **desc,
                          GLuint *offset,
                          GLuint *data_size,
                          GLuint *type_enum,
                          GLuint *data_type_enum,
                          GLuint64 *raw_max)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perf_ctx.perf->queries[query_index];
   const struct gen_perf_query_counter *counter =
      &query->counters[counter_index];

   *name = counter->name;
   *desc = counter->desc;
   *offset = counter->offset;
   *data_size = gen_perf_query_counter_get_size(counter);
   *type_enum = gen_counter_type_enum_to_gl_type(counter->type);
   *data_type_enum = gen_counter_data_type_to_gl_type(counter->data_type);
   *raw_max = counter->raw_max;
}
/******************************************************************************/

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(struct brw_context *brw,
                              struct brw_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
   const struct gen_perf_query_info *query = obj->query;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);

      brw_store_register_mem64(brw, obj->pipeline_stats.bo,
                               counter->pipeline_stat.reg,
                               offset_in_bytes + i * sizeof(uint64_t));
   }
}
/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct brw_context *brw,
                                struct brw_perf_query_object *obj)
{
   if (brw->perf_ctx.unaccumulated_elements >=
       brw->perf_ctx.unaccumulated_array_size)
   {
      brw->perf_ctx.unaccumulated_array_size *= 1.5;
      brw->perf_ctx.unaccumulated =
         reralloc(brw, brw->perf_ctx.unaccumulated,
                  struct brw_perf_query_object *,
                  brw->perf_ctx.unaccumulated_array_size);
   }

   brw->perf_ctx.unaccumulated[brw->perf_ctx.unaccumulated_elements++] = obj;
}
/**
 * Remove a query from the global list of unaccumulated queries, either
 * after successfully accumulating the OA reports associated with the
 * query in accumulate_oa_reports() or when discarding unwanted query
 * results.
 */
static void
drop_from_unaccumulated_query_list(struct brw_context *brw,
                                   struct brw_perf_query_object *obj)
{
   for (int i = 0; i < brw->perf_ctx.unaccumulated_elements; i++) {
      if (brw->perf_ctx.unaccumulated[i] == obj) {
         int last_elt = --brw->perf_ctx.unaccumulated_elements;

         if (i == last_elt)
            brw->perf_ctx.unaccumulated[i] = NULL;
         else {
            brw->perf_ctx.unaccumulated[i] =
               brw->perf_ctx.unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */

   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   obj->oa.samples_head = NULL;

   gen_perf_reap_old_sample_buffers(&brw->perf_ctx);
}
static bool
inc_n_oa_users(struct brw_context *brw)
{
   if (brw->perf_ctx.n_oa_users == 0 &&
       drmIoctl(brw->perf_ctx.oa_stream_fd,
                I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++brw->perf_ctx.n_oa_users;

   return true;
}
static void
dec_n_oa_users(struct brw_context *brw)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters.  Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --brw->perf_ctx.n_oa_users;
   if (brw->perf_ctx.n_oa_users == 0 &&
       drmIoctl(brw->perf_ctx.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling i915 perf stream: %m\n");
   }
}
/* In general, if we see anything spurious while accumulating results we
 * don't try to continue accumulating the current query; we scrap anything
 * outstanding and then hope for the best with new queries.
 */
static void
discard_all_queries(struct brw_context *brw)
{
   while (brw->perf_ctx.unaccumulated_elements) {
      struct brw_perf_query_object *obj = brw->perf_ctx.unaccumulated[0];

      obj->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(brw, brw->perf_ctx.unaccumulated[0]);

      dec_n_oa_users(brw);
   }
}
enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};
static enum OaReadStatus
read_oa_samples_until(struct brw_context *brw,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&brw->perf_ctx.sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);
   uint32_t last_timestamp = tail_buf->last_timestamp;

   while (1) {
      struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(&brw->perf_ctx);
      uint32_t offset;
      int len;

      while ((len = read(brw->perf_ctx.oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&brw->perf_ctx.free_sample_buffers, &buf->link);

         if (len < 0) {
            if (errno == EAGAIN)
               return ((last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
            else {
               DBG("Error reading i915 perf samples: %m\n");
            }
         } else
            DBG("Spurious EOF reading i915 perf samples\n");

         return OA_READ_STATUS_ERROR;
      }

      buf->len = len;
      exec_list_push_tail(&brw->perf_ctx.sample_buffers, &buf->link);

      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}
/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct brw_context *brw,
                          struct brw_perf_query_object *obj)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulating. */
   assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
          !brw_bo_busy(obj->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it. */
   if (obj->oa.map == NULL)
      obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(brw, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error. */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}
/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query().  Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots.  For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
static void
accumulate_oa_reports(struct brw_context *brw,
                      struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_perf_query_object *o = &obj->base;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool in_ctx = true;
   int out_duration = 0;

   assert(o->Ready);
   assert(obj->oa.map != NULL);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the brw->perf_ctx.sample_buffers list at
    * the time the query started.  Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = obj->oa.samples_head->next;

   foreach_list_typed_from(struct oa_sample_buf, buf, link,
                           &brw->perf_ctx.sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool add = true;

            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - start[1]) > 5000000000) {
               continue;
            }

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - end[1]) <= 5000000000) {
               goto end;
            }

            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas.  The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               if (in_ctx && report[2] != obj->oa.result.hw_id) {
                  DBG("i915 perf: Switch AWAY (observed by ID change)\n");
                  in_ctx = false;
                  out_duration = 0;
               } else if (in_ctx == false && report[2] == obj->oa.result.hw_id) {
                  DBG("i915 perf: Switch TO\n");
                  in_ctx = true;

                  /* From experimentation in IGT, we found that the OA unit
                   * might label some report as "idle" (using an invalid
                   * context ID), right after a report for a given context.
                   * Deltas generated by those reports actually belong to the
                   * previous context, even though they're not labelled as
                   * such.
                   *
                   * We didn't *really* Switch AWAY in the case that we e.g.
                   * saw a single periodic report while idle...
                   */
                  if (out_duration >= 1)
                     add = false;
               } else if (in_ctx) {
                  assert(report[2] == obj->oa.result.hw_id);
                  DBG("i915 perf: Continuation IN\n");
               } else {
                  assert(report[2] != obj->oa.result.hw_id);
                  DBG("i915 perf: Continuation OUT\n");
                  add = false;
                  out_duration++;
               }
            }

            if (add) {
               gen_perf_query_result_accumulate(&obj->oa.result, obj->query,
                                                last, report);
            }

            last = report;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:

   gen_perf_query_result_accumulate(&obj->oa.result, obj->query,
                                    last, end);

   DBG("Marking %d accumulated - results gathered\n", o->Id);

   obj->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(brw, obj);
   dec_n_oa_users(brw);

   return;

error:

   discard_all_queries(brw);
}
/******************************************************************************/

static bool
open_i915_perf_oa_stream(struct brw_context *brw,
                         int metrics_set_id,
                         int report_format,
                         int period_exponent,
                         int drm_fd,
                         uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
   };
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      /* properties[] holds key/value pairs, hence the divide by two */
      .num_properties = ARRAY_SIZE(properties) / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening i915 perf OA stream: %m\n");
      return false;
   }

   brw->perf_ctx.oa_stream_fd = fd;

   brw->perf_ctx.current_oa_metrics_set_id = metrics_set_id;
   brw->perf_ctx.current_oa_format = report_format;

   return true;
}
static void
close_perf(struct brw_context *brw,
           const struct gen_perf_query_info *query)
{
   if (brw->perf_ctx.oa_stream_fd != -1) {
      close(brw->perf_ctx.oa_stream_fd);
      brw->perf_ctx.oa_stream_fd = -1;
   }
   if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      struct gen_perf_query_info *raw_query =
         (struct gen_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}
static void
capture_frequency_stat_register(struct brw_context *brw,
                                struct brw_bo *bo,
                                uint32_t bo_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
       !devinfo->is_baytrail && !devinfo->is_cherryview) {
      brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
   } else if (devinfo->gen >= 9) {
      brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
   }
}
/**
 * Driver hook for glBeginPerfQueryINTEL().
 */
static bool
brw_begin_perf_query(struct gl_context *ctx,
                     struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   const struct gen_perf_query_info *query = obj->query;
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   /* We can assume the frontend hides mistaken attempts to Begin a
    * query object multiple times before its End.  Similarly if an
    * application reuses a query object before results have arrived
    * the frontend will wait for prior results so we don't need
    * to support abandoning in-flight results.
    */
   assert(!o->Used || o->Ready); /* no in-flight query to worry about */

   DBG("Begin(%d)\n", o->Id);

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * Theoretically there could be opportunities to minimize how much of the
    * GPU pipeline is drained, or that we stall for, when we know what specific
    * units the performance counters being queried relate to but we don't
    * currently attempt to be clever here.
    *
    * Note: with our current simple approach here then for back-to-back queries
    * we will redundantly emit duplicate commands to synchronize the command
    * streamer with the rest of the GPU pipeline, but we assume that in HW the
    * second synchronization is effectively a NOOP.
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results.  Whether that's a good or a
    * bad thing depends on the use case.  For a lower real-time impact while
    * capturing metrics then periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   brw_emit_mi_flush(brw);

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = gen_perf_query_get_metric_id(brw->perf_ctx.perf, query);

      if (brw->perf_ctx.oa_stream_fd != -1 &&
          brw->perf_ctx.current_oa_metrics_set_id != metric_id) {

         if (brw->perf_ctx.n_oa_users != 0) {
            DBG("WARNING: Begin(%d) failed already using perf config=%i/%"PRIu64"\n",
                o->Id, brw->perf_ctx.current_oa_metrics_set_id, metric_id);
            return false;
         } else
            close_perf(brw, query);
      }

      /* If the OA counters aren't already on, enable them. */
      if (brw->perf_ctx.oa_stream_fd == -1) {
         __DRIscreen *screen = brw->screen->driScrnPriv;
         const struct gen_device_info *devinfo = &brw->screen->devinfo;

         /* The period_exponent gives a sampling period as follows:
          *   sample_period = timestamp_period * 2^(period_exponent + 1)
          *
          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
          * ~83ns (GEN8/9).
          *
          * The counter overflow period is derived from the EuActive counter
          * which reads a counter that increments by the number of clock
          * cycles multiplied by the number of EUs.  It can be calculated as:
          *
          * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
          *
          * (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We select a sampling period lower than that overflow period to
          * ensure we cannot see more than 1 counter overflow, otherwise we
          * could lose information.
          */

         int a_counter_in_bits = 32;
         if (devinfo->gen >= 8)
            a_counter_in_bits = 40;

         uint64_t overflow_period = pow(2, a_counter_in_bits) /
            (brw->perf_ctx.perf->sys_vars.n_eus *
             /* drop 1GHz freq to have units in nanoseconds */
             2);

         DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
             overflow_period, overflow_period / 1000000ul,
             brw->perf_ctx.perf->sys_vars.n_eus);

         int period_exponent = 0;
         uint64_t prev_sample_period, next_sample_period;
         for (int e = 0; e < 30; e++) {
            prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
            next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;

            /* Take the previous sampling period, lower than the overflow
             * period.
             */
            if (prev_sample_period < overflow_period &&
                next_sample_period > overflow_period)
               period_exponent = e + 1;
         }

         if (period_exponent == 0) {
            DBG("WARNING: unable to find a sampling exponent\n");
            return false;
         }

         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
             prev_sample_period / 1000000ul);
         if (!open_i915_perf_oa_stream(brw,
                                       metric_id,
                                       query->oa_format,
                                       period_exponent,
                                       screen->fd, /* drm fd */
                                       brw->hw_ctx))
            return false;
      } else {
         assert(brw->perf_ctx.current_oa_metrics_set_id == metric_id &&
                brw->perf_ctx.current_oa_format == query->oa_format);
      }

      if (!inc_n_oa_users(brw)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (obj->oa.bo) {
         brw->perf_ctx.perf->vtbl.bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.bo =
         brw->perf_ctx.perf->vtbl.bo_alloc(brw->bufmgr,
                                           "perf. query OA MI_RPC bo",
                                           MI_RPC_BO_SIZE);
#ifdef DEBUG
      /* Pre-filling the BO helps debug whether writes landed. */
      void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
      memset(map, 0x80, MI_RPC_BO_SIZE);
      brw_bo_unmap(obj->oa.bo);
#endif

      obj->oa.begin_report_id = brw->perf_ctx.next_query_start_report_id;
      brw->perf_ctx.next_query_start_report_id += 2;

      /* We flush the batchbuffer here to minimize the chances that MI_RPC
       * delimiting commands end up in different batchbuffers.  If that's the
       * case, the measurement will include the time it takes for the kernel
       * scheduler to load a new request into the hardware.  This is manifested
       * in tools like frameretrace by spikes in the "GPU Core Clocks" counter.
       */
      perf_cfg->vtbl.batchbuffer_flush(brw, __FILE__, __LINE__);

      /* Take a starting OA counter snapshot. */
      brw->perf_ctx.perf->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
                                                         obj->oa.begin_report_id);
      perf_cfg->vtbl.capture_frequency_stat_register(brw, obj->oa.bo,
                                                     MI_FREQ_START_OFFSET_BYTES);

      ++brw->perf_ctx.n_active_oa_queries;

      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&brw->perf_ctx.sample_buffers));
      obj->oa.samples_head = exec_list_get_tail(&brw->perf_ctx.sample_buffers);

      struct oa_sample_buf *buf =
         exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      gen_perf_query_result_clear(&obj->oa.result);
      obj->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(brw, obj);
      break;
   }

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (obj->pipeline_stats.bo) {
         brw->perf_ctx.perf->vtbl.bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }

      obj->pipeline_stats.bo =
         brw->perf_ctx.perf->vtbl.bo_alloc(brw->bufmgr,
                                           "perf. query pipeline stats bo",
                                           STATS_BO_SIZE);

      /* Take starting snapshots. */
      snapshot_statistics_registers(brw, obj, 0);

      ++brw->perf_ctx.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   return true;
}
/**
 * Driver hook for glEndPerfQueryINTEL().
 */
static void
brw_end_perf_query(struct gl_context *ctx,
                   struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   DBG("End(%d)\n", o->Id);

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in brw_begin_perf_query for
    * corresponding flush.
    */
   brw_emit_mi_flush(brw);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf.  In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled.
       */
      if (!obj->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         perf_cfg->vtbl.capture_frequency_stat_register(brw, obj->oa.bo,
                                                        MI_FREQ_END_OFFSET_BYTES);
         brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
                                             MI_RPC_BO_END_OFFSET_BYTES,
                                             obj->oa.begin_report_id + 1);
      }

      --brw->perf_ctx.n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written to
       * query->oa.bo.
       */
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      snapshot_statistics_registers(brw, obj,
                                    STATS_BO_END_OFFSET_BYTES);
      --brw->perf_ctx.n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
static void
brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   struct brw_bo *bo = NULL;
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   assert(!o->Ready);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = obj->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = obj->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (brw_batch_references(&brw->batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(brw, __FILE__, __LINE__);

   brw_bo_wait_rendering(bo);

   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (obj->query->kind == GEN_PERF_QUERY_TYPE_OA ||
       obj->query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      while (!read_oa_samples_for_query(brw, obj))
         ;
   }
}
static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);

   if (o->Ready)
      return true;

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (obj->oa.results_accumulated ||
              (obj->oa.bo &&
               !brw_batch_references(&brw->batch, obj->oa.bo) &&
               !brw_bo_busy(obj->oa.bo) &&
               read_oa_samples_for_query(brw, obj)));
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (obj->pipeline_stats.bo &&
              !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
              !brw_bo_busy(obj->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}
static void
read_slice_unslice_frequencies(struct brw_context *brw,
                               struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *begin_report = obj->oa.map,
      *end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   gen_perf_query_result_read_frequencies(&obj->oa.result,
                                          devinfo, begin_report, end_report);
}
static void
read_gt_frequency(struct brw_context *brw,
                  struct brw_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
      end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));

   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}
static int
get_oa_counter_data(struct brw_context *brw,
                    struct brw_perf_query_object *obj,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf = brw->perf_ctx.perf;
   const struct gen_perf_query_info *query = obj->query;
   int n_counters = query->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);

      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf, query,
                                               obj->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf, query,
                                              obj->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter_size;
      }
   }

   return written;
}
static int
get_pipeline_stats_data(struct brw_context *brw,
                        struct brw_perf_query_object *obj,
                        size_t data_size,
                        uint8_t *data)
{
   const struct gen_perf_query_info *query = obj->query;
   int n_counters = obj->query->n_counters;
   uint8_t *p = data;

   uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   brw_bo_unmap(obj->pipeline_stats.bo);

   return p - data;
}
/**
 * Driver hook for glGetPerfQueryDataINTEL().
 */
static void
brw_get_perf_query_data(struct gl_context *ctx,
                        struct gl_perf_query_object *o,
                        GLsizei data_size,
                        GLuint *data,
                        GLuint *bytes_written)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   int written = 0;

   assert(brw_is_perf_query_ready(ctx, o));

   DBG("GetData(%d)\n", o->Id);

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   /* We expect that the frontend only calls this hook when it knows
    * that results are available.
    */
   assert(o->Ready);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!obj->oa.results_accumulated) {
         read_gt_frequency(brw, obj);
         read_slice_unslice_frequencies(brw, obj);
         accumulate_oa_reports(brw, obj);
         assert(obj->oa.results_accumulated);

         brw_bo_unmap(obj->oa.bo);
         obj->oa.map = NULL;
      }
      if (obj->query->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = &brw->screen->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &obj->oa.result,
                                                     obj->oa.gt_frequency[0],
                                                     obj->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}
static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perf_ctx.perf->queries[query_index];
   struct brw_perf_query_object *obj =
      calloc(1, sizeof(struct brw_perf_query_object));

   if (!obj)
      return NULL;

   obj->query = query;

   brw->perf_ctx.n_query_instances++;

   return &obj->base;
}
/**
 * Driver hook for glDeletePerfQueryINTEL().
 */
static void
brw_delete_perf_query(struct gl_context *ctx,
                      struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *obj = brw_perf_query(o);
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   assert(!o->Used || o->Ready);

   DBG("Delete(%d)\n", o->Id);

   switch (obj->query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (obj->oa.bo) {
         if (!obj->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(brw, obj);
            dec_n_oa_users(brw);
         }

         perf_cfg->vtbl.bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.results_accumulated = false;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (obj->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--brw->perf_ctx.n_query_instances == 0) {
      gen_perf_free_sample_bufs(&brw->perf_ctx);
      close_perf(brw, obj->query);
   }

   free(obj);
}
/******************************************************************************/

static void
init_pipeline_statistic_query_registers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gen_perf_config *perf = brw->perf_ctx.perf;
   struct gen_perf_query_info *query =
      gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
                                          "N vertices submitted");
   gen_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                                          "N primitives submitted");
   gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                                          "N vertex shader invocations");

   if (devinfo->gen == 6) {
      gen_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED",
                                       "N geometry shader stream-out primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN",
                                       "N geometry shader stream-out primitives (written)");
   } else {
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                                       "N stream-out (stream 0) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                                       "N stream-out (stream 1) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                                       "N stream-out (stream 2) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                                       "N stream-out (stream 3) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                                       "N stream-out (stream 0) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                                       "N stream-out (stream 1) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                                       "N stream-out (stream 2) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                                       "N stream-out (stream 3) primitives (written)");
   }

   gen_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                                          "N TCS shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                                          "N TES shader invocations");

   gen_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                                          "N geometry shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                                          "N geometry shader primitives emitted");

   gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                                          "N primitives entering clipping");
   gen_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                                          "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8) {
      /* The hardware-reported value is 4 times actual on these platforms,
       * so scale it down by registering a 1/4 numerator/denominator.
       */
      gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                                       "N fragment shader invocations",
                                       "N fragment shader invocations");
   } else {
      gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                                             "N fragment shader invocations");
   }

   gen_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT,
                                          "N z-pass fragments");

   if (devinfo->gen >= 7) {
      gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                                             "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}
/* gen_device_info will have incorrect default topology values for
 * unsupported kernels.  Verify kernel support to ensure OA metrics are
 * accurate.
 */
static bool
oa_metrics_kernel_support(int fd, const struct gen_device_info *devinfo)
{
   if (devinfo->gen >= 10) {
      /* topology uAPI required for CNL+ (kernel 4.17+); make a call to the
       * api to verify support
       */
      struct drm_i915_query_item item = {
         .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
      };
      struct drm_i915_query query = {
         .num_items = 1,
         .items_ptr = (uintptr_t) &item,
      };

      /* kernel 4.17+ supports the query */
      return drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0;
   }

   if (devinfo->gen >= 8) {
      /* 4.13+ api required for gen8 - gen9 */
      int mask;
      struct drm_i915_getparam gp = {
         .param = I915_PARAM_SLICE_MASK,
         .value = &mask,
      };
      /* kernel 4.13+ supports this parameter */
      return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0;
   }

   if (devinfo->gen == 7)
      /* default topology values are correct for HSW */
      return true;

   /* OA not supported before gen 7 */
   return false;
}
static void *
brw_oa_bo_alloc(void *bufmgr, const char *name, uint64_t size)
{
   return brw_bo_alloc(bufmgr, name, size, BRW_MEMZONE_OTHER);
}

static void
brw_oa_emit_mi_report_perf_count(void *c,
                                 void *bo,
                                 uint32_t offset_in_bytes,
                                 uint32_t report_id)
{
   struct brw_context *ctx = c;
   ctx->vtbl.emit_mi_report_perf_count(ctx,
                                       bo,
                                       offset_in_bytes,
                                       report_id);
}

typedef void (*bo_unreference_t)(void *);
typedef void (*emit_mi_report_t)(void *, void *, uint32_t, uint32_t);

static void
brw_oa_batchbuffer_flush(void *c, const char *file, int line)
{
   struct brw_context *ctx = c;
   _intel_batchbuffer_flush_fence(ctx, -1, NULL, file, line);
}

typedef void (*capture_frequency_stat_register_t)(void *, void *, uint32_t);
static unsigned
brw_init_perf_query_info(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   __DRIscreen *screen = brw->screen->driScrnPriv;

   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
   if (perf_cfg)
      return perf_cfg->n_queries;

   perf_cfg = gen_perf_new(brw);
   brw->perf_ctx.perf = perf_cfg;
   perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc;
   perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference;
   perf_cfg->vtbl.emit_mi_report_perf_count =
      (emit_mi_report_t)brw_oa_emit_mi_report_perf_count;
   perf_cfg->vtbl.batchbuffer_flush = brw_oa_batchbuffer_flush;
   perf_cfg->vtbl.capture_frequency_stat_register =
      (capture_frequency_stat_register_t) capture_frequency_stat_register;

   init_pipeline_statistic_query_registers(brw);
   gen_perf_query_register_mdapi_statistic_query(&brw->screen->devinfo,
                                                 brw->perf_ctx.perf);

   if ((oa_metrics_kernel_support(screen->fd, devinfo)) &&
       (gen_perf_load_oa_metrics(perf_cfg, screen->fd, devinfo)))
      gen_perf_query_register_mdapi_oa_query(&brw->screen->devinfo,
                                             brw->perf_ctx.perf);

   brw->perf_ctx.unaccumulated =
      ralloc_array(brw, struct brw_perf_query_object *, 2);
   brw->perf_ctx.unaccumulated_elements = 0;
   brw->perf_ctx.unaccumulated_array_size = 2;

   exec_list_make_empty(&brw->perf_ctx.sample_buffers);
   exec_list_make_empty(&brw->perf_ctx.free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(&brw->perf_ctx);
   exec_list_push_head(&brw->perf_ctx.sample_buffers, &buf->link);

   brw->perf_ctx.oa_stream_fd = -1;

   brw->perf_ctx.next_query_start_report_id = 1000;

   return perf_cfg->n_queries;
}
void
brw_init_performance_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
   ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
   ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
   ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
   ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
   ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
   ctx->Driver.EndPerfQuery = brw_end_perf_query;
   ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
   ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
   ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
}
;