/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file brw_performance_query.c
 *
 * Implementation of the GL_INTEL_performance_query extension.
 *
 * Currently there are two possible counter sources exposed here:
 *
 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
 * that we can snapshot at the beginning and end of a query.
 *
 * On Gen7.5+ we have Observability Architecture counters which are
 * covered in a separate document from the rest of the PRMs.  It is available
 * at: https://01.org/linuxgraphics/documentation/driver-documentation-prms
 * => 2013 Intel Core Processor Family => Observability Performance Counters
 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
 * though notably we currently only support OA counters for Haswell+.)
 */
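/* For orientation, an application typically drives this extension roughly as
 * follows (illustrative sketch only, not part of the driver):
 *
 *    GLuint query_id, handle;
 *    glGetFirstPerfQueryIdINTEL(&query_id);
 *    glCreatePerfQueryINTEL(query_id, &handle);
 *    glBeginPerfQueryINTEL(handle);
 *    ... issue the GL work to be measured ...
 *    glEndPerfQueryINTEL(handle);
 *    glGetPerfQueryDataINTEL(handle, GL_PERFQUERY_WAIT_INTEL,
 *                            data_size, data, &bytes_written);
 *
 * The driver hooks in this file back the Begin/End/GetData steps of that
 * sequence.
 */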
/* put before sys/types.h to silence glibc warnings */
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>

#include <xf86drm.h>
#include "drm-uapi/i915_drm.h"

#include "main/hash.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/performance_query.h"

#include "util/bitset.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/u_math.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"

#include "perf/gen_perf.h"
#include "perf/gen_perf_mdapi.h"
#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_TRIGGER1       (1<<1)
#define OAREPORT_REASON_TRIGGER2       (1<<2)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_GO_TRANSITION  (1<<4)
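/* Illustrative note (a sketch, not code used below): on Gen8+ the report
 * reason field of an OA report can be decoded from its first dword using the
 * masks above, e.g.:
 *
 *    uint32_t reason = (report[0] >> OAREPORT_REASON_SHIFT) &
 *                      OAREPORT_REASON_MASK;
 *    bool is_ctx_switch = reason & OAREPORT_REASON_CTX_SWITCH;
 */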
struct brw_perf_query_object
{
   struct gl_perf_query_object base;
   struct gen_perf_query_object *query;
};

/** Downcasting convenience macro. */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
   return (struct brw_perf_query_object *) o;
}

#define MI_RPC_BO_SIZE              4096
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
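/* A sketch of how the single 4KB MI_RPC buffer object appears to be laid out,
 * derived from the offsets above and their uses below:
 *
 *    offset 0                           begin MI_REPORT_PERF_COUNT report
 *    offset MI_RPC_BO_END_OFFSET_BYTES  end MI_REPORT_PERF_COUNT report
 *    offset MI_FREQ_START_OFFSET_BYTES  32bit RPSTAT snapshot at begin
 *    offset MI_FREQ_END_OFFSET_BYTES    32bit RPSTAT snapshot at end
 */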
/******************************************************************************/

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o);
static void
dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
{
   struct gl_context *ctx = brw_void;
   struct gl_perf_query_object *o = query_void;
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;

   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->oa.bo ? "yes," : "no,",
          brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("%4d: %-6s %-8s BO: %-4s\n",
          id,
          o->Used ? "Dirty," : "New,",
          o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}
static void
dump_perf_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       brw->perf_ctx.n_active_oa_queries, brw->perf_ctx.n_oa_users);
   _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}
/**
 * Driver hook for glGetPerfQueryInfoINTEL().
 */
static void
brw_get_perf_query_info(struct gl_context *ctx,
                        unsigned query_index,
                        const char **name,
                        GLuint *data_size,
                        GLuint *n_counters,
                        GLuint *n_active)
{
   struct brw_context *brw = brw_context(ctx);
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   const struct gen_perf_query_info *query =
      &perf_ctx->perf->queries[query_index];

   *name = query->name;
   *data_size = query->data_size;
   *n_counters = query->n_counters;

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      *n_active = perf_ctx->n_active_oa_queries;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      *n_active = perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
static GLuint
gen_counter_type_enum_to_gl_type(enum gen_perf_counter_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_TYPE_EVENT: return GL_PERFQUERY_COUNTER_EVENT_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_NORM: return GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_RAW: return GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_THROUGHPUT: return GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL;
   case GEN_PERF_COUNTER_TYPE_RAW: return GL_PERFQUERY_COUNTER_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_TIMESTAMP: return GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL;
   default:
      unreachable("Unknown counter type");
   }
}
static GLuint
gen_counter_data_type_to_gl_type(enum gen_perf_counter_data_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32: return GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32: return GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64: return GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT: return GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE: return GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL;
   default:
      unreachable("Unknown counter data type");
   }
}
/**
 * Driver hook for glGetPerfCounterInfoINTEL().
 */
static void
brw_get_perf_counter_info(struct gl_context *ctx,
                          unsigned query_index,
                          unsigned counter_index,
                          const char **name,
                          const char **desc,
                          GLuint *offset,
                          GLuint *data_size,
                          GLuint *type_enum,
                          GLuint *data_type_enum,
                          GLuint64 *raw_max)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_perf_query_info *query =
      &brw->perf_ctx.perf->queries[query_index];
   const struct gen_perf_query_counter *counter =
      &query->counters[counter_index];

   *name = counter->name;
   *desc = counter->desc;
   *offset = counter->offset;
   *data_size = gen_perf_query_counter_get_size(counter);
   *type_enum = gen_counter_type_enum_to_gl_type(counter->type);
   *data_type_enum = gen_counter_data_type_to_gl_type(counter->data_type);
   *raw_max = counter->raw_max;
}
/**
 * Remove a query from the global list of unaccumulated queries, either
 * after successfully accumulating the OA reports associated with the
 * query in accumulate_oa_reports() or when discarding unwanted query
 * results.
 */
static void
drop_from_unaccumulated_query_list(struct brw_context *brw,
                                   struct gen_perf_query_object *obj)
{
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
      if (perf_ctx->unaccumulated[i] == obj) {
         int last_elt = --perf_ctx->unaccumulated_elements;

         if (i == last_elt)
            perf_ctx->unaccumulated[i] = NULL;
         else {
            perf_ctx->unaccumulated[i] =
               perf_ctx->unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */

   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, obj->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   obj->oa.samples_head = NULL;

   gen_perf_reap_old_sample_buffers(&brw->perf_ctx);
}
/* In general, if we see anything spurious while accumulating results
 * we don't try to continue accumulating the current query, hoping for
 * the best; instead we scrap anything outstanding and then hope for
 * the best with new queries.
 */
static void
discard_all_queries(struct brw_context *brw)
{
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   while (perf_ctx->unaccumulated_elements) {
      struct gen_perf_query_object *obj = perf_ctx->unaccumulated[0];

      obj->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(brw, perf_ctx->unaccumulated[0]);

      gen_perf_dec_n_users(perf_ctx);
   }
}
enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};

static enum OaReadStatus
read_oa_samples_until(struct brw_context *brw,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);
   uint32_t last_timestamp = tail_buf->last_timestamp;

   while (1) {
      struct oa_sample_buf *buf = gen_perf_get_free_sample_buf(perf_ctx);
      uint32_t offset;
      int len;

      while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);

         if (len < 0) {
            if (errno == EAGAIN)
               return ((last_timestamp - start_timestamp) >=
                       (end_timestamp - start_timestamp)) ?
                      OA_READ_STATUS_FINISHED :
                      OA_READ_STATUS_UNFINISHED;
            else {
               DBG("Error reading i915 perf samples: %m\n");
            }
         } else
            DBG("Spurious EOF reading i915 perf samples\n");

         return OA_READ_STATUS_ERROR;
      }

      buf->len = len;
      exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);

      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}
/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct brw_context *brw,
                          struct gen_perf_query_object *obj)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulating results. */
   assert(!perf_cfg->vtbl.batch_references(&brw->batch, obj->oa.bo) &&
          !brw_bo_busy(obj->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it. */
   if (obj->oa.map == NULL)
      obj->oa.map = perf_cfg->vtbl.bo_map(brw, obj->oa.bo, MAP_READ);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(brw, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error. */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}
/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
 * last MI_RPC report requested by brw_end_perf_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots. For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
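/* A minimal sketch of the accumulation idea (the real work, including the
 * varying 32/40bit counter widths, happens in
 * gen_perf_query_result_accumulate()):
 *
 *    uint32_t delta = end_report[i] - start_report[i]; // wraps modulo 2^32
 *    result->accumulator[i] += delta;
 *
 * Summing per-snapshot deltas is what makes the intermediate periodic
 * reports sufficient to cope with counters wrapping between the begin and
 * end reports.
 */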
static void
accumulate_oa_reports(struct brw_context *brw,
                      struct brw_perf_query_object *brw_query)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gen_perf_query_object *obj = brw_query->query;
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool in_ctx = true;
   int out_duration = 0;

   assert(brw_query->base.Ready);
   assert(obj->oa.map != NULL);

   start = last = obj->oa.map;
   end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != obj->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (obj->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }
   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the perf_ctx->sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = obj->oa.samples_head->next;

   foreach_list_typed_from(struct oa_sample_buf, buf, link,
                           &brw->perf_ctx.sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool add = true;

            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - start[1]) > 5000000000) {
               continue;
            }

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - end[1]) <= 5000000000) {
               goto end;
            }

            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas. The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               if (in_ctx && report[2] != obj->oa.result.hw_id) {
                  DBG("i915 perf: Switch AWAY (observed by ID change)\n");
                  in_ctx = false;
                  out_duration = 0;
               } else if (in_ctx == false && report[2] == obj->oa.result.hw_id) {
                  DBG("i915 perf: Switch TO\n");
                  in_ctx = true;

                  /* From experimentation in IGT, we found that the OA unit
                   * might label some report as "idle" (using an invalid
                   * context ID), right after a report for a given context.
                   * Deltas generated by those reports actually belong to the
                   * previous context, even though they're not labelled as
                   * such.
                   *
                   * We didn't *really* Switch AWAY in the case that we e.g.
                   * saw a single periodic report while idle...
                   */
                  if (out_duration >= 1)
                     add = false;
               } else if (in_ctx) {
                  assert(report[2] == obj->oa.result.hw_id);
                  DBG("i915 perf: Continuation IN\n");
               } else {
                  assert(report[2] != obj->oa.result.hw_id);
                  DBG("i915 perf: Continuation OUT\n");
                  add = false;
                  out_duration++;
               }
            }

            if (add) {
               gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo,
                                                last, report);
            }

            last = report;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:

   gen_perf_query_result_accumulate(&obj->oa.result, obj->queryinfo,
                                    last, end);

   DBG("Marking %d accumulated - results gathered\n", brw_query->base.Id);

   obj->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(brw, obj);
   gen_perf_dec_n_users(perf_ctx);

   return;

error:

   discard_all_queries(brw);
}
/******************************************************************************/

static void
capture_frequency_stat_register(struct brw_context *brw,
                                struct brw_bo *bo,
                                uint32_t bo_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
       !devinfo->is_baytrail && !devinfo->is_cherryview) {
      brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
   } else if (devinfo->gen >= 9) {
      brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
   }
}
/**
 * Driver hook for glBeginPerfQueryINTEL().
 */
static bool
brw_begin_perf_query(struct gl_context *ctx,
                     struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;

   /* We can assume the frontend hides mistaken attempts to Begin a
    * query object multiple times before its End. Similarly if an
    * application reuses a query object before results have arrived
    * the frontend will wait for prior results so we don't need
    * to support abandoning in-flight results.
    */
   assert(!o->Used || o->Ready); /* no in-flight query to worry about */

   DBG("Begin(%d)\n", o->Id);

   gen_perf_begin_query(perf_ctx, obj);

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   return true;
}
/**
 * Driver hook for glEndPerfQueryINTEL().
 */
static void
brw_end_perf_query(struct gl_context *ctx,
                   struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;

   DBG("End(%d)\n", o->Id);
   gen_perf_end_query(perf_ctx, obj);
}
static void
brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct brw_bo *bo = NULL;
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   assert(!o->Ready);

   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = obj->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = obj->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (perf_cfg->vtbl.batch_references(&brw->batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(brw, __FILE__, __LINE__);

   perf_cfg->vtbl.bo_wait_rendering(bo);

   /* Due to a race condition between the OA unit signaling report
    * availability and the report actually being written into memory,
    * we need to wait for all the reports to come in before we can
    * read them.
    */
   if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA ||
       obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_RAW) {
      while (!read_oa_samples_for_query(brw, obj))
         ;
   }
}
static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;

   if (o->Ready)
      return true;

   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (obj->oa.results_accumulated ||
              (obj->oa.bo &&
               !perf_cfg->vtbl.batch_references(&brw->batch, obj->oa.bo) &&
               !brw_bo_busy(obj->oa.bo) &&
               read_oa_samples_for_query(brw, obj)));
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (obj->pipeline_stats.bo &&
              !perf_cfg->vtbl.batch_references(&brw->batch, obj->pipeline_stats.bo) &&
              !brw_bo_busy(obj->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}
static void
read_slice_unslice_frequencies(struct brw_context *brw,
                               struct gen_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *begin_report = obj->oa.map,
      *end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   gen_perf_query_result_read_frequencies(&obj->oa.result,
                                          devinfo, begin_report, end_report);
}
static void
read_gt_frequency(struct brw_context *brw,
                  struct gen_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
      end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));

   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 10:
   case 11:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}
static int
get_oa_counter_data(struct brw_context *brw,
                    struct gen_perf_query_object *obj,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf = brw->perf_ctx.perf;
   const struct gen_perf_query_info *query = obj->queryinfo;
   int n_counters = query->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);

      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf, query,
                                               obj->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf, query,
                                              obj->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         written = counter->offset + counter_size;
      }
   }

   return written;
}
static int
get_pipeline_stats_data(struct brw_context *brw,
                        struct gen_perf_query_object *obj,
                        size_t data_size,
                        uint8_t *data)
{
   const struct gen_perf_query_info *query = obj->queryinfo;
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   int n_counters = obj->queryinfo->n_counters;
   uint8_t *p = data;

   uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, obj->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   perf_cfg->vtbl.bo_unmap(obj->pipeline_stats.bo);

   return p - data;
}
/**
 * Driver hook for glGetPerfQueryDataINTEL().
 */
static void
brw_get_perf_query_data(struct gl_context *ctx,
                        struct gl_perf_query_object *o,
                        GLsizei data_size,
                        GLuint *data,
                        GLuint *bytes_written)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   int written = 0;

   assert(brw_is_perf_query_ready(ctx, o));

   DBG("GetData(%d)\n", o->Id);

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   /* We expect that the frontend only calls this hook when it knows
    * that results are available.
    */
   assert(o->Ready);

   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!obj->oa.results_accumulated) {
         read_gt_frequency(brw, obj);
         read_slice_unslice_frequencies(brw, obj);
         accumulate_oa_reports(brw, brw_query);
         assert(obj->oa.results_accumulated);

         brw->perf_ctx.perf->vtbl.bo_unmap(obj->oa.bo);
         obj->oa.map = NULL;
      }
      if (obj->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = &brw->screen->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &obj->oa.result,
                                                     obj->oa.gt_frequency[0],
                                                     obj->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}
static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
   struct brw_context *brw = brw_context(ctx);
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   const struct gen_perf_query_info *queryinfo =
      &perf_ctx->perf->queries[query_index];
   struct gen_perf_query_object *obj =
      calloc(1, sizeof(struct gen_perf_query_object));

   if (unlikely(!obj))
      return NULL;

   obj->queryinfo = queryinfo;

   perf_ctx->n_query_instances++;

   struct brw_perf_query_object *brw_query = calloc(1, sizeof(struct brw_perf_query_object));
   if (unlikely(!brw_query))
      return NULL;
   brw_query->query = obj;
   return &brw_query->base;
}
/**
 * Driver hook for glDeletePerfQueryINTEL().
 */
static void
brw_delete_perf_query(struct gl_context *ctx,
                      struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct gen_perf_config *perf_cfg = brw->perf_ctx.perf;
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct gen_perf_context *perf_ctx = &brw->perf_ctx;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   assert(!o->Used || o->Ready);

   DBG("Delete(%d)\n", o->Id);

   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (obj->oa.bo) {
         if (!obj->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(brw, obj);
            gen_perf_dec_n_users(perf_ctx);
         }

         perf_cfg->vtbl.bo_unreference(obj->oa.bo);
         obj->oa.bo = NULL;
      }

      obj->oa.results_accumulated = false;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (obj->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--perf_ctx->n_query_instances == 0) {
      gen_perf_free_sample_bufs(perf_ctx);
      gen_perf_close(perf_ctx, obj->queryinfo);
   }

   free(obj);
   free(brw_query);
}
/******************************************************************************/

static void
init_pipeline_statistic_query_registers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gen_perf_config *perf = brw->perf_ctx.perf;
   struct gen_perf_query_info *query =
      gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);

   query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
                                          "N vertices submitted");
   gen_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                                          "N primitives submitted");
   gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                                          "N vertex shader invocations");

   if (devinfo->gen == 6) {
      gen_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED",
                                       "N geometry shader stream-out primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN",
                                       "N geometry shader stream-out primitives (written)");
   } else {
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                                       "N stream-out (stream 0) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                                       "N stream-out (stream 1) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                                       "N stream-out (stream 2) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                                       "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                                       "N stream-out (stream 3) primitives (total)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                                       "N stream-out (stream 0) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                                       "N stream-out (stream 1) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                                       "N stream-out (stream 2) primitives (written)");
      gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                                       "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                                       "N stream-out (stream 3) primitives (written)");
   }

   gen_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                                          "N TCS shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                                          "N TES shader invocations");

   gen_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                                          "N geometry shader invocations");
   gen_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                                          "N geometry shader primitives emitted");

   gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                                          "N primitives entering clipping");
   gen_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                                          "N primitives leaving clipping");

   if (devinfo->is_haswell || devinfo->gen == 8) {
      gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                                       "N fragment shader invocations",
                                       "N fragment shader invocations");
   } else {
      gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                                             "N fragment shader invocations");
   }

   gen_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT,
                                          "N z-pass fragments");

   if (devinfo->gen >= 7) {
      gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                                             "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;
}
/* gen_device_info will have incorrect default topology values for
 * unsupported kernels. Verify kernel support to ensure OA metrics are
 * accurate.
 */
static bool
oa_metrics_kernel_support(int fd, const struct gen_device_info *devinfo)
{
   if (devinfo->gen >= 10) {
      /* topology uAPI required for CNL+ (kernel 4.17+), so make a call to the
       * API to ensure it is supported.
       */
      struct drm_i915_query_item item = {
         .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
      };

      struct drm_i915_query query = {
         .num_items = 1,
         .items_ptr = (uintptr_t) &item,
      };

      /* kernel 4.17+ supports the query */
      return drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0;
   }

   if (devinfo->gen >= 8) {
      /* 4.13+ api required for gen8 - gen9 */
      int mask;
      struct drm_i915_getparam gp = {
         .param = I915_PARAM_SLICE_MASK,
         .value = &mask,
      };
      /* kernel 4.13+ supports this parameter */
      return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0;
   }

   if (devinfo->gen == 7)
      /* default topology values are correct for HSW */
      return true;

   /* OA is not supported before gen 7 */
   return false;
}
static void *
brw_oa_bo_alloc(void *bufmgr, const char *name, uint64_t size)
{
   return brw_bo_alloc(bufmgr, name, size, BRW_MEMZONE_OTHER);
}

static void
brw_oa_emit_mi_report_perf_count(void *c,
                                 void *bo,
                                 uint32_t offset_in_bytes,
                                 uint32_t report_id)
{
   struct brw_context *ctx = c;
   ctx->vtbl.emit_mi_report_perf_count(ctx,
                                       bo,
                                       offset_in_bytes,
                                       report_id);
}

typedef void (*bo_unreference_t)(void *);
typedef void *(*bo_map_t)(void *, void *, unsigned flags);
typedef void (*bo_unmap_t)(void *);
typedef void (*emit_mi_report_t)(void *, void *, uint32_t, uint32_t);
typedef void (*emit_mi_flush_t)(void *);

static void
brw_oa_batchbuffer_flush(void *c, const char *file, int line)
{
   struct brw_context *ctx = c;
   _intel_batchbuffer_flush_fence(ctx, -1, NULL, file, line);
}

typedef void (*capture_frequency_stat_register_t)(void *, void *, uint32_t);
typedef void (*store_register_mem64_t)(void *ctx, void *bo,
                                       uint32_t reg, uint32_t offset);
typedef bool (*batch_references_t)(void *batch, void *bo);
typedef void (*bo_wait_rendering_t)(void *bo);
static unsigned
brw_init_perf_query_info(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   struct gen_perf_context *perf_ctx = &brw->perf_ctx;
   if (perf_ctx->perf)
      return perf_ctx->perf->n_queries;

   perf_ctx->perf = gen_perf_new(brw);
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc;
   perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference;
   perf_cfg->vtbl.bo_map = (bo_map_t)brw_bo_map;
   perf_cfg->vtbl.bo_unmap = (bo_unmap_t)brw_bo_unmap;
   perf_cfg->vtbl.emit_mi_flush = (emit_mi_flush_t)brw_emit_mi_flush;
   perf_cfg->vtbl.emit_mi_report_perf_count =
      (emit_mi_report_t)brw_oa_emit_mi_report_perf_count;
   perf_cfg->vtbl.batchbuffer_flush = brw_oa_batchbuffer_flush;
   perf_cfg->vtbl.capture_frequency_stat_register =
      (capture_frequency_stat_register_t) capture_frequency_stat_register;
   perf_cfg->vtbl.store_register_mem64 =
      (store_register_mem64_t) brw_store_register_mem64;
   perf_cfg->vtbl.batch_references = (batch_references_t)brw_batch_references;
   perf_cfg->vtbl.bo_wait_rendering = (bo_wait_rendering_t)brw_bo_wait_rendering;

   gen_perf_init_context(perf_ctx, perf_cfg, brw, brw->bufmgr, devinfo,
                         brw->hw_ctx, brw->screen->driScrnPriv->fd);

   init_pipeline_statistic_query_registers(brw);
   gen_perf_query_register_mdapi_statistic_query(devinfo, perf_cfg);

   if ((oa_metrics_kernel_support(perf_ctx->drm_fd, devinfo)) &&
       (gen_perf_load_oa_metrics(perf_cfg, perf_ctx->drm_fd, devinfo)))
      gen_perf_query_register_mdapi_oa_query(devinfo, perf_cfg);

   return perf_cfg->n_queries;
}
void
brw_init_performance_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
   ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
   ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
   ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
   ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
   ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
   ctx->Driver.EndPerfQuery = brw_end_perf_query;
   ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
   ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
   ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
}