intel/perf: drop counter size field
[mesa.git] src/mesa/drivers/dri/i965/brw_performance_query.c
1 /*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file brw_performance_query.c
26 *
27 * Implementation of the GL_INTEL_performance_query extension.
28 *
29 * Currently there are two possible counter sources exposed here:
30 *
31 * On Gen6+ hardware we have numerous 64bit Pipeline Statistics Registers
32 * that we can snapshot at the beginning and end of a query.
33 *
34 * On Gen7.5+ we have Observability Architecture counters which are
35 * covered in a separate document from the rest of the PRMs. It is available at:
36 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
37 * => 2013 Intel Core Processor Family => Observability Performance Counters
38 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
39 * though notably we currently only support OA counters for Haswell+)
40 */
41
42 #include <limits.h>
43
44 /* put before sys/types.h to silence glibc warnings */
45 #ifdef MAJOR_IN_MKDEV
46 #include <sys/mkdev.h>
47 #endif
48 #ifdef MAJOR_IN_SYSMACROS
49 #include <sys/sysmacros.h>
50 #endif
51 #include <sys/types.h>
52 #include <sys/stat.h>
53 #include <fcntl.h>
54 #include <sys/mman.h>
55 #include <sys/ioctl.h>
56
57 #include <xf86drm.h>
58 #include "drm-uapi/i915_drm.h"
59
60 #include "main/hash.h"
61 #include "main/macros.h"
62 #include "main/mtypes.h"
63 #include "main/performance_query.h"
64
65 #include "util/bitset.h"
66 #include "util/ralloc.h"
67 #include "util/hash_table.h"
68 #include "util/list.h"
69 #include "util/u_math.h"
70
71 #include "brw_context.h"
72 #include "brw_defines.h"
73 #include "brw_performance_query.h"
74 #include "intel_batchbuffer.h"
75
76 #include "perf/gen_perf.h"
77 #include "perf/gen_perf_mdapi.h"
78
79 #define FILE_DEBUG_FLAG DEBUG_PERFMON
80
81 #define OAREPORT_REASON_MASK 0x3f
82 #define OAREPORT_REASON_SHIFT 19
83 #define OAREPORT_REASON_TIMER (1<<0)
84 #define OAREPORT_REASON_TRIGGER1 (1<<1)
85 #define OAREPORT_REASON_TRIGGER2 (1<<2)
86 #define OAREPORT_REASON_CTX_SWITCH (1<<3)
87 #define OAREPORT_REASON_GO_TRANSITION (1<<4)
88
89 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
90 256) /* OA counter report */
91
92 /**
93 * Periodic OA samples are read() into these buffer structures via the
94 * i915 perf kernel interface and appended to the
95 * brw->perfquery.sample_buffers linked list. When we process the
96 * results of an OA metrics query we need to consider all the periodic
97 * samples between the Begin and End MI_REPORT_PERF_COUNT command
98 * markers.
99 *
100 * 'Periodic' is a simplification as there are other automatic reports
101 * written by the hardware also buffered here.
102 *
103 * Considering three queries, A, B and C:
104 *
105 * Time ---->
106 * ________________A_________________
107 * | |
108 * | ________B_________ _____C___________
109 * | | | | | |
110 *
111 * And an illustration of sample buffers read over this time frame:
112 * [HEAD ][ ][ ][ ][ ][ ][ ][ ][TAIL ]
113 *
114 * These nodes may hold samples for query A:
115 * [ ][ ][ A ][ A ][ A ][ A ][ A ][ ][ ]
116 *
117 * These nodes may hold samples for query B:
118 * [ ][ ][ B ][ B ][ B ][ ][ ][ ][ ]
119 *
120 * These nodes may hold samples for query C:
121 * [ ][ ][ ][ ][ ][ C ][ C ][ C ][ ]
122 *
123 * The illustration assumes we have an even distribution of periodic
124 * samples so all nodes have the same size plotted against time:
125 *
126 * Note, to simplify code, the list is never empty.
127 *
128 * With overlapping queries we can see that periodic OA reports may
129 * relate to multiple queries and care needs to be taken to keep
130 * track of sample buffers until there are no queries that might
131 * depend on their contents.
132 *
133 * We use a node ref counting system where a reference ensures that a
134 * node and all following nodes can't be freed/recycled until the
135 * reference drops to zero.
136 *
137 * E.g. with a ref of one here:
138 * [ 0 ][ 0 ][ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
139 *
140 * These nodes could be freed or recycled ("reaped"):
141 * [ 0 ][ 0 ]
142 *
143 * These must be preserved until the leading ref drops to zero:
144 * [ 1 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ][ 0 ]
145 *
146 * When a query starts we take a reference on the current tail of
147 * the list, knowing that no already-buffered samples can possibly
148 * relate to the newly-started query. A pointer to this node is
149 * also saved in the query object's ->oa.samples_head.
150 *
151 * E.g. starting query A while there are two nodes in .sample_buffers:
152 * ________________A________
153 * |
154 *
155 * [ 0 ][ 1 ]
156 * ^_______ Add a reference and store pointer to node in
157 * A->oa.samples_head
158 *
159 * Moving forward to when the B query starts with no new buffer nodes:
160 * (for reference, i915 perf reads() are only done when queries finish)
161 * ________________A_______
162 * | ________B___
163 * | |
164 *
165 * [ 0 ][ 2 ]
166 * ^_______ Add a reference and store pointer to
167 * node in B->oa.samples_head
168 *
169 * Once a query is finished, after an OA query has become 'Ready',
170 * once the End OA report has landed and after we have processed
171 * all the intermediate periodic samples then we drop the
172 * ->oa.samples_head reference we took at the start.
173 *
174 * So when the B query has finished we have:
175 * ________________A________
176 * | ______B___________
177 * | | |
178 * [ 0 ][ 1 ][ 0 ][ 0 ][ 0 ]
179 * ^_______ Drop B->oa.samples_head reference
180 *
181 * We still can't free these due to the A->oa.samples_head ref:
182 * [ 1 ][ 0 ][ 0 ][ 0 ]
183 *
184 * When the A query finishes: (note there's a new ref for C's samples_head)
185 * ________________A_________________
186 * | |
187 * | _____C_________
188 * | | |
189 * [ 0 ][ 0 ][ 0 ][ 0 ][ 1 ][ 0 ][ 0 ]
190 * ^_______ Drop A->oa.samples_head reference
191 *
192 * And we can now reap these nodes up to the C->oa.samples_head:
193 * [ X ][ X ][ X ][ X ]
194 * keeping -> [ 1 ][ 0 ][ 0 ]
195 *
196 * We reap old sample buffers each time we finish processing an OA
197 * query by iterating the sample_buffers list from the head until we
198 * find a referenced node and stop.
199 *
200 * Reaped buffers move to a perfquery.free_sample_buffers list and
201 * when we come to read() we first look to recycle a buffer from the
202 * free_sample_buffers list before allocating a new buffer.
203 */
204 struct brw_oa_sample_buf {
205 struct exec_node link;
206 int refcount;
207 int len;
208 uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
209 uint32_t last_timestamp;
210 };
211
212 /** Downcasting convenience macro. */
213 static inline struct brw_perf_query_object *
214 brw_perf_query(struct gl_perf_query_object *o)
215 {
216 return (struct brw_perf_query_object *) o;
217 }
218
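/* Layout of the MI_RPC BO used for OA queries (see brw_begin_perf_query()
 * and brw_end_perf_query()): the begin OA report is written at offset 0,
 * the end report at MI_RPC_BO_END_OFFSET_BYTES, and the begin/end RPSTAT
 * (GT frequency) snapshots at the MI_FREQ_*_OFFSET_BYTES offsets.
 */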
219 #define MI_RPC_BO_SIZE 4096
220 #define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
221 #define MI_FREQ_START_OFFSET_BYTES (3072)
222 #define MI_FREQ_END_OFFSET_BYTES (3076)
223
224 /******************************************************************************/
225
226 static bool
227 brw_is_perf_query_ready(struct gl_context *ctx,
228 struct gl_perf_query_object *o);
229
230 static uint64_t
231 brw_perf_query_get_metric_id(struct brw_context *brw,
232 const struct gen_perf_query_info *query)
233 {
234 /* These queries are known to never change; their config ID was loaded
235 * when the query was first created. No need to look them up again.
236 */
237 if (query->kind == GEN_PERF_QUERY_TYPE_OA)
238 return query->oa_metrics_set_id;
239
240 assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);
241
242 /* Raw queries can be reprogrammed by an external application/library.
243 * When a raw query is used for the first time its ID is set to a value !=
244 * 0. When it stops being used the ID returns to 0. No need to reload the
245 * ID when it's already loaded.
246 */
247 if (query->oa_metrics_set_id != 0) {
248 DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
249 query->name, query->guid, query->oa_metrics_set_id);
250 return query->oa_metrics_set_id;
251 }
252
253 struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
254 if (!gen_perf_load_metric_id(brw->perfquery.perf, query->guid,
255 &raw_query->oa_metrics_set_id)) {
256 DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
257 raw_query->oa_metrics_set_id = 1ULL;
258 } else {
259 DBG("Raw query '%s'guid=%s loaded ID: %"PRIu64"\n",
260 query->name, query->guid, query->oa_metrics_set_id);
261 }
262 return query->oa_metrics_set_id;
263 }
264
265 static void
266 dump_perf_query_callback(GLuint id, void *query_void, void *brw_void)
267 {
268 struct gl_context *ctx = brw_void;
269 struct gl_perf_query_object *o = query_void;
270 struct brw_perf_query_object *obj = query_void;
271
272 switch (obj->query->kind) {
273 case GEN_PERF_QUERY_TYPE_OA:
274 case GEN_PERF_QUERY_TYPE_RAW:
275 DBG("%4d: %-6s %-8s BO: %-4s OA data: %-10s %-15s\n",
276 id,
277 o->Used ? "Dirty," : "New,",
278 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
279 obj->oa.bo ? "yes," : "no,",
280 brw_is_perf_query_ready(ctx, o) ? "ready," : "not ready,",
281 obj->oa.results_accumulated ? "accumulated" : "not accumulated");
282 break;
283 case GEN_PERF_QUERY_TYPE_PIPELINE:
284 DBG("%4d: %-6s %-8s BO: %-4s\n",
285 id,
286 o->Used ? "Dirty," : "New,",
287 o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"),
288 obj->pipeline_stats.bo ? "yes" : "no");
289 break;
290 default:
291 unreachable("Unknown query type");
292 break;
293 }
294 }
295
296 static void
297 dump_perf_queries(struct brw_context *brw)
298 {
299 struct gl_context *ctx = &brw->ctx;
300 DBG("Queries: (Open queries = %d, OA users = %d)\n",
301 brw->perfquery.n_active_oa_queries, brw->perfquery.n_oa_users);
302 _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
303 }
304
305 /******************************************************************************/
306
307 static struct brw_oa_sample_buf *
308 get_free_sample_buf(struct brw_context *brw)
309 {
310 struct exec_node *node = exec_list_pop_head(&brw->perfquery.free_sample_buffers);
311 struct brw_oa_sample_buf *buf;
312
313 if (node)
314 buf = exec_node_data(struct brw_oa_sample_buf, node, link);
315 else {
316 buf = ralloc_size(brw, sizeof(*buf));
317
318 exec_node_init(&buf->link);
319 buf->refcount = 0;
320 buf->len = 0;
321 }
322
323 return buf;
324 }
325
326 static void
327 reap_old_sample_buffers(struct brw_context *brw)
328 {
329 struct exec_node *tail_node =
330 exec_list_get_tail(&brw->perfquery.sample_buffers);
331 struct brw_oa_sample_buf *tail_buf =
332 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
333
334 /* Remove all old, unreferenced sample buffers walking forward from
335 * the head of the list, except always leave at least one node in
336 * the list so we always have a node to reference when we Begin
337 * a new query.
338 */
339 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
340 &brw->perfquery.sample_buffers)
341 {
342 if (buf->refcount == 0 && buf != tail_buf) {
343 exec_node_remove(&buf->link);
344 exec_list_push_head(&brw->perfquery.free_sample_buffers, &buf->link);
345 } else
346 return;
347 }
348 }
349
350 static void
351 free_sample_bufs(struct brw_context *brw)
352 {
353 foreach_list_typed_safe(struct brw_oa_sample_buf, buf, link,
354 &brw->perfquery.free_sample_buffers)
355 ralloc_free(buf);
356
357 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
358 }
359
360 /******************************************************************************/
361
362 /**
363 * Driver hook for glGetPerfQueryInfoINTEL().
364 */
365 static void
366 brw_get_perf_query_info(struct gl_context *ctx,
367 unsigned query_index,
368 const char **name,
369 GLuint *data_size,
370 GLuint *n_counters,
371 GLuint *n_active)
372 {
373 struct brw_context *brw = brw_context(ctx);
374 const struct gen_perf_query_info *query =
375 &brw->perfquery.perf->queries[query_index];
376
377 *name = query->name;
378 *data_size = query->data_size;
379 *n_counters = query->n_counters;
380
381 switch (query->kind) {
382 case GEN_PERF_QUERY_TYPE_OA:
383 case GEN_PERF_QUERY_TYPE_RAW:
384 *n_active = brw->perfquery.n_active_oa_queries;
385 break;
386
387 case GEN_PERF_QUERY_TYPE_PIPELINE:
388 *n_active = brw->perfquery.n_active_pipeline_stats_queries;
389 break;
390
391 default:
392 unreachable("Unknown query type");
393 break;
394 }
395 }
396
397 static GLuint
398 gen_counter_type_enum_to_gl_type(enum gen_perf_counter_type type)
399 {
400 switch (type) {
401 case GEN_PERF_COUNTER_TYPE_EVENT: return GL_PERFQUERY_COUNTER_EVENT_INTEL;
402 case GEN_PERF_COUNTER_TYPE_DURATION_NORM: return GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL;
403 case GEN_PERF_COUNTER_TYPE_DURATION_RAW: return GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL;
404 case GEN_PERF_COUNTER_TYPE_THROUGHPUT: return GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL;
405 case GEN_PERF_COUNTER_TYPE_RAW: return GL_PERFQUERY_COUNTER_RAW_INTEL;
406 case GEN_PERF_COUNTER_TYPE_TIMESTAMP: return GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL;
407 default:
408 unreachable("Unknown counter type");
409 }
410 }
411
412 static GLuint
413 gen_counter_data_type_to_gl_type(enum gen_perf_counter_data_type type)
414 {
415 switch (type) {
416 case GEN_PERF_COUNTER_DATA_TYPE_BOOL32: return GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL;
417 case GEN_PERF_COUNTER_DATA_TYPE_UINT32: return GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL;
418 case GEN_PERF_COUNTER_DATA_TYPE_UINT64: return GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
419 case GEN_PERF_COUNTER_DATA_TYPE_FLOAT: return GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL;
420 case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE: return GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL;
421 default:
422 unreachable("Unknown counter data type");
423 }
424 }
425
426 /**
427 * Driver hook for glGetPerfCounterInfoINTEL().
428 */
429 static void
430 brw_get_perf_counter_info(struct gl_context *ctx,
431 unsigned query_index,
432 unsigned counter_index,
433 const char **name,
434 const char **desc,
435 GLuint *offset,
436 GLuint *data_size,
437 GLuint *type_enum,
438 GLuint *data_type_enum,
439 GLuint64 *raw_max)
440 {
441 struct brw_context *brw = brw_context(ctx);
442 const struct gen_perf_query_info *query =
443 &brw->perfquery.perf->queries[query_index];
444 const struct gen_perf_query_counter *counter =
445 &query->counters[counter_index];
446
447 *name = counter->name;
448 *desc = counter->desc;
449 *offset = counter->offset;
450 *data_size = gen_perf_query_counter_get_size(counter);
451 *type_enum = gen_counter_type_enum_to_gl_type(counter->type);
452 *data_type_enum = gen_counter_data_type_to_gl_type(counter->data_type);
453 *raw_max = counter->raw_max;
454 }
455
456 /******************************************************************************/
457
458 /**
459 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
460 * pipeline statistics for the performance query object.
461 */
462 static void
463 snapshot_statistics_registers(struct brw_context *brw,
464 struct brw_perf_query_object *obj,
465 uint32_t offset_in_bytes)
466 {
467 const struct gen_perf_query_info *query = obj->query;
468 const int n_counters = query->n_counters;
469
470 for (int i = 0; i < n_counters; i++) {
471 const struct gen_perf_query_counter *counter = &query->counters[i];
472
473 assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);
474
475 brw_store_register_mem64(brw, obj->pipeline_stats.bo,
476 counter->pipeline_stat.reg,
477 offset_in_bytes + i * sizeof(uint64_t));
478 }
479 }
480
481 /**
482 * Add a query to the global list of "unaccumulated queries."
483 *
484 * Queries are tracked here until all the associated OA reports have
485 * been accumulated via accumulate_oa_reports() after the end
486 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
487 */
488 static void
489 add_to_unaccumulated_query_list(struct brw_context *brw,
490 struct brw_perf_query_object *obj)
491 {
492 if (brw->perfquery.unaccumulated_elements >=
493 brw->perfquery.unaccumulated_array_size)
494 {
495 brw->perfquery.unaccumulated_array_size *= 1.5;
496 brw->perfquery.unaccumulated =
497 reralloc(brw, brw->perfquery.unaccumulated,
498 struct brw_perf_query_object *,
499 brw->perfquery.unaccumulated_array_size);
500 }
501
502 brw->perfquery.unaccumulated[brw->perfquery.unaccumulated_elements++] = obj;
503 }
504
505 /**
506 * Remove a query from the global list of unaccumulated queries after
507 * successfully accumulating the OA reports associated with the
508 * query in accumulate_oa_reports() or when discarding unwanted query
509 * results.
510 */
511 static void
512 drop_from_unaccumulated_query_list(struct brw_context *brw,
513 struct brw_perf_query_object *obj)
514 {
515 for (int i = 0; i < brw->perfquery.unaccumulated_elements; i++) {
516 if (brw->perfquery.unaccumulated[i] == obj) {
517 int last_elt = --brw->perfquery.unaccumulated_elements;
518
519 if (i == last_elt)
520 brw->perfquery.unaccumulated[i] = NULL;
521 else {
522 brw->perfquery.unaccumulated[i] =
523 brw->perfquery.unaccumulated[last_elt];
524 }
525
526 break;
527 }
528 }
529
530 /* Drop our samples_head reference so that associated periodic
531 * sample data buffers can potentially be reaped if they aren't
532 * referenced by any other queries...
533 */
534
535 struct brw_oa_sample_buf *buf =
536 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
537
538 assert(buf->refcount > 0);
539 buf->refcount--;
540
541 obj->oa.samples_head = NULL;
542
543 reap_old_sample_buffers(brw);
544 }
545
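/* The i915 perf stream is kept disabled until the first OA query actually
 * needs it; n_oa_users counts the queries currently relying on the stream
 * being enabled.
 */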
546 static bool
547 inc_n_oa_users(struct brw_context *brw)
548 {
549 if (brw->perfquery.n_oa_users == 0 &&
550 drmIoctl(brw->perfquery.oa_stream_fd,
551 I915_PERF_IOCTL_ENABLE, 0) < 0)
552 {
553 return false;
554 }
555 ++brw->perfquery.n_oa_users;
556
557 return true;
558 }
559
560 static void
561 dec_n_oa_users(struct brw_context *brw)
562 {
563 /* Disabling the i915 perf stream will effectively disable the OA
564 * counters. Note it's important to be sure there are no outstanding
565 * MI_RPC commands at this point since they could stall the CS
566 * indefinitely once OACONTROL is disabled.
567 */
568 --brw->perfquery.n_oa_users;
569 if (brw->perfquery.n_oa_users == 0 &&
570 drmIoctl(brw->perfquery.oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
571 {
572 DBG("WARNING: Error disabling i915 perf stream: %m\n");
573 }
574 }
575
576 /* In general, if we see anything spurious while accumulating results we
577 * don't try to continue accumulating the current query in the hope that
578 * things will work out; we scrap anything outstanding and then hope for
579 * the best with new queries.
580 */
581 static void
582 discard_all_queries(struct brw_context *brw)
583 {
584 while (brw->perfquery.unaccumulated_elements) {
585 struct brw_perf_query_object *obj = brw->perfquery.unaccumulated[0];
586
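/* Mark the results as accumulated so we never try to emit an end MI_RPC
 * for this discarded query or accumulate its reports later.
 */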
587 obj->oa.results_accumulated = true;
588 drop_from_unaccumulated_query_list(brw, brw->perfquery.unaccumulated[0]);
589
590 dec_n_oa_users(brw);
591 }
592 }
593
594 enum OaReadStatus {
595 OA_READ_STATUS_ERROR,
596 OA_READ_STATUS_UNFINISHED,
597 OA_READ_STATUS_FINISHED,
598 };
599
600 static enum OaReadStatus
601 read_oa_samples_until(struct brw_context *brw,
602 uint32_t start_timestamp,
603 uint32_t end_timestamp)
604 {
605 struct exec_node *tail_node =
606 exec_list_get_tail(&brw->perfquery.sample_buffers);
607 struct brw_oa_sample_buf *tail_buf =
608 exec_node_data(struct brw_oa_sample_buf, tail_node, link);
609 uint32_t last_timestamp = tail_buf->last_timestamp;
610
611 while (1) {
612 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
613 uint32_t offset;
614 int len;
615
616 while ((len = read(brw->perfquery.oa_stream_fd, buf->buf,
617 sizeof(buf->buf))) < 0 && errno == EINTR)
618 ;
619
620 if (len <= 0) {
621 exec_list_push_tail(&brw->perfquery.free_sample_buffers, &buf->link);
622
623 if (len < 0) {
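/* EAGAIN just means no more samples are available right now. The unsigned
 * timestamp deltas below tolerate 32bit wraparound when checking whether
 * we have already read up to the end timestamp.
 */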
624 if (errno == EAGAIN)
625 return ((last_timestamp - start_timestamp) >=
626 (end_timestamp - start_timestamp)) ?
627 OA_READ_STATUS_FINISHED :
628 OA_READ_STATUS_UNFINISHED;
629 else {
630 DBG("Error reading i915 perf samples: %m\n");
631 }
632 } else
633 DBG("Spurious EOF reading i915 perf samples\n");
634
635 return OA_READ_STATUS_ERROR;
636 }
637
638 buf->len = len;
639 exec_list_push_tail(&brw->perfquery.sample_buffers, &buf->link);
640
641 /* Go through the reports and update the last timestamp. */
642 offset = 0;
643 while (offset < buf->len) {
644 const struct drm_i915_perf_record_header *header =
645 (const struct drm_i915_perf_record_header *) &buf->buf[offset];
646 uint32_t *report = (uint32_t *) (header + 1);
647
648 if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
649 last_timestamp = report[1];
650
651 offset += header->size;
652 }
653
654 buf->last_timestamp = last_timestamp;
655 }
656
657 unreachable("not reached");
658 return OA_READ_STATUS_ERROR;
659 }
660
661 /**
662 * Try to read all the reports until either the delimiting timestamp
663 * or an error arises.
664 */
665 static bool
666 read_oa_samples_for_query(struct brw_context *brw,
667 struct brw_perf_query_object *obj)
668 {
669 uint32_t *start;
670 uint32_t *last;
671 uint32_t *end;
672
673 /* We need the MI_REPORT_PERF_COUNT to land before we can start
674 * accumulating. */
675 assert(!brw_batch_references(&brw->batch, obj->oa.bo) &&
676 !brw_bo_busy(obj->oa.bo));
677
678 /* Map the BO once here; brw_get_perf_query_data() unmaps it once the
679 * results have been accumulated. */
680 if (obj->oa.map == NULL)
681 obj->oa.map = brw_bo_map(brw, obj->oa.bo, MAP_READ);
682
683 start = last = obj->oa.map;
684 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
685
686 if (start[0] != obj->oa.begin_report_id) {
687 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
688 return true;
689 }
690 if (end[0] != (obj->oa.begin_report_id + 1)) {
691 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
692 return true;
693 }
694
695 /* Read the reports until the end timestamp. */
696 switch (read_oa_samples_until(brw, start[1], end[1])) {
697 case OA_READ_STATUS_ERROR:
698 /* Fallthrough and let accumulate_oa_reports() deal with the
699 * error. */
700 case OA_READ_STATUS_FINISHED:
701 return true;
702 case OA_READ_STATUS_UNFINISHED:
703 return false;
704 }
705
706 unreachable("invalid read status");
707 return false;
708 }
709
710 /**
711 * Accumulate raw OA counter values based on deltas between pairs of
712 * OA reports.
713 *
714 * Accumulation starts from the first report captured via
715 * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
716 * last MI_RPC report requested by brw_end_perf_query(). Between these
717 * two reports there may also be some number of periodically sampled OA
718 * reports collected via the i915 perf interface - depending on the
719 * duration of the query.
720 *
721 * These periodic snapshots help to ensure we handle counter overflow
722 * correctly by being frequent enough to ensure we don't miss multiple
723 * overflows of a counter between snapshots. For Gen8+ the i915 perf
724 * snapshots provide the extra context-switch reports that let us
725 * subtract out the progress of counters associated with other
726 * contexts running on the system.
727 */
728 static void
729 accumulate_oa_reports(struct brw_context *brw,
730 struct brw_perf_query_object *obj)
731 {
732 const struct gen_device_info *devinfo = &brw->screen->devinfo;
733 struct gl_perf_query_object *o = &obj->base;
734 uint32_t *start;
735 uint32_t *last;
736 uint32_t *end;
737 struct exec_node *first_samples_node;
738 bool in_ctx = true;
739 int out_duration = 0;
740
741 assert(o->Ready);
742 assert(obj->oa.map != NULL);
743
744 start = last = obj->oa.map;
745 end = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
746
747 if (start[0] != obj->oa.begin_report_id) {
748 DBG("Spurious start report id=%"PRIu32"\n", start[0]);
749 goto error;
750 }
751 if (end[0] != (obj->oa.begin_report_id + 1)) {
752 DBG("Spurious end report id=%"PRIu32"\n", end[0]);
753 goto error;
754 }
755
756 /* See if we have any periodic reports to accumulate too... */
757
758 /* N.B. The oa.samples_head was set when the query began and
759 * pointed to the tail of the brw->perfquery.sample_buffers list at
760 * the time the query started. Since the buffer existed before the
761 * first MI_REPORT_PERF_COUNT command was emitted we therefore know
762 * that no data in this particular node's buffer can possibly be
763 * associated with the query - so skip ahead one...
764 */
765 first_samples_node = obj->oa.samples_head->next;
766
767 foreach_list_typed_from(struct brw_oa_sample_buf, buf, link,
768 &brw->perfquery.sample_buffers,
769 first_samples_node)
770 {
771 int offset = 0;
772
773 while (offset < buf->len) {
774 const struct drm_i915_perf_record_header *header =
775 (const struct drm_i915_perf_record_header *)(buf->buf + offset);
776
777 assert(header->size != 0);
778 assert(header->size <= buf->len);
779
780 offset += header->size;
781
782 switch (header->type) {
783 case DRM_I915_PERF_RECORD_SAMPLE: {
784 uint32_t *report = (uint32_t *)(header + 1);
785 bool add = true;
786
787 /* Ignore reports that come before the start marker.
788 * (Note: takes care to allow overflow of 32bit timestamps)
789 */
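/* A scaled delta larger than 5 seconds is assumed to be a wrapped
 * (negative) 32bit timestamp delta, i.e. a report taken before the
 * start marker.
 */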
790 if (gen_device_info_timebase_scale(devinfo,
791 report[1] - start[1]) > 5000000000) {
792 continue;
793 }
794
795 /* Ignore reports that come after the end marker.
796 * (Note: takes care to allow overflow of 32bit timestamps)
797 */
798 if (gen_device_info_timebase_scale(devinfo,
799 report[1] - end[1]) <= 5000000000) {
800 goto end;
801 }
802
803 /* For Gen8+ since the counters continue while other
804 * contexts are running we need to discount any unrelated
805 * deltas. The hardware automatically generates a report
806 * on context switch which gives us a new reference point
807 * to continue adding deltas from.
808 *
809 * For Haswell we can rely on the HW to stop the progress
810 * of OA counters while any other context is active.
811 */
812 if (devinfo->gen >= 8) {
813 if (in_ctx && report[2] != obj->oa.result.hw_id) {
814 DBG("i915 perf: Switch AWAY (observed by ID change)\n");
815 in_ctx = false;
816 out_duration = 0;
817 } else if (in_ctx == false && report[2] == obj->oa.result.hw_id) {
818 DBG("i915 perf: Switch TO\n");
819 in_ctx = true;
820
821 /* From experimentation in IGT, we found that the OA unit
822 * might label some report as "idle" (using an invalid
823 * context ID), right after a report for a given context.
824 * Deltas generated by those reports actually belong to the
825 * previous context, even though they're not labelled as
826 * such.
827 *
828 * We didn't *really* Switch AWAY in the case that we e.g.
829 * saw a single periodic report while idle...
830 */
831 if (out_duration >= 1)
832 add = false;
833 } else if (in_ctx) {
834 assert(report[2] == obj->oa.result.hw_id);
835 DBG("i915 perf: Continuation IN\n");
836 } else {
837 assert(report[2] != obj->oa.result.hw_id);
838 DBG("i915 perf: Continuation OUT\n");
839 add = false;
840 out_duration++;
841 }
842 }
843
844 if (add) {
845 gen_perf_query_result_accumulate(&obj->oa.result, obj->query,
846 last, report);
847 }
848
849 last = report;
850
851 break;
852 }
853
854 case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
855 DBG("i915 perf: OA error: all reports lost\n");
856 goto error;
857 case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
858 DBG("i915 perf: OA report lost\n");
859 break;
860 }
861 }
862 }
863
864 end:
865
866 gen_perf_query_result_accumulate(&obj->oa.result, obj->query,
867 last, end);
868
869 DBG("Marking %d accumulated - results gathered\n", o->Id);
870
871 obj->oa.results_accumulated = true;
872 drop_from_unaccumulated_query_list(brw, obj);
873 dec_n_oa_users(brw);
874
875 return;
876
877 error:
878
879 discard_all_queries(brw);
880 }
881
882 /******************************************************************************/
883
884 static bool
885 open_i915_perf_oa_stream(struct brw_context *brw,
886 int metrics_set_id,
887 int report_format,
888 int period_exponent,
889 int drm_fd,
890 uint32_t ctx_id)
891 {
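/* i915 perf open properties are passed as (key, value) pairs, hence the
 * ARRAY_SIZE() / 2 for num_properties below.
 */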
892 uint64_t properties[] = {
893 /* Single context sampling */
894 DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,
895
896 /* Include OA reports in samples */
897 DRM_I915_PERF_PROP_SAMPLE_OA, true,
898
899 /* OA unit configuration */
900 DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
901 DRM_I915_PERF_PROP_OA_FORMAT, report_format,
902 DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
903 };
904 struct drm_i915_perf_open_param param = {
905 .flags = I915_PERF_FLAG_FD_CLOEXEC |
906 I915_PERF_FLAG_FD_NONBLOCK |
907 I915_PERF_FLAG_DISABLED,
908 .num_properties = ARRAY_SIZE(properties) / 2,
909 .properties_ptr = (uintptr_t) properties,
910 };
911 int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
912 if (fd == -1) {
913 DBG("Error opening i915 perf OA stream: %m\n");
914 return false;
915 }
916
917 brw->perfquery.oa_stream_fd = fd;
918
919 brw->perfquery.current_oa_metrics_set_id = metrics_set_id;
920 brw->perfquery.current_oa_format = report_format;
921
922 return true;
923 }
924
925 static void
926 close_perf(struct brw_context *brw,
927 const struct gen_perf_query_info *query)
928 {
929 if (brw->perfquery.oa_stream_fd != -1) {
930 close(brw->perfquery.oa_stream_fd);
931 brw->perfquery.oa_stream_fd = -1;
932 }
933 if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
934 struct gen_perf_query_info *raw_query =
935 (struct gen_perf_query_info *) query;
936 raw_query->oa_metrics_set_id = 0;
937 }
938 }
939
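/* Snapshot the current GT frequency (RPSTAT register) into the query BO so
 * the frequency range covered by the query can be reported later (see
 * read_gt_frequency()).
 */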
940 static void
941 capture_frequency_stat_register(struct brw_context *brw,
942 struct brw_bo *bo,
943 uint32_t bo_offset)
944 {
945 const struct gen_device_info *devinfo = &brw->screen->devinfo;
946
947 if (devinfo->gen >= 7 && devinfo->gen <= 8 &&
948 !devinfo->is_baytrail && !devinfo->is_cherryview) {
949 brw_store_register_mem32(brw, bo, GEN7_RPSTAT1, bo_offset);
950 } else if (devinfo->gen >= 9) {
951 brw_store_register_mem32(brw, bo, GEN9_RPSTAT0, bo_offset);
952 }
953 }
954
955 /**
956 * Driver hook for glBeginPerfQueryINTEL().
957 */
958 static bool
959 brw_begin_perf_query(struct gl_context *ctx,
960 struct gl_perf_query_object *o)
961 {
962 struct brw_context *brw = brw_context(ctx);
963 struct brw_perf_query_object *obj = brw_perf_query(o);
964 const struct gen_perf_query_info *query = obj->query;
965
966 /* We can assume the frontend hides mistaken attempts to Begin a
967 * query object multiple times before its End. Similarly if an
968 * application reuses a query object before results have arrived
969 * the frontend will wait for prior results so we don't need
970 * to support abandoning in-flight results.
971 */
972 assert(!o->Active);
973 assert(!o->Used || o->Ready); /* no in-flight query to worry about */
974
975 DBG("Begin(%d)\n", o->Id);
976
977 /* XXX: We have to consider that the command parser unit that parses batch
978 * buffer commands and is used to capture begin/end counter snapshots isn't
979 * implicitly synchronized with what's currently running across other GPU
980 * units (such as the EUs running shaders) that the performance counters are
981 * associated with.
982 *
983 * The intention of performance queries is to measure the work associated
984 * with commands between the begin/end delimiters and so for that to be the
985 * case we need to explicitly synchronize the parsing of commands to capture
986 * Begin/End counter snapshots with what's running across other parts of the
987 * GPU.
988 *
989 * When the command parser reaches a Begin marker it effectively needs to
990 * drain everything currently running on the GPU until the hardware is idle
991 * before capturing the first snapshot of counters - otherwise the results
992 * would also be measuring the effects of earlier commands.
993 *
994 * When the command parser reaches an End marker it needs to stall until
995 * everything currently running on the GPU has finished before capturing the
996 * end snapshot - otherwise the results won't be a complete representation
997 * of the work.
998 *
999 * Theoretically there could be opportunities to minimize how much of the
1000 * GPU pipeline is drained, or that we stall for, when we know what specific
1001 * units the performance counters being queried relate to but we don't
1002 * currently attempt to be clever here.
1003 *
1004 * Note: with our current simple approach here then for back-to-back queries
1005 * we will redundantly emit duplicate commands to synchronize the command
1006 * streamer with the rest of the GPU pipeline, but we assume that in HW the
1007 * second synchronization is effectively a NOOP.
1008 *
1009 * N.B. The final results are based on deltas of counters between (inside)
1010 * Begin/End markers so even though the total wall clock time of the
1011 * workload is stretched by larger pipeline bubbles the bubbles themselves
1012 * are generally invisible to the query results. Whether that's a good or a
1013 * bad thing depends on the use case. For a lower real-time impact while
1014 * capturing metrics then periodic sampling may be a better choice than
1015 * INTEL_performance_query.
1016 *
1017 *
1018 * This is our Begin synchronization point to drain current work on the
1019 * GPU before we capture our first counter snapshot...
1020 */
1021 brw_emit_mi_flush(brw);
1022
1023 switch (query->kind) {
1024 case GEN_PERF_QUERY_TYPE_OA:
1025 case GEN_PERF_QUERY_TYPE_RAW: {
1026
1027 /* Opening an i915 perf stream implies exclusive access to the OA unit
1028 * which will generate counter reports for a specific counter set with a
1029 * specific layout/format so we can't begin any OA based queries that
1030 * require a different counter set or format unless we get an opportunity
1031 * to close the stream and open a new one...
1032 */
1033 uint64_t metric_id = brw_perf_query_get_metric_id(brw, query);
1034
1035 if (brw->perfquery.oa_stream_fd != -1 &&
1036 brw->perfquery.current_oa_metrics_set_id != metric_id) {
1037
1038 if (brw->perfquery.n_oa_users != 0) {
1039 DBG("WARNING: Begin(%d) failed already using perf config=%i/%"PRIu64"\n",
1040 o->Id, brw->perfquery.current_oa_metrics_set_id, metric_id);
1041 return false;
1042 } else
1043 close_perf(brw, query);
1044 }
1045
1046 /* If the OA counters aren't already on, enable them. */
1047 if (brw->perfquery.oa_stream_fd == -1) {
1048 __DRIscreen *screen = brw->screen->driScrnPriv;
1049 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1050
1051 /* The period_exponent gives a sampling period as follows:
1052 * sample_period = timestamp_period * 2^(period_exponent + 1)
1053 *
1054 * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
1055 * ~83ns (GEN8/9).
1056 *
1057 * The counter overflow period is derived from the EuActive counter
1058 * which reads a counter that increments by the number of clock
1059 * cycles multiplied by the number of EUs. It can be calculated as:
1060 *
1061 * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
1062 *
1063 * (E.g. with a 32bit A counter, 40 EUs @ 1GHz = ~53ms)
1064 *
1065 * We select a sampling period lower than that overflow period to
1066 * ensure we cannot see more than 1 counter overflow, otherwise we
1067 * could lose information.
1068 */
1069
1070 int a_counter_in_bits = 32;
1071 if (devinfo->gen >= 8)
1072 a_counter_in_bits = 40;
1073
1074 uint64_t overflow_period = pow(2, a_counter_in_bits) /
1075 (brw->perfquery.perf->sys_vars.n_eus *
1076 /* drop 1GHz freq to have units in nanoseconds */
1077 2);
1078
1079 DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
1080 overflow_period, overflow_period / 1000000ul, brw->perfquery.perf->sys_vars.n_eus);
1081
1082 int period_exponent = 0;
1083 uint64_t prev_sample_period, next_sample_period;
1084 for (int e = 0; e < 30; e++) {
1085 prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
1086 next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
1087
1088 /* Take the previous sampling period, lower than the overflow
1089 * period.
1090 */
1091 if (prev_sample_period < overflow_period &&
1092 next_sample_period > overflow_period)
1093 period_exponent = e + 1;
1094 }
1095
1096 if (period_exponent == 0) {
1097 DBG("WARNING: enable to find a sampling exponent\n");
1098 return false;
1099 }
1100
1101 DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
1102 prev_sample_period / 1000000ul);
1103
1104 if (!open_i915_perf_oa_stream(brw,
1105 metric_id,
1106 query->oa_format,
1107 period_exponent,
1108 screen->fd, /* drm fd */
1109 brw->hw_ctx))
1110 return false;
1111 } else {
1112 assert(brw->perfquery.current_oa_metrics_set_id == metric_id &&
1113 brw->perfquery.current_oa_format == query->oa_format);
1114 }
1115
1116 if (!inc_n_oa_users(brw)) {
1117 DBG("WARNING: Error enabling i915 perf stream: %m\n");
1118 return false;
1119 }
1120
1121 if (obj->oa.bo) {
1122 brw_bo_unreference(obj->oa.bo);
1123 obj->oa.bo = NULL;
1124 }
1125
1126 obj->oa.bo =
1127 brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo", MI_RPC_BO_SIZE,
1128 BRW_MEMZONE_OTHER);
1129 #ifdef DEBUG
1130 /* Pre-filling the BO helps debug whether writes landed. */
1131 void *map = brw_bo_map(brw, obj->oa.bo, MAP_WRITE);
1132 memset(map, 0x80, MI_RPC_BO_SIZE);
1133 brw_bo_unmap(obj->oa.bo);
1134 #endif
1135
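/* Report IDs are handed out in pairs: the begin MI_RPC uses
 * begin_report_id and the end MI_RPC uses begin_report_id + 1, which is
 * how we later sanity check that both reports landed in the BO.
 */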
1136 obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
1137 brw->perfquery.next_query_start_report_id += 2;
1138
1139 /* We flush the batchbuffer here to minimize the chances that MI_RPC
1140 * delimiting commands end up in different batchbuffers. If that's the
1141 * case, the measurement will include the time it takes for the kernel
1142 * scheduler to load a new request into the hardware. This is manifested in
1143 * tools like frameretrace by spikes in the "GPU Core Clocks" counter.
1144 */
1145 intel_batchbuffer_flush(brw);
1146
1147 /* Take a starting OA counter snapshot. */
1148 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo, 0,
1149 obj->oa.begin_report_id);
1150 capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_START_OFFSET_BYTES);
1151
1152 ++brw->perfquery.n_active_oa_queries;
1153
1154 /* No already-buffered samples can possibly be associated with this query
1155 * so create a marker within the list of sample buffers enabling us to
1156 * easily ignore earlier samples when processing this query after
1157 * completion.
1158 */
1159 assert(!exec_list_is_empty(&brw->perfquery.sample_buffers));
1160 obj->oa.samples_head = exec_list_get_tail(&brw->perfquery.sample_buffers);
1161
1162 struct brw_oa_sample_buf *buf =
1163 exec_node_data(struct brw_oa_sample_buf, obj->oa.samples_head, link);
1164
1165 /* This reference will ensure that future/following sample
1166 * buffers (that may relate to this query) can't be freed until
1167 * this drops to zero.
1168 */
1169 buf->refcount++;
1170
1171 gen_perf_query_result_clear(&obj->oa.result);
1172 obj->oa.results_accumulated = false;
1173
1174 add_to_unaccumulated_query_list(brw, obj);
1175 break;
1176 }
1177
1178 case GEN_PERF_QUERY_TYPE_PIPELINE:
1179 if (obj->pipeline_stats.bo) {
1180 brw_bo_unreference(obj->pipeline_stats.bo);
1181 obj->pipeline_stats.bo = NULL;
1182 }
1183
1184 obj->pipeline_stats.bo =
1185 brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
1186 STATS_BO_SIZE, BRW_MEMZONE_OTHER);
1187
1188 /* Take starting snapshots. */
1189 snapshot_statistics_registers(brw, obj, 0);
1190
1191 ++brw->perfquery.n_active_pipeline_stats_queries;
1192 break;
1193
1194 default:
1195 unreachable("Unknown query type");
1196 break;
1197 }
1198
1199 if (INTEL_DEBUG & DEBUG_PERFMON)
1200 dump_perf_queries(brw);
1201
1202 return true;
1203 }
1204
1205 /**
1206 * Driver hook for glEndPerfQueryINTEL().
1207 */
1208 static void
1209 brw_end_perf_query(struct gl_context *ctx,
1210 struct gl_perf_query_object *o)
1211 {
1212 struct brw_context *brw = brw_context(ctx);
1213 struct brw_perf_query_object *obj = brw_perf_query(o);
1214
1215 DBG("End(%d)\n", o->Id);
1216
1217 /* Ensure that the work associated with the queried commands will have
1218 * finished before taking our query end counter readings.
1219 *
1220 * For more details see comment in brw_begin_perf_query for
1221 * corresponding flush.
1222 */
1223 brw_emit_mi_flush(brw);
1224
1225 switch (obj->query->kind) {
1226 case GEN_PERF_QUERY_TYPE_OA:
1227 case GEN_PERF_QUERY_TYPE_RAW:
1228
1229 /* NB: It's possible that the query will have already been marked
1230 * as 'accumulated' if an error was seen while reading samples
1231 * from perf. In this case we mustn't try to emit a closing
1232 * MI_RPC command in case the OA unit has already been disabled.
1233 */
1234 if (!obj->oa.results_accumulated) {
1235 /* Take an ending OA counter snapshot. */
1236 capture_frequency_stat_register(brw, obj->oa.bo, MI_FREQ_END_OFFSET_BYTES);
1237 brw->vtbl.emit_mi_report_perf_count(brw, obj->oa.bo,
1238 MI_RPC_BO_END_OFFSET_BYTES,
1239 obj->oa.begin_report_id + 1);
1240 }
1241
1242 --brw->perfquery.n_active_oa_queries;
1243
1244 /* NB: even though the query has now ended, it can't be accumulated
1245 * until the end MI_REPORT_PERF_COUNT snapshot has been written
1246 * to query->oa.bo
1247 */
1248 break;
1249
1250 case GEN_PERF_QUERY_TYPE_PIPELINE:
1251 snapshot_statistics_registers(brw, obj,
1252 STATS_BO_END_OFFSET_BYTES);
1253 --brw->perfquery.n_active_pipeline_stats_queries;
1254 break;
1255
1256 default:
1257 unreachable("Unknown query type");
1258 break;
1259 }
1260 }
1261
1262 static void
1263 brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
1264 {
1265 struct brw_context *brw = brw_context(ctx);
1266 struct brw_perf_query_object *obj = brw_perf_query(o);
1267 struct brw_bo *bo = NULL;
1268
1269 assert(!o->Ready);
1270
1271 switch (obj->query->kind) {
1272 case GEN_PERF_QUERY_TYPE_OA:
1273 case GEN_PERF_QUERY_TYPE_RAW:
1274 bo = obj->oa.bo;
1275 break;
1276
1277 case GEN_PERF_QUERY_TYPE_PIPELINE:
1278 bo = obj->pipeline_stats.bo;
1279 break;
1280
1281 default:
1282 unreachable("Unknown query type");
1283 break;
1284 }
1285
1286 if (bo == NULL)
1287 return;
1288
1289 /* If the current batch references our results bo then we need to
1290 * flush first...
1291 */
1292 if (brw_batch_references(&brw->batch, bo))
1293 intel_batchbuffer_flush(brw);
1294
1295 brw_bo_wait_rendering(bo);
1296
1297 /* Due to a race condition between the OA unit signaling report
1298 * availability and the report actually being written into memory,
1299 * we need to wait for all the reports to come in before we can
1300 * read them.
1301 */
1302 if (obj->query->kind == GEN_PERF_QUERY_TYPE_OA ||
1303 obj->query->kind == GEN_PERF_QUERY_TYPE_RAW) {
1304 while (!read_oa_samples_for_query(brw, obj))
1305 ;
1306 }
1307 }
1308
1309 static bool
1310 brw_is_perf_query_ready(struct gl_context *ctx,
1311 struct gl_perf_query_object *o)
1312 {
1313 struct brw_context *brw = brw_context(ctx);
1314 struct brw_perf_query_object *obj = brw_perf_query(o);
1315
1316 if (o->Ready)
1317 return true;
1318
1319 switch (obj->query->kind) {
1320 case GEN_PERF_QUERY_TYPE_OA:
1321 case GEN_PERF_QUERY_TYPE_RAW:
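/* Ready once the MI_RPC BO is idle (both begin and end reports have
 * landed) and we have been able to read the i915 perf samples up to the
 * end timestamp.
 */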
1322 return (obj->oa.results_accumulated ||
1323 (obj->oa.bo &&
1324 !brw_batch_references(&brw->batch, obj->oa.bo) &&
1325 !brw_bo_busy(obj->oa.bo) &&
1326 read_oa_samples_for_query(brw, obj)));
1327 case GEN_PERF_QUERY_TYPE_PIPELINE:
1328 return (obj->pipeline_stats.bo &&
1329 !brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
1330 !brw_bo_busy(obj->pipeline_stats.bo));
1331
1332 default:
1333 unreachable("Unknown query type");
1334 break;
1335 }
1336
1337 return false;
1338 }
1339
1340 static void
1341 read_slice_unslice_frequencies(struct brw_context *brw,
1342 struct brw_perf_query_object *obj)
1343 {
1344 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1345 uint32_t *begin_report = obj->oa.map, *end_report = obj->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
1346
1347 gen_perf_query_result_read_frequencies(&obj->oa.result,
1348 devinfo, begin_report, end_report);
1349 }
1350
1351 static void
1352 read_gt_frequency(struct brw_context *brw,
1353 struct brw_perf_query_object *obj)
1354 {
1355 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1356 uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
1357 end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));
1358
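/* The RPSTAT frequency fields are in units of 50MHz on gen7/8 and
 * 50/3 MHz on gen9+, hence the scaling below (converted to Hz at the end).
 */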
1359 switch (devinfo->gen) {
1360 case 7:
1361 case 8:
1362 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
1363 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
1364 break;
1365 case 9:
1366 case 10:
1367 case 11:
1368 obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
1369 obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
1370 break;
1371 default:
1372 unreachable("unexpected gen");
1373 }
1374
1375 /* Put the numbers into Hz. */
1376 obj->oa.gt_frequency[0] *= 1000000ULL;
1377 obj->oa.gt_frequency[1] *= 1000000ULL;
1378 }
1379
1380 static int
1381 get_oa_counter_data(struct brw_context *brw,
1382 struct brw_perf_query_object *obj,
1383 size_t data_size,
1384 uint8_t *data)
1385 {
1386 struct gen_perf *perf = brw->perfquery.perf;
1387 const struct gen_perf_query_info *query = obj->query;
1388 int n_counters = query->n_counters;
1389 int written = 0;
1390
1391 for (int i = 0; i < n_counters; i++) {
1392 const struct gen_perf_query_counter *counter = &query->counters[i];
1393 uint64_t *out_uint64;
1394 float *out_float;
1395 size_t counter_size = gen_perf_query_counter_get_size(counter);
1396
1397 if (counter_size) {
1398 switch (counter->data_type) {
1399 case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
1400 out_uint64 = (uint64_t *)(data + counter->offset);
1401 *out_uint64 =
1402 counter->oa_counter_read_uint64(perf, query,
1403 obj->oa.result.accumulator);
1404 break;
1405 case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
1406 out_float = (float *)(data + counter->offset);
1407 *out_float =
1408 counter->oa_counter_read_float(perf, query,
1409 obj->oa.result.accumulator);
1410 break;
1411 default:
1412 /* So far we aren't using uint32, double or bool32... */
1413 unreachable("unexpected counter data type");
1414 }
1415 written = counter->offset + counter_size;
1416 }
1417 }
1418
1419 return written;
1420 }
1421
1422 static int
1423 get_pipeline_stats_data(struct brw_context *brw,
1424 struct brw_perf_query_object *obj,
1425 size_t data_size,
1426 uint8_t *data)
1427
1428 {
1429 const struct gen_perf_query_info *query = obj->query;
1430 int n_counters = obj->query->n_counters;
1431 uint8_t *p = data;
1432
1433 uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, MAP_READ);
1434 uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
1435
1436 for (int i = 0; i < n_counters; i++) {
1437 const struct gen_perf_query_counter *counter = &query->counters[i];
1438 uint64_t value = end[i] - start[i];
1439
1440 if (counter->pipeline_stat.numerator !=
1441 counter->pipeline_stat.denominator) {
1442 value *= counter->pipeline_stat.numerator;
1443 value /= counter->pipeline_stat.denominator;
1444 }
1445
1446 *((uint64_t *)p) = value;
1447 p += 8;
1448 }
1449
1450 brw_bo_unmap(obj->pipeline_stats.bo);
1451
1452 return p - data;
1453 }
1454
1455 /**
1456 * Driver hook for glGetPerfQueryDataINTEL().
1457 */
1458 static void
1459 brw_get_perf_query_data(struct gl_context *ctx,
1460 struct gl_perf_query_object *o,
1461 GLsizei data_size,
1462 GLuint *data,
1463 GLuint *bytes_written)
1464 {
1465 struct brw_context *brw = brw_context(ctx);
1466 struct brw_perf_query_object *obj = brw_perf_query(o);
1467 int written = 0;
1468
1469 assert(brw_is_perf_query_ready(ctx, o));
1470
1471 DBG("GetData(%d)\n", o->Id);
1472
1473 if (INTEL_DEBUG & DEBUG_PERFMON)
1474 dump_perf_queries(brw);
1475
1476 /* We expect that the frontend only calls this hook when it knows
1477 * that results are available.
1478 */
1479 assert(o->Ready);
1480
1481 switch (obj->query->kind) {
1482 case GEN_PERF_QUERY_TYPE_OA:
1483 case GEN_PERF_QUERY_TYPE_RAW:
1484 if (!obj->oa.results_accumulated) {
1485 read_gt_frequency(brw, obj);
1486 read_slice_unslice_frequencies(brw, obj);
1487 accumulate_oa_reports(brw, obj);
1488 assert(obj->oa.results_accumulated);
1489
1490 brw_bo_unmap(obj->oa.bo);
1491 obj->oa.map = NULL;
1492 }
1493 if (obj->query->kind == GEN_PERF_QUERY_TYPE_OA) {
1494 written = get_oa_counter_data(brw, obj, data_size, (uint8_t *)data);
1495 } else {
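/* RAW queries are the ones registered for MDAPI use (see
 * brw_perf_query_register_mdapi_oa_query()), so their results are written
 * out in the MDAPI layout rather than per-counter like the OA queries
 * above.
 */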
1496 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1497
1498 written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
1499 devinfo, &obj->oa.result,
1500 obj->oa.gt_frequency[0],
1501 obj->oa.gt_frequency[1]);
1502 }
1503 break;
1504
1505 case GEN_PERF_QUERY_TYPE_PIPELINE:
1506 written = get_pipeline_stats_data(brw, obj, data_size, (uint8_t *)data);
1507 break;
1508
1509 default:
1510 unreachable("Unknown query type");
1511 break;
1512 }
1513
1514 if (bytes_written)
1515 *bytes_written = written;
1516 }
1517
1518 static struct gl_perf_query_object *
1519 brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
1520 {
1521 struct brw_context *brw = brw_context(ctx);
1522 const struct gen_perf_query_info *query =
1523 &brw->perfquery.perf->queries[query_index];
1524 struct brw_perf_query_object *obj =
1525 calloc(1, sizeof(struct brw_perf_query_object));
1526
1527 if (!obj)
1528 return NULL;
1529
1530 obj->query = query;
1531
1532 brw->perfquery.n_query_instances++;
1533
1534 return &obj->base;
1535 }
1536
1537 /**
1538 * Driver hook for glDeletePerfQueryINTEL().
1539 */
1540 static void
1541 brw_delete_perf_query(struct gl_context *ctx,
1542 struct gl_perf_query_object *o)
1543 {
1544 struct brw_context *brw = brw_context(ctx);
1545 struct brw_perf_query_object *obj = brw_perf_query(o);
1546
1547 /* We can assume that the frontend waits for a query to complete
1548 * before ever calling into here, so we don't have to worry about
1549 * deleting an in-flight query object.
1550 */
1551 assert(!o->Active);
1552 assert(!o->Used || o->Ready);
1553
1554 DBG("Delete(%d)\n", o->Id);
1555
1556 switch (obj->query->kind) {
1557 case GEN_PERF_QUERY_TYPE_OA:
1558 case GEN_PERF_QUERY_TYPE_RAW:
1559 if (obj->oa.bo) {
1560 if (!obj->oa.results_accumulated) {
1561 drop_from_unaccumulated_query_list(brw, obj);
1562 dec_n_oa_users(brw);
1563 }
1564
1565 brw_bo_unreference(obj->oa.bo);
1566 obj->oa.bo = NULL;
1567 }
1568
1569 obj->oa.results_accumulated = false;
1570 break;
1571
1572 case GEN_PERF_QUERY_TYPE_PIPELINE:
1573 if (obj->pipeline_stats.bo) {
1574 brw_bo_unreference(obj->pipeline_stats.bo);
1575 obj->pipeline_stats.bo = NULL;
1576 }
1577 break;
1578
1579 default:
1580 unreachable("Unknown query type");
1581 break;
1582 }
1583
1584 /* As an indication that the INTEL_performance_query extension is no
1585 * longer in use, it's a good time to free our cache of sample
1586 * buffers and close any current i915-perf stream.
1587 */
1588 if (--brw->perfquery.n_query_instances == 0) {
1589 free_sample_bufs(brw);
1590 close_perf(brw, obj->query);
1591 }
1592
1593 free(obj);
1594 }
1595
1596 /******************************************************************************/
1597
1598 static void
1599 init_pipeline_statistic_query_registers(struct brw_context *brw)
1600 {
1601 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1602 struct gen_perf *perf = brw->perfquery.perf;
1603 struct gen_perf_query_info *query =
1604 gen_perf_query_append_query_info(perf, MAX_STAT_COUNTERS);
1605
1606 query->kind = GEN_PERF_QUERY_TYPE_PIPELINE;
1607 query->name = "Pipeline Statistics Registers";
1608
1609 gen_perf_query_info_add_basic_stat_reg(query, IA_VERTICES_COUNT,
1610 "N vertices submitted");
1611 gen_perf_query_info_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
1612 "N primitives submitted");
1613 gen_perf_query_info_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
1614 "N vertex shader invocations");
1615
1616 if (devinfo->gen == 6) {
1617 gen_perf_query_info_add_stat_reg(query, GEN6_SO_PRIM_STORAGE_NEEDED, 1, 1,
1618 "SO_PRIM_STORAGE_NEEDED",
1619 "N geometry shader stream-out primitives (total)");
1620 gen_perf_query_info_add_stat_reg(query, GEN6_SO_NUM_PRIMS_WRITTEN, 1, 1,
1621 "SO_NUM_PRIMS_WRITTEN",
1622 "N geometry shader stream-out primitives (written)");
1623 } else {
1624 gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
1625 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
1626 "N stream-out (stream 0) primitives (total)");
1627 gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
1628 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
1629 "N stream-out (stream 1) primitives (total)");
1630 gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
1631 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
1632 "N stream-out (stream 2) primitives (total)");
1633 gen_perf_query_info_add_stat_reg(query, GEN7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
1634 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
1635 "N stream-out (stream 3) primitives (total)");
1636 gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
1637 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
1638 "N stream-out (stream 0) primitives (written)");
1639 gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
1640 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
1641 "N stream-out (stream 1) primitives (written)");
1642 gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
1643 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
1644 "N stream-out (stream 2) primitives (written)");
1645 gen_perf_query_info_add_stat_reg(query, GEN7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
1646 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
1647 "N stream-out (stream 3) primitives (written)");
1648 }
1649
1650 gen_perf_query_info_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
1651 "N TCS shader invocations");
1652 gen_perf_query_info_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
1653 "N TES shader invocations");
1654
1655 gen_perf_query_info_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
1656 "N geometry shader invocations");
1657 gen_perf_query_info_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
1658 "N geometry shader primitives emitted");
1659
1660 gen_perf_query_info_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
1661 "N primitives entering clipping");
1662 gen_perf_query_info_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
1663 "N primitives leaving clipping");
1664
1665 if (devinfo->is_haswell || devinfo->gen == 8) {
1666 gen_perf_query_info_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
1667 "N fragment shader invocations",
1668 "N fragment shader invocations");
1669 } else {
1670 gen_perf_query_info_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
1671 "N fragment shader invocations");
1672 }
1673
1674 gen_perf_query_info_add_basic_stat_reg(query, PS_DEPTH_COUNT,
1675 "N z-pass fragments");
1676
1677 if (devinfo->gen >= 7) {
1678 gen_perf_query_info_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
1679 "N compute shader invocations");
1680 }
1681
1682 query->data_size = sizeof(uint64_t) * query->n_counters;
1683 }
1684
1685 static bool
1686 query_topology(struct brw_context *brw)
1687 {
1688 __DRIscreen *screen = brw->screen->driScrnPriv;
1689 struct drm_i915_query_item item = {
1690 .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
1691 };
1692 struct drm_i915_query query = {
1693 .num_items = 1,
1694 .items_ptr = (uintptr_t) &item,
1695 };
1696
1697 if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query))
1698 return false;
1699
1700 struct drm_i915_query_topology_info *topo_info =
1701 (struct drm_i915_query_topology_info *) calloc(1, item.length);
1702 item.data_ptr = (uintptr_t) topo_info;
1703
1704 if (drmIoctl(screen->fd, DRM_IOCTL_I915_QUERY, &query) ||
1705 item.length <= 0) {
/* Free the topology buffer on this error path too so it isn't leaked. */
free(topo_info);
1706 return false;
}
1707
1708 gen_device_info_update_from_topology(&brw->screen->devinfo,
1709 topo_info);
1710
1711 free(topo_info);
1712
1713 return true;
1714 }
1715
1716 static bool
1717 getparam_topology(struct brw_context *brw)
1718 {
1719 __DRIscreen *screen = brw->screen->driScrnPriv;
1720 drm_i915_getparam_t gp;
1721 int ret;
1722
1723 int slice_mask = 0;
1724 gp.param = I915_PARAM_SLICE_MASK;
1725 gp.value = &slice_mask;
1726 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1727 if (ret)
1728 return false;
1729
1730 int subslice_mask = 0;
1731 gp.param = I915_PARAM_SUBSLICE_MASK;
1732 gp.value = &subslice_mask;
1733 ret = drmIoctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1734 if (ret)
1735 return false;
1736
1737 gen_device_info_update_from_masks(&brw->screen->devinfo,
1738 slice_mask,
1739 subslice_mask,
1740 brw->screen->eu_total);
1741
1742 return true;
1743 }
1744
1745 static unsigned
1746 brw_init_perf_query_info(struct gl_context *ctx)
1747 {
1748 struct brw_context *brw = brw_context(ctx);
1749 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1750 __DRIscreen *screen = brw->screen->driScrnPriv;
1751
1752 if (brw->perfquery.perf)
1753 return brw->perfquery.perf->n_queries;
1754
1755 brw->perfquery.perf = gen_perf_new(brw, drmIoctl);
1756
1757 init_pipeline_statistic_query_registers(brw);
1758 brw_perf_query_register_mdapi_statistic_query(brw);
1759
1760 if (!query_topology(brw)) {
1761 /* We need the i915 query uAPI on CNL+ (kernel 4.17+). */
1762 if (devinfo->gen >= 10)
1763 return false;
1764
1765 if (!getparam_topology(brw)) {
1766 /* We need the SLICE_MASK/SUBSLICE_MASK on gen8+ (kernel 4.13+). */
1767 if (devinfo->gen >= 8)
1768 return false;
1769
1770 /* On Haswell, the values are already computed for us in
1771 * gen_device_info.
1772 */
1773 }
1774 }
1775
1776 if (gen_perf_load_oa_metrics(brw->perfquery.perf, screen->fd, devinfo))
1777 brw_perf_query_register_mdapi_oa_query(brw);
1778
1779 brw->perfquery.unaccumulated =
1780 ralloc_array(brw, struct brw_perf_query_object *, 2);
1781 brw->perfquery.unaccumulated_elements = 0;
1782 brw->perfquery.unaccumulated_array_size = 2;
1783
1784 exec_list_make_empty(&brw->perfquery.sample_buffers);
1785 exec_list_make_empty(&brw->perfquery.free_sample_buffers);
1786
1787 /* It's convenient to guarantee that this linked list of sample
1788 * buffers is never empty so we add an empty head so when we
1789 * Begin an OA query we can always take a reference on a buffer
1790 * in this list.
1791 */
1792 struct brw_oa_sample_buf *buf = get_free_sample_buf(brw);
1793 exec_list_push_head(&brw->perfquery.sample_buffers, &buf->link);
1794
1795 brw->perfquery.oa_stream_fd = -1;
1796
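/* Note: the starting report ID appears to be arbitrary; the IDs are only
 * used to recognise our own begin/end MI_RPC reports in the BO.
 */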
1797 brw->perfquery.next_query_start_report_id = 1000;
1798
1799 return brw->perfquery.perf->n_queries;
1800 }
1801
1802 void
1803 brw_init_performance_queries(struct brw_context *brw)
1804 {
1805 struct gl_context *ctx = &brw->ctx;
1806
1807 ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
1808 ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
1809 ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
1810 ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
1811 ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
1812 ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
1813 ctx->Driver.EndPerfQuery = brw_end_perf_query;
1814 ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
1815 ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
1816 ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
1817 }