7ffb6c265ea20936f1a579d83151a399364b8751
[mesa.git] / src / gallium / drivers / radeon / r600_perfcounter.c
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Nicolai Hähnle <nicolai.haehnle@amd.com>
25 *
26 */
27
28 #include "util/u_memory.h"
29 #include "r600_query.h"
30 #include "r600_pipe_common.h"
31 #include "r600d_common.h"
32
33 /* Max counters per HW block */
34 #define R600_QUERY_MAX_COUNTERS 16
35
/* Group-name suffix per shader stage; index 0 (empty suffix) is the
 * "all shaders" group (see get_group_state: shader_id 0 -> SHADER_ALL). */
static const char * const r600_pc_shader_suffix[] = {
	"", "_PS", "_VS", "_GS", "_ES", "_HS", "_LS", "_CS"
};
39
40 static struct r600_perfcounter_block *
41 lookup_counter(struct r600_perfcounters *pc, unsigned index,
42 unsigned *base_gid, unsigned *sub_index)
43 {
44 struct r600_perfcounter_block *block = pc->blocks;
45 unsigned bid;
46
47 *base_gid = 0;
48 for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
49 unsigned total = block->num_groups * block->num_selectors;
50
51 if (index < total) {
52 *sub_index = index;
53 return block;
54 }
55
56 index -= total;
57 *base_gid += block->num_groups;
58 }
59
60 return NULL;
61 }
62
63 static struct r600_perfcounter_block *
64 lookup_group(struct r600_perfcounters *pc, unsigned *index)
65 {
66 unsigned bid;
67 struct r600_perfcounter_block *block = pc->blocks;
68
69 for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
70 if (*index < block->num_groups)
71 return block;
72 *index -= block->num_groups;
73 }
74
75 return NULL;
76 }
77
/* Per-query selection state for one (block, SE, instance) combination.
 * Groups form a singly-linked list owned by the parent r600_query_pc. */
struct r600_pc_group {
	struct r600_pc_group *next;
	struct r600_perfcounter_block *block;
	unsigned sub_gid; /* only used during init */
	unsigned result_base; /* only used during init */
	int se; /* shader engine index, or -1 for broadcast to all SEs */
	int instance; /* block instance index, or -1 for all instances */
	unsigned num_counters; /* selectors[] entries in use */
	unsigned selectors[R600_QUERY_MAX_COUNTERS];
};
88
/* Maps one user-visible counter to its dwords in the result buffer. */
struct r600_pc_counter {
	unsigned base;   /* first result dword; ~0 marks a slot skipped by add_result */
	unsigned dwords; /* number of dwords summed (SE count * instance count) */
	unsigned stride; /* dwords between consecutive values of this counter */
};
94
/* A batched performance-counter query; extends the generic HW query. */
struct r600_query_pc {
	struct r600_query_hw b;

	unsigned shaders; /* R600_PC_SHADER_* mask accumulated over all groups */
	unsigned num_counters; /* number of user-supplied query types */
	struct r600_pc_counter *counters; /* one entry per query type */
	struct r600_pc_group *groups; /* linked list of selection groups */
};
103
104 static void r600_pc_query_destroy(struct r600_common_context *ctx,
105 struct r600_query *rquery)
106 {
107 struct r600_query_pc *query = (struct r600_query_pc *)rquery;
108
109 while (query->groups) {
110 struct r600_pc_group *group = query->groups;
111 query->groups = group->next;
112 FREE(group);
113 }
114
115 FREE(query->counters);
116
117 r600_query_hw_destroy(ctx, rquery);
118 }
119
120 static void r600_pc_query_emit_start(struct r600_common_context *ctx,
121 struct r600_query_hw *hwquery,
122 struct r600_resource *buffer, uint64_t va)
123 {
124 struct r600_perfcounters *pc = ctx->screen->perfcounters;
125 struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
126 struct r600_pc_group *group;
127 int current_se = -1;
128 int current_instance = -1;
129
130 if (query->shaders)
131 pc->emit_shaders(ctx, query->shaders);
132
133 for (group = query->groups; group; group = group->next) {
134 struct r600_perfcounter_block *block = group->block;
135
136 if (group->se != current_se || group->instance != current_instance) {
137 current_se = group->se;
138 current_instance = group->instance;
139 pc->emit_instance(ctx, group->se, group->instance);
140 }
141
142 pc->emit_select(ctx, block, group->num_counters, group->selectors);
143 }
144
145 if (current_se != -1 || current_instance != -1)
146 pc->emit_instance(ctx, -1, -1);
147
148 pc->emit_start(ctx, buffer, va);
149 }
150
/* Stop counting and read all counters back into the result buffer,
 * iterating over every shader engine and block instance each group
 * covers; va advances by one dword per counter per read. */
static void r600_pc_query_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *hwquery,
				    struct r600_resource *buffer, uint64_t va)
{
	struct r600_perfcounters *pc = ctx->screen->perfcounters;
	struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
	struct r600_pc_group *group;

	pc->emit_stop(ctx, buffer, va);

	for (group = query->groups; group; group = group->next) {
		struct r600_perfcounter_block *block = group->block;
		/* se < 0 means "all SEs": expand to [0, max_se) for SE-capable
		 * blocks, otherwise perform a single pass with index 0. */
		unsigned se = group->se >= 0 ? group->se : 0;
		unsigned se_end = se + 1;

		if ((block->flags & R600_PC_BLOCK_SE) && (group->se < 0))
			se_end = ctx->screen->info.max_se;

		do {
			/* Likewise, instance < 0 expands to all instances. */
			unsigned instance = group->instance >= 0 ? group->instance : 0;

			do {
				pc->emit_instance(ctx, se, instance);
				pc->emit_read(ctx, block,
					      group->num_counters, group->selectors,
					      buffer, va);
				/* One dword is stored per counter per read. */
				va += 4 * group->num_counters;
			} while (group->instance < 0 && ++instance < block->num_instances);
		} while (++se < se_end);
	}

	/* Return to broadcast selection for subsequent state emission. */
	pc->emit_instance(ctx, -1, -1);
}
184
185 static void r600_pc_query_clear_result(struct r600_query_hw *hwquery,
186 union pipe_query_result *result)
187 {
188 struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
189
190 memset(result, 0, sizeof(result->batch[0]) * query->num_counters);
191 }
192
193 static void r600_pc_query_add_result(struct r600_common_context *ctx,
194 struct r600_query_hw *hwquery,
195 void *buffer,
196 union pipe_query_result *result)
197 {
198 struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
199 uint32_t *results = buffer;
200 unsigned i, j;
201
202 for (i = 0; i < query->num_counters; ++i) {
203 struct r600_pc_counter *counter = &query->counters[i];
204
205 if (counter->base == ~0)
206 continue;
207
208 for (j = 0; j < counter->dwords; ++j) {
209 uint32_t value = results[counter->base + j * counter->stride];
210 result->batch[i].u32 += value;
211 }
212 }
213 }
214
/* Query vtable for batch perfcounter queries; begin/end/get_result reuse
 * the generic HW query implementations. */
static struct r600_query_ops batch_query_ops = {
	.destroy = r600_pc_query_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result
};
221
/* HW-level hooks customizing command emission and result accumulation
 * for batch perfcounter queries. */
static struct r600_query_hw_ops batch_query_hw_ops = {
	.emit_start = r600_pc_query_emit_start,
	.emit_stop = r600_pc_query_emit_stop,
	.clear_result = r600_pc_query_clear_result,
	.add_result = r600_pc_query_add_result,
};
228
/* Return the query's group state for (block, sub_gid), creating and
 * linking a new group when none exists yet. Returns NULL on allocation
 * failure or when the requested shader group conflicts with groups
 * already attached to the query. */
static struct r600_pc_group *get_group_state(struct r600_common_screen *screen,
					     struct r600_query_pc *query,
					     struct r600_perfcounter_block *block,
					     unsigned sub_gid)
{
	struct r600_pc_group *group = query->groups;

	while (group) {
		if (group->block == block && group->sub_gid == sub_gid)
			return group;
		group = group->next;
	}

	group = CALLOC_STRUCT(r600_pc_group);
	if (!group)
		return NULL;

	group->block = block;
	group->sub_gid = sub_gid;

	if (block->flags & R600_PC_BLOCK_SHADER) {
		/* Split sub_gid into a shader id and the SE/instance part. */
		unsigned sub_gids = block->num_instances;
		unsigned shader_id;
		unsigned shader_mask;
		unsigned query_shader_mask;

		if (block->flags & R600_PC_BLOCK_SE_GROUPS)
			sub_gids = sub_gids * screen->info.max_se;
		shader_id = sub_gid / sub_gids;
		sub_gid = sub_gid % sub_gids;

		/* shader_id 0 is the "all shaders" group. */
		if (shader_id == 0)
			shader_mask = R600_PC_SHADER_ALL;
		else
			shader_mask = 1 << (shader_id - 1);

		/* All shader blocks of one query must use the same mask. */
		query_shader_mask = query->shaders & R600_PC_SHADER_ALL;
		if (query_shader_mask && query_shader_mask != shader_mask) {
			fprintf(stderr, "r600_perfcounter: incompatible shader groups\n");
			FREE(group);
			return NULL;
		}
		query->shaders |= shader_mask;
	}

	if (block->flags & R600_PC_BLOCK_SHADER_WINDOWED) {
		// A non-zero value in query->shaders ensures that the shader
		// masking is reset unless the user explicitly requests one.
		query->shaders |= R600_PC_SHADER_WINDOWING;
	}

	if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
		group->se = sub_gid / block->num_instances;
		sub_gid = sub_gid % block->num_instances;
	} else {
		group->se = -1; /* broadcast to all shader engines */
	}

	if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS) {
		group->instance = sub_gid;
	} else {
		group->instance = -1; /* broadcast to all instances */
	}

	/* Prepend to the query's group list. */
	group->next = query->groups;
	query->groups = group;

	return group;
}
298
299 struct pipe_query *r600_create_batch_query(struct pipe_context *ctx,
300 unsigned num_queries,
301 unsigned *query_types)
302 {
303 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
304 struct r600_common_screen *screen = rctx->screen;
305 struct r600_perfcounters *pc = screen->perfcounters;
306 struct r600_perfcounter_block *block;
307 struct r600_pc_group *group;
308 struct r600_query_pc *query;
309 unsigned base_gid, sub_gid, sub_index;
310 unsigned i, j;
311
312 if (!pc)
313 return NULL;
314
315 query = CALLOC_STRUCT(r600_query_pc);
316 if (!query)
317 return NULL;
318
319 query->b.b.ops = &batch_query_ops;
320 query->b.ops = &batch_query_hw_ops;
321 query->b.flags = R600_QUERY_HW_FLAG_TIMER;
322
323 query->num_counters = num_queries;
324
325 /* Collect selectors per group */
326 for (i = 0; i < num_queries; ++i) {
327 unsigned sub_gid;
328
329 if (query_types[i] < R600_QUERY_FIRST_PERFCOUNTER)
330 goto error;
331
332 block = lookup_counter(pc, query_types[i] - R600_QUERY_FIRST_PERFCOUNTER,
333 &base_gid, &sub_index);
334 if (!block)
335 goto error;
336
337 sub_gid = sub_index / block->num_selectors;
338 sub_index = sub_index % block->num_selectors;
339
340 group = get_group_state(screen, query, block, sub_gid);
341 if (!group)
342 goto error;
343
344 if (group->num_counters >= block->num_counters) {
345 fprintf(stderr,
346 "perfcounter group %s: too many selected\n",
347 block->basename);
348 goto error;
349 }
350 group->selectors[group->num_counters] = sub_index;
351 ++group->num_counters;
352 }
353
354 /* Compute result bases and CS size per group */
355 query->b.num_cs_dw_begin = pc->num_start_cs_dwords;
356 query->b.num_cs_dw_end = pc->num_stop_cs_dwords;
357
358 query->b.num_cs_dw_begin += pc->num_instance_cs_dwords; /* conservative */
359 query->b.num_cs_dw_end += pc->num_instance_cs_dwords;
360
361 i = 0;
362 for (group = query->groups; group; group = group->next) {
363 struct r600_perfcounter_block *block = group->block;
364 unsigned select_dw, read_dw;
365 unsigned instances = 1;
366
367 if ((block->flags & R600_PC_BLOCK_SE) && group->se < 0)
368 instances = rctx->screen->info.max_se;
369 if (group->instance < 0)
370 instances *= block->num_instances;
371
372 group->result_base = i;
373 query->b.result_size += 4 * instances * group->num_counters;
374 i += instances * group->num_counters;
375
376 pc->get_size(block, group->num_counters, group->selectors,
377 &select_dw, &read_dw);
378 query->b.num_cs_dw_begin += select_dw;
379 query->b.num_cs_dw_end += instances * read_dw;
380 query->b.num_cs_dw_begin += pc->num_instance_cs_dwords; /* conservative */
381 query->b.num_cs_dw_end += instances * pc->num_instance_cs_dwords;
382 }
383
384 if (query->shaders) {
385 if ((query->shaders & R600_PC_SHADER_ALL) == 0)
386 query->shaders |= R600_PC_SHADER_ALL;
387 query->b.num_cs_dw_begin += pc->num_shaders_cs_dwords;
388 }
389
390 /* Map user-supplied query array to result indices */
391 query->counters = CALLOC(num_queries, sizeof(*query->counters));
392 for (i = 0; i < num_queries; ++i) {
393 struct r600_pc_counter *counter = &query->counters[i];
394 struct r600_perfcounter_block *block;
395
396 block = lookup_counter(pc, query_types[i] - R600_QUERY_FIRST_PERFCOUNTER,
397 &base_gid, &sub_index);
398
399 sub_gid = sub_index / block->num_selectors;
400 sub_index = sub_index % block->num_selectors;
401
402 group = get_group_state(screen, query, block, sub_gid);
403 assert(group != NULL);
404
405 for (j = 0; j < group->num_counters; ++j) {
406 if (group->selectors[j] == sub_index)
407 break;
408 }
409
410 counter->base = group->result_base + j;
411 counter->stride = group->num_counters;
412
413 counter->dwords = 1;
414 if ((block->flags & R600_PC_BLOCK_SE) && group->se < 0)
415 counter->dwords = screen->info.max_se;
416 if (group->instance < 0)
417 counter->dwords *= block->num_instances;
418 }
419
420 if (!r600_query_hw_init(rctx, &query->b))
421 goto error;
422
423 return (struct pipe_query *)query;
424
425 error:
426 r600_pc_query_destroy(rctx, &query->b.b);
427 return NULL;
428 }
429
/* Lazily build the group and selector name tables for a block. Names are
 * stored as fixed-stride strings so that lookup is a simple multiply;
 * the strides are sized from the worst-case suffix lengths below.
 * Returns FALSE on allocation failure (partially built tables are freed
 * later by r600_perfcounters_do_destroy). */
static boolean r600_init_block_names(struct r600_common_screen *screen,
				     struct r600_perfcounter_block *block)
{
	unsigned i, j, k;
	unsigned groups_shader = 1, groups_se = 1, groups_instance = 1;
	unsigned namelen;
	char *groupname;
	char *p;

	if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
		groups_instance = block->num_instances;
	if (block->flags & R600_PC_BLOCK_SE_GROUPS)
		groups_se = screen->info.max_se;
	if (block->flags & R600_PC_BLOCK_SHADER)
		groups_shader = ARRAY_SIZE(r600_pc_shader_suffix);

	namelen = strlen(block->basename);
	block->group_name_stride = namelen + 1;
	if (block->flags & R600_PC_BLOCK_SHADER)
		block->group_name_stride += 3; /* longest shader suffix, e.g. "_PS" */
	if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
		assert(groups_se <= 10); /* SE index printed as a single digit */
		block->group_name_stride += 1;

		if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
			block->group_name_stride += 1; /* '_' separator */
	}
	if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS) {
		assert(groups_instance <= 100); /* instance fits in two digits */
		block->group_name_stride += 2;
	}

	block->group_names = MALLOC(block->num_groups * block->group_name_stride);
	if (!block->group_names)
		return FALSE;

	/* Emit "<base><shader suffix><se>[_]<instance>" per group, in the
	 * same shader-major order used by r600_perfcounters_add_block. */
	groupname = block->group_names;
	for (i = 0; i < groups_shader; ++i) {
		unsigned shaderlen = strlen(r600_pc_shader_suffix[i]);
		for (j = 0; j < groups_se; ++j) {
			for (k = 0; k < groups_instance; ++k) {
				strcpy(groupname, block->basename);
				p = groupname + namelen;

				if (block->flags & R600_PC_BLOCK_SHADER) {
					strcpy(p, r600_pc_shader_suffix[i]);
					p += shaderlen;
				}

				if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
					p += sprintf(p, "%d", j);
					if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
						*p++ = '_';
				}

				if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
					p += sprintf(p, "%d", k);

				groupname += block->group_name_stride;
			}
		}
	}

	/* Selector names are "<group>_NNN"; stride adds "_%03d" + NUL. */
	assert(block->num_selectors <= 1000);
	block->selector_name_stride = block->group_name_stride + 4;
	block->selector_names = MALLOC(block->num_groups * block->num_selectors *
				       block->selector_name_stride);
	if (!block->selector_names)
		return FALSE;

	groupname = block->group_names;
	p = block->selector_names;
	for (i = 0; i < block->num_groups; ++i) {
		for (j = 0; j < block->num_selectors; ++j) {
			sprintf(p, "%s_%03d", groupname, j);
			p += block->selector_name_stride;
		}
		groupname += block->group_name_stride;
	}

	return TRUE;
}
512
513 int r600_get_perfcounter_info(struct r600_common_screen *screen,
514 unsigned index,
515 struct pipe_driver_query_info *info)
516 {
517 struct r600_perfcounters *pc = screen->perfcounters;
518 struct r600_perfcounter_block *block;
519 unsigned base_gid, sub;
520
521 if (!pc)
522 return 0;
523
524 if (!info) {
525 unsigned bid, num_queries = 0;
526
527 for (bid = 0; bid < pc->num_blocks; ++bid) {
528 num_queries += pc->blocks[bid].num_selectors *
529 pc->blocks[bid].num_groups;
530 }
531
532 return num_queries;
533 }
534
535 block = lookup_counter(pc, index, &base_gid, &sub);
536 if (!block)
537 return 0;
538
539 if (!block->selector_names) {
540 if (!r600_init_block_names(screen, block))
541 return 0;
542 }
543 info->name = block->selector_names + sub * block->selector_name_stride;
544 info->query_type = R600_QUERY_FIRST_PERFCOUNTER + index;
545 info->max_value.u64 = 0;
546 info->type = PIPE_DRIVER_QUERY_TYPE_UINT;
547 info->result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_CUMULATIVE;
548 info->group_id = base_gid + sub / block->num_selectors;
549 info->flags = PIPE_DRIVER_QUERY_FLAG_BATCH;
550 return 1;
551 }
552
553 int r600_get_perfcounter_group_info(struct r600_common_screen *screen,
554 unsigned index,
555 struct pipe_driver_query_group_info *info)
556 {
557 struct r600_perfcounters *pc = screen->perfcounters;
558 struct r600_perfcounter_block *block;
559
560 if (!pc)
561 return 0;
562
563 if (!info)
564 return pc->num_groups;
565
566 block = lookup_group(pc, &index);
567 if (!block)
568 return 0;
569
570 if (!block->group_names) {
571 if (!r600_init_block_names(screen, block))
572 return 0;
573 }
574 info->name = block->group_names + index * block->group_name_stride;
575 info->num_queries = block->num_selectors;
576 info->max_active_queries = block->num_counters;
577 return 1;
578 }
579
580 void r600_perfcounters_destroy(struct r600_common_screen *rscreen)
581 {
582 if (rscreen->perfcounters)
583 rscreen->perfcounters->cleanup(rscreen);
584 }
585
586 boolean r600_perfcounters_init(struct r600_perfcounters *pc,
587 unsigned num_blocks)
588 {
589 pc->blocks = CALLOC(num_blocks, sizeof(struct r600_perfcounter_block));
590 if (!pc->blocks)
591 return FALSE;
592
593 pc->separate_se = debug_get_bool_option("RADEON_PC_SEPARATE_SE", FALSE);
594 pc->separate_instance = debug_get_bool_option("RADEON_PC_SEPARATE_INSTANCE", FALSE);
595
596 return TRUE;
597 }
598
599 void r600_perfcounters_add_block(struct r600_common_screen *rscreen,
600 struct r600_perfcounters *pc,
601 const char *name, unsigned flags,
602 unsigned counters, unsigned selectors,
603 unsigned instances, void *data)
604 {
605 struct r600_perfcounter_block *block = &pc->blocks[pc->num_blocks];
606
607 assert(counters <= R600_QUERY_MAX_COUNTERS);
608
609 block->basename = name;
610 block->flags = flags;
611 block->num_counters = counters;
612 block->num_selectors = selectors;
613 block->num_instances = MAX2(instances, 1);
614 block->data = data;
615
616 if (pc->separate_se && (block->flags & R600_PC_BLOCK_SE))
617 block->flags |= R600_PC_BLOCK_SE_GROUPS;
618 if (pc->separate_instance && block->num_instances > 1)
619 block->flags |= R600_PC_BLOCK_INSTANCE_GROUPS;
620
621 if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS) {
622 block->num_groups = block->num_instances;
623 } else {
624 block->num_groups = 1;
625 }
626
627 if (block->flags & R600_PC_BLOCK_SE_GROUPS)
628 block->num_groups *= rscreen->info.max_se;
629 if (block->flags & R600_PC_BLOCK_SHADER)
630 block->num_groups *= ARRAY_SIZE(r600_pc_shader_suffix);
631
632 ++pc->num_blocks;
633 pc->num_groups += block->num_groups;
634 }
635
636 void r600_perfcounters_do_destroy(struct r600_perfcounters *pc)
637 {
638 unsigned i;
639
640 for (i = 0; i < pc->num_blocks; ++i) {
641 FREE(pc->blocks[i].group_names);
642 FREE(pc->blocks[i].selector_names);
643 }
644 FREE(pc->blocks);
645 FREE(pc);
646 }