radeonsi: update copyrights
[mesa.git] / src / gallium / drivers / radeon / r600_perfcounter.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "util/u_memory.h"
#include "r600_query.h"
#include "radeonsi/si_pipe.h"
#include "amd/common/sid.h"

/* Max counters per HW block */
#define R600_QUERY_MAX_COUNTERS 16

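/* Perfcounter queries are exposed as a flat range of query types starting at
 * R600_QUERY_FIRST_PERFCOUNTER. Each block contributes
 * num_groups * num_selectors consecutive indices; lookup_counter and
 * lookup_group translate such a flat index back into a block plus a
 * block-relative sub-index.
 */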
static struct r600_perfcounter_block *
lookup_counter(struct r600_perfcounters *pc, unsigned index,
	       unsigned *base_gid, unsigned *sub_index)
{
	struct r600_perfcounter_block *block = pc->blocks;
	unsigned bid;

	*base_gid = 0;
	for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
		unsigned total = block->num_groups * block->num_selectors;

		if (index < total) {
			*sub_index = index;
			return block;
		}

		index -= total;
		*base_gid += block->num_groups;
	}

	return NULL;
}

static struct r600_perfcounter_block *
lookup_group(struct r600_perfcounters *pc, unsigned *index)
{
	unsigned bid;
	struct r600_perfcounter_block *block = pc->blocks;

	for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
		if (*index < block->num_groups)
			return block;
		*index -= block->num_groups;
	}

	return NULL;
}

struct r600_pc_group {
	struct r600_pc_group *next;
	struct r600_perfcounter_block *block;
	unsigned sub_gid; /* only used during init */
	unsigned result_base; /* only used during init */
	int se;
	int instance;
	unsigned num_counters;
	unsigned selectors[R600_QUERY_MAX_COUNTERS];
};

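/* Maps one user-visible counter to its readings in the result buffer: the
 * j-th reading (one per sampled SE/instance) is the 64-bit word at
 * base + j * stride, for j in [0, qwords).
 */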
struct r600_pc_counter {
	unsigned base;
	unsigned qwords;
	unsigned stride; /* in uint64s */
};

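/* Placed in query->shaders when a shader-windowed block is used without an
 * explicit shader-type group; resolved to "all shaders" (0xffffffff) once
 * all groups have been collected.
 */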
#define R600_PC_SHADERS_WINDOWING (1u << 31)

struct r600_query_pc {
	struct r600_query_hw b;

	unsigned shaders;
	unsigned num_counters;
	struct r600_pc_counter *counters;
	struct r600_pc_group *groups;
};

static void r600_pc_query_destroy(struct si_screen *sscreen,
				  struct r600_query *rquery)
{
	struct r600_query_pc *query = (struct r600_query_pc *)rquery;

	while (query->groups) {
		struct r600_pc_group *group = query->groups;
		query->groups = group->next;
		FREE(group);
	}

	FREE(query->counters);

	si_query_hw_destroy(sscreen, rquery);
}

static bool r600_pc_query_prepare_buffer(struct si_screen *screen,
					 struct r600_query_hw *hwquery,
					 struct r600_resource *buffer)
{
	/* no-op */
	return true;
}

static void r600_pc_query_emit_start(struct si_context *sctx,
				     struct r600_query_hw *hwquery,
				     struct r600_resource *buffer, uint64_t va)
{
	struct r600_perfcounters *pc = sctx->screen->perfcounters;
	struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
	struct r600_pc_group *group;
	int current_se = -1;
	int current_instance = -1;

	if (query->shaders)
		pc->emit_shaders(sctx, query->shaders);

	for (group = query->groups; group; group = group->next) {
		struct r600_perfcounter_block *block = group->block;

		if (group->se != current_se || group->instance != current_instance) {
			current_se = group->se;
			current_instance = group->instance;
			pc->emit_instance(sctx, group->se, group->instance);
		}

		pc->emit_select(sctx, block, group->num_counters, group->selectors);
	}

	if (current_se != -1 || current_instance != -1)
		pc->emit_instance(sctx, -1, -1);

	pc->emit_start(sctx, buffer, va);
}

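/* Read back all counters after stopping. Results are written per group, and
 * within a group per SE and per instance; this order must match the
 * base/stride layout computed for each counter in si_create_batch_query.
 */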
static void r600_pc_query_emit_stop(struct si_context *sctx,
				    struct r600_query_hw *hwquery,
				    struct r600_resource *buffer, uint64_t va)
{
	struct r600_perfcounters *pc = sctx->screen->perfcounters;
	struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
	struct r600_pc_group *group;

	pc->emit_stop(sctx, buffer, va);

	for (group = query->groups; group; group = group->next) {
		struct r600_perfcounter_block *block = group->block;
		unsigned se = group->se >= 0 ? group->se : 0;
		unsigned se_end = se + 1;

		if ((block->flags & R600_PC_BLOCK_SE) && (group->se < 0))
			se_end = sctx->screen->info.max_se;

		do {
			unsigned instance = group->instance >= 0 ? group->instance : 0;

			do {
				pc->emit_instance(sctx, se, instance);
				pc->emit_read(sctx, block,
					      group->num_counters, group->selectors,
					      buffer, va);
				va += sizeof(uint64_t) * group->num_counters;
			} while (group->instance < 0 && ++instance < block->num_instances);
		} while (++se < se_end);
	}

	pc->emit_instance(sctx, -1, -1);
}

static void r600_pc_query_clear_result(struct r600_query_hw *hwquery,
				       union pipe_query_result *result)
{
	struct r600_query_pc *query = (struct r600_query_pc *)hwquery;

	memset(result, 0, sizeof(result->batch[0]) * query->num_counters);
}

static void r600_pc_query_add_result(struct si_screen *sscreen,
				     struct r600_query_hw *hwquery,
				     void *buffer,
				     union pipe_query_result *result)
{
	struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
	uint64_t *results = buffer;
	unsigned i, j;

	for (i = 0; i < query->num_counters; ++i) {
		struct r600_pc_counter *counter = &query->counters[i];

		/* Each reading occupies a 64-bit slot, but only the low
		 * 32 bits hold the counter value; accumulate the readings
		 * of all sampled SEs/instances into one 64-bit total.
		 */
		for (j = 0; j < counter->qwords; ++j) {
			uint32_t value = results[counter->base + j * counter->stride];
			result->batch[i].u64 += value;
		}
	}
}

static struct r600_query_ops batch_query_ops = {
	.destroy = r600_pc_query_destroy,
	.begin = si_query_hw_begin,
	.end = si_query_hw_end,
	.get_result = si_query_hw_get_result
};

static struct r600_query_hw_ops batch_query_hw_ops = {
	.prepare_buffer = r600_pc_query_prepare_buffer,
	.emit_start = r600_pc_query_emit_start,
	.emit_stop = r600_pc_query_emit_stop,
	.clear_result = r600_pc_query_clear_result,
	.add_result = r600_pc_query_add_result,
};

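/* Find or create the per-query state for (block, sub_gid). sub_gid packs,
 * from most to least significant, the shader type (for shader blocks), the
 * shader engine (with R600_PC_BLOCK_SE_GROUPS) and the instance (with
 * R600_PC_BLOCK_INSTANCE_GROUPS); a group's se/instance of -1 means "all".
 */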
static struct r600_pc_group *get_group_state(struct si_screen *screen,
					     struct r600_query_pc *query,
					     struct r600_perfcounter_block *block,
					     unsigned sub_gid)
{
	struct r600_pc_group *group = query->groups;

	while (group) {
		if (group->block == block && group->sub_gid == sub_gid)
			return group;
		group = group->next;
	}

	group = CALLOC_STRUCT(r600_pc_group);
	if (!group)
		return NULL;

	group->block = block;
	group->sub_gid = sub_gid;

	if (block->flags & R600_PC_BLOCK_SHADER) {
		unsigned sub_gids = block->num_instances;
		unsigned shader_id;
		unsigned shaders;
		unsigned query_shaders;

		if (block->flags & R600_PC_BLOCK_SE_GROUPS)
			sub_gids = sub_gids * screen->info.max_se;
		shader_id = sub_gid / sub_gids;
		sub_gid = sub_gid % sub_gids;

		shaders = screen->perfcounters->shader_type_bits[shader_id];

		query_shaders = query->shaders & ~R600_PC_SHADERS_WINDOWING;
		if (query_shaders && query_shaders != shaders) {
			fprintf(stderr, "r600_perfcounter: incompatible shader groups\n");
			FREE(group);
			return NULL;
		}
		query->shaders = shaders;
	}

	if (block->flags & R600_PC_BLOCK_SHADER_WINDOWED && !query->shaders) {
		/* A non-zero value in query->shaders ensures that the shader
		 * masking is reset unless the user explicitly requests one.
		 */
		query->shaders = R600_PC_SHADERS_WINDOWING;
	}

	if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
		group->se = sub_gid / block->num_instances;
		sub_gid = sub_gid % block->num_instances;
	} else {
		group->se = -1;
	}

	if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS) {
		group->instance = sub_gid;
	} else {
		group->instance = -1;
	}

	group->next = query->groups;
	query->groups = group;

	return group;
}

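/* Create a single hardware query that samples all requested performance
 * counters in one batch. query_types is walked twice: the first pass
 * collects the selectors of each counter into per-group state (and then
 * sizes the command stream and result buffer per group), the second pass
 * maps each user-visible counter to its (base, stride, qwords) slots in
 * the result buffer.
 */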
struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
					 unsigned num_queries,
					 unsigned *query_types)
{
	struct si_screen *screen =
		(struct si_screen *)ctx->screen;
	struct r600_perfcounters *pc = screen->perfcounters;
	struct r600_perfcounter_block *block;
	struct r600_pc_group *group;
	struct r600_query_pc *query;
	unsigned base_gid, sub_gid, sub_index;
	unsigned i, j;

	if (!pc)
		return NULL;

	query = CALLOC_STRUCT(r600_query_pc);
	if (!query)
		return NULL;

	query->b.b.ops = &batch_query_ops;
	query->b.ops = &batch_query_hw_ops;

	query->num_counters = num_queries;

	/* Collect selectors per group */
	for (i = 0; i < num_queries; ++i) {
		if (query_types[i] < R600_QUERY_FIRST_PERFCOUNTER)
			goto error;

		block = lookup_counter(pc, query_types[i] - R600_QUERY_FIRST_PERFCOUNTER,
				       &base_gid, &sub_index);
		if (!block)
			goto error;

		sub_gid = sub_index / block->num_selectors;
		sub_index = sub_index % block->num_selectors;

		group = get_group_state(screen, query, block, sub_gid);
		if (!group)
			goto error;

		if (group->num_counters >= block->num_counters) {
			fprintf(stderr,
				"perfcounter group %s: too many selected\n",
				block->basename);
			goto error;
		}
		group->selectors[group->num_counters] = sub_index;
		++group->num_counters;
	}

	/* Compute result bases and CS size per group */
	query->b.num_cs_dw_end = pc->num_stop_cs_dwords;
	query->b.num_cs_dw_end += pc->num_instance_cs_dwords;

	i = 0;
	for (group = query->groups; group; group = group->next) {
		struct r600_perfcounter_block *block = group->block;
		unsigned read_dw;
		unsigned instances = 1;

		if ((block->flags & R600_PC_BLOCK_SE) && group->se < 0)
			instances = screen->info.max_se;
		if (group->instance < 0)
			instances *= block->num_instances;

		group->result_base = i;
		query->b.result_size += sizeof(uint64_t) * instances * group->num_counters;
		i += instances * group->num_counters;

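		/* CS space per counter read: si_pc_emit_read is expected to
		 * emit one COPY_DATA packet (6 dwords) per counter.
		 */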
		read_dw = 6 * group->num_counters;
		query->b.num_cs_dw_end += instances * read_dw;
		query->b.num_cs_dw_end += instances * pc->num_instance_cs_dwords;
	}

	if (query->shaders) {
		if (query->shaders == R600_PC_SHADERS_WINDOWING)
			query->shaders = 0xffffffff;
	}

	/* Map user-supplied query array to result indices */
	query->counters = CALLOC(num_queries, sizeof(*query->counters));
	if (!query->counters)
		goto error;

	for (i = 0; i < num_queries; ++i) {
		struct r600_pc_counter *counter = &query->counters[i];

		block = lookup_counter(pc, query_types[i] - R600_QUERY_FIRST_PERFCOUNTER,
				       &base_gid, &sub_index);

		sub_gid = sub_index / block->num_selectors;
		sub_index = sub_index % block->num_selectors;

		group = get_group_state(screen, query, block, sub_gid);
		assert(group != NULL);

		for (j = 0; j < group->num_counters; ++j) {
			if (group->selectors[j] == sub_index)
				break;
		}

		counter->base = group->result_base + j;
		counter->stride = group->num_counters;

		counter->qwords = 1;
		if ((block->flags & R600_PC_BLOCK_SE) && group->se < 0)
			counter->qwords = screen->info.max_se;
		if (group->instance < 0)
			counter->qwords *= block->num_instances;
	}

	if (!si_query_hw_init(screen, &query->b))
		goto error;

	return (struct pipe_query *)query;

error:
	r600_pc_query_destroy(screen, &query->b.b);
	return NULL;
}

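/* Lazily build the human-readable group and selector names for a block.
 * Names are stored in flat arrays with a fixed stride so that a name can be
 * addressed as base + index * stride without a per-name pointer table.
 */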
static bool r600_init_block_names(struct si_screen *screen,
				  struct r600_perfcounter_block *block)
{
	unsigned i, j, k;
	unsigned groups_shader = 1, groups_se = 1, groups_instance = 1;
	unsigned namelen;
	char *groupname;
	char *p;

	if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
		groups_instance = block->num_instances;
	if (block->flags & R600_PC_BLOCK_SE_GROUPS)
		groups_se = screen->info.max_se;
	if (block->flags & R600_PC_BLOCK_SHADER)
		groups_shader = screen->perfcounters->num_shader_types;

	namelen = strlen(block->basename);
	block->group_name_stride = namelen + 1;
	if (block->flags & R600_PC_BLOCK_SHADER)
		block->group_name_stride += 3;
	if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
		assert(groups_se <= 10);
		block->group_name_stride += 1;

		if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
			block->group_name_stride += 1;
	}
	if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS) {
		assert(groups_instance <= 100);
		block->group_name_stride += 2;
	}

	block->group_names = MALLOC(block->num_groups * block->group_name_stride);
	if (!block->group_names)
		return false;

	groupname = block->group_names;
	for (i = 0; i < groups_shader; ++i) {
		const char *shader_suffix = screen->perfcounters->shader_type_suffixes[i];
		unsigned shaderlen = strlen(shader_suffix);
		for (j = 0; j < groups_se; ++j) {
			for (k = 0; k < groups_instance; ++k) {
				strcpy(groupname, block->basename);
				p = groupname + namelen;

				if (block->flags & R600_PC_BLOCK_SHADER) {
					strcpy(p, shader_suffix);
					p += shaderlen;
				}

				if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
					p += sprintf(p, "%d", j);
					if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
						*p++ = '_';
				}

				if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS)
					p += sprintf(p, "%d", k);

				groupname += block->group_name_stride;
			}
		}
	}

	assert(block->num_selectors <= 1000);
	block->selector_name_stride = block->group_name_stride + 4;
	block->selector_names = MALLOC(block->num_groups * block->num_selectors *
				       block->selector_name_stride);
	if (!block->selector_names)
		return false;

	groupname = block->group_names;
	p = block->selector_names;
	for (i = 0; i < block->num_groups; ++i) {
		for (j = 0; j < block->num_selectors; ++j) {
			sprintf(p, "%s_%03d", groupname, j);
			p += block->selector_name_stride;
		}
		groupname += block->group_name_stride;
	}

	return true;
}

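/* Driver-query introspection: called with info == NULL to get the total
 * number of perfcounter queries, otherwise fills in one query's info. All
 * but the first and last counter of a block are marked DONT_LIST so that
 * enumeration tools see the range without listing every selector.
 */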
int si_get_perfcounter_info(struct si_screen *screen,
			    unsigned index,
			    struct pipe_driver_query_info *info)
{
	struct r600_perfcounters *pc = screen->perfcounters;
	struct r600_perfcounter_block *block;
	unsigned base_gid, sub;

	if (!pc)
		return 0;

	if (!info) {
		unsigned bid, num_queries = 0;

		for (bid = 0; bid < pc->num_blocks; ++bid) {
			num_queries += pc->blocks[bid].num_selectors *
				       pc->blocks[bid].num_groups;
		}

		return num_queries;
	}

	block = lookup_counter(pc, index, &base_gid, &sub);
	if (!block)
		return 0;

	if (!block->selector_names) {
		if (!r600_init_block_names(screen, block))
			return 0;
	}
	info->name = block->selector_names + sub * block->selector_name_stride;
	info->query_type = R600_QUERY_FIRST_PERFCOUNTER + index;
	info->max_value.u64 = 0;
	info->type = PIPE_DRIVER_QUERY_TYPE_UINT64;
	info->result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE;
	info->group_id = base_gid + sub / block->num_selectors;
	info->flags = PIPE_DRIVER_QUERY_FLAG_BATCH;
	if (sub > 0 && sub + 1 < block->num_selectors * block->num_groups)
		info->flags |= PIPE_DRIVER_QUERY_FLAG_DONT_LIST;
	return 1;
}

int si_get_perfcounter_group_info(struct si_screen *screen,
				  unsigned index,
				  struct pipe_driver_query_group_info *info)
{
	struct r600_perfcounters *pc = screen->perfcounters;
	struct r600_perfcounter_block *block;

	if (!pc)
		return 0;

	if (!info)
		return pc->num_groups;

	block = lookup_group(pc, &index);
	if (!block)
		return 0;

	if (!block->group_names) {
		if (!r600_init_block_names(screen, block))
			return 0;
	}
	info->name = block->group_names + index * block->group_name_stride;
	info->num_queries = block->num_selectors;
	info->max_active_queries = block->num_counters;
	return 1;
}

void si_perfcounters_destroy(struct si_screen *sscreen)
{
	if (sscreen->perfcounters)
		sscreen->perfcounters->cleanup(sscreen);
}

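/* The RADEON_PC_SEPARATE_SE and RADEON_PC_SEPARATE_INSTANCE environment
 * variables expose separate per-shader-engine and per-instance groups for
 * the blocks that support them, instead of aggregating their results.
 */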
bool si_perfcounters_init(struct r600_perfcounters *pc,
			  unsigned num_blocks)
{
	pc->blocks = CALLOC(num_blocks, sizeof(struct r600_perfcounter_block));
	if (!pc->blocks)
		return false;

	pc->separate_se = debug_get_bool_option("RADEON_PC_SEPARATE_SE", false);
	pc->separate_instance = debug_get_bool_option("RADEON_PC_SEPARATE_INSTANCE", false);

	return true;
}

void si_perfcounters_add_block(struct si_screen *sscreen,
			       struct r600_perfcounters *pc,
			       const char *name, unsigned flags,
			       unsigned counters, unsigned selectors,
			       unsigned instances, void *data)
{
	struct r600_perfcounter_block *block = &pc->blocks[pc->num_blocks];

	assert(counters <= R600_QUERY_MAX_COUNTERS);

	block->basename = name;
	block->flags = flags;
	block->num_counters = counters;
	block->num_selectors = selectors;
	block->num_instances = MAX2(instances, 1);
	block->data = data;

	if (pc->separate_se && (block->flags & R600_PC_BLOCK_SE))
		block->flags |= R600_PC_BLOCK_SE_GROUPS;
	if (pc->separate_instance && block->num_instances > 1)
		block->flags |= R600_PC_BLOCK_INSTANCE_GROUPS;

	if (block->flags & R600_PC_BLOCK_INSTANCE_GROUPS) {
		block->num_groups = block->num_instances;
	} else {
		block->num_groups = 1;
	}

	if (block->flags & R600_PC_BLOCK_SE_GROUPS)
		block->num_groups *= sscreen->info.max_se;
	if (block->flags & R600_PC_BLOCK_SHADER)
		block->num_groups *= pc->num_shader_types;

	++pc->num_blocks;
	pc->num_groups += block->num_groups;
}

void si_perfcounters_do_destroy(struct r600_perfcounters *pc)
{
	unsigned i;

	for (i = 0; i < pc->num_blocks; ++i) {
		FREE(pc->blocks[i].group_names);
		FREE(pc->blocks[i].selector_names);
	}
	FREE(pc->blocks);
	FREE(pc);
}