radeonsi: const-ify the si_query_ops
[mesa.git] / src / gallium / drivers / radeonsi / si_perfcounter.c
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "si_build_pm4.h"
26 #include "si_query.h"
27 #include "util/u_memory.h"
28
29
/* Properties of a hardware performance-counter block, stored in
 * si_pc_block_base::flags. */
enum si_pc_block_flags {
   /* This block is part of the shader engine */
   SI_PC_BLOCK_SE = (1 << 0),

   /* Expose per-instance groups instead of summing all instances (within
    * an SE). */
   SI_PC_BLOCK_INSTANCE_GROUPS = (1 << 1),

   /* Expose per-SE groups instead of summing instances across SEs. */
   SI_PC_BLOCK_SE_GROUPS = (1 << 2),

   /* Shader block */
   SI_PC_BLOCK_SHADER = (1 << 3),

   /* Non-shader block with perfcounters windowed by shaders. */
   SI_PC_BLOCK_SHADER_WINDOWED = (1 << 4),
};
47
/* How a block's selector/counter registers are arranged; stored in
 * si_pc_block_base::layout and consumed by si_pc_emit_select/read. */
enum si_pc_reg_layout {
   /* All secondary selector dwords follow as one block after the primary
    * selector dwords for the counters that have secondary selectors.
    */
   SI_PC_MULTI_BLOCK = 0,

   /* Each secondary selector dword follows immediately afters the
    * corresponding primary.
    */
   SI_PC_MULTI_ALTERNATE = 1,

   /* All secondary selector dwords follow as one block after all primary
    * selector dwords.
    */
   SI_PC_MULTI_TAIL = 2,

   /* Free-form arrangement of selector registers. */
   SI_PC_MULTI_CUSTOM = 3,

   SI_PC_MULTI_MASK = 3,

   /* Registers are laid out in decreasing rather than increasing order. */
   SI_PC_REG_REVERSE = 4,

   /* Block has no readable registers; si_pc_emit_read writes zeroes. */
   SI_PC_FAKE = 8,
};
74
/* Static, chip-independent description of a hardware counter block's
 * register layout. */
struct si_pc_block_base {
   const char *name;       /* prefix for group/selector names */
   unsigned num_counters;  /* max counters usable at the same time */
   unsigned flags;         /* mask of si_pc_block_flags */

   unsigned select_or;     /* bits OR'ed into every selector value */
   unsigned select0;       /* first selector register (regular layouts) */
   unsigned counter0_lo;   /* first counter readback register (LO dword) */
   unsigned *select;       /* explicit selector regs (SI_PC_MULTI_CUSTOM) */
   unsigned *counters;     /* explicit counter regs (overrides counter0_lo) */
   unsigned num_multi;     /* counters that have a secondary selector */
   unsigned num_prelude;   /* zero dwords written before the selectors */
   unsigned layout;        /* mask of si_pc_reg_layout */
};

/* Per-GPU-generation description of a counter block: base layout plus
 * how many selectors/instances this generation exposes. */
struct si_pc_block_gfxdescr {
   struct si_pc_block_base *b;
   unsigned selectors;  /* number of countable events */
   unsigned instances;  /* per-SE instances; 0 when unspecified here —
                         * presumably fixed up at init, not visible in
                         * this file (TODO confirm) */
};

/* Runtime state of one counter block, including the exposed group and
 * selector name tables built by si_init_block_names. */
struct si_pc_block {
   const struct si_pc_block_gfxdescr *b;
   unsigned num_instances;

   unsigned num_groups;
   char *group_names;            /* num_groups strings, fixed stride */
   unsigned group_name_stride;

   char *selector_names;         /* per-selector strings, fixed stride */
   unsigned selector_name_stride;
};
107
/* The order is chosen to be compatible with GPUPerfStudio's hardcoding of
 * performance counter group IDs.
 */
static const char * const si_pc_shader_type_suffixes[] = {
   "", "_ES", "_GS", "_VS", "_PS", "_LS", "_HS", "_CS"
};

/* SQ_PERFCOUNTER_CTRL shader-stage enable masks; indexed in lockstep with
 * si_pc_shader_type_suffixes (entry 0 enables all stages). */
static const unsigned si_pc_shader_type_bits[] = {
   0x7f,
   S_036780_ES_EN(1),
   S_036780_GS_EN(1),
   S_036780_VS_EN(1),
   S_036780_PS_EN(1),
   S_036780_LS_EN(1),
   S_036780_HS_EN(1),
   S_036780_CS_EN(1),
};

/* Max counters per HW block */
#define SI_QUERY_MAX_COUNTERS 16

/* Sentinel stored in si_query_pc::shaders by get_group_state to request
 * that shader windowing be reset unless the user picked explicit stages. */
#define SI_PC_SHADERS_WINDOWING (1 << 31)
130
/* One group of counters within a single block/SE/instance selection.
 * A query keeps a singly-linked list of these. */
struct si_query_group {
   struct si_query_group *next;
   struct si_pc_block *block;
   unsigned sub_gid;     /* only used during init */
   unsigned result_base; /* only used during init */
   int se;               /* shader engine index, or -1 for broadcast/sum */
   int instance;         /* instance index, or -1 for broadcast/sum */
   unsigned num_counters;
   unsigned selectors[SI_QUERY_MAX_COUNTERS];
};

/* Maps one user-requested counter to its slots in the result buffer. */
struct si_query_counter {
   unsigned base;   /* first result slot (uint64 index) */
   unsigned qwords; /* number of slots summed for this counter */
   unsigned stride; /* in uint64s */
};

/* A batch performance-counter query. */
struct si_query_pc {
   struct si_query b;
   struct si_query_buffer buffer;

   /* Size of the results in memory, in bytes. */
   unsigned result_size;

   unsigned shaders;       /* stage mask for SQ windowing (see above) */
   unsigned num_counters;  /* number of user-requested counters */
   struct si_query_counter *counters;
   struct si_query_group *groups;
};
160
161
/* Per-block register descriptions; register offsets are the CIK+ UCONFIG
 * registers shared by the CIK/VI/gfx9 group tables below. */
static struct si_pc_block_base cik_CB = {
   .name = "CB",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS,

   .select0 = R_037000_CB_PERFCOUNTER_FILTER,
   .counter0_lo = R_035018_CB_PERFCOUNTER0_LO,
   .num_multi = 1,
   .num_prelude = 1,
   .layout = SI_PC_MULTI_ALTERNATE,
};

/* CPC selector registers are not contiguous, hence SI_PC_MULTI_CUSTOM. */
static unsigned cik_CPC_select[] = {
   R_036024_CPC_PERFCOUNTER0_SELECT,
   R_036010_CPC_PERFCOUNTER0_SELECT1,
   R_03600C_CPC_PERFCOUNTER1_SELECT,
};
static struct si_pc_block_base cik_CPC = {
   .name = "CPC",
   .num_counters = 2,

   .select = cik_CPC_select,
   .counter0_lo = R_034018_CPC_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_CUSTOM | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_CPF = {
   .name = "CPF",
   .num_counters = 2,

   .select0 = R_03601C_CPF_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034028_CPF_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_ALTERNATE | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_CPG = {
   .name = "CPG",
   .num_counters = 2,

   .select0 = R_036008_CPG_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034008_CPG_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_ALTERNATE | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_DB = {
   .name = "DB",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS,

   .select0 = R_037100_DB_PERFCOUNTER0_SELECT,
   .counter0_lo = R_035100_DB_PERFCOUNTER0_LO,
   .num_multi = 3, // really only 2, but there's a gap between registers
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_GDS = {
   .name = "GDS",
   .num_counters = 4,

   .select0 = R_036A00_GDS_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034A00_GDS_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_TAIL,
};

/* GRBM counter registers are unevenly spaced, so list them explicitly. */
static unsigned cik_GRBM_counters[] = {
   R_034100_GRBM_PERFCOUNTER0_LO,
   R_03410C_GRBM_PERFCOUNTER1_LO,
};
static struct si_pc_block_base cik_GRBM = {
   .name = "GRBM",
   .num_counters = 2,

   .select0 = R_036100_GRBM_PERFCOUNTER0_SELECT,
   .counters = cik_GRBM_counters,
};
241
static struct si_pc_block_base cik_GRBMSE = {
   .name = "GRBMSE",
   .num_counters = 4,

   .select0 = R_036108_GRBM_SE0_PERFCOUNTER_SELECT,
   .counter0_lo = R_034114_GRBM_SE0_PERFCOUNTER_LO,
};

static struct si_pc_block_base cik_IA = {
   .name = "IA",
   .num_counters = 4,

   .select0 = R_036210_IA_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034220_IA_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_PA_SC = {
   .name = "PA_SC",
   .num_counters = 8,
   .flags = SI_PC_BLOCK_SE,

   .select0 = R_036500_PA_SC_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034500_PA_SC_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_ALTERNATE,
};

/* According to docs, PA_SU counters are only 48 bits wide. */
static struct si_pc_block_base cik_PA_SU = {
   .name = "PA_SU",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE,

   .select0 = R_036400_PA_SU_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034400_PA_SU_PERFCOUNTER0_LO,
   .num_multi = 2,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_SPI = {
   .name = "SPI",
   .num_counters = 6,
   .flags = SI_PC_BLOCK_SE,

   .select0 = R_036600_SPI_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034604_SPI_PERFCOUNTER0_LO,
   .num_multi = 4,
   .layout = SI_PC_MULTI_BLOCK,
};

/* SQ is the shader block; its counters are windowed per shader stage via
 * SQ_PERFCOUNTER_CTRL (see si_pc_emit_shaders). */
static struct si_pc_block_base cik_SQ = {
   .name = "SQ",
   .num_counters = 16,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_SHADER,

   .select0 = R_036700_SQ_PERFCOUNTER0_SELECT,
   .select_or = S_036700_SQC_BANK_MASK(15) |
                S_036700_SQC_CLIENT_MASK(15) |
                S_036700_SIMD_MASK(15),
   .counter0_lo = R_034700_SQ_PERFCOUNTER0_LO,
};

static struct si_pc_block_base cik_SX = {
   .name = "SX",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE,

   .select0 = R_036900_SX_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034900_SX_PERFCOUNTER0_LO,
   .num_multi = 2,
   .layout = SI_PC_MULTI_TAIL,
};
316
static struct si_pc_block_base cik_TA = {
   .name = "TA",
   .num_counters = 2,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED,

   .select0 = R_036B00_TA_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034B00_TA_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TD = {
   .name = "TD",
   .num_counters = 2,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED,

   .select0 = R_036C00_TD_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034C00_TD_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCA = {
   .name = "TCA",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_INSTANCE_GROUPS,

   .select0 = R_036E40_TCA_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034E40_TCA_PERFCOUNTER0_LO,
   .num_multi = 2,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCC = {
   .name = "TCC",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_INSTANCE_GROUPS,

   .select0 = R_036E00_TCC_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034E00_TCC_PERFCOUNTER0_LO,
   .num_multi = 2,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCP = {
   .name = "TCP",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED,

   .select0 = R_036D00_TCP_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034D00_TCP_PERFCOUNTER0_LO,
   .num_multi = 2,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_VGT = {
   .name = "VGT",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE,

   .select0 = R_036230_VGT_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034240_VGT_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_WD = {
   .name = "WD",
   .num_counters = 4,

   .select0 = R_036200_WD_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034200_WD_PERFCOUNTER0_LO,
};

/* MC and SRBM are not readable from the gfx queue: SI_PC_FAKE makes
 * si_pc_emit_read write zeroes instead (see that function). */
static struct si_pc_block_base cik_MC = {
   .name = "MC",
   .num_counters = 4,

   .layout = SI_PC_FAKE,
};

static struct si_pc_block_base cik_SRBM = {
   .name = "SRBM",
   .num_counters = 2,

   .layout = SI_PC_FAKE,
};
404
/* Both the number of instances and selectors varies between chips of the same
 * class. We only differentiate by class here and simply expose the maximum
 * number over all chips in a class.
 *
 * Unfortunately, GPUPerfStudio uses the order of performance counter groups
 * blindly once it believes it has identified the hardware, so the order of
 * blocks here matters.
 *
 * Entry format: { base description, selectors[, instances] } — see
 * struct si_pc_block_gfxdescr.
 */
static struct si_pc_block_gfxdescr groups_CIK[] = {
   { &cik_CB, 226},
   { &cik_CPF, 17 },
   { &cik_DB, 257},
   { &cik_GRBM, 34 },
   { &cik_GRBMSE, 15 },
   { &cik_PA_SU, 153 },
   { &cik_PA_SC, 395 },
   { &cik_SPI, 186 },
   { &cik_SQ, 252 },
   { &cik_SX, 32 },
   { &cik_TA, 111, 11 },
   { &cik_TCA, 39, 2 },
   { &cik_TCC, 160},
   { &cik_TD, 55, 11 },
   { &cik_TCP, 154, 11 },
   { &cik_GDS, 121 },
   { &cik_VGT, 140 },
   { &cik_IA, 22 },
   { &cik_MC, 22 },
   { &cik_SRBM, 19 },
   { &cik_WD, 22 },
   { &cik_CPG, 46 },
   { &cik_CPC, 22 },

};

static struct si_pc_block_gfxdescr groups_VI[] = {
   { &cik_CB, 405},
   { &cik_CPF, 19 },
   { &cik_DB, 257},
   { &cik_GRBM, 34 },
   { &cik_GRBMSE, 15 },
   { &cik_PA_SU, 154 },
   { &cik_PA_SC, 397 },
   { &cik_SPI, 197 },
   { &cik_SQ, 273 },
   { &cik_SX, 34 },
   { &cik_TA, 119, 16 },
   { &cik_TCA, 35, 2 },
   { &cik_TCC, 192},
   { &cik_TD, 55, 16 },
   { &cik_TCP, 180, 16 },
   { &cik_GDS, 121 },
   { &cik_VGT, 147 },
   { &cik_IA, 24 },
   { &cik_MC, 22 },
   { &cik_SRBM, 27 },
   { &cik_WD, 37 },
   { &cik_CPG, 48 },
   { &cik_CPC, 24 },

};

/* gfx9 has no MC/SRBM groups. */
static struct si_pc_block_gfxdescr groups_gfx9[] = {
   { &cik_CB, 438},
   { &cik_CPF, 32 },
   { &cik_DB, 328},
   { &cik_GRBM, 38 },
   { &cik_GRBMSE, 16 },
   { &cik_PA_SU, 292 },
   { &cik_PA_SC, 491 },
   { &cik_SPI, 196 },
   { &cik_SQ, 374 },
   { &cik_SX, 208 },
   { &cik_TA, 119, 16 },
   { &cik_TCA, 35, 2 },
   { &cik_TCC, 256},
   { &cik_TD, 57, 16 },
   { &cik_TCP, 85, 16 },
   { &cik_GDS, 121 },
   { &cik_VGT, 148 },
   { &cik_IA, 32 },
   { &cik_WD, 58 },
   { &cik_CPG, 59 },
   { &cik_CPC, 35 },
};
490
491 static bool si_pc_block_has_per_se_groups(const struct si_perfcounters *pc,
492 const struct si_pc_block *block)
493 {
494 return block->b->b->flags & SI_PC_BLOCK_SE_GROUPS ||
495 (block->b->b->flags & SI_PC_BLOCK_SE && pc->separate_se);
496 }
497
498 static bool si_pc_block_has_per_instance_groups(const struct si_perfcounters *pc,
499 const struct si_pc_block *block)
500 {
501 return block->b->b->flags & SI_PC_BLOCK_INSTANCE_GROUPS ||
502 (block->num_instances > 1 && pc->separate_instance);
503 }
504
505 static struct si_pc_block *
506 lookup_counter(struct si_perfcounters *pc, unsigned index,
507 unsigned *base_gid, unsigned *sub_index)
508 {
509 struct si_pc_block *block = pc->blocks;
510 unsigned bid;
511
512 *base_gid = 0;
513 for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
514 unsigned total = block->num_groups * block->b->selectors;
515
516 if (index < total) {
517 *sub_index = index;
518 return block;
519 }
520
521 index -= total;
522 *base_gid += block->num_groups;
523 }
524
525 return NULL;
526 }
527
528 static struct si_pc_block *
529 lookup_group(struct si_perfcounters *pc, unsigned *index)
530 {
531 unsigned bid;
532 struct si_pc_block *block = pc->blocks;
533
534 for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
535 if (*index < block->num_groups)
536 return block;
537 *index -= block->num_groups;
538 }
539
540 return NULL;
541 }
542
543 static void si_pc_emit_instance(struct si_context *sctx,
544 int se, int instance)
545 {
546 struct radeon_cmdbuf *cs = sctx->gfx_cs;
547 unsigned value = S_030800_SH_BROADCAST_WRITES(1);
548
549 if (se >= 0) {
550 value |= S_030800_SE_INDEX(se);
551 } else {
552 value |= S_030800_SE_BROADCAST_WRITES(1);
553 }
554
555 if (instance >= 0) {
556 value |= S_030800_INSTANCE_INDEX(instance);
557 } else {
558 value |= S_030800_INSTANCE_BROADCAST_WRITES(1);
559 }
560
561 radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX, value);
562 }
563
/* Window SQ performance counters to the shader stages in the low 7 bits
 * of `shaders`; the second dword written after SQ_PERFCOUNTER_CTRL is set
 * to all-ones. */
static void si_pc_emit_shaders(struct si_context *sctx,
                               unsigned shaders)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   radeon_set_uconfig_reg_seq(cs, R_036780_SQ_PERFCOUNTER_CTRL, 2);
   radeon_emit(cs, shaders & 0x7f);
   radeon_emit(cs, 0xffffffff);
}
573
/* Program the selector registers of `block` with the first `count` entries
 * of `selectors`, following the register layout described by
 * si_pc_block_base::layout. Secondary selectors (for the first num_multi
 * counters) are always programmed to 0. */
static void si_pc_emit_select(struct si_context *sctx,
                              struct si_pc_block *block,
                              unsigned count, unsigned *selectors)
{
   struct si_pc_block_base *regs = block->b->b;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   unsigned idx;
   unsigned layout_multi = regs->layout & SI_PC_MULTI_MASK;
   unsigned dw;

   assert(count <= regs->num_counters);

   /* Fake blocks (MC, SRBM) have no selector registers. */
   if (regs->layout & SI_PC_FAKE)
      return;

   if (layout_multi == SI_PC_MULTI_BLOCK) {
      assert(!(regs->layout & SI_PC_REG_REVERSE));

      /* When count >= num_multi, one register sequence covers the
       * primaries, the secondary block, and the remaining primaries;
       * otherwise the secondary block is written as a separate
       * sequence starting at select0 + 4 * num_multi. */
      dw = count + regs->num_prelude;
      if (count >= regs->num_multi)
         dw += regs->num_multi;
      radeon_set_uconfig_reg_seq(cs, regs->select0, dw);
      for (idx = 0; idx < regs->num_prelude; ++idx)
         radeon_emit(cs, 0);
      for (idx = 0; idx < MIN2(count, regs->num_multi); ++idx)
         radeon_emit(cs, selectors[idx] | regs->select_or);

      if (count < regs->num_multi) {
         unsigned select1 =
            regs->select0 + 4 * regs->num_multi;
         radeon_set_uconfig_reg_seq(cs, select1, count);
      }

      /* Zero the secondary selectors. */
      for (idx = 0; idx < MIN2(count, regs->num_multi); ++idx)
         radeon_emit(cs, 0);

      /* Primaries of the counters without secondary selectors. */
      if (count > regs->num_multi) {
         for (idx = regs->num_multi; idx < count; ++idx)
            radeon_emit(cs, selectors[idx] | regs->select_or);
      }
   } else if (layout_multi == SI_PC_MULTI_TAIL) {
      unsigned select1, select1_count;

      assert(!(regs->layout & SI_PC_REG_REVERSE));

      /* All primaries first ... */
      radeon_set_uconfig_reg_seq(cs, regs->select0, count + regs->num_prelude);
      for (idx = 0; idx < regs->num_prelude; ++idx)
         radeon_emit(cs, 0);
      for (idx = 0; idx < count; ++idx)
         radeon_emit(cs, selectors[idx] | regs->select_or);

      /* ... then the secondary block after all num_counters primaries. */
      select1 = regs->select0 + 4 * regs->num_counters;
      select1_count = MIN2(count, regs->num_multi);
      radeon_set_uconfig_reg_seq(cs, select1, select1_count);
      for (idx = 0; idx < select1_count; ++idx)
         radeon_emit(cs, 0);
   } else if (layout_multi == SI_PC_MULTI_CUSTOM) {
      /* Irregular register spacing: one write per register from the
       * explicit list, interleaving primary and secondary selectors. */
      unsigned *reg = regs->select;
      for (idx = 0; idx < count; ++idx) {
         radeon_set_uconfig_reg(cs, *reg++, selectors[idx] | regs->select_or);
         if (idx < regs->num_multi)
            radeon_set_uconfig_reg(cs, *reg++, 0);
      }
   } else {
      assert(layout_multi == SI_PC_MULTI_ALTERNATE);

      /* Primary/secondary alternate for the first num_multi counters. */
      unsigned reg_base = regs->select0;
      unsigned reg_count = count + MIN2(count, regs->num_multi);
      reg_count += regs->num_prelude;

      if (!(regs->layout & SI_PC_REG_REVERSE)) {
         radeon_set_uconfig_reg_seq(cs, reg_base, reg_count);

         for (idx = 0; idx < regs->num_prelude; ++idx)
            radeon_emit(cs, 0);
         for (idx = 0; idx < count; ++idx) {
            radeon_emit(cs, selectors[idx] | regs->select_or);
            if (idx < regs->num_multi)
               radeon_emit(cs, 0);
         }
      } else {
         /* select0 is the HIGHEST register; rebase the sequence and
          * emit in reverse order. */
         reg_base -= (reg_count - 1) * 4;
         radeon_set_uconfig_reg_seq(cs, reg_base, reg_count);

         for (idx = count; idx > 0; --idx) {
            if (idx <= regs->num_multi)
               radeon_emit(cs, 0);
            radeon_emit(cs, selectors[idx - 1] | regs->select_or);
         }
         for (idx = 0; idx < regs->num_prelude; ++idx)
            radeon_emit(cs, 0);
      }
   }
}
668
/* Emit commands that reset and start the performance counters, and write
 * the marker value 1 to the results buffer at va (si_pc_emit_stop later
 * overwrites it with 0 and waits for that). Also registers the buffer in
 * the CS buffer list — si_pc_emit_stop relies on this. */
static void si_pc_emit_start(struct si_context *sctx,
                             struct r600_resource *buffer, uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer,
                             RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

   /* Write the "counting in progress" marker. */
   radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
   radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
                   COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM));
   radeon_emit(cs, 1); /* immediate */
   radeon_emit(cs, 0); /* unused */
   radeon_emit(cs, va);
   radeon_emit(cs, va >> 32);

   /* Reset, then start the counters. */
   radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
                          S_036020_PERFMON_STATE(V_036020_DISABLE_AND_RESET));
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
   radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_START) | EVENT_INDEX(0));
   radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
                          S_036020_PERFMON_STATE(V_036020_START_COUNTING));
}
692
/* Note: The buffer was already added in si_pc_emit_start, so we don't have to
 * do it again in here. */
static void si_pc_emit_stop(struct si_context *sctx,
                            struct r600_resource *buffer, uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   /* Overwrite the start marker at va with 0 at bottom-of-pipe and wait
    * for it, so all preceding work has finished before sampling. */
   si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                     EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                     EOP_DATA_SEL_VALUE_32BIT,
                     buffer, va, 0, SI_NOT_QUERY);
   si_cp_wait_mem(sctx, va, 0, 0xffffffff, 0);

   /* Latch the counter values, then stop counting. */
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
   radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_SAMPLE) | EVENT_INDEX(0));
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
   radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_STOP) | EVENT_INDEX(0));
   radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
                          S_036020_PERFMON_STATE(V_036020_STOP_COUNTING) |
                          S_036020_PERFMON_SAMPLE_ENABLE(1));
}
714
/* Emit COPY_DATA packets reading back `count` counters of `block` into the
 * results buffer at va, one uint64_t per counter. */
static void si_pc_emit_read(struct si_context *sctx,
                            struct si_pc_block *block,
                            unsigned count, uint64_t va)
{
   struct si_pc_block_base *regs = block->b->b;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   unsigned idx;
   unsigned reg = regs->counter0_lo;
   unsigned reg_delta = 8; /* each counter has a LO and a HI dword register */

   if (!(regs->layout & SI_PC_FAKE)) {
      if (regs->layout & SI_PC_REG_REVERSE)
         /* Unsigned negation wraps; reg += reg_delta then steps DOWN
          * by 8 (well-defined modular arithmetic). */
         reg_delta = -reg_delta;

      for (idx = 0; idx < count; ++idx) {
         if (regs->counters)
            reg = regs->counters[idx]; /* irregular spacing: explicit list */

         radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
         radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) |
                         COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM) |
                         COPY_DATA_COUNT_SEL); /* 64 bits */
         radeon_emit(cs, reg >> 2);
         radeon_emit(cs, 0); /* unused */
         radeon_emit(cs, va);
         radeon_emit(cs, va >> 32);
         va += sizeof(uint64_t);
         reg += reg_delta;
      }
   } else {
      /* Fake blocks (MC, SRBM) cannot be read from the gfx queue; write
       * zeroes so the result buffer layout stays consistent. */
      for (idx = 0; idx < count; ++idx) {
         radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
         radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
                         COPY_DATA_DST_SEL(COPY_DATA_DST_MEM_GRBM) |
                         COPY_DATA_COUNT_SEL);
         radeon_emit(cs, 0); /* immediate */
         radeon_emit(cs, 0);
         radeon_emit(cs, va);
         radeon_emit(cs, va >> 32);
         va += sizeof(uint64_t);
      }
   }
}
758
759 static void si_pc_query_destroy(struct si_screen *sscreen,
760 struct si_query *rquery)
761 {
762 struct si_query_pc *query = (struct si_query_pc *)rquery;
763
764 while (query->groups) {
765 struct si_query_group *group = query->groups;
766 query->groups = group->next;
767 FREE(group);
768 }
769
770 FREE(query->counters);
771
772 si_query_buffer_destroy(sscreen, &query->buffer);
773 FREE(query);
774 }
775
776 static void si_pc_query_resume(struct si_context *sctx, struct si_query *rquery)
777 /*
778 struct si_query_hw *hwquery,
779 struct r600_resource *buffer, uint64_t va)*/
780 {
781 struct si_query_pc *query = (struct si_query_pc *)rquery;
782 int current_se = -1;
783 int current_instance = -1;
784
785 if (!si_query_buffer_alloc(sctx, &query->buffer, NULL, query->result_size))
786 return;
787 si_need_gfx_cs_space(sctx);
788
789 if (query->shaders)
790 si_pc_emit_shaders(sctx, query->shaders);
791
792 for (struct si_query_group *group = query->groups; group; group = group->next) {
793 struct si_pc_block *block = group->block;
794
795 if (group->se != current_se || group->instance != current_instance) {
796 current_se = group->se;
797 current_instance = group->instance;
798 si_pc_emit_instance(sctx, group->se, group->instance);
799 }
800
801 si_pc_emit_select(sctx, block, group->num_counters, group->selectors);
802 }
803
804 if (current_se != -1 || current_instance != -1)
805 si_pc_emit_instance(sctx, -1, -1);
806
807 uint64_t va = query->buffer.buf->gpu_address + query->buffer.results_end;
808 si_pc_emit_start(sctx, query->buffer.buf, va);
809 }
810
/* Emit the command stream that stops the counters and reads all results
 * into the current results buffer. For broadcast groups (se/instance < 0)
 * every SE/instance is read out individually, in the same order assumed
 * by si_pc_query_add_result. */
static void si_pc_query_suspend(struct si_context *sctx, struct si_query *rquery)
{
   struct si_query_pc *query = (struct si_query_pc *)rquery;

   if (!query->buffer.buf)
      return;

   uint64_t va = query->buffer.buf->gpu_address + query->buffer.results_end;
   query->buffer.results_end += query->result_size;

   si_pc_emit_stop(sctx, query->buffer.buf, va);

   for (struct si_query_group *group = query->groups; group; group = group->next) {
      struct si_pc_block *block = group->block;
      unsigned se = group->se >= 0 ? group->se : 0;
      unsigned se_end = se + 1;

      /* A broadcast SE group reads every shader engine. */
      if ((block->b->b->flags & SI_PC_BLOCK_SE) && (group->se < 0))
         se_end = sctx->screen->info.max_se;

      do {
         unsigned instance = group->instance >= 0 ? group->instance : 0;

         /* Likewise, a broadcast instance group reads every instance. */
         do {
            si_pc_emit_instance(sctx, se, instance);
            si_pc_emit_read(sctx, block, group->num_counters, va);
            va += sizeof(uint64_t) * group->num_counters;
         } while (group->instance < 0 && ++instance < block->num_instances);
      } while (++se < se_end);
   }

   /* Back to broadcast mode. */
   si_pc_emit_instance(sctx, -1, -1);
}
844
845 static bool si_pc_query_begin(struct si_context *ctx, struct si_query *rquery)
846 {
847 struct si_query_pc *query = (struct si_query_pc *)rquery;
848
849 si_query_buffer_reset(ctx, &query->buffer);
850
851 LIST_ADDTAIL(&query->b.active_list, &ctx->active_queries);
852 ctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;
853
854 si_pc_query_resume(ctx, rquery);
855
856 return true;
857 }
858
859 static bool si_pc_query_end(struct si_context *ctx, struct si_query *rquery)
860 {
861 struct si_query_pc *query = (struct si_query_pc *)rquery;
862
863 si_pc_query_suspend(ctx, rquery);
864
865 LIST_DEL(&rquery->active_list);
866 ctx->num_cs_dw_queries_suspend -= rquery->num_cs_dw_suspend;
867
868 return query->buffer.buf != NULL;
869 }
870
/* Accumulate one mapped results buffer into result->batch[]. For each
 * user counter, `qwords` slots spaced `stride` uint64s apart are summed
 * (one slot per SE/instance that was read out).
 * NOTE(review): each 64-bit slot is truncated to its low 32 bits before
 * accumulation — presumably only 32 bits per sample are valid; confirm. */
static void si_pc_query_add_result(struct si_query_pc *query,
                                   void *buffer,
                                   union pipe_query_result *result)
{
   uint64_t *results = buffer;
   unsigned i, j;

   for (i = 0; i < query->num_counters; ++i) {
      struct si_query_counter *counter = &query->counters[i];

      for (j = 0; j < counter->qwords; ++j) {
         uint32_t value = results[counter->base + j * counter->stride];
         result->batch[i].u64 += value;
      }
   }
}
887
/* Sum the results of all buffers (current and previous) into
 * result->batch[]. Returns false when a buffer could not be mapped, e.g.
 * when wait == false and the GPU has not finished writing it. */
static bool si_pc_query_get_result(struct si_context *sctx, struct si_query *rquery,
                                   bool wait, union pipe_query_result *result)
{
   struct si_query_pc *query = (struct si_query_pc *)rquery;

   memset(result, 0, sizeof(result->batch[0]) * query->num_counters);

   for (struct si_query_buffer *qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned usage = PIPE_TRANSFER_READ |
                       (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
      unsigned results_base = 0;
      void *map;

      /* An already-flushed query can be mapped directly; otherwise the
       * mapping must synchronize with in-flight rings. */
      if (rquery->b.flushed)
         map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
      else
         map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);

      if (!map)
         return false;

      /* Each suspend appended one result_size chunk to the buffer. */
      while (results_base != qbuf->results_end) {
         si_pc_query_add_result(query, map + results_base, result);
         results_base += query->result_size;
      }
   }

   return true;
}
917
/* Vtable that plugs perf-counter batch queries into the common si_query
 * machinery. */
static const struct si_query_ops batch_query_ops = {
   .destroy = si_pc_query_destroy,
   .begin = si_pc_query_begin,
   .end = si_pc_query_end,
   .get_result = si_pc_query_get_result,

   .suspend = si_pc_query_suspend,
   .resume = si_pc_query_resume,
};
927
/* Find or create the si_query_group for (block, sub_gid) within `query`.
 * sub_gid encodes shader type, SE and instance in mixed radix and is
 * decoded here into group->se / group->instance. Returns NULL on
 * allocation failure or on an incompatible shader-stage combination. */
static struct si_query_group *get_group_state(struct si_screen *screen,
                                              struct si_query_pc *query,
                                              struct si_pc_block *block,
                                              unsigned sub_gid)
{
   struct si_query_group *group = query->groups;

   /* Reuse an existing group when one matches. */
   while (group) {
      if (group->block == block && group->sub_gid == sub_gid)
         return group;
      group = group->next;
   }

   group = CALLOC_STRUCT(si_query_group);
   if (!group)
      return NULL;

   group->block = block;
   group->sub_gid = sub_gid;

   if (block->b->b->flags & SI_PC_BLOCK_SHADER) {
      /* Peel off the shader-type digit of sub_gid. */
      unsigned sub_gids = block->num_instances;
      unsigned shader_id;
      unsigned shaders;
      unsigned query_shaders;

      if (si_pc_block_has_per_se_groups(screen->perfcounters, block))
         sub_gids = sub_gids * screen->info.max_se;
      shader_id = sub_gid / sub_gids;
      sub_gid = sub_gid % sub_gids;

      shaders = si_pc_shader_type_bits[shader_id];

      /* All shader-type counters of one query must agree on the
       * stage mask, because SQ windowing is global. */
      query_shaders = query->shaders & ~SI_PC_SHADERS_WINDOWING;
      if (query_shaders && query_shaders != shaders) {
         fprintf(stderr, "si_perfcounter: incompatible shader groups\n");
         FREE(group);
         return NULL;
      }
      query->shaders = shaders;
   }

   if (block->b->b->flags & SI_PC_BLOCK_SHADER_WINDOWED && !query->shaders) {
      // A non-zero value in query->shaders ensures that the shader
      // masking is reset unless the user explicitly requests one.
      query->shaders = SI_PC_SHADERS_WINDOWING;
   }

   /* Peel off the SE digit, then the instance digit. */
   if (si_pc_block_has_per_se_groups(screen->perfcounters, block)) {
      group->se = sub_gid / block->num_instances;
      sub_gid = sub_gid % block->num_instances;
   } else {
      group->se = -1;
   }

   if (si_pc_block_has_per_instance_groups(screen->perfcounters, block)) {
      group->instance = sub_gid;
   } else {
      group->instance = -1;
   }

   /* Prepend to the query's group list. */
   group->next = query->groups;
   query->groups = group;

   return group;
}
994
995 struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
996 unsigned num_queries,
997 unsigned *query_types)
998 {
999 struct si_screen *screen =
1000 (struct si_screen *)ctx->screen;
1001 struct si_perfcounters *pc = screen->perfcounters;
1002 struct si_pc_block *block;
1003 struct si_query_group *group;
1004 struct si_query_pc *query;
1005 unsigned base_gid, sub_gid, sub_index;
1006 unsigned i, j;
1007
1008 if (!pc)
1009 return NULL;
1010
1011 query = CALLOC_STRUCT(si_query_pc);
1012 if (!query)
1013 return NULL;
1014
1015 query->b.ops = &batch_query_ops;
1016
1017 query->num_counters = num_queries;
1018
1019 /* Collect selectors per group */
1020 for (i = 0; i < num_queries; ++i) {
1021 unsigned sub_gid;
1022
1023 if (query_types[i] < SI_QUERY_FIRST_PERFCOUNTER)
1024 goto error;
1025
1026 block = lookup_counter(pc, query_types[i] - SI_QUERY_FIRST_PERFCOUNTER,
1027 &base_gid, &sub_index);
1028 if (!block)
1029 goto error;
1030
1031 sub_gid = sub_index / block->b->selectors;
1032 sub_index = sub_index % block->b->selectors;
1033
1034 group = get_group_state(screen, query, block, sub_gid);
1035 if (!group)
1036 goto error;
1037
1038 if (group->num_counters >= block->b->b->num_counters) {
1039 fprintf(stderr,
1040 "perfcounter group %s: too many selected\n",
1041 block->b->b->name);
1042 goto error;
1043 }
1044 group->selectors[group->num_counters] = sub_index;
1045 ++group->num_counters;
1046 }
1047
1048 /* Compute result bases and CS size per group */
1049 query->b.num_cs_dw_suspend = pc->num_stop_cs_dwords;
1050 query->b.num_cs_dw_suspend += pc->num_instance_cs_dwords;
1051
1052 i = 0;
1053 for (group = query->groups; group; group = group->next) {
1054 struct si_pc_block *block = group->block;
1055 unsigned read_dw;
1056 unsigned instances = 1;
1057
1058 if ((block->b->b->flags & SI_PC_BLOCK_SE) && group->se < 0)
1059 instances = screen->info.max_se;
1060 if (group->instance < 0)
1061 instances *= block->num_instances;
1062
1063 group->result_base = i;
1064 query->result_size += sizeof(uint64_t) * instances * group->num_counters;
1065 i += instances * group->num_counters;
1066
1067 read_dw = 6 * group->num_counters;
1068 query->b.num_cs_dw_suspend += instances * read_dw;
1069 query->b.num_cs_dw_suspend += instances * pc->num_instance_cs_dwords;
1070 }
1071
1072 if (query->shaders) {
1073 if (query->shaders == SI_PC_SHADERS_WINDOWING)
1074 query->shaders = 0xffffffff;
1075 }
1076
1077 /* Map user-supplied query array to result indices */
1078 query->counters = CALLOC(num_queries, sizeof(*query->counters));
1079 for (i = 0; i < num_queries; ++i) {
1080 struct si_query_counter *counter = &query->counters[i];
1081 struct si_pc_block *block;
1082
1083 block = lookup_counter(pc, query_types[i] - SI_QUERY_FIRST_PERFCOUNTER,
1084 &base_gid, &sub_index);
1085
1086 sub_gid = sub_index / block->b->selectors;
1087 sub_index = sub_index % block->b->selectors;
1088
1089 group = get_group_state(screen, query, block, sub_gid);
1090 assert(group != NULL);
1091
1092 for (j = 0; j < group->num_counters; ++j) {
1093 if (group->selectors[j] == sub_index)
1094 break;
1095 }
1096
1097 counter->base = group->result_base + j;
1098 counter->stride = group->num_counters;
1099
1100 counter->qwords = 1;
1101 if ((block->b->b->flags & SI_PC_BLOCK_SE) && group->se < 0)
1102 counter->qwords = screen->info.max_se;
1103 if (group->instance < 0)
1104 counter->qwords *= block->num_instances;
1105 }
1106
1107 return (struct pipe_query *)query;
1108
1109 error:
1110 si_pc_query_destroy(screen, &query->b);
1111 return NULL;
1112 }
1113
1114 static bool si_init_block_names(struct si_screen *screen,
1115 struct si_pc_block *block)
1116 {
1117 bool per_instance_groups = si_pc_block_has_per_instance_groups(screen->perfcounters, block);
1118 bool per_se_groups = si_pc_block_has_per_se_groups(screen->perfcounters, block);
1119 unsigned i, j, k;
1120 unsigned groups_shader = 1, groups_se = 1, groups_instance = 1;
1121 unsigned namelen;
1122 char *groupname;
1123 char *p;
1124
1125 if (per_instance_groups)
1126 groups_instance = block->num_instances;
1127 if (per_se_groups)
1128 groups_se = screen->info.max_se;
1129 if (block->b->b->flags & SI_PC_BLOCK_SHADER)
1130 groups_shader = ARRAY_SIZE(si_pc_shader_type_bits);
1131
1132 namelen = strlen(block->b->b->name);
1133 block->group_name_stride = namelen + 1;
1134 if (block->b->b->flags & SI_PC_BLOCK_SHADER)
1135 block->group_name_stride += 3;
1136 if (per_se_groups) {
1137 assert(groups_se <= 10);
1138 block->group_name_stride += 1;
1139
1140 if (per_instance_groups)
1141 block->group_name_stride += 1;
1142 }
1143 if (per_instance_groups) {
1144 assert(groups_instance <= 100);
1145 block->group_name_stride += 2;
1146 }
1147
1148 block->group_names = MALLOC(block->num_groups * block->group_name_stride);
1149 if (!block->group_names)
1150 return false;
1151
1152 groupname = block->group_names;
1153 for (i = 0; i < groups_shader; ++i) {
1154 const char *shader_suffix = si_pc_shader_type_suffixes[i];
1155 unsigned shaderlen = strlen(shader_suffix);
1156 for (j = 0; j < groups_se; ++j) {
1157 for (k = 0; k < groups_instance; ++k) {
1158 strcpy(groupname, block->b->b->name);
1159 p = groupname + namelen;
1160
1161 if (block->b->b->flags & SI_PC_BLOCK_SHADER) {
1162 strcpy(p, shader_suffix);
1163 p += shaderlen;
1164 }
1165
1166 if (per_se_groups) {
1167 p += sprintf(p, "%d", j);
1168 if (per_instance_groups)
1169 *p++ = '_';
1170 }
1171
1172 if (per_instance_groups)
1173 p += sprintf(p, "%d", k);
1174
1175 groupname += block->group_name_stride;
1176 }
1177 }
1178 }
1179
1180 assert(block->b->selectors <= 1000);
1181 block->selector_name_stride = block->group_name_stride + 4;
1182 block->selector_names = MALLOC(block->num_groups * block->b->selectors *
1183 block->selector_name_stride);
1184 if (!block->selector_names)
1185 return false;
1186
1187 groupname = block->group_names;
1188 p = block->selector_names;
1189 for (i = 0; i < block->num_groups; ++i) {
1190 for (j = 0; j < block->b->selectors; ++j) {
1191 sprintf(p, "%s_%03d", groupname, j);
1192 p += block->selector_name_stride;
1193 }
1194 groupname += block->group_name_stride;
1195 }
1196
1197 return true;
1198 }
1199
1200 int si_get_perfcounter_info(struct si_screen *screen,
1201 unsigned index,
1202 struct pipe_driver_query_info *info)
1203 {
1204 struct si_perfcounters *pc = screen->perfcounters;
1205 struct si_pc_block *block;
1206 unsigned base_gid, sub;
1207
1208 if (!pc)
1209 return 0;
1210
1211 if (!info) {
1212 unsigned bid, num_queries = 0;
1213
1214 for (bid = 0; bid < pc->num_blocks; ++bid) {
1215 num_queries += pc->blocks[bid].b->selectors *
1216 pc->blocks[bid].num_groups;
1217 }
1218
1219 return num_queries;
1220 }
1221
1222 block = lookup_counter(pc, index, &base_gid, &sub);
1223 if (!block)
1224 return 0;
1225
1226 if (!block->selector_names) {
1227 if (!si_init_block_names(screen, block))
1228 return 0;
1229 }
1230 info->name = block->selector_names + sub * block->selector_name_stride;
1231 info->query_type = SI_QUERY_FIRST_PERFCOUNTER + index;
1232 info->max_value.u64 = 0;
1233 info->type = PIPE_DRIVER_QUERY_TYPE_UINT64;
1234 info->result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE;
1235 info->group_id = base_gid + sub / block->b->selectors;
1236 info->flags = PIPE_DRIVER_QUERY_FLAG_BATCH;
1237 if (sub > 0 && sub + 1 < block->b->selectors * block->num_groups)
1238 info->flags |= PIPE_DRIVER_QUERY_FLAG_DONT_LIST;
1239 return 1;
1240 }
1241
1242 int si_get_perfcounter_group_info(struct si_screen *screen,
1243 unsigned index,
1244 struct pipe_driver_query_group_info *info)
1245 {
1246 struct si_perfcounters *pc = screen->perfcounters;
1247 struct si_pc_block *block;
1248
1249 if (!pc)
1250 return 0;
1251
1252 if (!info)
1253 return pc->num_groups;
1254
1255 block = lookup_group(pc, &index);
1256 if (!block)
1257 return 0;
1258
1259 if (!block->group_names) {
1260 if (!si_init_block_names(screen, block))
1261 return 0;
1262 }
1263 info->name = block->group_names + index * block->group_name_stride;
1264 info->num_queries = block->b->selectors;
1265 info->max_active_queries = block->b->b->num_counters;
1266 return 1;
1267 }
1268
1269 void si_destroy_perfcounters(struct si_screen *screen)
1270 {
1271 struct si_perfcounters *pc = screen->perfcounters;
1272 unsigned i;
1273
1274 if (!pc)
1275 return;
1276
1277 for (i = 0; i < pc->num_blocks; ++i) {
1278 FREE(pc->blocks[i].group_names);
1279 FREE(pc->blocks[i].selector_names);
1280 }
1281 FREE(pc->blocks);
1282 FREE(pc);
1283 screen->perfcounters = NULL;
1284 }
1285
1286 void si_init_perfcounters(struct si_screen *screen)
1287 {
1288 struct si_perfcounters *pc;
1289 const struct si_pc_block_gfxdescr *blocks;
1290 unsigned num_blocks;
1291 unsigned i;
1292
1293 switch (screen->info.chip_class) {
1294 case CIK:
1295 blocks = groups_CIK;
1296 num_blocks = ARRAY_SIZE(groups_CIK);
1297 break;
1298 case VI:
1299 blocks = groups_VI;
1300 num_blocks = ARRAY_SIZE(groups_VI);
1301 break;
1302 case GFX9:
1303 blocks = groups_gfx9;
1304 num_blocks = ARRAY_SIZE(groups_gfx9);
1305 break;
1306 case SI:
1307 default:
1308 return; /* not implemented */
1309 }
1310
1311 if (screen->info.max_sh_per_se != 1) {
1312 /* This should not happen on non-SI chips. */
1313 fprintf(stderr, "si_init_perfcounters: max_sh_per_se = %d not "
1314 "supported (inaccurate performance counters)\n",
1315 screen->info.max_sh_per_se);
1316 }
1317
1318 screen->perfcounters = pc = CALLOC_STRUCT(si_perfcounters);
1319 if (!pc)
1320 return;
1321
1322 pc->num_stop_cs_dwords = 14 + si_cp_write_fence_dwords(screen);
1323 pc->num_instance_cs_dwords = 3;
1324
1325 pc->separate_se = debug_get_bool_option("RADEON_PC_SEPARATE_SE", false);
1326 pc->separate_instance = debug_get_bool_option("RADEON_PC_SEPARATE_INSTANCE", false);
1327
1328 pc->blocks = CALLOC(num_blocks, sizeof(struct si_pc_block));
1329 if (!pc->blocks)
1330 goto error;
1331 pc->num_blocks = num_blocks;
1332
1333 for (i = 0; i < num_blocks; ++i) {
1334 struct si_pc_block *block = &pc->blocks[i];
1335 block->b = &blocks[i];
1336 block->num_instances = block->b->instances;
1337
1338 if (!strcmp(block->b->b->name, "CB") ||
1339 !strcmp(block->b->b->name, "DB"))
1340 block->num_instances = screen->info.max_se;
1341 else if (!strcmp(block->b->b->name, "TCC"))
1342 block->num_instances = screen->info.num_tcc_blocks;
1343 else if (!strcmp(block->b->b->name, "IA"))
1344 block->num_instances = MAX2(1, screen->info.max_se / 2);
1345
1346 if (si_pc_block_has_per_instance_groups(pc, block)) {
1347 block->num_groups = block->num_instances;
1348 } else {
1349 block->num_groups = 1;
1350 }
1351
1352 if (si_pc_block_has_per_se_groups(pc, block))
1353 block->num_groups *= screen->info.max_se;
1354 if (block->b->b->flags & SI_PC_BLOCK_SHADER)
1355 block->num_groups *= ARRAY_SIZE(si_pc_shader_type_bits);
1356
1357 pc->num_groups += block->num_groups;
1358 }
1359
1360 return;
1361
1362 error:
1363 si_destroy_perfcounters(screen);
1364 }