radeonsi: switch to 3-spaces style
[mesa.git] src/gallium/drivers/radeonsi/si_perfcounter.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "si_build_pm4.h"
#include "si_query.h"
#include "util/u_memory.h"

enum si_pc_block_flags
{
   /* This block is part of the shader engine */
   SI_PC_BLOCK_SE = (1 << 0),

   /* Expose per-instance groups instead of summing all instances (within
    * an SE). */
   SI_PC_BLOCK_INSTANCE_GROUPS = (1 << 1),

   /* Expose per-SE groups instead of summing instances across SEs. */
   SI_PC_BLOCK_SE_GROUPS = (1 << 2),

   /* Shader block */
   SI_PC_BLOCK_SHADER = (1 << 3),

   /* Non-shader block with perfcounters windowed by shaders. */
   SI_PC_BLOCK_SHADER_WINDOWED = (1 << 4),
};

enum si_pc_reg_layout
{
   /* All secondary selector dwords follow as one block after the primary
    * selector dwords for the counters that have secondary selectors.
    */
   SI_PC_MULTI_BLOCK = 0,

   /* Each secondary selector dword follows immediately after the
    * corresponding primary.
    */
   SI_PC_MULTI_ALTERNATE = 1,

   /* All secondary selector dwords follow as one block after all primary
    * selector dwords.
    */
   SI_PC_MULTI_TAIL = 2,

   /* Free-form arrangement of selector registers. */
   SI_PC_MULTI_CUSTOM = 3,

   SI_PC_MULTI_MASK = 3,

   /* Registers are laid out in decreasing rather than increasing order. */
   SI_PC_REG_REVERSE = 4,

   SI_PC_FAKE = 8,
};

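/* Static description of a hardware block's counter registers, shared by all
 * chips of a generation. si_pc_block_gfxdescr adds the per-generation
 * selector and instance counts on top of it, and si_pc_block is the runtime
 * state built in si_init_perfcounters (actual instance counts plus the
 * lazily generated group/selector name tables). */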
struct si_pc_block_base {
   const char *name;
   unsigned num_counters;
   unsigned flags;

   unsigned select_or;
   unsigned select0;
   unsigned counter0_lo;
   unsigned *select;
   unsigned *counters;
   unsigned num_multi;
   unsigned num_prelude;
   unsigned layout;
};

struct si_pc_block_gfxdescr {
   struct si_pc_block_base *b;
   unsigned selectors;
   unsigned instances;
};

struct si_pc_block {
   const struct si_pc_block_gfxdescr *b;
   unsigned num_instances;

   unsigned num_groups;
   char *group_names;
   unsigned group_name_stride;

   char *selector_names;
   unsigned selector_name_stride;
};

/* The order is chosen to be compatible with GPUPerfStudio's hardcoding of
 * performance counter group IDs.
 */
static const char *const si_pc_shader_type_suffixes[] = {"", "_ES", "_GS", "_VS",
                                                         "_PS", "_LS", "_HS", "_CS"};

static const unsigned si_pc_shader_type_bits[] = {
   0x7f,
   S_036780_ES_EN(1),
   S_036780_GS_EN(1),
   S_036780_VS_EN(1),
   S_036780_PS_EN(1),
   S_036780_LS_EN(1),
   S_036780_HS_EN(1),
   S_036780_CS_EN(1),
};

/* Max counters per HW block */
#define SI_QUERY_MAX_COUNTERS 16

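/* Sentinel for query->shaders: shader-windowed blocks were selected but no
 * shader block picked an explicit mask; it is replaced by 0xffffffff when
 * the query is finalized in si_create_batch_query. */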
#define SI_PC_SHADERS_WINDOWING (1u << 31)

struct si_query_group {
   struct si_query_group *next;
   struct si_pc_block *block;
   unsigned sub_gid;     /* only used during init */
   unsigned result_base; /* only used during init */
   int se;
   int instance;
   unsigned num_counters;
   unsigned selectors[SI_QUERY_MAX_COUNTERS];
};

struct si_query_counter {
   unsigned base;
   unsigned qwords;
   unsigned stride; /* in uint64s */
};

struct si_query_pc {
   struct si_query b;
   struct si_query_buffer buffer;

   /* Size of the results in memory, in bytes. */
   unsigned result_size;

   unsigned shaders;
   unsigned num_counters;
   struct si_query_counter *counters;
   struct si_query_group *groups;
};

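/* Per-block register descriptions. The register offsets and selector layouts
 * are shared by GFX7 through GFX9 (hence the cik_ prefix); only the selector
 * and instance counts in the per-generation tables below differ. */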
static struct si_pc_block_base cik_CB = {
   .name = "CB",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS,

   .select0 = R_037000_CB_PERFCOUNTER_FILTER,
   .counter0_lo = R_035018_CB_PERFCOUNTER0_LO,
   .num_multi = 1,
   .num_prelude = 1,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static unsigned cik_CPC_select[] = {
   R_036024_CPC_PERFCOUNTER0_SELECT,
   R_036010_CPC_PERFCOUNTER0_SELECT1,
   R_03600C_CPC_PERFCOUNTER1_SELECT,
};
static struct si_pc_block_base cik_CPC = {
   .name = "CPC",
   .num_counters = 2,

   .select = cik_CPC_select,
   .counter0_lo = R_034018_CPC_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_CUSTOM | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_CPF = {
   .name = "CPF",
   .num_counters = 2,

   .select0 = R_03601C_CPF_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034028_CPF_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_ALTERNATE | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_CPG = {
   .name = "CPG",
   .num_counters = 2,

   .select0 = R_036008_CPG_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034008_CPG_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_ALTERNATE | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_DB = {
   .name = "DB",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS,

   .select0 = R_037100_DB_PERFCOUNTER0_SELECT,
   .counter0_lo = R_035100_DB_PERFCOUNTER0_LO,
   .num_multi = 3, // really only 2, but there's a gap between registers
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_GDS = {
   .name = "GDS",
   .num_counters = 4,

   .select0 = R_036A00_GDS_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034A00_GDS_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_TAIL,
};

static unsigned cik_GRBM_counters[] = {
   R_034100_GRBM_PERFCOUNTER0_LO,
   R_03410C_GRBM_PERFCOUNTER1_LO,
};
static struct si_pc_block_base cik_GRBM = {
   .name = "GRBM",
   .num_counters = 2,

   .select0 = R_036100_GRBM_PERFCOUNTER0_SELECT,
   .counters = cik_GRBM_counters,
};

static struct si_pc_block_base cik_GRBMSE = {
   .name = "GRBMSE",
   .num_counters = 4,

   .select0 = R_036108_GRBM_SE0_PERFCOUNTER_SELECT,
   .counter0_lo = R_034114_GRBM_SE0_PERFCOUNTER_LO,
};

static struct si_pc_block_base cik_IA = {
   .name = "IA",
   .num_counters = 4,

   .select0 = R_036210_IA_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034220_IA_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_PA_SC = {
   .name = "PA_SC",
   .num_counters = 8,
   .flags = SI_PC_BLOCK_SE,

   .select0 = R_036500_PA_SC_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034500_PA_SC_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_ALTERNATE,
};

/* According to docs, PA_SU counters are only 48 bits wide. */
static struct si_pc_block_base cik_PA_SU = {
   .name = "PA_SU",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE,

   .select0 = R_036400_PA_SU_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034400_PA_SU_PERFCOUNTER0_LO,
   .num_multi = 2,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_SPI = {
   .name = "SPI",
   .num_counters = 6,
   .flags = SI_PC_BLOCK_SE,

   .select0 = R_036600_SPI_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034604_SPI_PERFCOUNTER0_LO,
   .num_multi = 4,
   .layout = SI_PC_MULTI_BLOCK,
};

static struct si_pc_block_base cik_SQ = {
   .name = "SQ",
   .num_counters = 16,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_SHADER,

   .select0 = R_036700_SQ_PERFCOUNTER0_SELECT,
   .select_or = S_036700_SQC_BANK_MASK(15) | S_036700_SQC_CLIENT_MASK(15) | S_036700_SIMD_MASK(15),
   .counter0_lo = R_034700_SQ_PERFCOUNTER0_LO,
};

static struct si_pc_block_base cik_SX = {
   .name = "SX",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE,

   .select0 = R_036900_SX_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034900_SX_PERFCOUNTER0_LO,
   .num_multi = 2,
   .layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_TA = {
   .name = "TA",
   .num_counters = 2,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED,

   .select0 = R_036B00_TA_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034B00_TA_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TD = {
   .name = "TD",
   .num_counters = 2,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED,

   .select0 = R_036C00_TD_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034C00_TD_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCA = {
   .name = "TCA",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_INSTANCE_GROUPS,

   .select0 = R_036E40_TCA_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034E40_TCA_PERFCOUNTER0_LO,
   .num_multi = 2,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCC = {
   .name = "TCC",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_INSTANCE_GROUPS,

   .select0 = R_036E00_TCC_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034E00_TCC_PERFCOUNTER0_LO,
   .num_multi = 2,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCP = {
   .name = "TCP",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED,

   .select0 = R_036D00_TCP_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034D00_TCP_PERFCOUNTER0_LO,
   .num_multi = 2,
   .layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_VGT = {
   .name = "VGT",
   .num_counters = 4,
   .flags = SI_PC_BLOCK_SE,

   .select0 = R_036230_VGT_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034240_VGT_PERFCOUNTER0_LO,
   .num_multi = 1,
   .layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_WD = {
   .name = "WD",
   .num_counters = 4,

   .select0 = R_036200_WD_PERFCOUNTER0_SELECT,
   .counter0_lo = R_034200_WD_PERFCOUNTER0_LO,
};

static struct si_pc_block_base cik_MC = {
   .name = "MC",
   .num_counters = 4,

   .layout = SI_PC_FAKE,
};

static struct si_pc_block_base cik_SRBM = {
   .name = "SRBM",
   .num_counters = 2,

   .layout = SI_PC_FAKE,
};

/* Both the number of instances and the number of selectors vary between chips
 * of the same class. We only differentiate by class here and simply expose the
 * maximum number over all chips in a class.
 *
 * Unfortunately, GPUPerfStudio uses the order of performance counter groups
 * blindly once it believes it has identified the hardware, so the order of
 * blocks here matters.
 */
static struct si_pc_block_gfxdescr groups_CIK[] = {
   {&cik_CB, 226}, {&cik_CPF, 17}, {&cik_DB, 257}, {&cik_GRBM, 34}, {&cik_GRBMSE, 15},
   {&cik_PA_SU, 153}, {&cik_PA_SC, 395}, {&cik_SPI, 186}, {&cik_SQ, 252}, {&cik_SX, 32},
   {&cik_TA, 111, 11}, {&cik_TCA, 39, 2}, {&cik_TCC, 160}, {&cik_TD, 55, 11}, {&cik_TCP, 154, 11},
   {&cik_GDS, 121}, {&cik_VGT, 140}, {&cik_IA, 22}, {&cik_MC, 22}, {&cik_SRBM, 19},
   {&cik_WD, 22}, {&cik_CPG, 46}, {&cik_CPC, 22},
};

static struct si_pc_block_gfxdescr groups_VI[] = {
   {&cik_CB, 405}, {&cik_CPF, 19}, {&cik_DB, 257}, {&cik_GRBM, 34}, {&cik_GRBMSE, 15},
   {&cik_PA_SU, 154}, {&cik_PA_SC, 397}, {&cik_SPI, 197}, {&cik_SQ, 273}, {&cik_SX, 34},
   {&cik_TA, 119, 16}, {&cik_TCA, 35, 2}, {&cik_TCC, 192}, {&cik_TD, 55, 16}, {&cik_TCP, 180, 16},
   {&cik_GDS, 121}, {&cik_VGT, 147}, {&cik_IA, 24}, {&cik_MC, 22}, {&cik_SRBM, 27},
   {&cik_WD, 37}, {&cik_CPG, 48}, {&cik_CPC, 24},
};

static struct si_pc_block_gfxdescr groups_gfx9[] = {
   {&cik_CB, 438}, {&cik_CPF, 32}, {&cik_DB, 328}, {&cik_GRBM, 38}, {&cik_GRBMSE, 16},
   {&cik_PA_SU, 292}, {&cik_PA_SC, 491}, {&cik_SPI, 196}, {&cik_SQ, 374}, {&cik_SX, 208},
   {&cik_TA, 119, 16}, {&cik_TCA, 35, 2}, {&cik_TCC, 256}, {&cik_TD, 57, 16}, {&cik_TCP, 85, 16},
   {&cik_GDS, 121}, {&cik_VGT, 148}, {&cik_IA, 32}, {&cik_WD, 58}, {&cik_CPG, 59},
   {&cik_CPC, 35},
};

static bool si_pc_block_has_per_se_groups(const struct si_perfcounters *pc,
                                          const struct si_pc_block *block)
{
   return block->b->b->flags & SI_PC_BLOCK_SE_GROUPS ||
          (block->b->b->flags & SI_PC_BLOCK_SE && pc->separate_se);
}

static bool si_pc_block_has_per_instance_groups(const struct si_perfcounters *pc,
                                                const struct si_pc_block *block)
{
   return block->b->b->flags & SI_PC_BLOCK_INSTANCE_GROUPS ||
          (block->num_instances > 1 && pc->separate_instance);
}

static struct si_pc_block *lookup_counter(struct si_perfcounters *pc, unsigned index,
                                          unsigned *base_gid, unsigned *sub_index)
{
   struct si_pc_block *block = pc->blocks;
   unsigned bid;

   *base_gid = 0;
   for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
      unsigned total = block->num_groups * block->b->selectors;

      if (index < total) {
         *sub_index = index;
         return block;
      }

      index -= total;
      *base_gid += block->num_groups;
   }

   return NULL;
}

static struct si_pc_block *lookup_group(struct si_perfcounters *pc, unsigned *index)
{
   unsigned bid;
   struct si_pc_block *block = pc->blocks;

   for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
      if (*index < block->num_groups)
         return block;
      *index -= block->num_groups;
   }

   return NULL;
}

static void si_pc_emit_instance(struct si_context *sctx, int se, int instance)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   unsigned value = S_030800_SH_BROADCAST_WRITES(1);

   if (se >= 0) {
      value |= S_030800_SE_INDEX(se);
   } else {
      value |= S_030800_SE_BROADCAST_WRITES(1);
   }

   if (instance >= 0) {
      value |= S_030800_INSTANCE_INDEX(instance);
   } else {
      value |= S_030800_INSTANCE_BROADCAST_WRITES(1);
   }

   radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX, value);
}

static void si_pc_emit_shaders(struct si_context *sctx, unsigned shaders)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   radeon_set_uconfig_reg_seq(cs, R_036780_SQ_PERFCOUNTER_CTRL, 2);
   radeon_emit(cs, shaders & 0x7f);
   radeon_emit(cs, 0xffffffff);
}

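/* Program the per-counter event selectors of a block. The exact register
 * writes depend on the block's layout: the secondary (MULTI) selector dwords
 * may be interleaved with the primary ones, follow them as a block, trail
 * after all primary dwords, or live at free-form addresses (see enum
 * si_pc_reg_layout). SI_PC_FAKE blocks have no selectors at all. */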
static void si_pc_emit_select(struct si_context *sctx, struct si_pc_block *block, unsigned count,
                              unsigned *selectors)
{
   struct si_pc_block_base *regs = block->b->b;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   unsigned idx;
   unsigned layout_multi = regs->layout & SI_PC_MULTI_MASK;
   unsigned dw;

   assert(count <= regs->num_counters);

   if (regs->layout & SI_PC_FAKE)
      return;

   if (layout_multi == SI_PC_MULTI_BLOCK) {
      assert(!(regs->layout & SI_PC_REG_REVERSE));

      dw = count + regs->num_prelude;
      if (count >= regs->num_multi)
         dw += regs->num_multi;
      radeon_set_uconfig_reg_seq(cs, regs->select0, dw);
      for (idx = 0; idx < regs->num_prelude; ++idx)
         radeon_emit(cs, 0);
      for (idx = 0; idx < MIN2(count, regs->num_multi); ++idx)
         radeon_emit(cs, selectors[idx] | regs->select_or);

      if (count < regs->num_multi) {
         unsigned select1 = regs->select0 + 4 * regs->num_multi;
         radeon_set_uconfig_reg_seq(cs, select1, count);
      }

      for (idx = 0; idx < MIN2(count, regs->num_multi); ++idx)
         radeon_emit(cs, 0);

      if (count > regs->num_multi) {
         for (idx = regs->num_multi; idx < count; ++idx)
            radeon_emit(cs, selectors[idx] | regs->select_or);
      }
   } else if (layout_multi == SI_PC_MULTI_TAIL) {
      unsigned select1, select1_count;

      assert(!(regs->layout & SI_PC_REG_REVERSE));

      radeon_set_uconfig_reg_seq(cs, regs->select0, count + regs->num_prelude);
      for (idx = 0; idx < regs->num_prelude; ++idx)
         radeon_emit(cs, 0);
      for (idx = 0; idx < count; ++idx)
         radeon_emit(cs, selectors[idx] | regs->select_or);

      select1 = regs->select0 + 4 * regs->num_counters;
      select1_count = MIN2(count, regs->num_multi);
      radeon_set_uconfig_reg_seq(cs, select1, select1_count);
      for (idx = 0; idx < select1_count; ++idx)
         radeon_emit(cs, 0);
   } else if (layout_multi == SI_PC_MULTI_CUSTOM) {
      unsigned *reg = regs->select;
      for (idx = 0; idx < count; ++idx) {
         radeon_set_uconfig_reg(cs, *reg++, selectors[idx] | regs->select_or);
         if (idx < regs->num_multi)
            radeon_set_uconfig_reg(cs, *reg++, 0);
      }
   } else {
      assert(layout_multi == SI_PC_MULTI_ALTERNATE);

      unsigned reg_base = regs->select0;
      unsigned reg_count = count + MIN2(count, regs->num_multi);
      reg_count += regs->num_prelude;

      if (!(regs->layout & SI_PC_REG_REVERSE)) {
         radeon_set_uconfig_reg_seq(cs, reg_base, reg_count);

         for (idx = 0; idx < regs->num_prelude; ++idx)
            radeon_emit(cs, 0);
         for (idx = 0; idx < count; ++idx) {
            radeon_emit(cs, selectors[idx] | regs->select_or);
            if (idx < regs->num_multi)
               radeon_emit(cs, 0);
         }
      } else {
         reg_base -= (reg_count - 1) * 4;
         radeon_set_uconfig_reg_seq(cs, reg_base, reg_count);

         for (idx = count; idx > 0; --idx) {
            if (idx <= regs->num_multi)
               radeon_emit(cs, 0);
            radeon_emit(cs, selectors[idx - 1] | regs->select_or);
         }
         for (idx = 0; idx < regs->num_prelude; ++idx)
            radeon_emit(cs, 0);
      }
   }
}

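/* Arm the counters. The immediate COPY_DATA primes a fence dword at va with
 * 1; si_pc_emit_stop later overwrites it with 0 at bottom-of-pipe and waits
 * for that value, so the counters are only sampled and stopped once the GPU
 * has drained the preceding work. */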
static void si_pc_emit_start(struct si_context *sctx, struct si_resource *buffer, uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   si_cp_copy_data(sctx, sctx->gfx_cs, COPY_DATA_DST_MEM, buffer, va - buffer->gpu_address,
                   COPY_DATA_IMM, NULL, 1);

   radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
                          S_036020_PERFMON_STATE(V_036020_DISABLE_AND_RESET));
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
   radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_START) | EVENT_INDEX(0));
   radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
                          S_036020_PERFMON_STATE(V_036020_START_COUNTING));
}

/* Note: The buffer was already added in si_pc_emit_start, so we don't have to
 * do it again here. */
static void si_pc_emit_stop(struct si_context *sctx, struct si_resource *buffer, uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0, EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                     EOP_DATA_SEL_VALUE_32BIT, buffer, va, 0, SI_NOT_QUERY);
   si_cp_wait_mem(sctx, cs, va, 0, 0xffffffff, WAIT_REG_MEM_EQUAL);

   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
   radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_SAMPLE) | EVENT_INDEX(0));
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
   radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_STOP) | EVENT_INDEX(0));
   radeon_set_uconfig_reg(
      cs, R_036020_CP_PERFMON_CNTL,
      S_036020_PERFMON_STATE(V_036020_STOP_COUNTING) | S_036020_PERFMON_SAMPLE_ENABLE(1));
}

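/* Copy the counter values from the per-block registers into the results
 * buffer, one 64-bit slot per counter. COPY_DATA_PERF reads the privileged
 * counter registers; SI_PC_FAKE blocks have no registers, so zeros are
 * written instead to keep the result layout intact. */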
static void si_pc_emit_read(struct si_context *sctx, struct si_pc_block *block, unsigned count,
                            uint64_t va)
{
   struct si_pc_block_base *regs = block->b->b;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   unsigned idx;
   unsigned reg = regs->counter0_lo;
   unsigned reg_delta = 8;

   if (!(regs->layout & SI_PC_FAKE)) {
      if (regs->layout & SI_PC_REG_REVERSE)
         reg_delta = -reg_delta;

      for (idx = 0; idx < count; ++idx) {
         if (regs->counters)
            reg = regs->counters[idx];

         radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
         radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
                            COPY_DATA_COUNT_SEL); /* 64 bits */
         radeon_emit(cs, reg >> 2);
         radeon_emit(cs, 0); /* unused */
         radeon_emit(cs, va);
         radeon_emit(cs, va >> 32);
         va += sizeof(uint64_t);
         reg += reg_delta;
      }
   } else {
      for (idx = 0; idx < count; ++idx) {
         radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
         radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
                            COPY_DATA_COUNT_SEL);
         radeon_emit(cs, 0); /* immediate */
         radeon_emit(cs, 0);
         radeon_emit(cs, va);
         radeon_emit(cs, va >> 32);
         va += sizeof(uint64_t);
      }
   }
}

static void si_pc_query_destroy(struct si_context *sctx, struct si_query *squery)
{
   struct si_query_pc *query = (struct si_query_pc *)squery;

   while (query->groups) {
      struct si_query_group *group = query->groups;
      query->groups = group->next;
      FREE(group);
   }

   FREE(query->counters);

   si_query_buffer_destroy(sctx->screen, &query->buffer);
   FREE(query);
}

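/* (Re)program all selected counters and start them. Called from
 * si_pc_query_begin and again whenever the query machinery resumes a
 * suspended query. */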
static void si_pc_query_resume(struct si_context *sctx, struct si_query *squery)
{
   struct si_query_pc *query = (struct si_query_pc *)squery;
   int current_se = -1;
   int current_instance = -1;

   if (!si_query_buffer_alloc(sctx, &query->buffer, NULL, query->result_size))
      return;
   si_need_gfx_cs_space(sctx);

   if (query->shaders)
      si_pc_emit_shaders(sctx, query->shaders);

   for (struct si_query_group *group = query->groups; group; group = group->next) {
      struct si_pc_block *block = group->block;

      if (group->se != current_se || group->instance != current_instance) {
         current_se = group->se;
         current_instance = group->instance;
         si_pc_emit_instance(sctx, group->se, group->instance);
      }

      si_pc_emit_select(sctx, block, group->num_counters, group->selectors);
   }

   if (current_se != -1 || current_instance != -1)
      si_pc_emit_instance(sctx, -1, -1);

   uint64_t va = query->buffer.buf->gpu_address + query->buffer.results_end;
   si_pc_emit_start(sctx, query->buffer.buf, va);
}

static void si_pc_query_suspend(struct si_context *sctx, struct si_query *squery)
{
   struct si_query_pc *query = (struct si_query_pc *)squery;

   if (!query->buffer.buf)
      return;

   uint64_t va = query->buffer.buf->gpu_address + query->buffer.results_end;
   query->buffer.results_end += query->result_size;

   si_pc_emit_stop(sctx, query->buffer.buf, va);

   for (struct si_query_group *group = query->groups; group; group = group->next) {
      struct si_pc_block *block = group->block;
      unsigned se = group->se >= 0 ? group->se : 0;
      unsigned se_end = se + 1;

      if ((block->b->b->flags & SI_PC_BLOCK_SE) && (group->se < 0))
         se_end = sctx->screen->info.max_se;

      do {
         unsigned instance = group->instance >= 0 ? group->instance : 0;

         do {
            si_pc_emit_instance(sctx, se, instance);
            si_pc_emit_read(sctx, block, group->num_counters, va);
            va += sizeof(uint64_t) * group->num_counters;
         } while (group->instance < 0 && ++instance < block->num_instances);
      } while (++se < se_end);
   }

   si_pc_emit_instance(sctx, -1, -1);
}

static bool si_pc_query_begin(struct si_context *ctx, struct si_query *squery)
{
   struct si_query_pc *query = (struct si_query_pc *)squery;

   si_query_buffer_reset(ctx, &query->buffer);

   list_addtail(&query->b.active_list, &ctx->active_queries);
   ctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;

   si_pc_query_resume(ctx, squery);

   return true;
}

static bool si_pc_query_end(struct si_context *ctx, struct si_query *squery)
{
   struct si_query_pc *query = (struct si_query_pc *)squery;

   si_pc_query_suspend(ctx, squery);

   list_del(&squery->active_list);
   ctx->num_cs_dw_queries_suspend -= squery->num_cs_dw_suspend;

   return query->buffer.buf != NULL;
}

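/* Accumulate one mapped result snapshot into the caller-visible result.
 * Each slot is stored as a 64-bit word, but only the low 32 bits are summed;
 * the upper half is discarded by the uint32_t truncation below. */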
static void si_pc_query_add_result(struct si_query_pc *query, void *buffer,
                                   union pipe_query_result *result)
{
   uint64_t *results = buffer;
   unsigned i, j;

   for (i = 0; i < query->num_counters; ++i) {
      struct si_query_counter *counter = &query->counters[i];

      for (j = 0; j < counter->qwords; ++j) {
         uint32_t value = results[counter->base + j * counter->stride];
         result->batch[i].u64 += value;
      }
   }
}

static bool si_pc_query_get_result(struct si_context *sctx, struct si_query *squery, bool wait,
                                   union pipe_query_result *result)
{
   struct si_query_pc *query = (struct si_query_pc *)squery;

   memset(result, 0, sizeof(result->batch[0]) * query->num_counters);

   for (struct si_query_buffer *qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned usage = PIPE_TRANSFER_READ | (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
      unsigned results_base = 0;
      void *map;

      if (squery->b.flushed)
         map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
      else
         map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);

      if (!map)
         return false;

      while (results_base != qbuf->results_end) {
         si_pc_query_add_result(query, map + results_base, result);
         results_base += query->result_size;
      }
   }

   return true;
}

static const struct si_query_ops batch_query_ops = {
   .destroy = si_pc_query_destroy,
   .begin = si_pc_query_begin,
   .end = si_pc_query_end,
   .get_result = si_pc_query_get_result,

   .suspend = si_pc_query_suspend,
   .resume = si_pc_query_resume,
};

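/* Find or create the group state for (block, sub_gid). The sub-group id
 * decomposes, from most to least significant, into shader type, then SE,
 * then instance; this is the same ordering si_init_block_names uses when it
 * generates group names. */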
static struct si_query_group *get_group_state(struct si_screen *screen, struct si_query_pc *query,
                                              struct si_pc_block *block, unsigned sub_gid)
{
   struct si_query_group *group = query->groups;

   while (group) {
      if (group->block == block && group->sub_gid == sub_gid)
         return group;
      group = group->next;
   }

   group = CALLOC_STRUCT(si_query_group);
   if (!group)
      return NULL;

   group->block = block;
   group->sub_gid = sub_gid;

   if (block->b->b->flags & SI_PC_BLOCK_SHADER) {
      unsigned sub_gids = block->num_instances;
      unsigned shader_id;
      unsigned shaders;
      unsigned query_shaders;

      if (si_pc_block_has_per_se_groups(screen->perfcounters, block))
         sub_gids = sub_gids * screen->info.max_se;
      shader_id = sub_gid / sub_gids;
      sub_gid = sub_gid % sub_gids;

      shaders = si_pc_shader_type_bits[shader_id];

      query_shaders = query->shaders & ~SI_PC_SHADERS_WINDOWING;
      if (query_shaders && query_shaders != shaders) {
         fprintf(stderr, "si_perfcounter: incompatible shader groups\n");
         FREE(group);
         return NULL;
      }
      query->shaders = shaders;
   }

   if (block->b->b->flags & SI_PC_BLOCK_SHADER_WINDOWED && !query->shaders) {
      // A non-zero value in query->shaders ensures that the shader
      // masking is reset unless the user explicitly requests one.
      query->shaders = SI_PC_SHADERS_WINDOWING;
   }

   if (si_pc_block_has_per_se_groups(screen->perfcounters, block)) {
      group->se = sub_gid / block->num_instances;
      sub_gid = sub_gid % block->num_instances;
   } else {
      group->se = -1;
   }

   if (si_pc_block_has_per_instance_groups(screen->perfcounters, block)) {
      group->instance = sub_gid;
   } else {
      group->instance = -1;
   }

   group->next = query->groups;
   query->groups = group;

   return group;
}

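/* Build a batch query from an array of SI_QUERY_FIRST_PERFCOUNTER-relative
 * query types. This runs in two passes: the first collects the selected
 * counters into per-(block, sub-group) si_query_group lists and validates
 * the per-block counter limits, the second computes each query's base and
 * stride within the results buffer.
 *
 * A rough usage sketch from the gallium side (the indices are hypothetical):
 *
 *    unsigned types[2] = {SI_QUERY_FIRST_PERFCOUNTER + 0,
 *                         SI_QUERY_FIRST_PERFCOUNTER + 1};
 *    struct pipe_query *q = ctx->create_batch_query(ctx, 2, types);
 *    ctx->begin_query(ctx, q);
 *    ... draw ...
 *    ctx->end_query(ctx, q);
 */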
struct pipe_query *si_create_batch_query(struct pipe_context *ctx, unsigned num_queries,
                                         unsigned *query_types)
{
   struct si_screen *screen = (struct si_screen *)ctx->screen;
   struct si_perfcounters *pc = screen->perfcounters;
   struct si_pc_block *block;
   struct si_query_group *group;
   struct si_query_pc *query;
   unsigned base_gid, sub_gid, sub_index;
   unsigned i, j;

   if (!pc)
      return NULL;

   query = CALLOC_STRUCT(si_query_pc);
   if (!query)
      return NULL;

   query->b.ops = &batch_query_ops;

   query->num_counters = num_queries;

   /* Collect selectors per group */
   for (i = 0; i < num_queries; ++i) {
      unsigned sub_gid;

      if (query_types[i] < SI_QUERY_FIRST_PERFCOUNTER)
         goto error;

      block =
         lookup_counter(pc, query_types[i] - SI_QUERY_FIRST_PERFCOUNTER, &base_gid, &sub_index);
      if (!block)
         goto error;

      sub_gid = sub_index / block->b->selectors;
      sub_index = sub_index % block->b->selectors;

      group = get_group_state(screen, query, block, sub_gid);
      if (!group)
         goto error;

      if (group->num_counters >= block->b->b->num_counters) {
         fprintf(stderr, "perfcounter group %s: too many selected\n", block->b->b->name);
         goto error;
      }
      group->selectors[group->num_counters] = sub_index;
      ++group->num_counters;
   }

   /* Compute result bases and CS size per group */
   query->b.num_cs_dw_suspend = pc->num_stop_cs_dwords;
   query->b.num_cs_dw_suspend += pc->num_instance_cs_dwords;

   i = 0;
   for (group = query->groups; group; group = group->next) {
      struct si_pc_block *block = group->block;
      unsigned read_dw;
      unsigned instances = 1;

      if ((block->b->b->flags & SI_PC_BLOCK_SE) && group->se < 0)
         instances = screen->info.max_se;
      if (group->instance < 0)
         instances *= block->num_instances;

      group->result_base = i;
      query->result_size += sizeof(uint64_t) * instances * group->num_counters;
      i += instances * group->num_counters;

      read_dw = 6 * group->num_counters;
      query->b.num_cs_dw_suspend += instances * read_dw;
      query->b.num_cs_dw_suspend += instances * pc->num_instance_cs_dwords;
   }

   if (query->shaders) {
      if (query->shaders == SI_PC_SHADERS_WINDOWING)
         query->shaders = 0xffffffff;
   }

   /* Map user-supplied query array to result indices */
   query->counters = CALLOC(num_queries, sizeof(*query->counters));
   for (i = 0; i < num_queries; ++i) {
      struct si_query_counter *counter = &query->counters[i];
      struct si_pc_block *block;

      block =
         lookup_counter(pc, query_types[i] - SI_QUERY_FIRST_PERFCOUNTER, &base_gid, &sub_index);

      sub_gid = sub_index / block->b->selectors;
      sub_index = sub_index % block->b->selectors;

      group = get_group_state(screen, query, block, sub_gid);
      assert(group != NULL);

      for (j = 0; j < group->num_counters; ++j) {
         if (group->selectors[j] == sub_index)
            break;
      }

      counter->base = group->result_base + j;
      counter->stride = group->num_counters;

      counter->qwords = 1;
      if ((block->b->b->flags & SI_PC_BLOCK_SE) && group->se < 0)
         counter->qwords = screen->info.max_se;
      if (group->instance < 0)
         counter->qwords *= block->num_instances;
   }

   return (struct pipe_query *)query;

error:
   si_pc_query_destroy((struct si_context *)ctx, &query->b);
   return NULL;
}

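/* Generate the group and selector name tables for a block. Group names are
 * the block name plus optional shader/SE/instance decorations, e.g. a
 * per-SE, per-instance "TA" block yields groups like "TA0_5"; selector names
 * append a zero-padded selector index, e.g. "TA0_5_042". */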
static bool si_init_block_names(struct si_screen *screen, struct si_pc_block *block)
{
   bool per_instance_groups = si_pc_block_has_per_instance_groups(screen->perfcounters, block);
   bool per_se_groups = si_pc_block_has_per_se_groups(screen->perfcounters, block);
   unsigned i, j, k;
   unsigned groups_shader = 1, groups_se = 1, groups_instance = 1;
   unsigned namelen;
   char *groupname;
   char *p;

   if (per_instance_groups)
      groups_instance = block->num_instances;
   if (per_se_groups)
      groups_se = screen->info.max_se;
   if (block->b->b->flags & SI_PC_BLOCK_SHADER)
      groups_shader = ARRAY_SIZE(si_pc_shader_type_bits);

   namelen = strlen(block->b->b->name);
   block->group_name_stride = namelen + 1;
   if (block->b->b->flags & SI_PC_BLOCK_SHADER)
      block->group_name_stride += 3;
   if (per_se_groups) {
      assert(groups_se <= 10);
      block->group_name_stride += 1;

      if (per_instance_groups)
         block->group_name_stride += 1;
   }
   if (per_instance_groups) {
      assert(groups_instance <= 100);
      block->group_name_stride += 2;
   }

   block->group_names = MALLOC(block->num_groups * block->group_name_stride);
   if (!block->group_names)
      return false;

   groupname = block->group_names;
   for (i = 0; i < groups_shader; ++i) {
      const char *shader_suffix = si_pc_shader_type_suffixes[i];
      unsigned shaderlen = strlen(shader_suffix);
      for (j = 0; j < groups_se; ++j) {
         for (k = 0; k < groups_instance; ++k) {
            strcpy(groupname, block->b->b->name);
            p = groupname + namelen;

            if (block->b->b->flags & SI_PC_BLOCK_SHADER) {
               strcpy(p, shader_suffix);
               p += shaderlen;
            }

            if (per_se_groups) {
               p += sprintf(p, "%d", j);
               if (per_instance_groups)
                  *p++ = '_';
            }

            if (per_instance_groups)
               p += sprintf(p, "%d", k);

            groupname += block->group_name_stride;
         }
      }
   }

   assert(block->b->selectors <= 1000);
   block->selector_name_stride = block->group_name_stride + 4;
   block->selector_names =
      MALLOC(block->num_groups * block->b->selectors * block->selector_name_stride);
   if (!block->selector_names)
      return false;

   groupname = block->group_names;
   p = block->selector_names;
   for (i = 0; i < block->num_groups; ++i) {
      for (j = 0; j < block->b->selectors; ++j) {
         sprintf(p, "%s_%03d", groupname, j);
         p += block->selector_name_stride;
      }
      groupname += block->group_name_stride;
   }

   return true;
}

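/* Driver-query entry point. With info == NULL this returns the total number
 * of perfcounter queries; otherwise it fills *info for the query at index. */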
int si_get_perfcounter_info(struct si_screen *screen, unsigned index,
                            struct pipe_driver_query_info *info)
{
   struct si_perfcounters *pc = screen->perfcounters;
   struct si_pc_block *block;
   unsigned base_gid, sub;

   if (!pc)
      return 0;

   if (!info) {
      unsigned bid, num_queries = 0;

      for (bid = 0; bid < pc->num_blocks; ++bid) {
         num_queries += pc->blocks[bid].b->selectors * pc->blocks[bid].num_groups;
      }

      return num_queries;
   }

   block = lookup_counter(pc, index, &base_gid, &sub);
   if (!block)
      return 0;

   if (!block->selector_names) {
      if (!si_init_block_names(screen, block))
         return 0;
   }
   info->name = block->selector_names + sub * block->selector_name_stride;
   info->query_type = SI_QUERY_FIRST_PERFCOUNTER + index;
   info->max_value.u64 = 0;
   info->type = PIPE_DRIVER_QUERY_TYPE_UINT64;
   info->result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE;
   info->group_id = base_gid + sub / block->b->selectors;
   info->flags = PIPE_DRIVER_QUERY_FLAG_BATCH;
   if (sub > 0 && sub + 1 < block->b->selectors * block->num_groups)
      info->flags |= PIPE_DRIVER_QUERY_FLAG_DONT_LIST;
   return 1;
}

int si_get_perfcounter_group_info(struct si_screen *screen, unsigned index,
                                  struct pipe_driver_query_group_info *info)
{
   struct si_perfcounters *pc = screen->perfcounters;
   struct si_pc_block *block;

   if (!pc)
      return 0;

   if (!info)
      return pc->num_groups;

   block = lookup_group(pc, &index);
   if (!block)
      return 0;

   if (!block->group_names) {
      if (!si_init_block_names(screen, block))
         return 0;
   }
   info->name = block->group_names + index * block->group_name_stride;
   info->num_queries = block->b->selectors;
   info->max_active_queries = block->b->b->num_counters;
   return 1;
}

void si_destroy_perfcounters(struct si_screen *screen)
{
   struct si_perfcounters *pc = screen->perfcounters;
   unsigned i;

   if (!pc)
      return;

   for (i = 0; i < pc->num_blocks; ++i) {
      FREE(pc->blocks[i].group_names);
      FREE(pc->blocks[i].selector_names);
   }
   FREE(pc->blocks);
   FREE(pc);
   screen->perfcounters = NULL;
}

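/* Screen initialization: pick the block list for the chip class and derive
 * the runtime per-block state (instance and group counts). Instance counts
 * for CB/DB/TCC/IA depend on the actual chip configuration rather than the
 * class-wide descriptor. */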
void si_init_perfcounters(struct si_screen *screen)
{
   struct si_perfcounters *pc;
   const struct si_pc_block_gfxdescr *blocks;
   unsigned num_blocks;
   unsigned i;

   switch (screen->info.chip_class) {
   case GFX7:
      blocks = groups_CIK;
      num_blocks = ARRAY_SIZE(groups_CIK);
      break;
   case GFX8:
      blocks = groups_VI;
      num_blocks = ARRAY_SIZE(groups_VI);
      break;
   case GFX9:
      blocks = groups_gfx9;
      num_blocks = ARRAY_SIZE(groups_gfx9);
      break;
   case GFX6:
   default:
      return; /* not implemented */
   }

   if (screen->info.max_sh_per_se != 1) {
      /* This should not happen on non-GFX6 chips. */
      fprintf(stderr,
              "si_init_perfcounters: max_sh_per_se = %d not "
              "supported (inaccurate performance counters)\n",
              screen->info.max_sh_per_se);
   }

   screen->perfcounters = pc = CALLOC_STRUCT(si_perfcounters);
   if (!pc)
      return;

   pc->num_stop_cs_dwords = 14 + si_cp_write_fence_dwords(screen);
   pc->num_instance_cs_dwords = 3;

   pc->separate_se = debug_get_bool_option("RADEON_PC_SEPARATE_SE", false);
   pc->separate_instance = debug_get_bool_option("RADEON_PC_SEPARATE_INSTANCE", false);

   pc->blocks = CALLOC(num_blocks, sizeof(struct si_pc_block));
   if (!pc->blocks)
      goto error;
   pc->num_blocks = num_blocks;

   for (i = 0; i < num_blocks; ++i) {
      struct si_pc_block *block = &pc->blocks[i];
      block->b = &blocks[i];
      block->num_instances = MAX2(1, block->b->instances);

      if (!strcmp(block->b->b->name, "CB") || !strcmp(block->b->b->name, "DB"))
         block->num_instances = screen->info.max_se;
      else if (!strcmp(block->b->b->name, "TCC"))
         block->num_instances = screen->info.num_tcc_blocks;
      else if (!strcmp(block->b->b->name, "IA"))
         block->num_instances = MAX2(1, screen->info.max_se / 2);

      if (si_pc_block_has_per_instance_groups(pc, block)) {
         block->num_groups = block->num_instances;
      } else {
         block->num_groups = 1;
      }

      if (si_pc_block_has_per_se_groups(pc, block))
         block->num_groups *= screen->info.max_se;
      if (block->b->b->flags & SI_PC_BLOCK_SHADER)
         block->num_groups *= ARRAY_SIZE(si_pc_shader_type_bits);

      pc->num_groups += block->num_groups;
   }

   return;

error:
   si_destroy_perfcounters(screen);
}