/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 #include "radeon/r600_cs.h"
25 #include "radeon/r600_query.h"
26 #include "util/u_memory.h"
/* Layout of a counter block's selector/counter registers.
 * NOTE(review): the extraction dropped SI_PC_MULTI_TAIL, SI_PC_MULTI_MASK and
 * SI_PC_FAKE, all of which the code below references; values restored — the
 * visible fragments pin BLOCK=0, ALTERNATE=1, CUSTOM=3 and REVERSE=4.
 */
enum si_pc_reg_layout {
	/* All secondary selector dwords follow as one block after the primary
	 * selector dwords for the counters that have secondary selectors.
	 */
	SI_PC_MULTI_BLOCK = 0,

	/* Each secondary selector dword follows immediately afters the
	 * corresponding primary.
	 */
	SI_PC_MULTI_ALTERNATE = 1,

	/* All secondary selector dwords follow as one block after all primary
	 * selector dwords.
	 */
	SI_PC_MULTI_TAIL = 2,

	/* Free-form arrangement of selector registers. */
	SI_PC_MULTI_CUSTOM = 3,

	SI_PC_MULTI_MASK = 3,

	/* Registers are laid out in decreasing rather than increasing order. */
	SI_PC_REG_REVERSE = 4,

	/* Block has no readable hardware registers; results read back as 0. */
	SI_PC_FAKE = 8,
};
/* Static description of one hardware performance counter block.
 * NOTE(review): most fields were dropped by the extraction; every field below
 * is restored from its use elsewhere in this file (e.g. ->name in strcmp,
 * ->select_or OR'ed into selectors, ->counters[] indexed in emit_read).
 */
struct si_pc_block_base {
	const char *name;	/* block name exposed to the query API */
	unsigned num_counters;	/* number of counter register pairs */
	unsigned flags;		/* R600_PC_BLOCK_* capability flags */

	unsigned select_or;	/* bits OR'ed into every selector value written */
	unsigned select0;	/* first selector register (regular spacing) */
	unsigned counter0_lo;	/* first counter LO register (regular spacing) */
	unsigned *select;	/* explicit selector registers (SI_PC_MULTI_CUSTOM) */
	unsigned *counters;	/* explicit counter LO registers (irregular spacing) */
	unsigned num_multi;	/* counters that have a secondary selector */
	unsigned num_prelude;	/* leading selector dwords always written as 0 */
	unsigned layout;	/* enum si_pc_reg_layout bits */
};

/* Per-chip-class instantiation of a counter block. */
struct si_pc_block {
	struct si_pc_block_base *b;
	unsigned selectors;	/* number of selectable events */
	unsigned instances;	/* number of block instances */
};
/* The order is chosen to be compatible with GPUPerfStudio's hardcoding of
 * performance counter group IDs.
 */
static const char * const si_pc_shader_type_suffixes[] = {
	"", "_ES", "_GS", "_VS", "_PS", "_LS", "_HS", "_CS"
};
86 static const unsigned si_pc_shader_type_bits
[] = {
97 static struct si_pc_block_base cik_CB
= {
100 .flags
= R600_PC_BLOCK_SE
| R600_PC_BLOCK_INSTANCE_GROUPS
,
102 .select0
= R_037000_CB_PERFCOUNTER_FILTER
,
103 .counter0_lo
= R_035018_CB_PERFCOUNTER0_LO
,
106 .layout
= SI_PC_MULTI_ALTERNATE
,
109 static unsigned cik_CPC_select
[] = {
110 R_036024_CPC_PERFCOUNTER0_SELECT
,
111 R_036010_CPC_PERFCOUNTER0_SELECT1
,
112 R_03600C_CPC_PERFCOUNTER1_SELECT
,
114 static struct si_pc_block_base cik_CPC
= {
118 .select
= cik_CPC_select
,
119 .counter0_lo
= R_034018_CPC_PERFCOUNTER0_LO
,
121 .layout
= SI_PC_MULTI_CUSTOM
| SI_PC_REG_REVERSE
,
124 static struct si_pc_block_base cik_CPF
= {
128 .select0
= R_03601C_CPF_PERFCOUNTER0_SELECT
,
129 .counter0_lo
= R_034028_CPF_PERFCOUNTER0_LO
,
131 .layout
= SI_PC_MULTI_ALTERNATE
| SI_PC_REG_REVERSE
,
134 static struct si_pc_block_base cik_CPG
= {
138 .select0
= R_036008_CPG_PERFCOUNTER0_SELECT
,
139 .counter0_lo
= R_034008_CPG_PERFCOUNTER0_LO
,
141 .layout
= SI_PC_MULTI_ALTERNATE
| SI_PC_REG_REVERSE
,
144 static struct si_pc_block_base cik_DB
= {
147 .flags
= R600_PC_BLOCK_SE
| R600_PC_BLOCK_INSTANCE_GROUPS
,
149 .select0
= R_037100_DB_PERFCOUNTER0_SELECT
,
150 .counter0_lo
= R_035100_DB_PERFCOUNTER0_LO
,
151 .num_multi
= 3, // really only 2, but there's a gap between registers
152 .layout
= SI_PC_MULTI_ALTERNATE
,
155 static struct si_pc_block_base cik_GDS
= {
159 .select0
= R_036A00_GDS_PERFCOUNTER0_SELECT
,
160 .counter0_lo
= R_034A00_GDS_PERFCOUNTER0_LO
,
162 .layout
= SI_PC_MULTI_TAIL
,
165 static unsigned cik_GRBM_counters
[] = {
166 R_034100_GRBM_PERFCOUNTER0_LO
,
167 R_03410C_GRBM_PERFCOUNTER1_LO
,
169 static struct si_pc_block_base cik_GRBM
= {
173 .select0
= R_036100_GRBM_PERFCOUNTER0_SELECT
,
174 .counters
= cik_GRBM_counters
,
177 static struct si_pc_block_base cik_GRBMSE
= {
181 .select0
= R_036108_GRBM_SE0_PERFCOUNTER_SELECT
,
182 .counter0_lo
= R_034114_GRBM_SE0_PERFCOUNTER_LO
,
185 static struct si_pc_block_base cik_IA
= {
189 .select0
= R_036210_IA_PERFCOUNTER0_SELECT
,
190 .counter0_lo
= R_034220_IA_PERFCOUNTER0_LO
,
192 .layout
= SI_PC_MULTI_TAIL
,
195 static struct si_pc_block_base cik_PA_SC
= {
198 .flags
= R600_PC_BLOCK_SE
,
200 .select0
= R_036500_PA_SC_PERFCOUNTER0_SELECT
,
201 .counter0_lo
= R_034500_PA_SC_PERFCOUNTER0_LO
,
203 .layout
= SI_PC_MULTI_ALTERNATE
,
206 /* According to docs, PA_SU counters are only 48 bits wide. */
207 static struct si_pc_block_base cik_PA_SU
= {
210 .flags
= R600_PC_BLOCK_SE
,
212 .select0
= R_036400_PA_SU_PERFCOUNTER0_SELECT
,
213 .counter0_lo
= R_034400_PA_SU_PERFCOUNTER0_LO
,
215 .layout
= SI_PC_MULTI_ALTERNATE
,
218 static struct si_pc_block_base cik_SPI
= {
221 .flags
= R600_PC_BLOCK_SE
,
223 .select0
= R_036600_SPI_PERFCOUNTER0_SELECT
,
224 .counter0_lo
= R_034604_SPI_PERFCOUNTER0_LO
,
226 .layout
= SI_PC_MULTI_BLOCK
,
229 static struct si_pc_block_base cik_SQ
= {
232 .flags
= R600_PC_BLOCK_SE
| R600_PC_BLOCK_SHADER
,
234 .select0
= R_036700_SQ_PERFCOUNTER0_SELECT
,
235 .select_or
= S_036700_SQC_BANK_MASK(15) |
236 S_036700_SQC_CLIENT_MASK(15) |
237 S_036700_SIMD_MASK(15),
238 .counter0_lo
= R_034700_SQ_PERFCOUNTER0_LO
,
241 static struct si_pc_block_base cik_SX
= {
244 .flags
= R600_PC_BLOCK_SE
,
246 .select0
= R_036900_SX_PERFCOUNTER0_SELECT
,
247 .counter0_lo
= R_034900_SX_PERFCOUNTER0_LO
,
249 .layout
= SI_PC_MULTI_TAIL
,
252 static struct si_pc_block_base cik_TA
= {
255 .flags
= R600_PC_BLOCK_SE
| R600_PC_BLOCK_INSTANCE_GROUPS
| R600_PC_BLOCK_SHADER_WINDOWED
,
257 .select0
= R_036B00_TA_PERFCOUNTER0_SELECT
,
258 .counter0_lo
= R_034B00_TA_PERFCOUNTER0_LO
,
260 .layout
= SI_PC_MULTI_ALTERNATE
,
263 static struct si_pc_block_base cik_TD
= {
266 .flags
= R600_PC_BLOCK_SE
| R600_PC_BLOCK_INSTANCE_GROUPS
| R600_PC_BLOCK_SHADER_WINDOWED
,
268 .select0
= R_036C00_TD_PERFCOUNTER0_SELECT
,
269 .counter0_lo
= R_034C00_TD_PERFCOUNTER0_LO
,
271 .layout
= SI_PC_MULTI_ALTERNATE
,
274 static struct si_pc_block_base cik_TCA
= {
277 .flags
= R600_PC_BLOCK_INSTANCE_GROUPS
,
279 .select0
= R_036E40_TCA_PERFCOUNTER0_SELECT
,
280 .counter0_lo
= R_034E40_TCA_PERFCOUNTER0_LO
,
282 .layout
= SI_PC_MULTI_ALTERNATE
,
285 static struct si_pc_block_base cik_TCC
= {
288 .flags
= R600_PC_BLOCK_INSTANCE_GROUPS
,
290 .select0
= R_036E00_TCC_PERFCOUNTER0_SELECT
,
291 .counter0_lo
= R_034E00_TCC_PERFCOUNTER0_LO
,
293 .layout
= SI_PC_MULTI_ALTERNATE
,
296 static struct si_pc_block_base cik_TCP
= {
299 .flags
= R600_PC_BLOCK_SE
| R600_PC_BLOCK_INSTANCE_GROUPS
| R600_PC_BLOCK_SHADER_WINDOWED
,
301 .select0
= R_036D00_TCP_PERFCOUNTER0_SELECT
,
302 .counter0_lo
= R_034D00_TCP_PERFCOUNTER0_LO
,
304 .layout
= SI_PC_MULTI_ALTERNATE
,
307 static struct si_pc_block_base cik_VGT
= {
310 .flags
= R600_PC_BLOCK_SE
,
312 .select0
= R_036230_VGT_PERFCOUNTER0_SELECT
,
313 .counter0_lo
= R_034240_VGT_PERFCOUNTER0_LO
,
315 .layout
= SI_PC_MULTI_TAIL
,
318 static struct si_pc_block_base cik_WD
= {
322 .select0
= R_036200_WD_PERFCOUNTER0_SELECT
,
323 .counter0_lo
= R_034200_WD_PERFCOUNTER0_LO
,
326 static struct si_pc_block_base cik_MC
= {
330 .layout
= SI_PC_FAKE
,
333 static struct si_pc_block_base cik_SRBM
= {
337 .layout
= SI_PC_FAKE
,
340 /* Both the number of instances and selectors varies between chips of the same
341 * class. We only differentiate by class here and simply expose the maximum
342 * number over all chips in a class.
344 * Unfortunately, GPUPerfStudio uses the order of performance counter groups
345 * blindly once it believes it has identified the hardware, so the order of
346 * blocks here matters.
/* NOTE(review): severe extraction damage — stale original-source line numbers
 * are fused into the text and most entries of this table (orig. lines
 * 349-374: CB, CPF, DB, GRBM, ... through CPC) are missing. Only the TA, TCC
 * and TCP entries survive below. Restore the complete table from upstream
 * before this file can compile; do not trust the fragment as-is. Entry format
 * appears to be { &block, selectors, instances }. */
348 static struct si_pc_block groups_CIK
[] = {
359 { &cik_TA
, 111, 11 },
361 { &cik_TCC
, 160, 16 },
363 { &cik_TCP
, 154, 11 },
/* NOTE(review): extraction damage — most entries of this table (orig. lines
 * 376-400) are missing; only TA, TCC and TCP survive. Restore the complete
 * table from upstream before compiling. */
375 static struct si_pc_block groups_VI
[] = {
386 { &cik_TA
, 119, 16 },
388 { &cik_TCC
, 192, 16 },
390 { &cik_TCP
, 180, 16 },
/* NOTE(review): extraction damage — most entries of this table (orig. lines
 * 403-424) are missing; only TA, TCC and TCP survive. Restore the complete
 * table from upstream before compiling. */
402 static struct si_pc_block groups_gfx9
[] = {
413 { &cik_TA
, 119, 16 },
415 { &cik_TCC
, 256, 16 },
417 { &cik_TCP
, 85, 16 },
426 static void si_pc_get_size(struct r600_perfcounter_block
*group
,
427 unsigned count
, unsigned *selectors
,
428 unsigned *num_select_dw
, unsigned *num_read_dw
)
430 struct si_pc_block
*sigroup
= (struct si_pc_block
*)group
->data
;
431 struct si_pc_block_base
*regs
= sigroup
->b
;
432 unsigned layout_multi
= regs
->layout
& SI_PC_MULTI_MASK
;
434 if (regs
->layout
& SI_PC_FAKE
) {
436 } else if (layout_multi
== SI_PC_MULTI_BLOCK
) {
437 if (count
< regs
->num_multi
)
438 *num_select_dw
= 2 * (count
+ 2) + regs
->num_prelude
;
440 *num_select_dw
= 2 + count
+ regs
->num_multi
+ regs
->num_prelude
;
441 } else if (layout_multi
== SI_PC_MULTI_TAIL
) {
442 *num_select_dw
= 4 + count
+ MIN2(count
, regs
->num_multi
) + regs
->num_prelude
;
443 } else if (layout_multi
== SI_PC_MULTI_CUSTOM
) {
444 assert(regs
->num_prelude
== 0);
445 *num_select_dw
= 3 * (count
+ MIN2(count
, regs
->num_multi
));
447 assert(layout_multi
== SI_PC_MULTI_ALTERNATE
);
449 *num_select_dw
= 2 + count
+ MIN2(count
, regs
->num_multi
) + regs
->num_prelude
;
452 *num_read_dw
= 6 * count
;
455 static void si_pc_emit_instance(struct r600_common_context
*ctx
,
456 int se
, int instance
)
458 struct radeon_winsys_cs
*cs
= ctx
->gfx
.cs
;
459 unsigned value
= S_030800_SH_BROADCAST_WRITES(1);
462 value
|= S_030800_SE_INDEX(se
);
464 value
|= S_030800_SE_BROADCAST_WRITES(1);
468 value
|= S_030800_INSTANCE_INDEX(instance
);
470 value
|= S_030800_INSTANCE_BROADCAST_WRITES(1);
473 radeon_set_uconfig_reg(cs
, R_030800_GRBM_GFX_INDEX
, value
);
476 static void si_pc_emit_shaders(struct r600_common_context
*ctx
,
479 struct radeon_winsys_cs
*cs
= ctx
->gfx
.cs
;
481 radeon_set_uconfig_reg_seq(cs
, R_036780_SQ_PERFCOUNTER_CTRL
, 2);
482 radeon_emit(cs
, shaders
& 0x7f);
483 radeon_emit(cs
, 0xffffffff);
486 static void si_pc_emit_select(struct r600_common_context
*ctx
,
487 struct r600_perfcounter_block
*group
,
488 unsigned count
, unsigned *selectors
)
490 struct si_pc_block
*sigroup
= (struct si_pc_block
*)group
->data
;
491 struct si_pc_block_base
*regs
= sigroup
->b
;
492 struct radeon_winsys_cs
*cs
= ctx
->gfx
.cs
;
494 unsigned layout_multi
= regs
->layout
& SI_PC_MULTI_MASK
;
497 assert(count
<= regs
->num_counters
);
499 if (regs
->layout
& SI_PC_FAKE
)
502 if (layout_multi
== SI_PC_MULTI_BLOCK
) {
503 assert(!(regs
->layout
& SI_PC_REG_REVERSE
));
505 dw
= count
+ regs
->num_prelude
;
506 if (count
>= regs
->num_multi
)
507 dw
+= regs
->num_multi
;
508 radeon_set_uconfig_reg_seq(cs
, regs
->select0
, dw
);
509 for (idx
= 0; idx
< regs
->num_prelude
; ++idx
)
511 for (idx
= 0; idx
< MIN2(count
, regs
->num_multi
); ++idx
)
512 radeon_emit(cs
, selectors
[idx
] | regs
->select_or
);
514 if (count
< regs
->num_multi
) {
516 regs
->select0
+ 4 * regs
->num_multi
;
517 radeon_set_uconfig_reg_seq(cs
, select1
, count
);
520 for (idx
= 0; idx
< MIN2(count
, regs
->num_multi
); ++idx
)
523 if (count
> regs
->num_multi
) {
524 for (idx
= regs
->num_multi
; idx
< count
; ++idx
)
525 radeon_emit(cs
, selectors
[idx
] | regs
->select_or
);
527 } else if (layout_multi
== SI_PC_MULTI_TAIL
) {
528 unsigned select1
, select1_count
;
530 assert(!(regs
->layout
& SI_PC_REG_REVERSE
));
532 radeon_set_uconfig_reg_seq(cs
, regs
->select0
, count
+ regs
->num_prelude
);
533 for (idx
= 0; idx
< regs
->num_prelude
; ++idx
)
535 for (idx
= 0; idx
< count
; ++idx
)
536 radeon_emit(cs
, selectors
[idx
] | regs
->select_or
);
538 select1
= regs
->select0
+ 4 * regs
->num_counters
;
539 select1_count
= MIN2(count
, regs
->num_multi
);
540 radeon_set_uconfig_reg_seq(cs
, select1
, select1_count
);
541 for (idx
= 0; idx
< select1_count
; ++idx
)
543 } else if (layout_multi
== SI_PC_MULTI_CUSTOM
) {
544 unsigned *reg
= regs
->select
;
545 for (idx
= 0; idx
< count
; ++idx
) {
546 radeon_set_uconfig_reg(cs
, *reg
++, selectors
[idx
] | regs
->select_or
);
547 if (idx
< regs
->num_multi
)
548 radeon_set_uconfig_reg(cs
, *reg
++, 0);
551 assert(layout_multi
== SI_PC_MULTI_ALTERNATE
);
553 unsigned reg_base
= regs
->select0
;
554 unsigned reg_count
= count
+ MIN2(count
, regs
->num_multi
);
555 reg_count
+= regs
->num_prelude
;
557 if (!(regs
->layout
& SI_PC_REG_REVERSE
)) {
558 radeon_set_uconfig_reg_seq(cs
, reg_base
, reg_count
);
560 for (idx
= 0; idx
< regs
->num_prelude
; ++idx
)
562 for (idx
= 0; idx
< count
; ++idx
) {
563 radeon_emit(cs
, selectors
[idx
] | regs
->select_or
);
564 if (idx
< regs
->num_multi
)
568 reg_base
-= (reg_count
- 1) * 4;
569 radeon_set_uconfig_reg_seq(cs
, reg_base
, reg_count
);
571 for (idx
= count
; idx
> 0; --idx
) {
572 if (idx
<= regs
->num_multi
)
574 radeon_emit(cs
, selectors
[idx
- 1] | regs
->select_or
);
576 for (idx
= 0; idx
< regs
->num_prelude
; ++idx
)
582 static void si_pc_emit_start(struct r600_common_context
*ctx
,
583 struct r600_resource
*buffer
, uint64_t va
)
585 struct radeon_winsys_cs
*cs
= ctx
->gfx
.cs
;
587 radeon_add_to_buffer_list(ctx
, &ctx
->gfx
, buffer
,
588 RADEON_USAGE_WRITE
, RADEON_PRIO_QUERY
);
590 radeon_emit(cs
, PKT3(PKT3_COPY_DATA
, 4, 0));
591 radeon_emit(cs
, COPY_DATA_SRC_SEL(COPY_DATA_IMM
) |
592 COPY_DATA_DST_SEL(COPY_DATA_MEM
));
593 radeon_emit(cs
, 1); /* immediate */
594 radeon_emit(cs
, 0); /* unused */
596 radeon_emit(cs
, va
>> 32);
598 radeon_set_uconfig_reg(cs
, R_036020_CP_PERFMON_CNTL
,
599 S_036020_PERFMON_STATE(V_036020_DISABLE_AND_RESET
));
600 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
601 radeon_emit(cs
, EVENT_TYPE(V_028A90_PERFCOUNTER_START
) | EVENT_INDEX(0));
602 radeon_set_uconfig_reg(cs
, R_036020_CP_PERFMON_CNTL
,
603 S_036020_PERFMON_STATE(V_036020_START_COUNTING
));
606 /* Note: The buffer was already added in si_pc_emit_start, so we don't have to
607 * do it again in here. */
608 static void si_pc_emit_stop(struct r600_common_context
*ctx
,
609 struct r600_resource
*buffer
, uint64_t va
)
611 struct radeon_winsys_cs
*cs
= ctx
->gfx
.cs
;
613 si_gfx_write_event_eop(ctx
, V_028A90_BOTTOM_OF_PIPE_TS
, 0,
614 EOP_DATA_SEL_VALUE_32BIT
,
615 buffer
, va
, 0, SI_NOT_QUERY
);
616 si_gfx_wait_fence(ctx
, va
, 0, 0xffffffff);
618 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
619 radeon_emit(cs
, EVENT_TYPE(V_028A90_PERFCOUNTER_SAMPLE
) | EVENT_INDEX(0));
620 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
621 radeon_emit(cs
, EVENT_TYPE(V_028A90_PERFCOUNTER_STOP
) | EVENT_INDEX(0));
622 radeon_set_uconfig_reg(cs
, R_036020_CP_PERFMON_CNTL
,
623 S_036020_PERFMON_STATE(V_036020_STOP_COUNTING
) |
624 S_036020_PERFMON_SAMPLE_ENABLE(1));
627 static void si_pc_emit_read(struct r600_common_context
*ctx
,
628 struct r600_perfcounter_block
*group
,
629 unsigned count
, unsigned *selectors
,
630 struct r600_resource
*buffer
, uint64_t va
)
632 struct si_pc_block
*sigroup
= (struct si_pc_block
*)group
->data
;
633 struct si_pc_block_base
*regs
= sigroup
->b
;
634 struct radeon_winsys_cs
*cs
= ctx
->gfx
.cs
;
636 unsigned reg
= regs
->counter0_lo
;
637 unsigned reg_delta
= 8;
639 if (!(regs
->layout
& SI_PC_FAKE
)) {
640 if (regs
->layout
& SI_PC_REG_REVERSE
)
641 reg_delta
= -reg_delta
;
643 for (idx
= 0; idx
< count
; ++idx
) {
645 reg
= regs
->counters
[idx
];
647 radeon_emit(cs
, PKT3(PKT3_COPY_DATA
, 4, 0));
648 radeon_emit(cs
, COPY_DATA_SRC_SEL(COPY_DATA_PERF
) |
649 COPY_DATA_DST_SEL(COPY_DATA_MEM
) |
650 COPY_DATA_COUNT_SEL
); /* 64 bits */
651 radeon_emit(cs
, reg
>> 2);
652 radeon_emit(cs
, 0); /* unused */
654 radeon_emit(cs
, va
>> 32);
655 va
+= sizeof(uint64_t);
659 for (idx
= 0; idx
< count
; ++idx
) {
660 radeon_emit(cs
, PKT3(PKT3_COPY_DATA
, 4, 0));
661 radeon_emit(cs
, COPY_DATA_SRC_SEL(COPY_DATA_IMM
) |
662 COPY_DATA_DST_SEL(COPY_DATA_MEM
) |
663 COPY_DATA_COUNT_SEL
);
664 radeon_emit(cs
, 0); /* immediate */
667 radeon_emit(cs
, va
>> 32);
668 va
+= sizeof(uint64_t);
673 static void si_pc_cleanup(struct si_screen
*sscreen
)
675 si_perfcounters_do_destroy(sscreen
->perfcounters
);
676 sscreen
->perfcounters
= NULL
;
679 void si_init_perfcounters(struct si_screen
*screen
)
681 struct r600_perfcounters
*pc
;
682 struct si_pc_block
*blocks
;
686 switch (screen
->info
.chip_class
) {
689 num_blocks
= ARRAY_SIZE(groups_CIK
);
693 num_blocks
= ARRAY_SIZE(groups_VI
);
696 blocks
= groups_gfx9
;
697 num_blocks
= ARRAY_SIZE(groups_gfx9
);
701 return; /* not implemented */
704 if (screen
->info
.max_sh_per_se
!= 1) {
705 /* This should not happen on non-SI chips. */
706 fprintf(stderr
, "si_init_perfcounters: max_sh_per_se = %d not "
707 "supported (inaccurate performance counters)\n",
708 screen
->info
.max_sh_per_se
);
711 pc
= CALLOC_STRUCT(r600_perfcounters
);
715 pc
->num_start_cs_dwords
= 14;
716 pc
->num_stop_cs_dwords
= 14 + si_gfx_write_fence_dwords(screen
);
717 pc
->num_instance_cs_dwords
= 3;
718 pc
->num_shaders_cs_dwords
= 4;
720 pc
->num_shader_types
= ARRAY_SIZE(si_pc_shader_type_bits
);
721 pc
->shader_type_suffixes
= si_pc_shader_type_suffixes
;
722 pc
->shader_type_bits
= si_pc_shader_type_bits
;
724 pc
->get_size
= si_pc_get_size
;
725 pc
->emit_instance
= si_pc_emit_instance
;
726 pc
->emit_shaders
= si_pc_emit_shaders
;
727 pc
->emit_select
= si_pc_emit_select
;
728 pc
->emit_start
= si_pc_emit_start
;
729 pc
->emit_stop
= si_pc_emit_stop
;
730 pc
->emit_read
= si_pc_emit_read
;
731 pc
->cleanup
= si_pc_cleanup
;
733 if (!si_perfcounters_init(pc
, num_blocks
))
736 for (i
= 0; i
< num_blocks
; ++i
) {
737 struct si_pc_block
*block
= &blocks
[i
];
738 unsigned instances
= block
->instances
;
740 if (!strcmp(block
->b
->name
, "IA")) {
741 if (screen
->info
.max_se
> 2)
745 si_perfcounters_add_block(screen
, pc
,
748 block
->b
->num_counters
,
754 screen
->perfcounters
= pc
;
758 si_perfcounters_do_destroy(pc
);