2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <arpa/inet.h>
36 #include <sys/types.h>
42 #include <libconfig.h>
46 #include "drm/freedreno_drmif.h"
47 #include "drm/freedreno_ringbuffer.h"
49 #include "freedreno_perfcntr.h"
/*
 * NOTE(review): garbled extraction — declarations are split across lines and
 * interior source lines are missing (embedded numbering jumps); text kept
 * byte-identical. Appears to be per-group counter state: per-counter mmio
 * pointers to the hi/lo value registers, last sample timestamp/value, the
 * computed rate, and the UI label — TODO confirm against upstream fdperf.c.
 */
51 #define MAX_CNTR_PER_GROUP 24
53 /* NOTE first counter group should always be CP, since we unconditionally
54 * use CP counter to measure the gpu freq.
57 struct counter_group
{
58 const struct fd_perfcntr_group
*group
;
61 const struct fd_perfcntr_counter
*counter
;
63 volatile uint32_t *val_hi
;
64 volatile uint32_t *val_lo
;
65 } counter
[MAX_CNTR_PER_GROUP
];
67 /* last sample time: */
68 uint32_t stime
[MAX_CNTR_PER_GROUP
];
69 /* for now just care about the low 32b value.. at least then we don't
70 * have to really care that we can't sample both hi and lo regs at the
73 uint32_t last
[MAX_CNTR_PER_GROUP
];
74 /* current value, ie. by how many did the counter increase in last
75 * sampling period divided by the sampling period:
77 float current
[MAX_CNTR_PER_GROUP
];
78 /* name of currently selected counters (for UI): */
79 const char *label
[MAX_CNTR_PER_GROUP
];
/*
 * NOTE(review): garbled extraction — this is a fragment of the global device
 * state struct (the struct header and several members are missing from view).
 * Visible members: DT #address-cells/#size-cells, the per-generation counter
 * group table, and the drm device/submit/ring handles used to write counter
 * select registers. Forward decls for config persistence follow.
 */
84 int address_cells
, size_cells
;
91 /* per-generation table of counters: */
93 struct counter_group
*groups
;
94 /* drm device (for writing select regs via ring): */
95 struct fd_device
*dev
;
97 struct fd_submit
*submit
;
98 struct fd_ringbuffer
*ring
;
101 static void config_save(void);
102 static void config_restore(void);
103 static void restore_counter_groups(void);
/*
 * NOTE(review): garbled extraction — statements split mid-line and interior
 * lines missing (numbering jumps 112→117→122…); kept byte-identical.
 * Appears to read an entire file into a realloc-grown buffer in CHUNKSIZE
 * increments, with the total size returned via *sz — TODO confirm against
 * upstream fdperf.c (return value, EOF handling, and close() not visible).
 */
112 readfile(const char *path
, int *sz
)
117 fd
= open(path
, O_RDONLY
);
122 buf
= realloc(buf
, n
+ CHUNKSIZE
);
123 ret
= read(fd
, buf
+ n
, CHUNKSIZE
);
129 } else if (ret
< CHUNKSIZE
) {
/*
 * NOTE(review): garbled fragments of two helpers, kept byte-identical.
 * gettime_us(): monotonic clock converted to microseconds (sec*1e6 + nsec/1e3).
 * delta(a, b): elapsed count b-a with explicit handling of 32-bit counter
 * rollover (the non-rollover branch is missing from this view).
 */
144 clock_gettime(CLOCK_MONOTONIC
, &ts
);
145 return (ts
.tv_sec
* 1000000) + (ts
.tv_nsec
/ 1000);
149 delta(uint32_t a
, uint32_t b
)
151 /* deal with rollover: */
153 return 0xffffffff - a
+ b
;
/*
 * NOTE(review): garbled fragment, kept byte-identical. readdt() appears to
 * read a named property file under the cached device-tree node path
 * (dev.dtnode) via readfile() — return/cleanup lines not visible.
 */
159 * code to find stuff in /proc/device-tree:
161 * NOTE: if we sampled the counters from the cmdstream, we could avoid needing
162 * /dev/mem and /proc/device-tree crawling. OTOH when the GPU is heavily loaded
163 * we would be competing with whatever else is using the GPU.
167 readdt(const char *node
)
173 (void) asprintf(&path
, "%s/%s", dev
.dtnode
, node
);
174 buf
= readfile(path
, &sz
);
/*
 * NOTE(review): garbled fragments of the nftw() callback that walks the DT
 * "qcom,gpu-pwrlevels" node and min/max-reduces "qcom,gpu-freq" properties
 * (big-endian in DT, hence ntohl) into dev.min_freq/dev.max_freq, plus its
 * caller. Interior lines missing; kept byte-identical.
 */
181 find_freqs_fn(const char *fpath
, const struct stat
*sb
, int typeflag
, struct FTW
*ftwbuf
)
183 const char *fname
= fpath
+ ftwbuf
->base
;
186 if (strcmp(fname
, "qcom,gpu-freq") == 0) {
187 uint32_t *buf
= readfile(fpath
, &sz
);
188 uint32_t freq
= ntohl(buf
[0]);
190 dev
.max_freq
= MAX2(dev
.max_freq
, freq
);
191 dev
.min_freq
= MIN2(dev
.min_freq
, freq
);
206 (void) asprintf(&path
, "%s/%s", dev
.dtnode
, "qcom,gpu-pwrlevels");
208 ret
= nftw(path
, find_freqs_fn
, 64, 0);
210 err(1, "could not find power levels");
/*
 * NOTE(review): garbled fragment of the nftw() callback that locates the
 * adreno GPU node in /proc/device-tree by matching its "compatible" property,
 * caches the node path in dev.dtnode, and reads the parent's
 * #address-cells/#size-cells (big-endian, hence ntohl). Kept byte-identical.
 * NOTE(review): malloc(dlen+1)+memcpy(dlen) suggests a NUL-termination line
 * exists in the missing text — TODO confirm against upstream.
 */
216 find_device_fn(const char *fpath
, const struct stat
*sb
, int typeflag
, struct FTW
*ftwbuf
)
218 const char *fname
= fpath
+ ftwbuf
->base
;
221 if (strcmp(fname
, "compatible") == 0) {
222 char *str
= readfile(fpath
, &sz
);
223 if ((strcmp(str
, "qcom,adreno-3xx") == 0) ||
224 (strcmp(str
, "qcom,kgsl-3d0") == 0) ||
225 (strstr(str
, "amd,imageon") == str
) ||
226 (strstr(str
, "qcom,adreno") == str
)) {
227 int dlen
= strlen(fpath
) - strlen("/compatible");
228 dev
.dtnode
= malloc(dlen
+ 1);
229 memcpy(dev
.dtnode
, fpath
, dlen
);
230 printf("found dt node: %s\n", dev
.dtnode
);
232 char buf
[dlen
+ sizeof("/../#address-cells") + 1];
235 sprintf(buf
, "%s/../#address-cells", dev
.dtnode
);
236 val
= readfile(buf
, &sz
);
237 dev
.address_cells
= ntohl(*val
);
240 sprintf(buf
, "%s/../#size-cells", dev
.dtnode
);
241 val
= readfile(buf
, &sz
);
242 dev
.size_cells
= ntohl(*val
);
245 printf("#address-cells=%d, #size-cells=%d\n",
246 dev
.address_cells
, dev
.size_cells
);
/*
 * NOTE(review): garbled fragment of device discovery/setup, kept
 * byte-identical. Visible flow: walk /proc/device-tree for the GPU node; open
 * the "msm" drm device and create device/pipe; query FD_CHIP_ID; parse the
 * DT "reg" property into dev.base/dev.size honoring 1-vs-2-cell encodings;
 * query FD_MAX_FREQ (falling back to DT parsing); then mmap the register
 * region from /dev/mem into dev.io for direct counter sampling.
 * NOTE(review): "%08"PRIu64 pairs a width flag with a decimal conversion for
 * an address — looks like it was meant to be PRIx64; cannot fix here without
 * the surrounding missing lines — flagging for upstream comparison.
 */
263 ret
= nftw("/proc/device-tree/", find_device_fn
, 64, 0);
265 err(1, "could not find adreno gpu");
268 errx(1, "could not find qcom,adreno-3xx node");
270 fd
= drmOpen("msm", NULL
);
272 err(1, "could not open drm device");
274 dev
.dev
= fd_device_new(fd
);
275 dev
.pipe
= fd_pipe_new(dev
.dev
, FD_PIPE_3D
);
278 ret
= fd_pipe_get_param(dev
.pipe
, FD_CHIP_ID
, &val
);
280 err(1, "could not get gpu-id");
284 #define CHIP_FMT "d%d%d.%d"
285 #define CHIP_ARGS(chipid) \
286 ((chipid) >> 24) & 0xff, \
287 ((chipid) >> 16) & 0xff, \
288 ((chipid) >> 8) & 0xff, \
289 ((chipid) >> 0) & 0xff
290 printf("device: a%"CHIP_FMT
"\n", CHIP_ARGS(dev
.chipid
));
292 b
= buf
= readdt("reg");
294 if (dev
.address_cells
== 2) {
295 uint32_t u
[2] = { ntohl(buf
[0]), ntohl(buf
[1]) };
296 dev
.base
= (((uint64_t)u
[0]) << 32) | u
[1];
299 dev
.base
= ntohl(buf
[0]);
303 if (dev
.size_cells
== 2) {
304 uint32_t u
[2] = { ntohl(buf
[0]), ntohl(buf
[1]) };
305 dev
.size
= (((uint64_t)u
[0]) << 32) | u
[1];
308 dev
.size
= ntohl(buf
[0]);
314 printf("i/o region at %08"PRIu64
" (size: %x)\n", dev
.base
, dev
.size
);
316 /* try MAX_FREQ first as that will work regardless of old dt
317 * dt bindings vs upstream bindings:
319 ret
= fd_pipe_get_param(dev
.pipe
, FD_MAX_FREQ
, &val
);
321 printf("falling back to parsing DT bindings for freq\n");
328 printf("min_freq=%u, max_freq=%u\n", dev
.min_freq
, dev
.max_freq
);
330 fd
= open("/dev/mem", O_RDWR
| O_SYNC
);
332 err(1, "could not open /dev/mem");
334 dev
.io
= mmap(0, dev
.size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
, fd
, dev
.base
);
337 err(1, "could not map device");
/*
 * NOTE(review): garbled fragment, kept byte-identical. Flushes the pending
 * submit (blocking: fence fd -1), treats failure as fatal, then deletes the
 * ring and submit objects — presumably so select_counter() can lazily
 * recreate them; TODO confirm against upstream.
 */
353 ret
= fd_submit_flush(dev
.submit
, -1, NULL
, NULL
);
355 errx(1, "submit failed: %d", ret
);
356 fd_ringbuffer_del(dev
.ring
);
357 fd_submit_del(dev
.submit
);
/*
 * NOTE(review): garbled fragment of select_counter(group, ctr, n), kept
 * byte-identical: programs countable `n` into physical counter `ctr` of
 * `group`. Writes the select register via the command ring (not direct mmio)
 * because bashing it while the GPU is active is unsafe, per the original
 * comment below. Generation switch: pre-a5xx path uses PKT0/PKT3
 * (CP_WAIT_FOR_IDLE, optional enable/clear toggles, select_reg write);
 * newer path uses PKT4/PKT7 equivalents. Afterwards the baseline sample
 * (last value + timestamp) is captured for rate computation.
 * The case labels, OUT_RING value lines, and flush call are missing from
 * this view — TODO confirm ordering against upstream.
 */
364 select_counter(struct counter_group
*group
, int ctr
, int n
)
366 assert(n
< group
->group
->num_countables
);
367 assert(ctr
< group
->group
->num_counters
);
369 group
->label
[ctr
] = group
->group
->countables
[n
].name
;
370 group
->counter
[ctr
].select_val
= n
;
373 dev
.submit
= fd_submit_new(dev
.pipe
);
374 dev
.ring
= fd_submit_new_ringbuffer(dev
.submit
, 0x1000,
375 FD_RINGBUFFER_PRIMARY
| FD_RINGBUFFER_GROWABLE
);
378 /* bashing select register directly while gpu is active will end
379 * in tears.. so we need to write it via the ring:
381 * TODO it would help startup time, if gpu is loaded, to batch
382 * all the initial writes and do a single flush.. although that
383 * makes things more complicated for capturing inital sample value
385 struct fd_ringbuffer
*ring
= dev
.ring
;
386 switch (dev
.chipid
>> 24) {
390 OUT_PKT3(ring
, CP_WAIT_FOR_IDLE
, 1);
391 OUT_RING(ring
, 0x00000000);
393 if (group
->group
->counters
[ctr
].enable
) {
394 OUT_PKT0(ring
, group
->group
->counters
[ctr
].enable
, 1);
398 if (group
->group
->counters
[ctr
].clear
) {
399 OUT_PKT0(ring
, group
->group
->counters
[ctr
].clear
, 1);
402 OUT_PKT0(ring
, group
->group
->counters
[ctr
].clear
, 1);
406 OUT_PKT0(ring
, group
->group
->counters
[ctr
].select_reg
, 1);
409 if (group
->group
->counters
[ctr
].enable
) {
410 OUT_PKT0(ring
, group
->group
->counters
[ctr
].enable
, 1);
417 OUT_PKT7(ring
, CP_WAIT_FOR_IDLE
, 0);
419 if (group
->group
->counters
[ctr
].enable
) {
420 OUT_PKT4(ring
, group
->group
->counters
[ctr
].enable
, 1);
424 if (group
->group
->counters
[ctr
].clear
) {
425 OUT_PKT4(ring
, group
->group
->counters
[ctr
].clear
, 1);
428 OUT_PKT4(ring
, group
->group
->counters
[ctr
].clear
, 1);
432 OUT_PKT4(ring
, group
->group
->counters
[ctr
].select_reg
, 1);
435 if (group
->group
->counters
[ctr
].enable
) {
436 OUT_PKT4(ring
, group
->group
->counters
[ctr
].enable
, 1);
443 group
->last
[ctr
] = *group
->counter
[ctr
].val_lo
;
444 group
->stime
[ctr
] = gettime_us();
/*
 * NOTE(review): garbled fragment, kept byte-identical. Samples the low 32
 * bits of counter `ctr` via its mmio pointer, computes the rate
 * (delta-count scaled to per-second: dval * 1e6 / dt_us) into
 * group->current[ctr], and advances the last-value/last-time baseline.
 * Both deltas go through delta() so 32-bit rollover is handled.
 */
448 resample_counter(struct counter_group
*group
, int ctr
)
450 uint32_t val
= *group
->counter
[ctr
].val_lo
;
451 uint32_t t
= gettime_us();
452 uint32_t dt
= delta(group
->stime
[ctr
], t
);
453 uint32_t dval
= delta(group
->last
[ctr
], val
);
454 group
->current
[ctr
] = (float)dval
* 1000000.0 / (float)dt
;
455 group
->last
[ctr
] = val
;
456 group
->stime
[ctr
] = t
;
/*
 * NOTE(review): garbled fragment of the all-counters resample pass, kept
 * byte-identical. Rate-limited: bails out if less than half a refresh
 * interval has elapsed since the previous pass (guards against oversampling
 * when the UI redraws for other reasons — presumably; TODO confirm).
 */
459 #define REFRESH_MS 500
461 /* sample all the counters: */
465 static uint64_t last_time
;
466 uint64_t current_time
= gettime_us();
468 if ((current_time
- last_time
) < (REFRESH_MS
* 1000 / 2))
471 last_time
= current_time
;
473 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
474 struct counter_group
*group
= &dev
.groups
[i
];
475 for (unsigned j
= 0; j
< group
->group
->num_counters
; j
++) {
476 resample_counter(group
, j
);
/*
 * NOTE(review): garbled fragment, kept byte-identical. Curses color-pair ids,
 * UI layout globals (label column width, total row count, 1-based current
 * selection), and redraw_footer(): renders the "fdperf: aNNN (min..max MHz)"
 * status line on the bottom row, padded to full width with whline().
 */
485 #define COLOR_GROUP_HEADER 1
486 #define COLOR_FOOTER 2
487 #define COLOR_INVERSE 3
490 static int ctr_width
;
491 static int max_rows
, current_cntr
= 1;
494 redraw_footer(WINDOW
*win
)
499 n
= asprintf(&footer
, " fdperf: a%"CHIP_FMT
" (%.2fMHz..%.2fMHz)",
500 CHIP_ARGS(dev
.chipid
),
501 ((float)dev
.min_freq
) / 1000000.0,
502 ((float)dev
.max_freq
) / 1000000.0);
504 wmove(win
, h
- 1, 0);
505 wattron(win
, COLOR_PAIR(COLOR_FOOTER
));
506 waddstr(win
, footer
);
507 whline(win
, ' ', w
- n
);
508 wattroff(win
, COLOR_PAIR(COLOR_FOOTER
));
/*
 * NOTE(review): garbled fragment, kept byte-identical. Draws a bold,
 * color-highlighted group header row padded to the window width (the
 * wmove/waddstr of the name itself is among the missing lines).
 */
514 redraw_group_header(WINDOW
*win
, int row
, const char *name
)
517 wattron(win
, A_BOLD
);
518 wattron(win
, COLOR_PAIR(COLOR_GROUP_HEADER
));
520 whline(win
, ' ', w
- strlen(name
));
521 wattroff(win
, COLOR_PAIR(COLOR_GROUP_HEADER
));
522 wattroff(win
, A_BOLD
);
/*
 * NOTE(review): garbled fragment, kept byte-identical. Right-aligns the
 * counter label within the ctr_width column (pad with spaces, then move to
 * the alignment point); the selected row gets the inverse color pair.
 */
526 redraw_counter_label(WINDOW
*win
, int row
, const char *name
, bool selected
)
528 int n
= strlen(name
);
529 assert(n
<= ctr_width
);
531 whline(win
, ' ', ctr_width
- n
);
532 wmove(win
, row
, ctr_width
- n
);
534 wattron(win
, COLOR_PAIR(COLOR_INVERSE
));
537 wattroff(win
, COLOR_PAIR(COLOR_INVERSE
));
/*
 * NOTE(review): garbled fragment, kept byte-identical. Renders a cycle-type
 * counter as a percentage-of-max-freq bar: value scaled by dev.max_freq,
 * bar width clamped below the remaining column width (values can exceed
 * 100%, per the original comment), "%.2f%%" text drawn partly inside the
 * inverse-video bar and partly after it, remainder blanked to full width.
 */
542 redraw_counter_value_cycles(WINDOW
*win
, float val
)
545 int x
= getcurx(win
);
546 int valwidth
= w
- x
;
549 /* convert to fraction of max freq: */
550 val
= val
/ (float)dev
.max_freq
;
552 /* figure out percentage-bar width: */
553 barwidth
= (int)(val
* valwidth
);
555 /* sometimes things go over 100%.. idk why, could be
556 * things running faster than base clock, or counter
557 * summing up cycles in multiple cores?
559 barwidth
= MIN2(barwidth
, valwidth
- 1);
561 n
= asprintf(&str
, "%.2f%%", 100.0 * val
);
562 wattron(win
, COLOR_PAIR(COLOR_INVERSE
));
563 waddnstr(win
, str
, barwidth
);
565 whline(win
, ' ', barwidth
- n
);
566 wmove(win
, getcury(win
), x
+ barwidth
);
568 wattroff(win
, COLOR_PAIR(COLOR_INVERSE
));
570 waddstr(win
, str
+ barwidth
);
571 whline(win
, ' ', w
- getcurx(win
));
/*
 * NOTE(review): garbled fragment, kept byte-identical. Renders a raw counter
 * value as a locale-grouped number ("%'.2f" — requires the setlocale() done
 * in main) and blanks the rest of the row.
 */
577 redraw_counter_value_raw(WINDOW
*win
, float val
)
580 (void) asprintf(&str
, "%'.2f", val
);
582 whline(win
, ' ', w
- getcurx(win
));
/*
 * NOTE(review): garbled fragment, kept byte-identical. Draws one counter row:
 * label, then the value — rendered as a percentage bar if the label contains
 * CYCLE/BUSY/IDLE (heuristic for cycle-type counters, per the original
 * comment), otherwise as a raw number.
 */
587 redraw_counter(WINDOW
*win
, int row
, struct counter_group
*group
,
588 int ctr
, bool selected
)
590 redraw_counter_label(win
, row
, group
->label
[ctr
], selected
);
592 /* quick hack, if the label has "CYCLE" in the name, it is
593 * probably a cycle counter ;-)
594 * Perhaps add more info in rnndb schema to know how to
595 * treat individual counters (ie. which are cycles, and
596 * for those we want to present as a percentage do we
597 * need to scale the result.. ie. is it running at some
598 * multiple or divisor of core clk, etc)
600 * TODO it would be much more clever to get this from xml
601 * Also.. in some cases I think we want to know how many
602 * units the counter is counting for, ie. if a320 has 2x
603 * shader as a306 we might need to scale the result..
605 if (strstr(group
->label
[ctr
], "CYCLE") ||
606 strstr(group
->label
[ctr
], "BUSY") ||
607 strstr(group
->label
[ctr
], "IDLE"))
608 redraw_counter_value_cycles(win
, group
->current
[ctr
]);
610 redraw_counter_value_raw(win
, group
->current
[ctr
]);
/*
 * NOTE(review): garbled fragment of the main redraw pass, kept
 * byte-identical. Maintains a static scroll offset so current_cntr stays
 * visible, walks each group drawing header + counter rows (skipping the
 * first CP counter, which is reserved for the freq measurement), then draws
 * a "Status" section with a GPU-frequency row derived from
 * groups[0].current[0] (the CP always-count counter) scaled to MHz.
 */
616 static int scroll
= 0;
624 if ((current_cntr
- scroll
) > (max
- 1)) {
625 scroll
= current_cntr
- (max
- 1);
626 } else if ((current_cntr
- 1) < scroll
) {
627 scroll
= current_cntr
- 1;
630 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
631 struct counter_group
*group
= &dev
.groups
[i
];
634 /* NOTE skip CP the first CP counter */
638 if (j
< group
->group
->num_counters
) {
639 if ((scroll
<= row
) && ((row
- scroll
) < max
))
640 redraw_group_header(win
, row
- scroll
, group
->group
->name
);
644 for (; j
< group
->group
->num_counters
; j
++) {
645 if ((scroll
<= row
) && ((row
- scroll
) < max
))
646 redraw_counter(win
, row
- scroll
, group
, j
, row
== current_cntr
);
651 /* convert back to physical (unscrolled) offset: */
654 redraw_group_header(win
, row
, "Status");
657 /* Draw GPU freq row: */
658 redraw_counter_label(win
, row
, "Freq (MHz)", false);
659 redraw_counter_value_raw(win
, dev
.groups
[0].current
[0] / 1000000.0);
/*
 * NOTE(review): garbled fragment, kept byte-identical. Maps the 1-based UI
 * row index current_cntr back to its (group, counter) pair, accounting for
 * group-header rows and the hidden first CP counter; returns NULL when the
 * row is a group header (not selectable). Counter index returned via *ctr.
 */
667 static struct counter_group
*
668 current_counter(int *ctr
)
672 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
673 struct counter_group
*group
= &dev
.groups
[i
];
676 /* NOTE skip the first CP counter (CP_ALWAYS_COUNT) */
680 /* account for group header: */
681 if (j
< group
->group
->num_counters
) {
682 /* cannot select group header.. return null to indicate this
685 if (n
== current_cntr
)
691 for (; j
< group
->group
->num_counters
; j
++) {
692 if (n
== current_cntr
) {
/*
 * NOTE(review): garbled fragment of the countable-selection dialog, kept
 * byte-identical. Flow: find the currently selected (group, counter); map
 * its select_val back to a countable index (selector values may be
 * discontinuous, per the original comment); open a centered scrollable
 * newwin listing countable names, highlighting the current choice in
 * inverse video; KEY up/down move the selection and confirmation calls
 * select_counter() to program the new countable. Key-case labels and
 * dialog teardown are among the missing lines.
 */
709 struct counter_group
*group
;
710 int cnt
, current
= 0, scroll
;
712 /* figure out dialog size: */
714 int dw
= ctr_width
+ 2;
716 group
= current_counter(&cnt
);
718 /* find currently selected idx (note there can be discontinuities
719 * so the selected value does not map 1:1 to current idx)
721 uint32_t selected
= group
->counter
[cnt
].select_val
;
722 for (int i
= 0; i
< group
->group
->num_countables
; i
++) {
723 if (group
->group
->countables
[i
].selector
== selected
) {
729 /* scrolling offset, if dialog is too small for all the choices: */
732 dialog
= newwin(dh
, dw
, (h
-dh
)/2, (w
-dw
)/2);
735 keypad(dialog
, TRUE
);
738 int max
= MIN2(dh
- 2, group
->group
->num_countables
);
741 if ((current
- scroll
) >= (dh
- 3)) {
742 scroll
= current
- (dh
- 3);
743 } else if (current
< scroll
) {
747 for (int i
= 0; i
< max
; i
++) {
749 wmove(dialog
, i
+1, 1);
751 assert (n
< group
->group
->num_countables
);
752 selector
= group
->group
->countables
[n
].selector
;
753 wattron(dialog
, COLOR_PAIR(COLOR_INVERSE
));
755 if (n
< group
->group
->num_countables
)
756 waddstr(dialog
, group
->group
->countables
[n
].name
);
757 whline(dialog
, ' ', dw
- getcurx(dialog
) - 1);
759 wattroff(dialog
, COLOR_PAIR(COLOR_INVERSE
));
762 assert (selector
>= 0);
764 switch (wgetch(dialog
)) {
766 current
= MAX2(0, current
- 1);
769 current
= MIN2(group
->group
->num_countables
- 1, current
+ 1);
773 /* select new sampler */
774 select_counter(group
, cnt
, selector
);
789 wborder(dialog
, ' ', ' ', ' ',' ',' ',' ',' ',' ');
/*
 * NOTE(review): garbled fragment, kept byte-identical. Moves current_cntr by
 * `amount`, clamping to [1, max_rows-1]; when the landing row is a group
 * header (current_counter() returns NULL) it steps one further in the same
 * direction so the selection always rests on a selectable counter row.
 * The direction branch (amount < 0 vs >= 0) is among the missing lines.
 */
794 scroll_cntr(int amount
)
797 current_cntr
= MAX2(1, current_cntr
+ amount
);
798 if (current_counter(NULL
) == NULL
) {
799 current_cntr
= MAX2(1, current_cntr
- 1);
802 current_cntr
= MIN2(max_rows
- 1, current_cntr
+ amount
);
803 if (current_counter(NULL
) == NULL
)
804 current_cntr
= MIN2(max_rows
- 1, current_cntr
+ 1);
/*
 * NOTE(review): garbled fragment of the curses main loop, kept
 * byte-identical. Sets up the main window (REFRESH_MS input timeout doubles
 * as the redraw tick), keypad mode, and the three color pairs; dispatches
 * key events (page-up/down handling is visibly TODO); and every ~0.5s
 * re-programs all counter select registers via restore_counter_groups() in
 * case a GPU suspend reset them.
 */
812 uint32_t last_time
= gettime_us();
820 wtimeout(mainwin
, REFRESH_MS
);
822 keypad(mainwin
, TRUE
);
825 init_pair(COLOR_GROUP_HEADER
, COLOR_WHITE
, COLOR_GREEN
);
826 init_pair(COLOR_FOOTER
, COLOR_WHITE
, COLOR_BLUE
);
827 init_pair(COLOR_INVERSE
, COLOR_BLACK
, COLOR_WHITE
);
830 switch (wgetch(mainwin
)) {
837 case KEY_NPAGE
: /* page-down */
838 /* TODO figure out # of rows visible? */
841 case KEY_PPAGE
: /* page-up */
842 /* TODO figure out # of rows visible? */
858 /* restore the counters every 0.5s in case the GPU has suspended,
859 * in which case the current selected countables will have reset:
861 uint32_t t
= gettime_us();
862 if (delta(last_time
, t
) > 500000) {
863 restore_counter_groups();
869 /* restore settings.. maybe we need an atexit()??*/
/*
 * NOTE(review): garbled fragment, kept byte-identical. Re-programs every
 * counter with its currently recorded select_val (skipping the reserved
 * first CP counter) — used after possible GPU suspend, which resets the
 * select registers.
 */
877 restore_counter_groups(void)
879 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
880 struct counter_group
*group
= &dev
.groups
[i
];
883 /* NOTE skip CP the first CP counter */
887 for (; j
< group
->group
->num_counters
; j
++) {
888 select_counter(group
, j
, group
->counter
[j
].select_val
);
/*
 * NOTE(review): garbled fragment, kept byte-identical. Binds each generation
 * counter-group description to its runtime state: accumulates max_rows
 * (counters + header per group), computes per-counter mmio pointers into the
 * dev.io mapping (register offsets are in dwords, hence * 4), assigns a
 * default select_val of j, and widens ctr_width to fit the longest countable
 * name. Groups with <= 1 counter appear to be special-cased (CP's hidden
 * always-count counter) — the affected lines are missing from this view.
 */
894 setup_counter_groups(const struct fd_perfcntr_group
*groups
)
896 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
897 struct counter_group
*group
= &dev
.groups
[i
];
899 group
->group
= &groups
[i
];
901 max_rows
+= group
->group
->num_counters
+ 1;
903 /* the first CP counter is hidden: */
906 if (group
->group
->num_counters
<= 1)
910 for (unsigned j
= 0; j
< group
->group
->num_counters
; j
++) {
911 group
->counter
[j
].counter
= &group
->group
->counters
[j
];
913 group
->counter
[j
].val_hi
= dev
.io
+ (group
->counter
[j
].counter
->counter_reg_hi
* 4);
914 group
->counter
[j
].val_lo
= dev
.io
+ (group
->counter
[j
].counter
->counter_reg_lo
* 4);
916 group
->counter
[j
].select_val
= j
;
919 for (unsigned j
= 0; j
< group
->group
->num_countables
; j
++) {
920 ctr_width
= MAX2(ctr_width
, strlen(group
->group
->countables
[j
].name
) + 1);
/*
 * NOTE(review): garbled fragment of config_save(), kept byte-identical.
 * Persists each counter's select_val into a libconfig tree — one section per
 * group, one "counter<N>" int per counter (skipping the first CP counter) —
 * and writes it to "fdperf.cfg". The return value of
 * config_setting_lookup() is used without a visible NULL check; presumably
 * config_restore() pre-created the settings — TODO confirm.
 */
926 * configuration / persistence
930 static config_setting_t
*setting
;
935 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
936 struct counter_group
*group
= &dev
.groups
[i
];
939 /* NOTE skip CP the first CP counter */
943 config_setting_t
*sect
=
944 config_setting_get_member(setting
, group
->group
->name
);
946 for (; j
< group
->group
->num_counters
; j
++) {
947 char name
[] = "counter0000";
948 sprintf(name
, "counter%d", j
);
949 config_setting_t
*s
=
950 config_setting_lookup(sect
, name
);
951 config_setting_set_int(s
, group
->counter
[j
].select_val
);
955 config_write_file(&cfg
, "fdperf.cfg");
/*
 * NOTE(review): garbled fragment of config_restore(), kept byte-identical.
 * Reads "fdperf.cfg" (warning, not failing, if absent), locates or creates a
 * per-chip-generation group ("aNxx"), then for each counter group locates or
 * creates its section and per-counter int settings, applying any saved
 * selection via select_counter(). This pre-creation is what lets
 * config_save() skip NULL checks on lookup — presumably; TODO confirm.
 */
965 /* Read the file. If there is an error, report it and exit. */
966 if(!config_read_file(&cfg
, "fdperf.cfg")) {
967 warn("could not restore settings");
970 config_setting_t
*root
= config_root_setting(&cfg
);
972 /* per device settings: */
973 (void) asprintf(&str
, "a%dxx", dev
.chipid
>> 24);
974 setting
= config_setting_get_member(root
, str
);
976 setting
= config_setting_add(root
, str
, CONFIG_TYPE_GROUP
);
979 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
980 struct counter_group
*group
= &dev
.groups
[i
];
983 /* NOTE skip CP the first CP counter */
987 config_setting_t
*sect
=
988 config_setting_get_member(setting
, group
->group
->name
);
991 sect
= config_setting_add(setting
, group
->group
->name
,
995 for (; j
< group
->group
->num_counters
; j
++) {
996 char name
[] = "counter0000";
997 sprintf(name
, "counter%d", j
);
998 config_setting_t
*s
= config_setting_lookup(sect
, name
);
1000 config_setting_add(sect
, name
, CONFIG_TYPE_INT
);
1003 select_counter(group
, j
, config_setting_get_int(s
));
/*
 * NOTE(review): garbled fragment of main(), kept byte-identical. Looks up
 * the generation's perfcntr group table (chip major * 100), failing if
 * unsupported; allocates the zeroed runtime group array; sets LC_NUMERIC so
 * the "%'.2f" thousands-grouping in redraw_counter_value_raw works; then
 * wires up groups and restores the saved counter selections. Device
 * discovery and the UI-loop call are among the missing lines.
 */
1013 main(int argc
, char **argv
)
1017 const struct fd_perfcntr_group
*groups
;
1018 groups
= fd_perfcntrs((dev
.chipid
>> 24) * 100, &dev
.ngroups
);
1020 errx(1, "no perfcntr support");
1023 dev
.groups
= calloc(dev
.ngroups
, sizeof(struct counter_group
));
1025 setlocale(LC_NUMERIC
, "en_US.UTF-8");
1027 setup_counter_groups(groups
);
1028 restore_counter_groups();