2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <arpa/inet.h>
35 #include <sys/types.h>
41 #include <libconfig.h>
43 #include "drm/freedreno_drmif.h"
44 #include "drm/freedreno_ringbuffer.h"
46 #include "freedreno_perfcntr.h"
#define MAX_CNTR_PER_GROUP 24

/* NOTE first counter group should always be CP, since we unconditionally
 * use CP counter to measure the gpu freq.
 */

/* Runtime state for one hardware counter group (CP, RB, SP, ...).
 * Pairs the static per-generation description (fd_perfcntr_group) with
 * the live sampling state for each counter in the group.
 */
struct counter_group {
	const struct fd_perfcntr_group *group;

	struct {
		const struct fd_perfcntr_counter *counter;
		/* currently programmed countable selector: */
		uint16_t select_val;
		/* mmap'd counter registers (hi/lo 32b halves): */
		volatile uint32_t *val_hi;
		volatile uint32_t *val_lo;
	} counter[MAX_CNTR_PER_GROUP];

	/* last sample time: */
	uint32_t stime[MAX_CNTR_PER_GROUP];
	/* for now just care about the low 32b value.. at least then we don't
	 * have to really care that we can't sample both hi and lo regs at the
	 * same time:
	 */
	uint32_t last[MAX_CNTR_PER_GROUP];
	/* current value, ie. by how many did the counter increase in last
	 * sampling period divided by the sampling period:
	 */
	float current[MAX_CNTR_PER_GROUP];
	/* name of currently selected counters (for UI): */
	const char *label[MAX_CNTR_PER_GROUP];
};
81 int address_cells
, size_cells
;
88 /* per-generation table of counters: */
90 struct counter_group
*groups
;
91 /* drm device (for writing select regs via ring): */
92 struct fd_device
*dev
;
94 struct fd_submit
*submit
;
95 struct fd_ringbuffer
*ring
;
98 static void config_save(void);
99 static void config_restore(void);
108 readfile(const char *path
, int *sz
)
113 fd
= open(path
, O_RDONLY
);
118 buf
= realloc(buf
, n
+ CHUNKSIZE
);
119 ret
= read(fd
, buf
+ n
, CHUNKSIZE
);
124 } else if (ret
< CHUNKSIZE
) {
138 clock_gettime(CLOCK_MONOTONIC
, &ts
);
139 return (ts
.tv_sec
* 1000000) + (ts
.tv_nsec
/ 1000);
/* Difference b - a between two samples of a free-running 32b counter.
 * NOTE(review): on wrap this yields one less than the true modular
 * difference (0xffffffff - a + b vs + b + 1); kept as-is since callers
 * only use it for approximate rate calculation.
 */
static uint32_t
delta(uint32_t a, uint32_t b)
{
	/* deal with rollover: */
	if (a > b)
		return 0xffffffff - a + b;
	else
		return b - a;
}
153 * TODO de-duplicate OUT_RING() and friends
156 #define CP_WAIT_FOR_IDLE 38
157 #define CP_TYPE0_PKT 0x00000000
158 #define CP_TYPE3_PKT 0xc0000000
159 #define CP_TYPE4_PKT 0x40000000
160 #define CP_TYPE7_PKT 0x70000000
163 OUT_RING(struct fd_ringbuffer
*ring
, uint32_t data
)
165 *(ring
->cur
++) = data
;
169 OUT_PKT0(struct fd_ringbuffer
*ring
, uint16_t regindx
, uint16_t cnt
)
171 OUT_RING(ring
, CP_TYPE0_PKT
| ((cnt
-1) << 16) | (regindx
& 0x7FFF));
175 OUT_PKT3(struct fd_ringbuffer
*ring
, uint8_t opcode
, uint16_t cnt
)
177 OUT_RING(ring
, CP_TYPE3_PKT
| ((cnt
-1) << 16) | ((opcode
& 0xFF) << 8));
182 * Starting with a5xx, pkt4/pkt7 are used instead of pkt0/pkt3
/* Returns the odd-parity bit for 'val': 1 when the popcount of val is
 * even (so that val plus the parity bit has odd parity).
 */
static inline unsigned
_odd_parity_bit(unsigned val)
{
	/* See: http://graphics.stanford.edu/~seander/bithacks.html#ParityParallel
	 * note that we want odd parity so 0x6996 is inverted.
	 */
	/* fold all 32 bits down into the low nibble before indexing the
	 * 16-entry parity lookup constant (without this, shifts of >= 32
	 * would be undefined behavior):
	 */
	val ^= val >> 16;
	val ^= val >> 8;
	val ^= val >> 4;
	val &= 0xf;
	return (~0x6996 >> val) & 1;
}
199 OUT_PKT4(struct fd_ringbuffer
*ring
, uint16_t regindx
, uint16_t cnt
)
201 OUT_RING(ring
, CP_TYPE4_PKT
| cnt
|
202 (_odd_parity_bit(cnt
) << 7) |
203 ((regindx
& 0x3ffff) << 8) |
204 ((_odd_parity_bit(regindx
) << 27)));
208 OUT_PKT7(struct fd_ringbuffer
*ring
, uint8_t opcode
, uint16_t cnt
)
210 OUT_RING(ring
, CP_TYPE7_PKT
| cnt
|
211 (_odd_parity_bit(cnt
) << 15) |
212 ((opcode
& 0x7f) << 16) |
213 ((_odd_parity_bit(opcode
) << 23)));
217 * code to find stuff in /proc/device-tree:
219 * NOTE: if we sampled the counters from the cmdstream, we could avoid needing
220 * /dev/mem and /proc/device-tree crawling. OTOH when the GPU is heavily loaded
221 * we would be competing with whatever else is using the GPU.
225 readdt(const char *node
)
231 asprintf(&path
, "%s/%s", dev
.dtnode
, node
);
232 buf
= readfile(path
, &sz
);
239 find_freqs_fn(const char *fpath
, const struct stat
*sb
, int typeflag
, struct FTW
*ftwbuf
)
241 const char *fname
= fpath
+ ftwbuf
->base
;
244 if (strcmp(fname
, "qcom,gpu-freq") == 0) {
245 uint32_t *buf
= readfile(fpath
, &sz
);
246 uint32_t freq
= ntohl(buf
[0]);
248 dev
.max_freq
= MAX2(dev
.max_freq
, freq
);
249 dev
.min_freq
= MIN2(dev
.min_freq
, freq
);
264 asprintf(&path
, "%s/%s", dev
.dtnode
, "qcom,gpu-pwrlevels");
266 ret
= nftw(path
, find_freqs_fn
, 64, 0);
268 err(1, "could not find power levels");
274 find_device_fn(const char *fpath
, const struct stat
*sb
, int typeflag
, struct FTW
*ftwbuf
)
276 const char *fname
= fpath
+ ftwbuf
->base
;
279 if (strcmp(fname
, "compatible") == 0) {
280 char *str
= readfile(fpath
, &sz
);
281 if ((strcmp(str
, "qcom,adreno-3xx") == 0) ||
282 (strcmp(str
, "qcom,kgsl-3d0") == 0) ||
283 (strstr(str
, "qcom,adreno") == str
)) {
284 int dlen
= strlen(fpath
) - strlen("/compatible");
285 dev
.dtnode
= malloc(dlen
+ 1);
286 memcpy(dev
.dtnode
, fpath
, dlen
);
287 printf("found dt node: %s\n", dev
.dtnode
);
289 char buf
[dlen
+ sizeof("/../#address-cells") + 1];
292 sprintf(buf
, "%s/../#address-cells", dev
.dtnode
);
293 val
= readfile(buf
, &sz
);
294 dev
.address_cells
= ntohl(*val
);
297 sprintf(buf
, "%s/../#size-cells", dev
.dtnode
);
298 val
= readfile(buf
, &sz
);
299 dev
.size_cells
= ntohl(*val
);
302 printf("#address-cells=%d, #size-cells=%d\n",
303 dev
.address_cells
, dev
.size_cells
);
320 ret
= nftw("/proc/device-tree/", find_device_fn
, 64, 0);
322 err(1, "could not find adreno gpu");
325 errx(1, "could not find qcom,adreno-3xx node");
327 fd
= open("/dev/dri/card0", O_RDWR
);
329 err(1, "could not open drm device");
331 dev
.dev
= fd_device_new(fd
);
332 dev
.pipe
= fd_pipe_new(dev
.dev
, FD_PIPE_3D
);
335 ret
= fd_pipe_get_param(dev
.pipe
, FD_CHIP_ID
, &val
);
337 err(1, "could not get gpu-id");
341 #define CHIP_FMT "d%d%d.%d"
342 #define CHIP_ARGS(chipid) \
343 ((chipid) >> 24) & 0xff, \
344 ((chipid) >> 16) & 0xff, \
345 ((chipid) >> 8) & 0xff, \
346 ((chipid) >> 0) & 0xff
347 printf("device: a%"CHIP_FMT
"\n", CHIP_ARGS(dev
.chipid
));
349 b
= buf
= readdt("reg");
351 if (dev
.address_cells
== 2) {
352 uint32_t u
[2] = { ntohl(buf
[0]), ntohl(buf
[1]) };
353 dev
.base
= (((uint64_t)u
[0]) << 32) | u
[1];
356 dev
.base
= ntohl(buf
[0]);
360 if (dev
.size_cells
== 2) {
361 uint32_t u
[2] = { ntohl(buf
[0]), ntohl(buf
[1]) };
362 dev
.size
= (((uint64_t)u
[0]) << 32) | u
[1];
365 dev
.size
= ntohl(buf
[0]);
371 printf("i/o region at %08lx (size: %x)\n", dev
.base
, dev
.size
);
373 /* try MAX_FREQ first as that will work regardless of old dt
374 * dt bindings vs upstream bindings:
376 ret
= fd_pipe_get_param(dev
.pipe
, FD_MAX_FREQ
, &val
);
378 printf("falling back to parsing DT bindings for freq\n");
385 printf("min_freq=%u, max_freq=%u\n", dev
.min_freq
, dev
.max_freq
);
387 fd
= open("/dev/mem", O_RDWR
| O_SYNC
);
389 err(1, "could not open /dev/mem");
391 dev
.io
= mmap(0, dev
.size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
, fd
, dev
.base
);
393 err(1, "could not map device");
408 ret
= fd_submit_flush(dev
.submit
, -1, NULL
, NULL
);
410 errx(1, "submit failed: %d", ret
);
411 fd_ringbuffer_del(dev
.ring
);
412 fd_submit_del(dev
.submit
);
419 select_counter(struct counter_group
*group
, int ctr
, int n
)
421 assert(n
< group
->group
->num_countables
);
422 assert(ctr
< group
->group
->num_counters
);
424 group
->label
[ctr
] = group
->group
->countables
[n
].name
;
425 group
->counter
[ctr
].select_val
= n
;
428 dev
.submit
= fd_submit_new(dev
.pipe
);
429 dev
.ring
= fd_submit_new_ringbuffer(dev
.submit
, 0x1000,
430 FD_RINGBUFFER_PRIMARY
| FD_RINGBUFFER_GROWABLE
);
433 /* bashing select register directly while gpu is active will end
434 * in tears.. so we need to write it via the ring:
436 * TODO it would help startup time, if gpu is loaded, to batch
437 * all the initial writes and do a single flush.. although that
438 * makes things more complicated for capturing inital sample value
440 struct fd_ringbuffer
*ring
= dev
.ring
;
441 switch (dev
.chipid
>> 24) {
444 OUT_PKT3(ring
, CP_WAIT_FOR_IDLE
, 1);
445 OUT_RING(ring
, 0x00000000);
447 if (group
->group
->counters
[ctr
].enable
) {
448 OUT_PKT0(ring
, group
->group
->counters
[ctr
].enable
, 1);
452 if (group
->group
->counters
[ctr
].clear
) {
453 OUT_PKT0(ring
, group
->group
->counters
[ctr
].clear
, 1);
456 OUT_PKT0(ring
, group
->group
->counters
[ctr
].clear
, 1);
460 OUT_PKT0(ring
, group
->group
->counters
[ctr
].select_reg
, 1);
463 if (group
->group
->counters
[ctr
].enable
) {
464 OUT_PKT0(ring
, group
->group
->counters
[ctr
].enable
, 1);
471 OUT_PKT7(ring
, CP_WAIT_FOR_IDLE
, 0);
473 if (group
->group
->counters
[ctr
].enable
) {
474 OUT_PKT4(ring
, group
->group
->counters
[ctr
].enable
, 1);
478 if (group
->group
->counters
[ctr
].clear
) {
479 OUT_PKT4(ring
, group
->group
->counters
[ctr
].clear
, 1);
482 OUT_PKT4(ring
, group
->group
->counters
[ctr
].clear
, 1);
486 OUT_PKT4(ring
, group
->group
->counters
[ctr
].select_reg
, 1);
489 if (group
->group
->counters
[ctr
].enable
) {
490 OUT_PKT4(ring
, group
->group
->counters
[ctr
].enable
, 1);
497 group
->last
[ctr
] = *group
->counter
[ctr
].val_lo
;
498 group
->stime
[ctr
] = gettime_us();
502 resample_counter(struct counter_group
*group
, int ctr
)
504 uint32_t val
= *group
->counter
[ctr
].val_lo
;
505 uint32_t t
= gettime_us();
506 uint32_t dt
= delta(group
->stime
[ctr
], t
);
507 uint32_t dval
= delta(group
->last
[ctr
], val
);
508 group
->current
[ctr
] = (float)dval
* 1000000.0 / (float)dt
;
509 group
->last
[ctr
] = val
;
510 group
->stime
[ctr
] = t
;
513 #define REFRESH_MS 500
515 /* sample all the counters: */
519 static uint64_t last_time
;
520 uint64_t current_time
= gettime_us();
522 if ((current_time
- last_time
) < (REFRESH_MS
* 1000 / 2))
525 last_time
= current_time
;
527 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
528 struct counter_group
*group
= &dev
.groups
[i
];
529 for (unsigned j
= 0; j
< group
->group
->num_counters
; j
++) {
530 resample_counter(group
, j
);
539 #define COLOR_GROUP_HEADER 1
540 #define COLOR_FOOTER 2
541 #define COLOR_INVERSE 3
544 static int ctr_width
;
545 static int max_rows
, current_cntr
= 1;
548 redraw_footer(WINDOW
*win
)
553 n
= asprintf(&footer
, " fdperf: a%"CHIP_FMT
" (%.2fMHz..%.2fMHz)",
554 CHIP_ARGS(dev
.chipid
),
555 ((float)dev
.min_freq
) / 1000000.0,
556 ((float)dev
.max_freq
) / 1000000.0);
558 wmove(win
, h
- 1, 0);
559 wattron(win
, COLOR_PAIR(COLOR_FOOTER
));
560 waddstr(win
, footer
);
561 whline(win
, ' ', w
- n
);
562 wattroff(win
, COLOR_PAIR(COLOR_FOOTER
));
568 redraw_group_header(WINDOW
*win
, int row
, const char *name
)
571 wattron(win
, A_BOLD
);
572 wattron(win
, COLOR_PAIR(COLOR_GROUP_HEADER
));
574 whline(win
, ' ', w
- strlen(name
));
575 wattroff(win
, COLOR_PAIR(COLOR_GROUP_HEADER
));
576 wattroff(win
, A_BOLD
);
580 redraw_counter_label(WINDOW
*win
, int row
, const char *name
, bool selected
)
582 int n
= strlen(name
);
583 assert(n
<= ctr_width
);
585 whline(win
, ' ', ctr_width
- n
);
586 wmove(win
, row
, ctr_width
- n
);
588 wattron(win
, COLOR_PAIR(COLOR_INVERSE
));
591 wattroff(win
, COLOR_PAIR(COLOR_INVERSE
));
596 redraw_counter_value_cycles(WINDOW
*win
, float val
)
599 int x
= getcurx(win
);
600 int valwidth
= w
- x
;
603 /* convert to fraction of max freq: */
604 val
= val
/ (float)dev
.max_freq
;
606 /* figure out percentage-bar width: */
607 barwidth
= (int)(val
* valwidth
);
609 /* sometimes things go over 100%.. idk why, could be
610 * things running faster than base clock, or counter
611 * summing up cycles in multiple cores?
613 barwidth
= MIN2(barwidth
, valwidth
- 1);
615 n
= asprintf(&str
, "%.2f%%", 100.0 * val
);
616 wattron(win
, COLOR_PAIR(COLOR_INVERSE
));
617 waddnstr(win
, str
, barwidth
);
619 whline(win
, ' ', barwidth
- n
);
620 wmove(win
, getcury(win
), x
+ barwidth
);
622 wattroff(win
, COLOR_PAIR(COLOR_INVERSE
));
624 waddstr(win
, str
+ barwidth
);
625 whline(win
, ' ', w
- getcurx(win
));
631 redraw_counter_value_raw(WINDOW
*win
, float val
)
634 asprintf(&str
, "%'.2f", val
);
636 whline(win
, ' ', w
- getcurx(win
));
641 redraw_counter(WINDOW
*win
, int row
, struct counter_group
*group
,
642 int ctr
, bool selected
)
644 redraw_counter_label(win
, row
, group
->label
[ctr
], selected
);
646 /* quick hack, if the label has "CYCLE" in the name, it is
647 * probably a cycle counter ;-)
648 * Perhaps add more info in rnndb schema to know how to
649 * treat individual counters (ie. which are cycles, and
650 * for those we want to present as a percentage do we
651 * need to scale the result.. ie. is it running at some
652 * multiple or divisor of core clk, etc)
654 * TODO it would be much more clever to get this from xml
655 * Also.. in some cases I think we want to know how many
656 * units the counter is counting for, ie. if a320 has 2x
657 * shader as a306 we might need to scale the result..
659 if (strstr(group
->label
[ctr
], "CYCLE") ||
660 strstr(group
->label
[ctr
], "BUSY") ||
661 strstr(group
->label
[ctr
], "IDLE"))
662 redraw_counter_value_cycles(win
, group
->current
[ctr
]);
664 redraw_counter_value_raw(win
, group
->current
[ctr
]);
670 static int scroll
= 0;
678 if ((current_cntr
- scroll
) > (max
- 1)) {
679 scroll
= current_cntr
- (max
- 1);
680 } else if ((current_cntr
- 1) < scroll
) {
681 scroll
= current_cntr
- 1;
684 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
685 struct counter_group
*group
= &dev
.groups
[i
];
688 /* NOTE skip the first CP counter */
692 if (j
< group
->group
->num_counters
) {
693 if ((scroll
<= row
) && ((row
- scroll
) < max
))
694 redraw_group_header(win
, row
- scroll
, group
->group
->name
);
698 for (; j
< group
->group
->num_counters
; j
++) {
699 if ((scroll
<= row
) && ((row
- scroll
) < max
))
700 redraw_counter(win
, row
- scroll
, group
, j
, row
== current_cntr
);
705 /* convert back to physical (unscrolled) offset: */
708 redraw_group_header(win
, row
, "Status");
711 /* Draw GPU freq row: */
712 redraw_counter_label(win
, row
, "Freq (MHz)", false);
713 redraw_counter_value_raw(win
, dev
.groups
[0].current
[0] / 1000000.0);
721 static struct counter_group
*
722 current_counter(int *ctr
)
726 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
727 struct counter_group
*group
= &dev
.groups
[i
];
730 /* NOTE skip the first CP counter (CP_ALWAYS_COUNT) */
734 /* account for group header: */
735 if (j
< group
->group
->num_counters
) {
736 /* cannot select group header.. return null to indicate this
739 if (n
== current_cntr
)
745 for (; j
< group
->group
->num_counters
; j
++) {
746 if (n
== current_cntr
) {
763 struct counter_group
*group
;
764 int cnt
, current
= 0, scroll
;
766 /* figure out dialog size: */
768 int dw
= ctr_width
+ 2;
770 group
= current_counter(&cnt
);
772 /* find currently selected idx (note there can be discontinuities
773 * so the selected value does not map 1:1 to current idx)
775 uint32_t selected
= group
->counter
[cnt
].select_val
;
776 for (int i
= 0; i
< group
->group
->num_countables
; i
++) {
777 if (group
->group
->countables
[i
].selector
== selected
) {
783 /* scrolling offset, if dialog is too small for all the choices: */
786 dialog
= newwin(dh
, dw
, (h
-dh
)/2, (w
-dw
)/2);
789 keypad(dialog
, TRUE
);
792 int max
= MIN2(dh
- 2, group
->group
->num_countables
);
795 if ((current
- scroll
) >= (dh
- 3)) {
796 scroll
= current
- (dh
- 3);
797 } else if (current
< scroll
) {
801 for (int i
= 0; i
< max
; i
++) {
803 wmove(dialog
, i
+1, 1);
805 assert (n
< group
->group
->num_countables
);
806 selector
= group
->group
->countables
[n
].selector
;
807 wattron(dialog
, COLOR_PAIR(COLOR_INVERSE
));
809 if (n
< group
->group
->num_countables
)
810 waddstr(dialog
, group
->group
->countables
[n
].name
);
811 whline(dialog
, ' ', dw
- getcurx(dialog
) - 1);
813 wattroff(dialog
, COLOR_PAIR(COLOR_INVERSE
));
816 assert (selector
>= 0);
818 switch (wgetch(dialog
)) {
820 current
= MAX2(0, current
- 1);
823 current
= MIN2(group
->group
->num_countables
- 1, current
+ 1);
827 /* select new sampler */
828 select_counter(group
, cnt
, selector
);
843 wborder(dialog
, ' ', ' ', ' ',' ',' ',' ',' ',' ');
848 scroll_cntr(int amount
)
851 current_cntr
= MAX2(1, current_cntr
+ amount
);
852 if (current_counter(NULL
) == NULL
) {
853 current_cntr
= MAX2(1, current_cntr
- 1);
856 current_cntr
= MIN2(max_rows
- 1, current_cntr
+ amount
);
857 if (current_counter(NULL
) == NULL
)
858 current_cntr
= MIN2(max_rows
- 1, current_cntr
+ 1);
873 wtimeout(mainwin
, REFRESH_MS
);
875 keypad(mainwin
, TRUE
);
878 init_pair(COLOR_GROUP_HEADER
, COLOR_WHITE
, COLOR_GREEN
);
879 init_pair(COLOR_FOOTER
, COLOR_WHITE
, COLOR_BLUE
);
880 init_pair(COLOR_INVERSE
, COLOR_BLACK
, COLOR_WHITE
);
883 switch (wgetch(mainwin
)) {
890 case KEY_NPAGE
: /* page-down */
891 /* TODO figure out # of rows visible? */
894 case KEY_PPAGE
: /* page-up */
895 /* TODO figure out # of rows visible? */
912 /* restore settings.. maybe we need an atexit()??*/
920 setup_counter_groups(const struct fd_perfcntr_group
*groups
)
922 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
923 struct counter_group
*group
= &dev
.groups
[i
];
925 group
->group
= &groups
[i
];
927 max_rows
+= group
->group
->num_counters
+ 1;
929 /* the first CP counter is hidden: */
932 if (group
->group
->num_counters
<= 1)
936 for (unsigned j
= 0; j
< group
->group
->num_counters
; j
++) {
937 group
->counter
[j
].counter
= &group
->group
->counters
[j
];
939 group
->counter
[j
].val_hi
= dev
.io
+ (group
->counter
[j
].counter
->counter_reg_hi
* 4);
940 group
->counter
[j
].val_lo
= dev
.io
+ (group
->counter
[j
].counter
->counter_reg_lo
* 4);
942 select_counter(group
, j
, j
);
945 for (unsigned j
= 0; j
< group
->group
->num_countables
; j
++) {
946 ctr_width
= MAX2(ctr_width
, strlen(group
->group
->countables
[j
].name
) + 1);
952 * configuration / persistence
956 static config_setting_t
*setting
;
961 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
962 struct counter_group
*group
= &dev
.groups
[i
];
965 /* NOTE skip the first CP counter */
969 config_setting_t
*sect
=
970 config_setting_get_member(setting
, group
->group
->name
);
972 for (; j
< group
->group
->num_counters
; j
++) {
973 char name
[] = "counter0000";
974 sprintf(name
, "counter%d", j
);
975 config_setting_t
*s
=
976 config_setting_lookup(sect
, name
);
977 config_setting_set_int(s
, group
->counter
[j
].select_val
);
981 config_write_file(&cfg
, "fdperf.cfg");
991 /* Read the file. If there is an error, report it and exit. */
992 if(!config_read_file(&cfg
, "fdperf.cfg")) {
993 warn("could not restore settings");
996 config_setting_t
*root
= config_root_setting(&cfg
);
998 /* per device settings: */
999 asprintf(&str
, "a%dxx", dev
.chipid
>> 24);
1000 setting
= config_setting_get_member(root
, str
);
1002 setting
= config_setting_add(root
, str
, CONFIG_TYPE_GROUP
);
1005 for (unsigned i
= 0; i
< dev
.ngroups
; i
++) {
1006 struct counter_group
*group
= &dev
.groups
[i
];
1009 /* NOTE skip the first CP counter */
1013 config_setting_t
*sect
=
1014 config_setting_get_member(setting
, group
->group
->name
);
1017 sect
= config_setting_add(setting
, group
->group
->name
,
1021 for (; j
< group
->group
->num_counters
; j
++) {
1022 char name
[] = "counter0000";
1023 sprintf(name
, "counter%d", j
);
1024 config_setting_t
*s
= config_setting_lookup(sect
, name
);
1026 config_setting_add(sect
, name
, CONFIG_TYPE_INT
);
1029 select_counter(group
, j
, config_setting_get_int(s
));
1039 main(int argc
, char **argv
)
1043 const struct fd_perfcntr_group
*groups
;
1044 groups
= fd_perfcntrs((dev
.chipid
>> 24) * 100, &dev
.ngroups
);
1046 errx(1, "no perfcntr support");
1049 dev
.groups
= calloc(dev
.ngroups
, sizeof(struct counter_group
));
1051 setup_counter_groups(groups
);