/**************************************************************************
 *
 * Copyright 2013 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* This file contains code for reading CPU load for displaying on the HUD.
 */

#include "hud/hud_private.h"
#include "util/os_time.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_queue.h"
#include <stdio.h>
#include <inttypes.h>
#ifdef PIPE_OS_WINDOWS
#include <windows.h>
#endif
#ifdef PIPE_OS_FREEBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/resource.h>
#endif


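/* Each platform variant of get_cpu_stats() below returns a pair of
 * monotonically increasing counters, busy time and total time, whose deltas
 * give the CPU load for the given CPU (or for all CPUs when cpu_index is
 * ALL_CPUS).  Windows derives them from the current process times (see the
 * XXX notes there), FreeBSD reads the kern.cp_time(s) sysctls, and the
 * fallback parses /proc/stat.  The units differ per platform, but they
 * cancel out when the load percentage is computed.
 */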
#ifdef PIPE_OS_WINDOWS

static inline uint64_t
filetime_to_scalar(FILETIME ft)
{
   ULARGE_INTEGER uli;
   uli.LowPart = ft.dwLowDateTime;
   uli.HighPart = ft.dwHighDateTime;
   return uli.QuadPart;
}

static boolean
get_cpu_stats(unsigned cpu_index, uint64_t *busy_time, uint64_t *total_time)
{
   SYSTEM_INFO sysInfo;
   FILETIME ftNow, ftCreation, ftExit, ftKernel, ftUser;

   GetSystemInfo(&sysInfo);
   assert(sysInfo.dwNumberOfProcessors >= 1);
   if (cpu_index != ALL_CPUS && cpu_index >= sysInfo.dwNumberOfProcessors) {
      /* Tell hud_get_num_cpus there are only this many CPUs. */
      return FALSE;
   }

   /* Get accumulated user and sys time for all threads */
   if (!GetProcessTimes(GetCurrentProcess(), &ftCreation, &ftExit,
                        &ftKernel, &ftUser))
      return FALSE;

   GetSystemTimeAsFileTime(&ftNow);

   *busy_time = filetime_to_scalar(ftUser) + filetime_to_scalar(ftKernel);
   *total_time = filetime_to_scalar(ftNow) - filetime_to_scalar(ftCreation);

   /* busy_time already has the time across all CPUs.
    * XXX: if we want 100% to mean one CPU, 200% two CPUs, eliminate the
    * following line.
    */
   *total_time *= sysInfo.dwNumberOfProcessors;

   /* XXX: we ignore cpu_index, i.e. we assume that the individual CPU usage
    * and the system usage are one and the same.
    */
   return TRUE;
}

#elif defined(PIPE_OS_FREEBSD)

static boolean
get_cpu_stats(unsigned cpu_index, uint64_t *busy_time, uint64_t *total_time)
{
   long cp_time[CPUSTATES];
   size_t len;

   if (cpu_index == ALL_CPUS) {
      len = sizeof(cp_time);

      if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == -1)
         return FALSE;
   } else {
      long *cp_times = NULL;

      if (sysctlbyname("kern.cp_times", NULL, &len, NULL, 0) == -1)
         return FALSE;

      if (len < (cpu_index + 1) * sizeof(cp_time))
         return FALSE;

      cp_times = malloc(len);
      if (!cp_times)
         return FALSE;

      if (sysctlbyname("kern.cp_times", cp_times, &len, NULL, 0) == -1) {
         free(cp_times);
         return FALSE;
      }

      memcpy(cp_time, cp_times + (cpu_index * CPUSTATES),
             sizeof(cp_time));
      free(cp_times);
   }

   *busy_time = cp_time[CP_USER] + cp_time[CP_NICE] +
                cp_time[CP_SYS] + cp_time[CP_INTR];

   *total_time = *busy_time + cp_time[CP_IDLE];

   return TRUE;
}

#else

static boolean
get_cpu_stats(unsigned cpu_index, uint64_t *busy_time, uint64_t *total_time)
{
   char cpuname[32];
   char line[1024];
   FILE *f;

   if (cpu_index == ALL_CPUS)
      strcpy(cpuname, "cpu");
   else
      sprintf(cpuname, "cpu%u", cpu_index);

   f = fopen("/proc/stat", "r");
   if (!f)
      return FALSE;

   while (!feof(f) && fgets(line, sizeof(line), f)) {
      if (strstr(line, cpuname) == line) {
         uint64_t v[12];
         int i, num;

         num = sscanf(line,
                      "%s %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64
                      " %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64
                      " %"PRIu64" %"PRIu64"",
                      cpuname, &v[0], &v[1], &v[2], &v[3], &v[4], &v[5],
                      &v[6], &v[7], &v[8], &v[9], &v[10], &v[11]);
         if (num < 5) {
            fclose(f);
            return FALSE;
         }

         /* user + nice + system */
         *busy_time = v[0] + v[1] + v[2];
         *total_time = *busy_time;

         /* ... + idle + iowait + irq + softirq + ... */
         for (i = 3; i < num-1; i++) {
            *total_time += v[i];
         }
         fclose(f);
         return TRUE;
      }
   }
   fclose(f);
   return FALSE;
}
#endif


struct cpu_info {
   unsigned cpu_index;
   uint64_t last_cpu_busy, last_cpu_total, last_time;
};

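/* Once per pane period, sample get_cpu_stats() and add the busy-time delta
 * divided by the total-time delta, as a percentage, to the graph.
 */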
static void
query_cpu_load(struct hud_graph *gr, struct pipe_context *pipe)
{
   struct cpu_info *info = gr->query_data;
   uint64_t now = os_time_get();

   if (info->last_time) {
      if (info->last_time + gr->pane->period <= now) {
         uint64_t cpu_busy, cpu_total;
         double cpu_load;

         get_cpu_stats(info->cpu_index, &cpu_busy, &cpu_total);

         cpu_load = (cpu_busy - info->last_cpu_busy) * 100 /
                    (double)(cpu_total - info->last_cpu_total);
         hud_graph_add_value(gr, cpu_load);

         info->last_cpu_busy = cpu_busy;
         info->last_cpu_total = cpu_total;
         info->last_time = now;
      }
   }
   else {
      /* initialize */
      info->last_time = now;
      get_cpu_stats(info->cpu_index, &info->last_cpu_busy,
                    &info->last_cpu_total);
   }
}

static void
free_query_data(void *p, struct pipe_context *pipe)
{
   FREE(p);
}

void
hud_cpu_graph_install(struct hud_pane *pane, unsigned cpu_index)
{
   struct hud_graph *gr;
   struct cpu_info *info;
   uint64_t busy, total;

   /* see if the cpu exists */
   if (cpu_index != ALL_CPUS && !get_cpu_stats(cpu_index, &busy, &total)) {
      return;
   }

   gr = CALLOC_STRUCT(hud_graph);
   if (!gr)
      return;

   if (cpu_index == ALL_CPUS)
      strcpy(gr->name, "cpu");
   else
      sprintf(gr->name, "cpu%u", cpu_index);

   gr->query_data = CALLOC_STRUCT(cpu_info);
   if (!gr->query_data) {
      FREE(gr);
      return;
   }

   gr->query_new_value = query_cpu_load;

   /* Don't use free() as our callback as that messes up Gallium's
    * memory debugger. Use simple free_query_data() wrapper.
    */
   gr->free_query_data = free_query_data;

   info = gr->query_data;
   info->cpu_index = cpu_index;

   hud_pane_add_graph(pane, gr);
   hud_pane_set_max_value(pane, 100);
}

int
hud_get_num_cpus(void)
{
   uint64_t busy, total;
   int i = 0;

   while (get_cpu_stats(i, &busy, &total))
      i++;

   return i;
}

struct thread_info {
   bool main_thread;
   int64_t last_time;
   int64_t last_thread_time;
};

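/* Once per pane period, compute how much of the elapsed wall-clock time the
 * monitored thread (the API thread, or thread 0 of the monitored queue)
 * spent on the CPU, and add that percentage to the graph.
 */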
static void
query_api_thread_busy_status(struct hud_graph *gr, struct pipe_context *pipe)
{
   struct thread_info *info = gr->query_data;
   int64_t now = os_time_get_nano();

   if (info->last_time) {
      if (info->last_time + gr->pane->period*1000 <= now) {
         int64_t thread_now;

         if (info->main_thread) {
            thread_now = pipe_current_thread_get_time_nano();
         } else {
            struct util_queue_monitoring *mon = gr->pane->hud->monitored_queue;

            if (mon && mon->queue)
               thread_now = util_queue_get_thread_time_nano(mon->queue, 0);
            else
               thread_now = 0;
         }

         double percent = (thread_now - info->last_thread_time) * 100.0 /
                          (now - info->last_time);

         /* Check whether the context switched to a different thread, so that
          * we don't show a random value. A new thread has a different thread
          * clock, which can make "percent" spike far above 100.
          */
         if (percent > 100.0)
            percent = 0.0;
         hud_graph_add_value(gr, percent);

         info->last_thread_time = thread_now;
         info->last_time = now;
      }
   } else {
      /* initialize */
      info->last_time = now;
      info->last_thread_time = pipe_current_thread_get_time_nano();
   }
}

void
hud_thread_busy_install(struct hud_pane *pane, const char *name, bool main)
{
   struct hud_graph *gr;

   gr = CALLOC_STRUCT(hud_graph);
   if (!gr)
      return;

   strcpy(gr->name, name);

   gr->query_data = CALLOC_STRUCT(thread_info);
   if (!gr->query_data) {
      FREE(gr);
      return;
   }

   ((struct thread_info*)gr->query_data)->main_thread = main;
   gr->query_new_value = query_api_thread_busy_status;

   /* Don't use free() as our callback as that messes up Gallium's
    * memory debugger. Use simple free_query_data() wrapper.
    */
   gr->free_query_data = free_query_data;

   hud_pane_add_graph(pane, gr);
   hud_pane_set_max_value(pane, 100);
}

struct counter_info {
   enum hud_counter counter;
   unsigned last_value;
   int64_t last_time;
};

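/* Return the current value of the given counter from the monitored queue,
 * or 0 if no queue is being monitored.
 */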
static unsigned get_counter(struct hud_graph *gr, enum hud_counter counter)
{
   struct util_queue_monitoring *mon = gr->pane->hud->monitored_queue;

   if (!mon || !mon->queue)
      return 0;

   switch (counter) {
   case HUD_COUNTER_OFFLOADED:
      return mon->num_offloaded_items;
   case HUD_COUNTER_DIRECT:
      return mon->num_direct_items;
   case HUD_COUNTER_SYNCS:
      return mon->num_syncs;
   default:
      assert(0);
      return 0;
   }
}

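/* Once per pane period, add the increase of the selected counter since the
 * last sample to the graph.
 */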
static void
query_thread_counter(struct hud_graph *gr, struct pipe_context *pipe)
{
   struct counter_info *info = gr->query_data;
   int64_t now = os_time_get_nano();

   if (info->last_time) {
      if (info->last_time + gr->pane->period*1000 <= now) {
         unsigned current_value = get_counter(gr, info->counter);

         hud_graph_add_value(gr, current_value - info->last_value);
         info->last_value = current_value;
         info->last_time = now;
      }
   } else {
      /* initialize */
      info->last_value = get_counter(gr, info->counter);
      info->last_time = now;
   }
}

void hud_thread_counter_install(struct hud_pane *pane, const char *name,
                                enum hud_counter counter)
{
   struct hud_graph *gr = CALLOC_STRUCT(hud_graph);
   if (!gr)
      return;

   strcpy(gr->name, name);

   gr->query_data = CALLOC_STRUCT(counter_info);
   if (!gr->query_data) {
      FREE(gr);
      return;
   }

   ((struct counter_info*)gr->query_data)->counter = counter;
   gr->query_new_value = query_thread_counter;

   /* Don't use free() as our callback as that messes up Gallium's
    * memory debugger. Use simple free_query_data() wrapper.
    */
   gr->free_query_data = free_query_data;

   hud_pane_add_graph(pane, gr);
   hud_pane_set_max_value(pane, 100);
}