util: move u_cpu_detect to util
src/util/u_cpu_detect.c
/**************************************************************************
 *
 * Copyright 2008 Dennis Smit
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * CPU feature detection.
 *
 * @author Dennis Smit
 * @author Based on the work of Eric Anholt <anholt@FreeBSD.org>
 */

#include "pipe/p_config.h"

#include "util/u_debug.h"
#include "u_cpu_detect.h"
#include "c11/threads.h"

#if defined(PIPE_ARCH_PPC)
#if defined(PIPE_OS_APPLE)
#include <sys/sysctl.h>
#else
#include <signal.h>
#include <setjmp.h>
#endif
#endif

#if defined(PIPE_OS_NETBSD) || defined(PIPE_OS_OPENBSD)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#endif

#if defined(PIPE_OS_FREEBSD) || defined(PIPE_OS_DRAGONFLY)
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#if defined(PIPE_OS_LINUX)
#include <signal.h>
#include <fcntl.h>
#include <elf.h>
#endif

#ifdef PIPE_OS_UNIX
#include <unistd.h>
#endif

#if defined(HAS_ANDROID_CPUFEATURES)
#include <cpu-features.h>
#endif

#if defined(PIPE_OS_WINDOWS)
#include <windows.h>
#if defined(PIPE_CC_MSVC)
#include <intrin.h>
#endif
#endif


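/* In debug builds, setting GALLIUM_DUMP_CPU=1 in the environment makes
 * util_cpu_detect_once() print the detected capabilities.  The macro below
 * generates the debug_get_option_dump_cpu() helper used for that check.
 */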
#ifdef DEBUG
DEBUG_GET_ONCE_BOOL_OPTION(dump_cpu, "GALLIUM_DUMP_CPU", FALSE)
#endif


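/* Global capability table.  It is zeroed and filled exactly once by
 * util_cpu_detect_once(); callers must go through util_cpu_detect() before
 * reading any of its fields.
 */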
struct util_cpu_caps util_cpu_caps;

#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
static int has_cpuid(void);
#endif


#if defined(PIPE_ARCH_PPC) && !defined(PIPE_OS_APPLE)
static jmp_buf __lv_powerpc_jmpbuf;
static volatile sig_atomic_t __lv_powerpc_canjump = 0;

static void
sigill_handler(int sig)
{
   if (!__lv_powerpc_canjump) {
      signal (sig, SIG_DFL);
      raise (sig);
   }

   __lv_powerpc_canjump = 0;
   longjmp(__lv_powerpc_jmpbuf, 1);
}
#endif

#if defined(PIPE_ARCH_PPC)
static void
check_os_altivec_support(void)
{
#if defined(PIPE_OS_APPLE)
   int sels[2] = {CTL_HW, HW_VECTORUNIT};
   int has_vu = 0;
   size_t len = sizeof (has_vu);   /* sysctl() expects a size_t length */
   int err;

   err = sysctl(sels, 2, &has_vu, &len, NULL, 0);

   if (err == 0) {
      if (has_vu != 0) {
         util_cpu_caps.has_altivec = 1;
      }
   }
#else /* !PIPE_OS_APPLE */
   /* not on Apple/Darwin, do it the brute-force way */
   /* this is borrowed from the libmpeg2 library */
   signal(SIGILL, sigill_handler);
   if (setjmp(__lv_powerpc_jmpbuf)) {
      signal(SIGILL, SIG_DFL);
   } else {
      boolean enable_altivec = TRUE; /* Default: enable if available, and if not overridden */
      boolean enable_vsx = TRUE;
#ifdef DEBUG
      /* Disabling Altivec code generation is not the same as disabling VSX code generation,
       * which can be done simply by passing -mattr=-vsx to the LLVM compiler; cf.
       * lp_build_create_jit_compiler_for_module().
       * If you want to disable Altivec code generation, the best place to do it is here.
       */
      char *env_control = getenv("GALLIVM_ALTIVEC"); /* 1=enable (default); 0=disable */
      if (env_control && env_control[0] == '0') {
         enable_altivec = FALSE;
      }
#endif
      /* VSX instructions can be explicitly enabled/disabled via GALLIVM_VSX=1 or 0 */
      char *env_vsx = getenv("GALLIVM_VSX");
      if (env_vsx && env_vsx[0] == '0') {
         enable_vsx = FALSE;
      }
      if (enable_altivec) {
         __lv_powerpc_canjump = 1;

         __asm __volatile
            ("mtspr 256, %0\n\t"
             "vand %%v0, %%v0, %%v0"
             :
             : "r" (-1));

         util_cpu_caps.has_altivec = 1;

         if (enable_vsx) {
            __asm __volatile("xxland %vs0, %vs0, %vs0");
            util_cpu_caps.has_vsx = 1;
         }
         signal(SIGILL, SIG_DFL);
      } else {
         util_cpu_caps.has_altivec = 0;
      }
   }
#endif /* !PIPE_OS_APPLE */
}
#endif /* PIPE_ARCH_PPC */


#if defined(PIPE_ARCH_X86) || defined (PIPE_ARCH_X86_64)
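/* On 32-bit x86, CPUID support is detected by trying to toggle the ID bit
 * (bit 21) in EFLAGS; if the bit can be changed, CPUID is available.
 * 64-bit x86 CPUs always implement CPUID.
 */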
static int has_cpuid(void)
{
#if defined(PIPE_ARCH_X86)
#if defined(PIPE_CC_GCC)
   int a, c;

   __asm __volatile
      ("pushf\n"
       "popl %0\n"
       "movl %0, %1\n"
       "xorl $0x200000, %0\n"
       "push %0\n"
       "popf\n"
       "pushf\n"
       "popl %0\n"
       : "=a" (a), "=c" (c)
       :
       : "cc");

   return a != c;
#else
   /* FIXME */
   return 1;
#endif
#elif defined(PIPE_ARCH_X86_64)
   return 1;
#else
   return 0;
#endif
}


/**
 * @sa cpuid.h included in gcc-4.3 onwards.
 * @sa http://msdn.microsoft.com/en-us/library/hskdteyh.aspx
 */
static inline void
cpuid(uint32_t ax, uint32_t *p)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   __asm __volatile (
      "xchgl %%ebx, %1\n\t"
      "cpuid\n\t"
      "xchgl %%ebx, %1"
      : "=a" (p[0]),
        "=S" (p[1]),
        "=c" (p[2]),
        "=d" (p[3])
      : "0" (ax)
   );
#elif defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86_64)
   __asm __volatile (
      "cpuid\n\t"
      : "=a" (p[0]),
        "=b" (p[1]),
        "=c" (p[2]),
        "=d" (p[3])
      : "0" (ax)
   );
#elif defined(PIPE_CC_MSVC)
   __cpuid(p, ax);
#else
   p[0] = 0;
   p[1] = 0;
   p[2] = 0;
   p[3] = 0;
#endif
}

/**
 * @sa cpuid.h included in gcc-4.4 onwards.
 * @sa http://msdn.microsoft.com/en-us/library/hskdteyh%28v=vs.90%29.aspx
 */
static inline void
cpuid_count(uint32_t ax, uint32_t cx, uint32_t *p)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   __asm __volatile (
      "xchgl %%ebx, %1\n\t"
      "cpuid\n\t"
      "xchgl %%ebx, %1"
      : "=a" (p[0]),
        "=S" (p[1]),
        "=c" (p[2]),
        "=d" (p[3])
      : "0" (ax), "2" (cx)
   );
#elif defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86_64)
   __asm __volatile (
      "cpuid\n\t"
      : "=a" (p[0]),
        "=b" (p[1]),
        "=c" (p[2]),
        "=d" (p[3])
      : "0" (ax), "2" (cx)
   );
#elif defined(PIPE_CC_MSVC)
   __cpuidex(p, ax, cx);
#else
   p[0] = 0;
   p[1] = 0;
   p[2] = 0;
   p[3] = 0;
#endif
}


static inline uint64_t xgetbv(void)
{
#if defined(PIPE_CC_GCC)
   uint32_t eax, edx;

   __asm __volatile (
      ".byte 0x0f, 0x01, 0xd0" // xgetbv isn't supported on gcc < 4.4
      : "=a"(eax),
        "=d"(edx)
      : "c"(0)
   );

   return ((uint64_t)edx << 32) | eax;
#elif defined(PIPE_CC_MSVC) && defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
   return _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
#else
   return 0;
#endif
}


#if defined(PIPE_ARCH_X86)
PIPE_ALIGN_STACK static inline boolean sse2_has_daz(void)
{
   struct {
      uint32_t pad1[7];
      uint32_t mxcsr_mask;
      uint32_t pad2[128-8];
   } PIPE_ALIGN_VAR(16) fxarea;

   fxarea.mxcsr_mask = 0;
#if defined(PIPE_CC_GCC)
   __asm __volatile ("fxsave %0" : "+m" (fxarea));
#elif defined(PIPE_CC_MSVC) || defined(PIPE_CC_ICL)
   _fxsave(&fxarea);
#else
   fxarea.mxcsr_mask = 0;
#endif
   return !!(fxarea.mxcsr_mask & (1 << 6));
}
#endif

#endif /* X86 or X86_64 */

#if defined(PIPE_ARCH_ARM)
static void
check_os_arm_support(void)
{
   /*
    * On Android, the cpufeatures library is the preferred way of checking
    * CPU capabilities. However, it is not available for standalone Mesa
    * builds, i.e. when the Android build system (Android.mk-based) is not
    * used. Because of this we cannot use PIPE_OS_ANDROID here, but rather
    * have a separate macro that only gets enabled from the respective
    * Android.mk.
    */
#if defined(HAS_ANDROID_CPUFEATURES)
   AndroidCpuFamily cpu_family = android_getCpuFamily();
   uint64_t cpu_features = android_getCpuFeatures();

   if (cpu_family == ANDROID_CPU_FAMILY_ARM) {
      if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON)
         util_cpu_caps.has_neon = 1;
   }
#elif defined(PIPE_OS_LINUX)
   Elf32_auxv_t aux;
   int fd;

   fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
   if (fd >= 0) {
      while (read(fd, &aux, sizeof(Elf32_auxv_t)) == sizeof(Elf32_auxv_t)) {
         if (aux.a_type == AT_HWCAP) {
            uint32_t hwcap = aux.a_un.a_val;

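            /* Bit 12 of AT_HWCAP is HWCAP_NEON on 32-bit ARM. */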
            util_cpu_caps.has_neon = (hwcap >> 12) & 1;
            break;
         }
      }
      close (fd);
   }
#endif /* PIPE_OS_LINUX */
}
#endif /* PIPE_ARCH_ARM */

static void
get_cpu_topology(void)
{
   uint32_t regs[4];

   /* Default. This is correct if L3 is not present or there is only one. */
   util_cpu_caps.cores_per_L3 = util_cpu_caps.nr_cpus;

#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
   /* AMD Zen */
   if (util_cpu_caps.x86_cpu_type == 0x17) {
      /* Query the L3 cache topology information. */
      cpuid_count(0x8000001D, 3, regs);
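      /* For this leaf, EAX[7:5] is the cache level and EAX[25:14] is the
       * number of logical processors sharing the cache, minus one.
       */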
      unsigned cache_level = (regs[0] >> 5) & 0x7;
      unsigned cores_per_cache = ((regs[0] >> 14) & 0xfff) + 1;

      if (cache_level == 3)
         util_cpu_caps.cores_per_L3 = cores_per_cache;
   }
#endif
}

static void
util_cpu_detect_once(void)
{
   memset(&util_cpu_caps, 0, sizeof util_cpu_caps);

   /* Count the number of CPUs in the system */
#if defined(PIPE_OS_WINDOWS)
   {
      SYSTEM_INFO system_info;
      GetSystemInfo(&system_info);
      util_cpu_caps.nr_cpus = system_info.dwNumberOfProcessors;
   }
#elif defined(PIPE_OS_UNIX) && defined(_SC_NPROCESSORS_ONLN)
   util_cpu_caps.nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
   if (util_cpu_caps.nr_cpus == ~0)
      util_cpu_caps.nr_cpus = 1;
#elif defined(PIPE_OS_BSD)
   {
      int mib[2], ncpu;
      size_t len;   /* sysctl() expects a size_t length */

      mib[0] = CTL_HW;
      mib[1] = HW_NCPU;

      len = sizeof (ncpu);
      sysctl(mib, 2, &ncpu, &len, NULL, 0);
      util_cpu_caps.nr_cpus = ncpu;
   }
#else
   util_cpu_caps.nr_cpus = 1;
#endif

   /* Make the fallback cacheline size nonzero so that it can be
    * safely passed to align().
    */
   util_cpu_caps.cacheline = sizeof(void *);

#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
   if (has_cpuid()) {
      uint32_t regs[4];
      uint32_t regs2[4];

      util_cpu_caps.cacheline = 32;

      /* Get max cpuid level */
      cpuid(0x00000000, regs);

      if (regs[0] >= 0x00000001) {
         unsigned int cacheline;

         cpuid (0x00000001, regs2);

         util_cpu_caps.x86_cpu_type = (regs2[0] >> 8) & 0xf;
         /* Add "extended family". */
         if (util_cpu_caps.x86_cpu_type == 0xf)
            util_cpu_caps.x86_cpu_type += ((regs2[0] >> 20) & 0xff);

         /* general feature flags */
         util_cpu_caps.has_tsc = (regs2[3] >> 4) & 1; /* 0x0000010 */
         util_cpu_caps.has_mmx = (regs2[3] >> 23) & 1; /* 0x0800000 */
         util_cpu_caps.has_sse = (regs2[3] >> 25) & 1; /* 0x2000000 */
         util_cpu_caps.has_sse2 = (regs2[3] >> 26) & 1; /* 0x4000000 */
         util_cpu_caps.has_sse3 = (regs2[2] >> 0) & 1; /* 0x0000001 */
         util_cpu_caps.has_ssse3 = (regs2[2] >> 9) & 1; /* 0x0000200 */
         util_cpu_caps.has_sse4_1 = (regs2[2] >> 19) & 1;
         util_cpu_caps.has_sse4_2 = (regs2[2] >> 20) & 1;
         util_cpu_caps.has_popcnt = (regs2[2] >> 23) & 1;
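         /* AVX also needs OS support for saving the YMM registers:
          * XCR0 bit 1 (XMM state) and bit 2 (YMM state) must both be set,
          * hence the (xgetbv() & 6) == 6 check below.
          */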
         util_cpu_caps.has_avx = ((regs2[2] >> 28) & 1) && // AVX
                                 ((regs2[2] >> 27) & 1) && // OSXSAVE
                                 ((xgetbv() & 6) == 6);    // XMM & YMM
         util_cpu_caps.has_f16c = ((regs2[2] >> 29) & 1) && util_cpu_caps.has_avx;
         util_cpu_caps.has_fma = ((regs2[2] >> 12) & 1) && util_cpu_caps.has_avx;
         util_cpu_caps.has_mmx2 = util_cpu_caps.has_sse; /* SSE CPUs support mmxext too */
#if defined(PIPE_ARCH_X86_64)
         util_cpu_caps.has_daz = 1;
#else
         util_cpu_caps.has_daz = util_cpu_caps.has_sse3 ||
                                 (util_cpu_caps.has_sse2 && sse2_has_daz());
#endif

         cacheline = ((regs2[1] >> 8) & 0xFF) * 8;
         if (cacheline > 0)
            util_cpu_caps.cacheline = cacheline;
      }
      if (util_cpu_caps.has_avx && regs[0] >= 0x00000007) {
         uint32_t regs7[4];
         cpuid_count(0x00000007, 0x00000000, regs7);
         util_cpu_caps.has_avx2 = (regs7[1] >> 5) & 1;
      }

      // check for avx512
      if (((regs2[2] >> 27) & 1) && // OSXSAVE
          (xgetbv() & (0x7 << 5)) && // OPMASK/ZMM state enabled by OS
          ((xgetbv() & 6) == 6)) { // XMM/YMM enabled by OS
         uint32_t regs3[4];
         cpuid_count(0x00000007, 0x00000000, regs3);
         util_cpu_caps.has_avx512f = (regs3[1] >> 16) & 1;
         util_cpu_caps.has_avx512dq = (regs3[1] >> 17) & 1;
         util_cpu_caps.has_avx512ifma = (regs3[1] >> 21) & 1;
         util_cpu_caps.has_avx512pf = (regs3[1] >> 26) & 1;
         util_cpu_caps.has_avx512er = (regs3[1] >> 27) & 1;
         util_cpu_caps.has_avx512cd = (regs3[1] >> 28) & 1;
         util_cpu_caps.has_avx512bw = (regs3[1] >> 30) & 1;
         util_cpu_caps.has_avx512vl = (regs3[1] >> 31) & 1;
         util_cpu_caps.has_avx512vbmi = (regs3[2] >> 1) & 1;
      }

      if (regs[1] == 0x756e6547 && regs[2] == 0x6c65746e && regs[3] == 0x49656e69) {
         /* GenuineIntel */
         util_cpu_caps.has_intel = 1;
      }

      cpuid(0x80000000, regs);

      if (regs[0] >= 0x80000001) {

         cpuid(0x80000001, regs2);

         util_cpu_caps.has_mmx |= (regs2[3] >> 23) & 1;
         util_cpu_caps.has_mmx2 |= (regs2[3] >> 22) & 1;
         util_cpu_caps.has_3dnow = (regs2[3] >> 31) & 1;
         util_cpu_caps.has_3dnow_ext = (regs2[3] >> 30) & 1;

         util_cpu_caps.has_xop = util_cpu_caps.has_avx &&
                                 ((regs2[2] >> 11) & 1);
      }

      if (regs[0] >= 0x80000006) {
         /* should we really do this if the clflush size above worked? */
         unsigned int cacheline;
         cpuid(0x80000006, regs2);
         cacheline = regs2[2] & 0xFF;
         if (cacheline > 0)
            util_cpu_caps.cacheline = cacheline;
      }

      if (!util_cpu_caps.has_sse) {
         util_cpu_caps.has_sse2 = 0;
         util_cpu_caps.has_sse3 = 0;
         util_cpu_caps.has_ssse3 = 0;
         util_cpu_caps.has_sse4_1 = 0;
      }
   }
#endif /* PIPE_ARCH_X86 || PIPE_ARCH_X86_64 */

#if defined(PIPE_ARCH_ARM)
   check_os_arm_support();
#endif

#if defined(PIPE_ARCH_PPC)
   check_os_altivec_support();
#endif /* PIPE_ARCH_PPC */

   get_cpu_topology();

#ifdef DEBUG
   if (debug_get_option_dump_cpu()) {
      debug_printf("util_cpu_caps.nr_cpus = %u\n", util_cpu_caps.nr_cpus);

      debug_printf("util_cpu_caps.x86_cpu_type = %u\n", util_cpu_caps.x86_cpu_type);
      debug_printf("util_cpu_caps.cacheline = %u\n", util_cpu_caps.cacheline);

      debug_printf("util_cpu_caps.has_tsc = %u\n", util_cpu_caps.has_tsc);
      debug_printf("util_cpu_caps.has_mmx = %u\n", util_cpu_caps.has_mmx);
      debug_printf("util_cpu_caps.has_mmx2 = %u\n", util_cpu_caps.has_mmx2);
      debug_printf("util_cpu_caps.has_sse = %u\n", util_cpu_caps.has_sse);
      debug_printf("util_cpu_caps.has_sse2 = %u\n", util_cpu_caps.has_sse2);
      debug_printf("util_cpu_caps.has_sse3 = %u\n", util_cpu_caps.has_sse3);
      debug_printf("util_cpu_caps.has_ssse3 = %u\n", util_cpu_caps.has_ssse3);
      debug_printf("util_cpu_caps.has_sse4_1 = %u\n", util_cpu_caps.has_sse4_1);
      debug_printf("util_cpu_caps.has_sse4_2 = %u\n", util_cpu_caps.has_sse4_2);
      debug_printf("util_cpu_caps.has_avx = %u\n", util_cpu_caps.has_avx);
      debug_printf("util_cpu_caps.has_avx2 = %u\n", util_cpu_caps.has_avx2);
      debug_printf("util_cpu_caps.has_f16c = %u\n", util_cpu_caps.has_f16c);
      debug_printf("util_cpu_caps.has_popcnt = %u\n", util_cpu_caps.has_popcnt);
      debug_printf("util_cpu_caps.has_3dnow = %u\n", util_cpu_caps.has_3dnow);
      debug_printf("util_cpu_caps.has_3dnow_ext = %u\n", util_cpu_caps.has_3dnow_ext);
      debug_printf("util_cpu_caps.has_xop = %u\n", util_cpu_caps.has_xop);
      debug_printf("util_cpu_caps.has_altivec = %u\n", util_cpu_caps.has_altivec);
      debug_printf("util_cpu_caps.has_vsx = %u\n", util_cpu_caps.has_vsx);
      debug_printf("util_cpu_caps.has_neon = %u\n", util_cpu_caps.has_neon);
      debug_printf("util_cpu_caps.has_daz = %u\n", util_cpu_caps.has_daz);
      debug_printf("util_cpu_caps.has_avx512f = %u\n", util_cpu_caps.has_avx512f);
      debug_printf("util_cpu_caps.has_avx512dq = %u\n", util_cpu_caps.has_avx512dq);
      debug_printf("util_cpu_caps.has_avx512ifma = %u\n", util_cpu_caps.has_avx512ifma);
      debug_printf("util_cpu_caps.has_avx512pf = %u\n", util_cpu_caps.has_avx512pf);
      debug_printf("util_cpu_caps.has_avx512er = %u\n", util_cpu_caps.has_avx512er);
      debug_printf("util_cpu_caps.has_avx512cd = %u\n", util_cpu_caps.has_avx512cd);
      debug_printf("util_cpu_caps.has_avx512bw = %u\n", util_cpu_caps.has_avx512bw);
      debug_printf("util_cpu_caps.has_avx512vl = %u\n", util_cpu_caps.has_avx512vl);
      debug_printf("util_cpu_caps.has_avx512vbmi = %u\n", util_cpu_caps.has_avx512vbmi);
   }
#endif
}

static once_flag cpu_once_flag = ONCE_FLAG_INIT;

void
util_cpu_detect(void)
{
   call_once(&cpu_once_flag, util_cpu_detect_once);
}
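
/* Minimal usage sketch: run the one-time detection, then branch on the
 * global capability table.  The dispatch targets below are hypothetical
 * examples, not existing Mesa functions.
 *
 *    util_cpu_detect();
 *    if (util_cpu_caps.has_avx2) {
 *       // select an AVX2 code path
 *    } else if (util_cpu_caps.has_sse2) {
 *       // select an SSE2 code path
 *    }
 */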