util: skip AltiVec detection if built with -maltivec
[mesa.git] / src / util / u_cpu_detect.c
/**************************************************************************
 *
 * Copyright 2008 Dennis Smit
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * CPU feature detection.
 *
 * @author Dennis Smit
 * @author Based on the work of Eric Anholt <anholt@FreeBSD.org>
 */

#include "pipe/p_config.h"

#include "util/u_debug.h"
#include "u_cpu_detect.h"
#include "c11/threads.h"

#if defined(PIPE_ARCH_PPC)
#if defined(PIPE_OS_APPLE)
#include <sys/sysctl.h>
#else
#include <signal.h>
#include <setjmp.h>
#endif
#endif

#if defined(PIPE_OS_NETBSD) || defined(PIPE_OS_OPENBSD)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#endif

#if defined(PIPE_OS_FREEBSD) || defined(PIPE_OS_DRAGONFLY)
#include <sys/types.h>
#include <sys/sysctl.h>
#if __has_include(<sys/auxv.h>)
#include <sys/auxv.h>
#define HAVE_ELF_AUX_INFO
#endif
#endif

#if defined(PIPE_OS_LINUX)
#include <signal.h>
#include <fcntl.h>
#include <elf.h>
#endif

#ifdef PIPE_OS_UNIX
#include <unistd.h>
#endif

#if defined(HAS_ANDROID_CPUFEATURES)
#include <cpu-features.h>
#endif

#if defined(PIPE_OS_WINDOWS)
#include <windows.h>
#if defined(PIPE_CC_MSVC)
#include <intrin.h>
#endif
#endif


#ifdef DEBUG
DEBUG_GET_ONCE_BOOL_OPTION(dump_cpu, "GALLIUM_DUMP_CPU", FALSE)
#endif


struct util_cpu_caps util_cpu_caps;

#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
static int has_cpuid(void);
#endif


#if defined(PIPE_ARCH_PPC) && !defined(PIPE_OS_APPLE) && !defined(PIPE_OS_LINUX)
static jmp_buf __lv_powerpc_jmpbuf;
static volatile sig_atomic_t __lv_powerpc_canjump = 0;

static void
sigill_handler(int sig)
{
   if (!__lv_powerpc_canjump) {
      signal (sig, SIG_DFL);
      raise (sig);
   }

   __lv_powerpc_canjump = 0;
   longjmp(__lv_powerpc_jmpbuf, 1);
}
#endif

#if defined(PIPE_ARCH_PPC)
static void
check_os_altivec_support(void)
{
#if defined(__ALTIVEC__)
   util_cpu_caps.has_altivec = 1;
#endif
#if defined(__VSX__)
   util_cpu_caps.has_vsx = 1;
#endif
#if defined(__ALTIVEC__) && defined(__VSX__)
   /* Do nothing */
#elif defined(PIPE_OS_APPLE)
   int sels[2] = {CTL_HW, HW_VECTORUNIT};
   int has_vu = 0;
   int len = sizeof (has_vu);
   int err;

   err = sysctl(sels, 2, &has_vu, &len, NULL, 0);

   if (err == 0) {
      if (has_vu != 0) {
         util_cpu_caps.has_altivec = 1;
      }
   }
#elif defined(PIPE_OS_LINUX) /* !PIPE_OS_APPLE */
#if defined(PIPE_ARCH_PPC_64)
   Elf64_auxv_t aux;
#else
   Elf32_auxv_t aux;
#endif
   int fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
   if (fd >= 0) {
      while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
         if (aux.a_type == AT_HWCAP) {
            char *env_vsx = getenv("GALLIVM_VSX");
            uint64_t hwcap = aux.a_un.a_val;
            /* Bit 28 is PPC_FEATURE_HAS_ALTIVEC and bit 7 is
             * PPC_FEATURE_HAS_VSX in the AT_HWCAP word.
             */
            util_cpu_caps.has_altivec = (hwcap >> 28) & 1;
            if (!env_vsx || env_vsx[0] != '0') {
               util_cpu_caps.has_vsx = (hwcap >> 7) & 1;
            }
            break;
         }
      }
      close(fd);
   }
#else /* !PIPE_OS_APPLE && !PIPE_OS_LINUX */
   /* not on Apple/Darwin or Linux, do it the brute-force way */
   /* this is borrowed from the libmpeg2 library */
   signal(SIGILL, sigill_handler);
   if (setjmp(__lv_powerpc_jmpbuf)) {
      signal(SIGILL, SIG_DFL);
   } else {
      boolean enable_altivec = TRUE;    /* Default: enable if available, and if not overridden */
      boolean enable_vsx = TRUE;
#ifdef DEBUG
      /* Disabling Altivec code generation is not the same as disabling VSX code generation,
       * which can be done simply by passing -mattr=-vsx to the LLVM compiler; cf.
       * lp_build_create_jit_compiler_for_module().
       * If you want to disable Altivec code generation, the best place to do it is here.
       */
      char *env_control = getenv("GALLIVM_ALTIVEC");    /* 1=enable (default); 0=disable */
      if (env_control && env_control[0] == '0') {
         enable_altivec = FALSE;
      }
#endif
      /* VSX instructions can be explicitly enabled/disabled via GALLIVM_VSX=1 or 0 */
      char *env_vsx = getenv("GALLIVM_VSX");
      if (env_vsx && env_vsx[0] == '0') {
         enable_vsx = FALSE;
      }
      if (enable_altivec) {
         __lv_powerpc_canjump = 1;

         __asm __volatile
            ("mtspr 256, %0\n\t"
             "vand %%v0, %%v0, %%v0"
             :
             : "r" (-1));

         util_cpu_caps.has_altivec = 1;

         if (enable_vsx) {
            __asm __volatile("xxland %vs0, %vs0, %vs0");
            util_cpu_caps.has_vsx = 1;
         }
         signal(SIGILL, SIG_DFL);
      } else {
         util_cpu_caps.has_altivec = 0;
      }
   }
#endif /* !PIPE_OS_APPLE && !PIPE_OS_LINUX */
}
#endif /* PIPE_ARCH_PPC */


#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
static int has_cpuid(void)
{
#if defined(PIPE_CC_GCC)
#if defined(PIPE_ARCH_X86)
   /* Toggle the ID bit (bit 21, 0x200000) in EFLAGS; if the change sticks,
    * the CPUID instruction is supported.
    */
   int a, c;

   __asm __volatile
      ("pushf\n"
       "popl %0\n"
       "movl %0, %1\n"
       "xorl $0x200000, %0\n"
       "push %0\n"
       "popf\n"
       "pushf\n"
       "popl %0\n"
       : "=a" (a), "=c" (c)
       :
       : "cc");

   return a != c;
#else
   /* FIXME */
   return 1;
#endif
#elif defined(PIPE_ARCH_X86_64)
   return 1;
#else
   return 0;
#endif
}


/**
 * @sa cpuid.h included in gcc-4.3 onwards.
 * @sa http://msdn.microsoft.com/en-us/library/hskdteyh.aspx
 */
static inline void
cpuid(uint32_t ax, uint32_t *p)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   __asm __volatile (
     "xchgl %%ebx, %1\n\t"
     "cpuid\n\t"
     "xchgl %%ebx, %1"
     : "=a" (p[0]),
       "=S" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax)
   );
#elif defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86_64)
   __asm __volatile (
     "cpuid\n\t"
     : "=a" (p[0]),
       "=b" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax)
   );
#elif defined(PIPE_CC_MSVC)
   __cpuid(p, ax);
#else
   p[0] = 0;
   p[1] = 0;
   p[2] = 0;
   p[3] = 0;
#endif
}
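
/*
 * Illustrative note (not part of the original source): on 32-bit x86 the
 * wrapper above preserves %ebx via xchg because %ebx may hold the PIC GOT
 * pointer.  Results land in p[] as EAX, EBX, ECX, EDX, so leaf 0 returns the
 * vendor string split across p[1], p[3], p[2], e.g.:
 *
 *    uint32_t r[4];
 *    cpuid(0x00000000, r);
 *    // On Intel CPUs: r[1] == 0x756e6547 ("Genu"), r[3] == 0x49656e69
 *    // ("ineI"), r[2] == 0x6c65746e ("ntel") -- see the GenuineIntel check
 *    // in util_cpu_detect_once() below.
 */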

/**
 * @sa cpuid.h included in gcc-4.4 onwards.
 * @sa http://msdn.microsoft.com/en-us/library/hskdteyh%28v=vs.90%29.aspx
 */
static inline void
cpuid_count(uint32_t ax, uint32_t cx, uint32_t *p)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   __asm __volatile (
     "xchgl %%ebx, %1\n\t"
     "cpuid\n\t"
     "xchgl %%ebx, %1"
     : "=a" (p[0]),
       "=S" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax), "2" (cx)
   );
#elif defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86_64)
   __asm __volatile (
     "cpuid\n\t"
     : "=a" (p[0]),
       "=b" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax), "2" (cx)
   );
#elif defined(PIPE_CC_MSVC)
   __cpuidex(p, ax, cx);
#else
   p[0] = 0;
   p[1] = 0;
   p[2] = 0;
   p[3] = 0;
#endif
}


static inline uint64_t xgetbv(void)
{
#if defined(PIPE_CC_GCC)
   uint32_t eax, edx;

   __asm __volatile (
     ".byte 0x0f, 0x01, 0xd0" // xgetbv isn't supported on gcc < 4.4
     : "=a"(eax),
       "=d"(edx)
     : "c"(0)
   );

   return ((uint64_t)edx << 32) | eax;
#elif defined(PIPE_CC_MSVC) && defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
   return _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
#else
   return 0;
#endif
}
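
/*
 * xgetbv() with ECX=0 reads XCR0: bit 1 covers SSE/XMM state and bit 2 covers
 * AVX/YMM state, which is why "(xgetbv() & 6) == 6" is used below to confirm
 * the OS saves both.  Bits 5-7 (opmask, ZMM_Hi256, Hi16_ZMM) are the AVX-512
 * state components checked with the (0x7 << 5) mask.
 */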


#if defined(PIPE_ARCH_X86)
PIPE_ALIGN_STACK static inline boolean sse2_has_daz(void)
{
   struct {
      uint32_t pad1[7];
      uint32_t mxcsr_mask;
      uint32_t pad2[128-8];
   } PIPE_ALIGN_VAR(16) fxarea;

   fxarea.mxcsr_mask = 0;
#if defined(PIPE_CC_GCC)
   __asm __volatile ("fxsave %0" : "+m" (fxarea));
#elif defined(PIPE_CC_MSVC) || defined(PIPE_CC_ICL)
   _fxsave(&fxarea);
#else
   fxarea.mxcsr_mask = 0;
#endif
   /* Bit 6 of the MXCSR mask saved by fxsave indicates DAZ support. */
   return !!(fxarea.mxcsr_mask & (1 << 6));
}
#endif

#endif /* X86 or X86_64 */

#if defined(PIPE_ARCH_ARM)
static void
check_os_arm_support(void)
{
   /*
    * On Android, the cpufeatures library is the preferred way of checking
    * CPU capabilities. However, it is not available for standalone Mesa
    * builds, i.e. when the Android build system (Android.mk-based) is not
    * used. Because of this we cannot use PIPE_OS_ANDROID here, but rather
    * have a separate macro that only gets enabled from the respective
    * Android.mk.
    */
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
   util_cpu_caps.has_neon = 1;
#elif defined(PIPE_OS_FREEBSD) && defined(HAVE_ELF_AUX_INFO)
   unsigned long hwcap = 0;
   elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
   if (hwcap & HWCAP_NEON)
      util_cpu_caps.has_neon = 1;
#elif defined(HAS_ANDROID_CPUFEATURES)
   AndroidCpuFamily cpu_family = android_getCpuFamily();
   uint64_t cpu_features = android_getCpuFeatures();

   if (cpu_family == ANDROID_CPU_FAMILY_ARM) {
      if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON)
         util_cpu_caps.has_neon = 1;
   }
#elif defined(PIPE_OS_LINUX)
   Elf32_auxv_t aux;
   int fd;

   fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
   if (fd >= 0) {
      while (read(fd, &aux, sizeof(Elf32_auxv_t)) == sizeof(Elf32_auxv_t)) {
         if (aux.a_type == AT_HWCAP) {
            uint32_t hwcap = aux.a_un.a_val;

            /* Bit 12 is HWCAP_NEON in the 32-bit ARM hwcap word. */
            util_cpu_caps.has_neon = (hwcap >> 12) & 1;
            break;
         }
      }
      close(fd);
   }
#endif /* PIPE_OS_LINUX */
}

#elif defined(PIPE_ARCH_AARCH64)
static void
check_os_arm_support(void)
{
   /* Advanced SIMD (NEON) is mandatory on AArch64. */
   util_cpu_caps.has_neon = true;
}
#endif /* PIPE_ARCH_ARM || PIPE_ARCH_AARCH64 */

static void
get_cpu_topology(void)
{
   /* Default. This is correct if L3 is not present or there is only one. */
   util_cpu_caps.cores_per_L3 = util_cpu_caps.nr_cpus;

#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
   /* AMD Zen */
   if (util_cpu_caps.x86_cpu_type == 0x17) {
      uint32_t regs[4];

      /* Query the L3 cache topology information. */
      cpuid_count(0x8000001D, 3, regs);
      unsigned cache_level = (regs[0] >> 5) & 0x7;
      unsigned cores_per_cache = ((regs[0] >> 14) & 0xfff) + 1;

      if (cache_level == 3)
         util_cpu_caps.cores_per_L3 = cores_per_cache;
   }
#endif
}

static void
util_cpu_detect_once(void)
{
   memset(&util_cpu_caps, 0, sizeof util_cpu_caps);

   /* Count the number of CPUs in system */
#if defined(PIPE_OS_WINDOWS)
   {
      SYSTEM_INFO system_info;
      GetSystemInfo(&system_info);
      util_cpu_caps.nr_cpus = system_info.dwNumberOfProcessors;
   }
#elif defined(PIPE_OS_UNIX) && defined(_SC_NPROCESSORS_ONLN)
   util_cpu_caps.nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
   if (util_cpu_caps.nr_cpus == ~0)
      util_cpu_caps.nr_cpus = 1;
#elif defined(PIPE_OS_BSD)
   {
      int mib[2], ncpu;
      int len;

      mib[0] = CTL_HW;
      mib[1] = HW_NCPU;

      len = sizeof (ncpu);
      sysctl(mib, 2, &ncpu, &len, NULL, 0);
      util_cpu_caps.nr_cpus = ncpu;
   }
#else
   util_cpu_caps.nr_cpus = 1;
#endif

   /* Make the fallback cacheline size nonzero so that it can be
    * safely passed to align().
    */
   util_cpu_caps.cacheline = sizeof(void *);

#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
   if (has_cpuid()) {
      uint32_t regs[4];
      uint32_t regs2[4];

      util_cpu_caps.cacheline = 32;

      /* Get max cpuid level */
      cpuid(0x00000000, regs);

      if (regs[0] >= 0x00000001) {
         unsigned int cacheline;

         cpuid (0x00000001, regs2);

         util_cpu_caps.x86_cpu_type = (regs2[0] >> 8) & 0xf;
         /* Add "extended family". */
         if (util_cpu_caps.x86_cpu_type == 0xf)
            util_cpu_caps.x86_cpu_type += ((regs2[0] >> 20) & 0xff);

         /* general feature flags */
         util_cpu_caps.has_tsc    = (regs2[3] >>  4) & 1; /* 0x0000010 */
         util_cpu_caps.has_mmx    = (regs2[3] >> 23) & 1; /* 0x0800000 */
         util_cpu_caps.has_sse    = (regs2[3] >> 25) & 1; /* 0x2000000 */
         util_cpu_caps.has_sse2   = (regs2[3] >> 26) & 1; /* 0x4000000 */
         util_cpu_caps.has_sse3   = (regs2[2] >>  0) & 1; /* 0x0000001 */
         util_cpu_caps.has_ssse3  = (regs2[2] >>  9) & 1; /* 0x0000200 */
         util_cpu_caps.has_sse4_1 = (regs2[2] >> 19) & 1;
         util_cpu_caps.has_sse4_2 = (regs2[2] >> 20) & 1;
         util_cpu_caps.has_popcnt = (regs2[2] >> 23) & 1;
         util_cpu_caps.has_avx    = ((regs2[2] >> 28) & 1) && // AVX
                                    ((regs2[2] >> 27) & 1) && // OSXSAVE
                                    ((xgetbv() & 6) == 6);    // XMM & YMM
         util_cpu_caps.has_f16c   = ((regs2[2] >> 29) & 1) && util_cpu_caps.has_avx;
         util_cpu_caps.has_fma    = ((regs2[2] >> 12) & 1) && util_cpu_caps.has_avx;
         util_cpu_caps.has_mmx2   = util_cpu_caps.has_sse; /* SSE CPUs support mmxext too */
#if defined(PIPE_ARCH_X86_64)
         util_cpu_caps.has_daz = 1;
#else
         util_cpu_caps.has_daz = util_cpu_caps.has_sse3 ||
                                 (util_cpu_caps.has_sse2 && sse2_has_daz());
#endif

         cacheline = ((regs2[1] >> 8) & 0xFF) * 8;
         if (cacheline > 0)
            util_cpu_caps.cacheline = cacheline;
      }
      if (util_cpu_caps.has_avx && regs[0] >= 0x00000007) {
         uint32_t regs7[4];
         cpuid_count(0x00000007, 0x00000000, regs7);
         util_cpu_caps.has_avx2 = (regs7[1] >> 5) & 1;
      }

      // check for avx512
      if (((regs2[2] >> 27) & 1) &&          // OSXSAVE
          (xgetbv() & (0x7 << 5)) &&         // OPMASK/ZMM state enabled by OS
          ((xgetbv() & 6) == 6)) {           // XMM/YMM enabled by OS
         uint32_t regs3[4];
         cpuid_count(0x00000007, 0x00000000, regs3);
         util_cpu_caps.has_avx512f    = (regs3[1] >> 16) & 1;
         util_cpu_caps.has_avx512dq   = (regs3[1] >> 17) & 1;
         util_cpu_caps.has_avx512ifma = (regs3[1] >> 21) & 1;
         util_cpu_caps.has_avx512pf   = (regs3[1] >> 26) & 1;
         util_cpu_caps.has_avx512er   = (regs3[1] >> 27) & 1;
         util_cpu_caps.has_avx512cd   = (regs3[1] >> 28) & 1;
         util_cpu_caps.has_avx512bw   = (regs3[1] >> 30) & 1;
         util_cpu_caps.has_avx512vl   = (regs3[1] >> 31) & 1;
         util_cpu_caps.has_avx512vbmi = (regs3[2] >>  1) & 1;
      }

      if (regs[1] == 0x756e6547 && regs[2] == 0x6c65746e && regs[3] == 0x49656e69) {
         /* GenuineIntel */
         util_cpu_caps.has_intel = 1;
      }

      cpuid(0x80000000, regs);

      if (regs[0] >= 0x80000001) {

         cpuid(0x80000001, regs2);

         util_cpu_caps.has_mmx  |= (regs2[3] >> 23) & 1;
         util_cpu_caps.has_mmx2 |= (regs2[3] >> 22) & 1;
         util_cpu_caps.has_3dnow = (regs2[3] >> 31) & 1;
         util_cpu_caps.has_3dnow_ext = (regs2[3] >> 30) & 1;

         util_cpu_caps.has_xop = util_cpu_caps.has_avx &&
                                 ((regs2[2] >> 11) & 1);
      }

      if (regs[0] >= 0x80000006) {
         /* should we really do this if the clflush size above worked? */
         unsigned int cacheline;
         cpuid(0x80000006, regs2);
         cacheline = regs2[2] & 0xFF;
         if (cacheline > 0)
            util_cpu_caps.cacheline = cacheline;
      }

      if (!util_cpu_caps.has_sse) {
         util_cpu_caps.has_sse2 = 0;
         util_cpu_caps.has_sse3 = 0;
         util_cpu_caps.has_ssse3 = 0;
         util_cpu_caps.has_sse4_1 = 0;
      }
   }
#endif /* PIPE_ARCH_X86 || PIPE_ARCH_X86_64 */

#if defined(PIPE_ARCH_ARM) || defined(PIPE_ARCH_AARCH64)
   check_os_arm_support();
#endif

#if defined(PIPE_ARCH_PPC)
   check_os_altivec_support();
#endif /* PIPE_ARCH_PPC */

   get_cpu_topology();

#ifdef DEBUG
   if (debug_get_option_dump_cpu()) {
      debug_printf("util_cpu_caps.nr_cpus = %u\n", util_cpu_caps.nr_cpus);

      debug_printf("util_cpu_caps.x86_cpu_type = %u\n", util_cpu_caps.x86_cpu_type);
      debug_printf("util_cpu_caps.cacheline = %u\n", util_cpu_caps.cacheline);

      debug_printf("util_cpu_caps.has_tsc = %u\n", util_cpu_caps.has_tsc);
      debug_printf("util_cpu_caps.has_mmx = %u\n", util_cpu_caps.has_mmx);
      debug_printf("util_cpu_caps.has_mmx2 = %u\n", util_cpu_caps.has_mmx2);
      debug_printf("util_cpu_caps.has_sse = %u\n", util_cpu_caps.has_sse);
      debug_printf("util_cpu_caps.has_sse2 = %u\n", util_cpu_caps.has_sse2);
      debug_printf("util_cpu_caps.has_sse3 = %u\n", util_cpu_caps.has_sse3);
      debug_printf("util_cpu_caps.has_ssse3 = %u\n", util_cpu_caps.has_ssse3);
      debug_printf("util_cpu_caps.has_sse4_1 = %u\n", util_cpu_caps.has_sse4_1);
      debug_printf("util_cpu_caps.has_sse4_2 = %u\n", util_cpu_caps.has_sse4_2);
      debug_printf("util_cpu_caps.has_avx = %u\n", util_cpu_caps.has_avx);
      debug_printf("util_cpu_caps.has_avx2 = %u\n", util_cpu_caps.has_avx2);
      debug_printf("util_cpu_caps.has_f16c = %u\n", util_cpu_caps.has_f16c);
      debug_printf("util_cpu_caps.has_popcnt = %u\n", util_cpu_caps.has_popcnt);
      debug_printf("util_cpu_caps.has_3dnow = %u\n", util_cpu_caps.has_3dnow);
      debug_printf("util_cpu_caps.has_3dnow_ext = %u\n", util_cpu_caps.has_3dnow_ext);
      debug_printf("util_cpu_caps.has_xop = %u\n", util_cpu_caps.has_xop);
      debug_printf("util_cpu_caps.has_altivec = %u\n", util_cpu_caps.has_altivec);
      debug_printf("util_cpu_caps.has_vsx = %u\n", util_cpu_caps.has_vsx);
      debug_printf("util_cpu_caps.has_neon = %u\n", util_cpu_caps.has_neon);
      debug_printf("util_cpu_caps.has_daz = %u\n", util_cpu_caps.has_daz);
      debug_printf("util_cpu_caps.has_avx512f = %u\n", util_cpu_caps.has_avx512f);
      debug_printf("util_cpu_caps.has_avx512dq = %u\n", util_cpu_caps.has_avx512dq);
      debug_printf("util_cpu_caps.has_avx512ifma = %u\n", util_cpu_caps.has_avx512ifma);
      debug_printf("util_cpu_caps.has_avx512pf = %u\n", util_cpu_caps.has_avx512pf);
      debug_printf("util_cpu_caps.has_avx512er = %u\n", util_cpu_caps.has_avx512er);
      debug_printf("util_cpu_caps.has_avx512cd = %u\n", util_cpu_caps.has_avx512cd);
      debug_printf("util_cpu_caps.has_avx512bw = %u\n", util_cpu_caps.has_avx512bw);
      debug_printf("util_cpu_caps.has_avx512vl = %u\n", util_cpu_caps.has_avx512vl);
      debug_printf("util_cpu_caps.has_avx512vbmi = %u\n", util_cpu_caps.has_avx512vbmi);
   }
#endif
}

static once_flag cpu_once_flag = ONCE_FLAG_INIT;

void
util_cpu_detect(void)
{
   call_once(&cpu_once_flag, util_cpu_detect_once);
}
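
/*
 * Usage sketch (illustrative, not part of the original file): callers run
 * util_cpu_detect() -- which is safe to call repeatedly thanks to call_once()
 * -- and then read the util_cpu_caps flags, e.g.:
 *
 *    util_cpu_detect();
 *    if (util_cpu_caps.has_sse4_1)
 *       use_sse4_path();   // hypothetical caller-side function
 */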