1 /**************************************************************************
3 * Copyright 2008 Dennis Smit
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * on the rights to use, copy, modify, merge, publish, distribute, sub
10 * license, and/or sell copies of the Software, and to permit persons to whom
11 * the Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 **************************************************************************/
/**
 * @file
 * CPU feature detection.
 *
 * @author Dennis Smit
 * @author Based on the work of Eric Anholt <anholt@FreeBSD.org>
 */
#include "pipe/p_config.h"

#include "util/u_debug.h"
#include "u_cpu_detect.h"
#include "c11/threads.h"

#if defined(PIPE_ARCH_PPC)
#if defined(PIPE_OS_APPLE)
#include <sys/sysctl.h>
#else
#include <signal.h>
#include <setjmp.h>
#endif
#endif

#if defined(PIPE_OS_NETBSD) || defined(PIPE_OS_OPENBSD)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#endif

#if defined(PIPE_OS_FREEBSD) || defined(PIPE_OS_DRAGONFLY)
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#if defined(PIPE_OS_LINUX)
#include <signal.h>
#include <fcntl.h>
#include <elf.h>
#endif

#ifdef PIPE_OS_UNIX
#include <unistd.h>
#endif

#if defined(HAS_ANDROID_CPUFEATURES)
#include <cpu-features.h>
#endif

#if defined(PIPE_OS_WINDOWS)
#include <windows.h>
#if defined(PIPE_CC_MSVC)
#include <intrin.h>
#endif
#endif
84 DEBUG_GET_ONCE_BOOL_OPTION(dump_cpu
, "GALLIUM_DUMP_CPU", FALSE
)
88 struct util_cpu_caps util_cpu_caps
;
#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
/* Forward declaration: returns non-zero if the CPUID instruction is usable. */
static int has_cpuid(void);
#endif
#if defined(PIPE_ARCH_PPC) && !defined(PIPE_OS_APPLE) && !defined(PIPE_OS_LINUX)
/* setjmp target and arming flag used to recover from SIGILL while probing
 * AltiVec/VSX instructions (brute-force detection path below). */
static jmp_buf  __lv_powerpc_jmpbuf;
static volatile sig_atomic_t __lv_powerpc_canjump = 0;

/**
 * SIGILL handler for the AltiVec probe.
 *
 * If the probe has armed the jump buffer, long-jump back into
 * check_os_altivec_support(); otherwise restore the default handler and
 * re-raise so an unrelated SIGILL still terminates the process normally.
 */
static void
sigill_handler(int sig)
{
   if (!__lv_powerpc_canjump) {
      signal(sig, SIG_DFL);
      raise(sig);
   }

   __lv_powerpc_canjump = 0;
   longjmp(__lv_powerpc_jmpbuf, 1);
}
#endif
#if defined(PIPE_ARCH_PPC)
/**
 * Detect AltiVec/VSX support and record it in util_cpu_caps.
 *
 * Three strategies depending on the OS:
 *  - Apple/Darwin: ask sysctl (HW_VECTORUNIT).
 *  - Linux: parse AT_HWCAP from /proc/self/auxv.
 *  - Otherwise: execute an AltiVec instruction and catch SIGILL
 *    (borrowed from the libmpeg2 library).
 *
 * GALLIVM_ALTIVEC=0 / GALLIVM_VSX=0 environment variables can force the
 * corresponding feature off.
 */
static void
check_os_altivec_support(void)
{
#if defined(PIPE_OS_APPLE)
   int sels[2] = {CTL_HW, HW_VECTORUNIT};
   int has_vu = 0;
   /* NOTE(review): sysctl's oldlenp is size_t*; `int len` matches the
    * historical code here but is suspect on LP64 — confirm before changing. */
   int len = sizeof (has_vu);
   int err;

   err = sysctl(sels, 2, &has_vu, &len, NULL, 0);

   if (err == 0) {
      if (has_vu != 0) {
         util_cpu_caps.has_altivec = 1;
      }
   }
#elif defined(PIPE_OS_LINUX) /* !PIPE_OS_APPLE */
#if defined(PIPE_ARCH_PPC_64)
   Elf64_auxv_t aux;
#else
   Elf32_auxv_t aux;
#endif
   int fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
   if (fd >= 0) {
      while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
         if (aux.a_type == AT_HWCAP) {
            char *env_vsx = getenv("GALLIVM_VSX");
            uint64_t hwcap = aux.a_un.a_val;
            /* PPC_FEATURE_HAS_ALTIVEC is bit 28, PPC_FEATURE_HAS_VSX bit 7. */
            util_cpu_caps.has_altivec = (hwcap >> 28) & 1;
            if (!env_vsx || env_vsx[0] != '0') {
               util_cpu_caps.has_vsx = (hwcap >> 7) & 1;
            }
            break;
         }
      }
      close(fd);   /* don't leak the descriptor */
   }
#else /* !PIPE_OS_APPLE && !PIPE_OS_LINUX */
   /* not on Apple/Darwin or Linux, do it the brute-force way */
   /* this is borrowed from the libmpeg2 library */
   signal(SIGILL, sigill_handler);
   if (setjmp(__lv_powerpc_jmpbuf)) {
      /* We got here via longjmp: the probe instruction faulted. */
      signal(SIGILL, SIG_DFL);
   } else {
      boolean enable_altivec = TRUE;   /* Default: enable if available, and if not overridden */
      boolean enable_vsx = TRUE;

      /* Disabling Altivec code generation is not the same as disabling VSX code generation,
       * which can be done simply by passing -mattr=-vsx to the LLVM compiler; cf.
       * lp_build_create_jit_compiler_for_module().
       * If you want to disable Altivec code generation, the best place to do it is here.
       */
      char *env_control = getenv("GALLIVM_ALTIVEC");   /* 1=enable (default); 0=disable */
      if (env_control && env_control[0] == '0') {
         enable_altivec = FALSE;
      }

      /* VSX instructions can be explicitly enabled/disabled via GALLIVM_VSX=1 or 0 */
      char *env_vsx = getenv("GALLIVM_VSX");
      if (env_vsx && env_vsx[0] == '0') {
         enable_vsx = FALSE;
      }

      if (enable_altivec) {
         __lv_powerpc_canjump = 1;

         /* Enable the vector unit (VRSAVE, SPR 256) and execute an AltiVec
          * instruction; SIGILL here means no AltiVec. */
         __asm __volatile
            ("mtspr 256, %0\n\t"
             "vand %%v0, %%v0, %%v0"
             :
             : "r" (-1));

         util_cpu_caps.has_altivec = 1;

         if (enable_vsx) {
            /* Probe a VSX instruction the same way. */
            __asm __volatile("xxland %vs0, %vs0, %vs0");
            util_cpu_caps.has_vsx = 1;
         }
         signal(SIGILL, SIG_DFL);
      } else {
         util_cpu_caps.has_altivec = 0;
      }
   }
#endif /* !PIPE_OS_APPLE && !PIPE_OS_LINUX */
}
#endif /* PIPE_ARCH_PPC */
#if defined(PIPE_ARCH_X86) || defined (PIPE_ARCH_X86_64)
/**
 * Returns non-zero if the CPUID instruction is available.
 * On 32-bit x86 this is detected by toggling EFLAGS.ID (bit 21);
 * x86-64 always has CPUID.
 */
static int has_cpuid(void)
{
#if defined(PIPE_ARCH_X86)
#if defined(PIPE_OS_GCC)
   int a, c;

   __asm __volatile
      ("pushf\n"
       "popl %0\n"
       "movl %0, %1\n"
       "xorl $0x200000, %0\n"   /* toggle EFLAGS.ID */
       "push %0\n"
       "popf\n"
       "pushf\n"
       "popl %0\n"
       : "=a" (a), "=c" (c)
       :
       : "cc");

   return a != c;
#else
   /* FIXME: assume CPUID is present on non-GCC 32-bit toolchains. */
   return 1;
#endif
#elif defined(PIPE_ARCH_X86_64)
   return 1;
#else
   return 0;
#endif
}


/**
 * Execute CPUID with leaf @ax; results (eax,ebx,ecx,edx) stored in p[0..3].
 *
 * @sa cpuid.h included in gcc-4.3 onwards.
 * @sa http://msdn.microsoft.com/en-us/library/hskdteyh.aspx
 */
static inline void
cpuid(uint32_t ax, uint32_t *p)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   /* ebx may be the PIC register on 32-bit; preserve it via xchg. */
   __asm __volatile (
     "xchgl %%ebx, %1\n\t"
     "cpuid\n\t"
     "xchgl %%ebx, %1"
     : "=a" (p[0]),
       "=S" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax)
   );
#elif defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86_64)
   __asm __volatile (
     "cpuid\n\t"
     : "=a" (p[0]),
       "=b" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax)
   );
#elif defined(PIPE_CC_MSVC)
   __cpuid(p, ax);
#else
   p[0] = 0;
   p[1] = 0;
   p[2] = 0;
   p[3] = 0;
#endif
}

/**
 * Execute CPUID with leaf @ax and sub-leaf @cx; results in p[0..3].
 *
 * @sa cpuid.h included in gcc-4.4 onwards.
 * @sa http://msdn.microsoft.com/en-us/library/hskdteyh%28v=vs.90%29.aspx
 */
static inline void
cpuid_count(uint32_t ax, uint32_t cx, uint32_t *p)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   /* ebx may be the PIC register on 32-bit; preserve it via xchg. */
   __asm __volatile (
     "xchgl %%ebx, %1\n\t"
     "cpuid\n\t"
     "xchgl %%ebx, %1"
     : "=a" (p[0]),
       "=S" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax), "2" (cx)
   );
#elif defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86_64)
   __asm __volatile (
     "cpuid\n\t"
     : "=a" (p[0]),
       "=b" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax), "2" (cx)
   );
#elif defined(PIPE_CC_MSVC)
   __cpuidex(p, ax, cx);
#else
   p[0] = 0;
   p[1] = 0;
   p[2] = 0;
   p[3] = 0;
#endif
}


/**
 * Read XCR0 (the XSAVE feature-enabled mask) via XGETBV with ecx=0.
 * Used to verify that the OS saves/restores XMM/YMM/AVX-512 state.
 */
static inline uint64_t xgetbv(void)
{
#if defined(PIPE_CC_GCC)
   uint32_t eax, edx;

   __asm __volatile (
     ".byte 0x0f, 0x01, 0xd0" // xgetbv isn't supported on gcc < 4.4
     : "=a"(eax),
       "=d"(edx)
     : "c"(0)
   );

   return ((uint64_t)edx << 32) | eax;
#elif defined(PIPE_CC_MSVC) && defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
   return _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
#else
   return 0;
#endif
}


#if defined(PIPE_ARCH_X86)
/**
 * Returns TRUE if the CPU supports the Denormals-Are-Zero (DAZ) MXCSR bit,
 * determined by executing FXSAVE and inspecting bit 6 of MXCSR_MASK.
 */
PIPE_ALIGN_STACK static inline boolean sse2_has_daz(void)
{
   /* FXSAVE area: MXCSR_MASK lives at byte offset 28 (dword 7);
    * the area is 512 bytes and must be 16-byte aligned. */
   struct {
      uint32_t pad1[7];
      uint32_t mxcsr_mask;
      uint32_t pad2[128-8];
   } PIPE_ALIGN_VAR(16) fxarea;

   fxarea.mxcsr_mask = 0;
#if defined(PIPE_CC_GCC)
   __asm __volatile ("fxsave %0" : "+m" (fxarea));
#elif defined(PIPE_CC_MSVC) || defined(PIPE_CC_ICL)
   _fxsave(&fxarea);
#else
   fxarea.mxcsr_mask = 0;
#endif
   /* A mask of 0 means "assume the default mask" (no DAZ); bit 6 is DAZ. */
   return !!(fxarea.mxcsr_mask & (1 << 6));
}
#endif

#endif /* X86 or X86_64 */
#if defined(PIPE_ARCH_ARM)
/**
 * Detect NEON support on 32-bit ARM and record it in util_cpu_caps.
 */
static void
check_os_arm_support(void)
{
   /*
    * On Android, the cpufeatures library is preferred way of checking
    * CPU capabilities. However, it is not available for standalone Mesa
    * builds, i.e. when Android build system (Android.mk-based) is not
    * used. Because of this we cannot use PIPE_OS_ANDROID here, but rather
    * have a separate macro that only gets enabled from respective Android.mk.
    */
#if defined(HAS_ANDROID_CPUFEATURES)
   AndroidCpuFamily cpu_family = android_getCpuFamily();
   uint64_t cpu_features = android_getCpuFeatures();

   if (cpu_family == ANDROID_CPU_FAMILY_ARM) {
      if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON)
         util_cpu_caps.has_neon = 1;
   }
#elif defined(PIPE_OS_LINUX)
   Elf32_auxv_t aux;
   int fd;

   fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
   if (fd >= 0) {
      while (read(fd, &aux, sizeof(Elf32_auxv_t)) == sizeof(Elf32_auxv_t)) {
         if (aux.a_type == AT_HWCAP) {
            uint32_t hwcap = aux.a_un.a_val;

            /* HWCAP_NEON is bit 12 of AT_HWCAP on 32-bit ARM. */
            util_cpu_caps.has_neon = (hwcap >> 12) & 1;
            break;
         }
      }
      close(fd);   /* don't leak the descriptor */
   }
#endif /* PIPE_OS_LINUX */
}

#elif defined(PIPE_ARCH_AARCH64)
/**
 * AArch64 mandates Advanced SIMD (NEON); no runtime probe needed.
 */
static void
check_os_arm_support(void)
{
   util_cpu_caps.has_neon = true;
}
#endif /* PIPE_ARCH_ARM || PIPE_ARCH_AARCH64 */
399 get_cpu_topology(void)
401 /* Default. This is correct if L3 is not present or there is only one. */
402 util_cpu_caps
.cores_per_L3
= util_cpu_caps
.nr_cpus
;
404 #if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
406 if (util_cpu_caps
.x86_cpu_type
== 0x17) {
409 /* Query the L3 cache topology information. */
410 cpuid_count(0x8000001D, 3, regs
);
411 unsigned cache_level
= (regs
[0] >> 5) & 0x7;
412 unsigned cores_per_cache
= ((regs
[0] >> 14) & 0xfff) + 1;
414 if (cache_level
== 3)
415 util_cpu_caps
.cores_per_L3
= cores_per_cache
;
421 util_cpu_detect_once(void)
423 memset(&util_cpu_caps
, 0, sizeof util_cpu_caps
);
425 /* Count the number of CPUs in system */
426 #if defined(PIPE_OS_WINDOWS)
428 SYSTEM_INFO system_info
;
429 GetSystemInfo(&system_info
);
430 util_cpu_caps
.nr_cpus
= system_info
.dwNumberOfProcessors
;
432 #elif defined(PIPE_OS_UNIX) && defined(_SC_NPROCESSORS_ONLN)
433 util_cpu_caps
.nr_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
434 if (util_cpu_caps
.nr_cpus
== ~0)
435 util_cpu_caps
.nr_cpus
= 1;
436 #elif defined(PIPE_OS_BSD)
445 sysctl(mib
, 2, &ncpu
, &len
, NULL
, 0);
446 util_cpu_caps
.nr_cpus
= ncpu
;
449 util_cpu_caps
.nr_cpus
= 1;
452 /* Make the fallback cacheline size nonzero so that it can be
453 * safely passed to align().
455 util_cpu_caps
.cacheline
= sizeof(void *);
457 #if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
462 util_cpu_caps
.cacheline
= 32;
464 /* Get max cpuid level */
465 cpuid(0x00000000, regs
);
467 if (regs
[0] >= 0x00000001) {
468 unsigned int cacheline
;
470 cpuid (0x00000001, regs2
);
472 util_cpu_caps
.x86_cpu_type
= (regs2
[0] >> 8) & 0xf;
473 /* Add "extended family". */
474 if (util_cpu_caps
.x86_cpu_type
== 0xf)
475 util_cpu_caps
.x86_cpu_type
+= ((regs2
[0] >> 20) & 0xff);
477 /* general feature flags */
478 util_cpu_caps
.has_tsc
= (regs2
[3] >> 4) & 1; /* 0x0000010 */
479 util_cpu_caps
.has_mmx
= (regs2
[3] >> 23) & 1; /* 0x0800000 */
480 util_cpu_caps
.has_sse
= (regs2
[3] >> 25) & 1; /* 0x2000000 */
481 util_cpu_caps
.has_sse2
= (regs2
[3] >> 26) & 1; /* 0x4000000 */
482 util_cpu_caps
.has_sse3
= (regs2
[2] >> 0) & 1; /* 0x0000001 */
483 util_cpu_caps
.has_ssse3
= (regs2
[2] >> 9) & 1; /* 0x0000020 */
484 util_cpu_caps
.has_sse4_1
= (regs2
[2] >> 19) & 1;
485 util_cpu_caps
.has_sse4_2
= (regs2
[2] >> 20) & 1;
486 util_cpu_caps
.has_popcnt
= (regs2
[2] >> 23) & 1;
487 util_cpu_caps
.has_avx
= ((regs2
[2] >> 28) & 1) && // AVX
488 ((regs2
[2] >> 27) & 1) && // OSXSAVE
489 ((xgetbv() & 6) == 6); // XMM & YMM
490 util_cpu_caps
.has_f16c
= ((regs2
[2] >> 29) & 1) && util_cpu_caps
.has_avx
;
491 util_cpu_caps
.has_fma
= ((regs2
[2] >> 12) & 1) && util_cpu_caps
.has_avx
;
492 util_cpu_caps
.has_mmx2
= util_cpu_caps
.has_sse
; /* SSE cpus supports mmxext too */
493 #if defined(PIPE_ARCH_X86_64)
494 util_cpu_caps
.has_daz
= 1;
496 util_cpu_caps
.has_daz
= util_cpu_caps
.has_sse3
||
497 (util_cpu_caps
.has_sse2
&& sse2_has_daz());
500 cacheline
= ((regs2
[1] >> 8) & 0xFF) * 8;
502 util_cpu_caps
.cacheline
= cacheline
;
504 if (util_cpu_caps
.has_avx
&& regs
[0] >= 0x00000007) {
506 cpuid_count(0x00000007, 0x00000000, regs7
);
507 util_cpu_caps
.has_avx2
= (regs7
[1] >> 5) & 1;
511 if (((regs2
[2] >> 27) & 1) && // OSXSAVE
512 (xgetbv() & (0x7 << 5)) && // OPMASK: upper-256 enabled by OS
513 ((xgetbv() & 6) == 6)) { // XMM/YMM enabled by OS
515 cpuid_count(0x00000007, 0x00000000, regs3
);
516 util_cpu_caps
.has_avx512f
= (regs3
[1] >> 16) & 1;
517 util_cpu_caps
.has_avx512dq
= (regs3
[1] >> 17) & 1;
518 util_cpu_caps
.has_avx512ifma
= (regs3
[1] >> 21) & 1;
519 util_cpu_caps
.has_avx512pf
= (regs3
[1] >> 26) & 1;
520 util_cpu_caps
.has_avx512er
= (regs3
[1] >> 27) & 1;
521 util_cpu_caps
.has_avx512cd
= (regs3
[1] >> 28) & 1;
522 util_cpu_caps
.has_avx512bw
= (regs3
[1] >> 30) & 1;
523 util_cpu_caps
.has_avx512vl
= (regs3
[1] >> 31) & 1;
524 util_cpu_caps
.has_avx512vbmi
= (regs3
[2] >> 1) & 1;
527 if (regs
[1] == 0x756e6547 && regs
[2] == 0x6c65746e && regs
[3] == 0x49656e69) {
529 util_cpu_caps
.has_intel
= 1;
532 cpuid(0x80000000, regs
);
534 if (regs
[0] >= 0x80000001) {
536 cpuid(0x80000001, regs2
);
538 util_cpu_caps
.has_mmx
|= (regs2
[3] >> 23) & 1;
539 util_cpu_caps
.has_mmx2
|= (regs2
[3] >> 22) & 1;
540 util_cpu_caps
.has_3dnow
= (regs2
[3] >> 31) & 1;
541 util_cpu_caps
.has_3dnow_ext
= (regs2
[3] >> 30) & 1;
543 util_cpu_caps
.has_xop
= util_cpu_caps
.has_avx
&&
544 ((regs2
[2] >> 11) & 1);
547 if (regs
[0] >= 0x80000006) {
548 /* should we really do this if the clflush size above worked? */
549 unsigned int cacheline
;
550 cpuid(0x80000006, regs2
);
551 cacheline
= regs2
[2] & 0xFF;
553 util_cpu_caps
.cacheline
= cacheline
;
556 if (!util_cpu_caps
.has_sse
) {
557 util_cpu_caps
.has_sse2
= 0;
558 util_cpu_caps
.has_sse3
= 0;
559 util_cpu_caps
.has_ssse3
= 0;
560 util_cpu_caps
.has_sse4_1
= 0;
563 #endif /* PIPE_ARCH_X86 || PIPE_ARCH_X86_64 */
565 #if defined(PIPE_ARCH_ARM) || defined(PIPE_ARCH_AARCH64)
566 check_os_arm_support();
569 #if defined(PIPE_ARCH_PPC)
570 check_os_altivec_support();
571 #endif /* PIPE_ARCH_PPC */
576 if (debug_get_option_dump_cpu()) {
577 debug_printf("util_cpu_caps.nr_cpus = %u\n", util_cpu_caps
.nr_cpus
);
579 debug_printf("util_cpu_caps.x86_cpu_type = %u\n", util_cpu_caps
.x86_cpu_type
);
580 debug_printf("util_cpu_caps.cacheline = %u\n", util_cpu_caps
.cacheline
);
582 debug_printf("util_cpu_caps.has_tsc = %u\n", util_cpu_caps
.has_tsc
);
583 debug_printf("util_cpu_caps.has_mmx = %u\n", util_cpu_caps
.has_mmx
);
584 debug_printf("util_cpu_caps.has_mmx2 = %u\n", util_cpu_caps
.has_mmx2
);
585 debug_printf("util_cpu_caps.has_sse = %u\n", util_cpu_caps
.has_sse
);
586 debug_printf("util_cpu_caps.has_sse2 = %u\n", util_cpu_caps
.has_sse2
);
587 debug_printf("util_cpu_caps.has_sse3 = %u\n", util_cpu_caps
.has_sse3
);
588 debug_printf("util_cpu_caps.has_ssse3 = %u\n", util_cpu_caps
.has_ssse3
);
589 debug_printf("util_cpu_caps.has_sse4_1 = %u\n", util_cpu_caps
.has_sse4_1
);
590 debug_printf("util_cpu_caps.has_sse4_2 = %u\n", util_cpu_caps
.has_sse4_2
);
591 debug_printf("util_cpu_caps.has_avx = %u\n", util_cpu_caps
.has_avx
);
592 debug_printf("util_cpu_caps.has_avx2 = %u\n", util_cpu_caps
.has_avx2
);
593 debug_printf("util_cpu_caps.has_f16c = %u\n", util_cpu_caps
.has_f16c
);
594 debug_printf("util_cpu_caps.has_popcnt = %u\n", util_cpu_caps
.has_popcnt
);
595 debug_printf("util_cpu_caps.has_3dnow = %u\n", util_cpu_caps
.has_3dnow
);
596 debug_printf("util_cpu_caps.has_3dnow_ext = %u\n", util_cpu_caps
.has_3dnow_ext
);
597 debug_printf("util_cpu_caps.has_xop = %u\n", util_cpu_caps
.has_xop
);
598 debug_printf("util_cpu_caps.has_altivec = %u\n", util_cpu_caps
.has_altivec
);
599 debug_printf("util_cpu_caps.has_vsx = %u\n", util_cpu_caps
.has_vsx
);
600 debug_printf("util_cpu_caps.has_neon = %u\n", util_cpu_caps
.has_neon
);
601 debug_printf("util_cpu_caps.has_daz = %u\n", util_cpu_caps
.has_daz
);
602 debug_printf("util_cpu_caps.has_avx512f = %u\n", util_cpu_caps
.has_avx512f
);
603 debug_printf("util_cpu_caps.has_avx512dq = %u\n", util_cpu_caps
.has_avx512dq
);
604 debug_printf("util_cpu_caps.has_avx512ifma = %u\n", util_cpu_caps
.has_avx512ifma
);
605 debug_printf("util_cpu_caps.has_avx512pf = %u\n", util_cpu_caps
.has_avx512pf
);
606 debug_printf("util_cpu_caps.has_avx512er = %u\n", util_cpu_caps
.has_avx512er
);
607 debug_printf("util_cpu_caps.has_avx512cd = %u\n", util_cpu_caps
.has_avx512cd
);
608 debug_printf("util_cpu_caps.has_avx512bw = %u\n", util_cpu_caps
.has_avx512bw
);
609 debug_printf("util_cpu_caps.has_avx512vl = %u\n", util_cpu_caps
.has_avx512vl
);
610 debug_printf("util_cpu_caps.has_avx512vbmi = %u\n", util_cpu_caps
.has_avx512vbmi
);
615 static once_flag cpu_once_flag
= ONCE_FLAG_INIT
;
618 util_cpu_detect(void)
620 call_once(&cpu_once_flag
, util_cpu_detect_once
);