From: H.J. Lu <hjl.tools@gmail.com>
Date: Thu, 21 May 2020 10:54:32 +0000 (-0700)
Subject: libgfortran: Use __builtin_cpu_is/__builtin_cpu_supports
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=8ebc2f5e05aa32bcad8cbfb02f8b50d92a469e66;p=gcc.git

libgfortran: Use __builtin_cpu_is/__builtin_cpu_supports

	* m4/matmul.m4: Don't include <config/i386/cpuinfo.h>.  Use
	__builtin_cpu_is/__builtin_cpu_supports.
	* generated/matmul_c10.c: Regenerated.
	* generated/matmul_c16.c: Likewise.
	* generated/matmul_c4.c: Likewise.
	* generated/matmul_c8.c: Likewise.
	* generated/matmul_i1.c: Likewise.
	* generated/matmul_i16.c: Likewise.
	* generated/matmul_i2.c: Likewise.
	* generated/matmul_i4.c: Likewise.
	* generated/matmul_i8.c: Likewise.
	* generated/matmul_r10.c: Likewise.
	* generated/matmul_r16.c: Likewise.
	* generated/matmul_r4.c: Likewise.
	* generated/matmul_r8.c: Likewise.
---

diff --git a/libgfortran/ChangeLog b/libgfortran/ChangeLog
index 149f45e1088..71c233c87d6 100644
--- a/libgfortran/ChangeLog
+++ b/libgfortran/ChangeLog
@@ -1,3 +1,21 @@
+2020-05-21  H.J. Lu  <hjl.tools@gmail.com>
+
+	* m4/matmul.m4: Don't include <config/i386/cpuinfo.h>.  Use
+	__builtin_cpu_is/__builtin_cpu_supports.
+	* generated/matmul_c10.c: Regenerated.
+	* generated/matmul_c16.c: Likewise.
+	* generated/matmul_c4.c: Likewise.
+	* generated/matmul_c8.c: Likewise.
+	* generated/matmul_i1.c: Likewise.
+	* generated/matmul_i16.c: Likewise.
+	* generated/matmul_i2.c: Likewise.
+	* generated/matmul_i4.c: Likewise.
+	* generated/matmul_i8.c: Likewise.
+	* generated/matmul_r10.c: Likewise.
+	* generated/matmul_r16.c: Likewise.
+	* generated/matmul_r4.c: Likewise.
+	* generated/matmul_r8.c: Likewise.
+
 2020-05-15  H.J. Lu  <hjl.tools@gmail.com>
 
 	PR bootstrap/95147
diff --git a/libgfortran/generated/matmul_c10.c b/libgfortran/generated/matmul_c10.c
index e866a6a4df1..ce5be246ddb 100644
--- a/libgfortran/generated/matmul_c10.c
+++ b/libgfortran/generated/matmul_c10.c
@@ -2367,7 +2367,6 @@ matmul_c10_vanilla (gfc_array_c10 * const restrict retarray,
 
 /* Currently, this is i386 only.  Adjust for other architectures.  */
 
-#include <config/i386/cpuinfo.h>
 void matmul_c10 (gfc_array_c10 * const restrict retarray,
 	gfc_array_c10 * const restrict a, gfc_array_c10 * const restrict b, int try_blas,
 	int blas_limit, blas_call gemm)
@@ -2384,11 +2383,11 @@ void matmul_c10 (gfc_array_c10 * const restrict retarray,
   if (matmul_fn == NULL)
     {
       matmul_fn = matmul_c10_vanilla;
-      if (__cpu_model.__cpu_vendor == VENDOR_INTEL)
+      if (__builtin_cpu_is ("intel"))
 	{
           /* Run down the available processors in order of preference.
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_c10_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_c10 (gfc_array_c10 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_c10_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_c10 (gfc_array_c10 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_c10_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_c10_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_c10_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_c16.c b/libgfortran/generated/matmul_c16.c index e6605e89282..bf756d124ec 100644 --- a/libgfortran/generated/matmul_c16.c +++ b/libgfortran/generated/matmul_c16.c @@ -2367,7 +2367,6 @@ matmul_c16_vanilla (gfc_array_c16 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_c16 (gfc_array_c16 * const restrict retarray, gfc_array_c16 * const restrict a, gfc_array_c16 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_c16 (gfc_array_c16 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_c16_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_c16_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_c16 (gfc_array_c16 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_c16_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_c16 (gfc_array_c16 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_c16_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_c16_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_c16_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_c4.c b/libgfortran/generated/matmul_c4.c index e012fa200fd..5b244104574 100644 --- a/libgfortran/generated/matmul_c4.c +++ b/libgfortran/generated/matmul_c4.c @@ -2367,7 +2367,6 @@ matmul_c4_vanilla (gfc_array_c4 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_c4 (gfc_array_c4 * const restrict retarray, gfc_array_c4 * const restrict a, gfc_array_c4 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_c4 (gfc_array_c4 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_c4_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_c4_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_c4 (gfc_array_c4 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_c4_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_c4 (gfc_array_c4 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_c4_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_c4_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_c4_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_c8.c b/libgfortran/generated/matmul_c8.c index 8c19b49deae..df3cb927e1c 100644 --- a/libgfortran/generated/matmul_c8.c +++ b/libgfortran/generated/matmul_c8.c @@ -2367,7 +2367,6 @@ matmul_c8_vanilla (gfc_array_c8 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_c8 (gfc_array_c8 * const restrict retarray, gfc_array_c8 * const restrict a, gfc_array_c8 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_c8 (gfc_array_c8 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_c8_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_c8_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_c8 (gfc_array_c8 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_c8_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_c8 (gfc_array_c8 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_c8_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_c8_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_c8_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_i1.c b/libgfortran/generated/matmul_i1.c index 8ae4194366e..49b0fbad211 100644 --- a/libgfortran/generated/matmul_i1.c +++ b/libgfortran/generated/matmul_i1.c @@ -2367,7 +2367,6 @@ matmul_i1_vanilla (gfc_array_i1 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_i1 (gfc_array_i1 * const restrict retarray, gfc_array_i1 * const restrict a, gfc_array_i1 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_i1 (gfc_array_i1 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_i1_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_i1_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_i1 (gfc_array_i1 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_i1_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_i1 (gfc_array_i1 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_i1_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_i1_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_i1_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_i16.c b/libgfortran/generated/matmul_i16.c index cfbf9206c18..4e1d837682b 100644 --- a/libgfortran/generated/matmul_i16.c +++ b/libgfortran/generated/matmul_i16.c @@ -2367,7 +2367,6 @@ matmul_i16_vanilla (gfc_array_i16 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_i16 (gfc_array_i16 * const restrict retarray, gfc_array_i16 * const restrict a, gfc_array_i16 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_i16 (gfc_array_i16 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_i16_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_i16_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_i16 (gfc_array_i16 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_i16_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_i16 (gfc_array_i16 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_i16_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_i16_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_i16_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_i2.c b/libgfortran/generated/matmul_i2.c index 5a4aeed78e3..191298708dc 100644 --- a/libgfortran/generated/matmul_i2.c +++ b/libgfortran/generated/matmul_i2.c @@ -2367,7 +2367,6 @@ matmul_i2_vanilla (gfc_array_i2 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_i2 (gfc_array_i2 * const restrict retarray, gfc_array_i2 * const restrict a, gfc_array_i2 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_i2 (gfc_array_i2 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_i2_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_i2_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_i2 (gfc_array_i2 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_i2_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_i2 (gfc_array_i2 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_i2_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_i2_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_i2_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_i4.c b/libgfortran/generated/matmul_i4.c index 80592a04d14..ab14a0a3ff3 100644 --- a/libgfortran/generated/matmul_i4.c +++ b/libgfortran/generated/matmul_i4.c @@ -2367,7 +2367,6 @@ matmul_i4_vanilla (gfc_array_i4 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_i4 (gfc_array_i4 * const restrict retarray, gfc_array_i4 * const restrict a, gfc_array_i4 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_i4 (gfc_array_i4 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_i4_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_i4_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_i4 (gfc_array_i4 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_i4_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_i4 (gfc_array_i4 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_i4_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_i4_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_i4_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_i8.c b/libgfortran/generated/matmul_i8.c index 7e4c5bcc1bb..bc627e189fe 100644 --- a/libgfortran/generated/matmul_i8.c +++ b/libgfortran/generated/matmul_i8.c @@ -2367,7 +2367,6 @@ matmul_i8_vanilla (gfc_array_i8 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_i8 (gfc_array_i8 * const restrict retarray, gfc_array_i8 * const restrict a, gfc_array_i8 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_i8 (gfc_array_i8 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_i8_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_i8_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_i8 (gfc_array_i8 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_i8_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_i8 (gfc_array_i8 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_i8_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_i8_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_i8_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_r10.c b/libgfortran/generated/matmul_r10.c index d97aa41315e..b5e63be2448 100644 --- a/libgfortran/generated/matmul_r10.c +++ b/libgfortran/generated/matmul_r10.c @@ -2367,7 +2367,6 @@ matmul_r10_vanilla (gfc_array_r10 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_r10 (gfc_array_r10 * const restrict retarray, gfc_array_r10 * const restrict a, gfc_array_r10 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_r10 (gfc_array_r10 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_r10_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_r10_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_r10 (gfc_array_r10 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_r10_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_r10 (gfc_array_r10 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_r10_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_r10_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_r10_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_r16.c b/libgfortran/generated/matmul_r16.c index 82e8b502ba3..4e6c66bb8f3 100644 --- a/libgfortran/generated/matmul_r16.c +++ b/libgfortran/generated/matmul_r16.c @@ -2367,7 +2367,6 @@ matmul_r16_vanilla (gfc_array_r16 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_r16 (gfc_array_r16 * const restrict retarray, gfc_array_r16 * const restrict a, gfc_array_r16 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_r16 (gfc_array_r16 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_r16_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_r16_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_r16 (gfc_array_r16 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_r16_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_r16 (gfc_array_r16 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_r16_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_r16_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_r16_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_r4.c b/libgfortran/generated/matmul_r4.c index 36ce7daf781..202634b55d1 100644 --- a/libgfortran/generated/matmul_r4.c +++ b/libgfortran/generated/matmul_r4.c @@ -2367,7 +2367,6 @@ matmul_r4_vanilla (gfc_array_r4 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_r4 (gfc_array_r4 * const restrict retarray, gfc_array_r4 * const restrict a, gfc_array_r4 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_r4 (gfc_array_r4 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_r4_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_r4_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_r4 (gfc_array_r4 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_r4_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_r4 (gfc_array_r4 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_r4_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_r4_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_r4_avx128_fma4; goto store; diff --git a/libgfortran/generated/matmul_r8.c b/libgfortran/generated/matmul_r8.c index 9a81df189d5..22c24e50c37 100644 --- a/libgfortran/generated/matmul_r8.c +++ b/libgfortran/generated/matmul_r8.c @@ -2367,7 +2367,6 @@ matmul_r8_vanilla (gfc_array_r8 * const restrict retarray, /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_r8 (gfc_array_r8 * const restrict retarray, gfc_array_r8 * const restrict a, gfc_array_r8 * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -2384,11 +2383,11 @@ void matmul_r8 (gfc_array_r8 * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_r8_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
*/ #ifdef HAVE_AVX512F - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F)) + if (__builtin_cpu_supports ("avx512f")) { matmul_fn = matmul_r8_avx512f; goto store; @@ -2397,8 +2396,8 @@ void matmul_r8 (gfc_array_r8 * const restrict retarray, #endif /* HAVE_AVX512F */ #ifdef HAVE_AVX2 - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx2") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_r8_avx2; goto store; @@ -2407,26 +2406,26 @@ void matmul_r8 (gfc_array_r8 * const restrict retarray, #endif #ifdef HAVE_AVX - if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) + if (__builtin_cpu_supports ("avx")) { matmul_fn = matmul_r8_avx; goto store; } #endif /* HAVE_AVX */ } - else if (__cpu_model.__cpu_vendor == VENDOR_AMD) + else if (__builtin_cpu_is ("amd")) { #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma")) { matmul_fn = matmul_r8_avx128_fma3; goto store; } #endif #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128) - if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX)) - && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4))) + if (__builtin_cpu_supports ("avx") + && __builtin_cpu_supports ("fma4")) { matmul_fn = matmul_r8_avx128_fma4; goto store; diff --git a/libgfortran/m4/matmul.m4 b/libgfortran/m4/matmul.m4 index 83f4ae63339..5acecf1edce 100644 --- a/libgfortran/m4/matmul.m4 +++ b/libgfortran/m4/matmul.m4 @@ -134,7 +134,6 @@ internal_proto('matmul_name`); /* Currently, this is i386 only. Adjust for other architectures. */ -#include void matmul_'rtype_code` ('rtype` * const restrict retarray, 'rtype` * const restrict a, 'rtype` * const restrict b, int try_blas, int blas_limit, blas_call gemm) @@ -151,11 +150,11 @@ void matmul_'rtype_code` ('rtype` * const restrict retarray, if (matmul_fn == NULL) { matmul_fn = matmul_'rtype_code`_vanilla; - if (__cpu_model.__cpu_vendor == VENDOR_INTEL) + if (__builtin_cpu_is ("intel")) { /* Run down the available processors in order of preference. 
 */
 #ifdef HAVE_AVX512F
-	  if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX512F))
+	  if (__builtin_cpu_supports ("avx512f"))
 	    {
 	      matmul_fn = matmul_'rtype_code`_avx512f;
 	      goto store;
@@ -164,8 +163,8 @@ void matmul_'rtype_code` ('rtype` * const restrict retarray,
 #endif  /* HAVE_AVX512F */
 
 #ifdef HAVE_AVX2
-	  if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX2))
-	      && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA)))
+	  if (__builtin_cpu_supports ("avx2")
+	      && __builtin_cpu_supports ("fma"))
 	    {
 	      matmul_fn = matmul_'rtype_code`_avx2;
 	      goto store;
@@ -174,26 +173,26 @@ void matmul_'rtype_code` ('rtype` * const restrict retarray,
 #endif
 
 #ifdef HAVE_AVX
-	  if (__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX))
+	  if (__builtin_cpu_supports ("avx"))
 	    {
 	      matmul_fn = matmul_'rtype_code`_avx;
 	      goto store;
 	    }
 #endif  /* HAVE_AVX */
 	}
-    else if (__cpu_model.__cpu_vendor == VENDOR_AMD)
+    else if (__builtin_cpu_is ("amd"))
       {
 #if defined(HAVE_AVX) && defined(HAVE_FMA3) && defined(HAVE_AVX128)
-	if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX))
-	    && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA)))
+	if (__builtin_cpu_supports ("avx")
+	    && __builtin_cpu_supports ("fma"))
	  {
	    matmul_fn = matmul_'rtype_code`_avx128_fma3;
	    goto store;
	  }
 #endif
 #if defined(HAVE_AVX) && defined(HAVE_FMA4) && defined(HAVE_AVX128)
-	if ((__cpu_model.__cpu_features[0] & (1 << FEATURE_AVX))
-	    && (__cpu_model.__cpu_features[0] & (1 << FEATURE_FMA4)))
+	if (__builtin_cpu_supports ("avx")
+	    && __builtin_cpu_supports ("fma4"))
	  {
	    matmul_fn = matmul_'rtype_code`_avx128_fma4;
	    goto store;
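
Note (not part of the patch): for readers unfamiliar with the x86 CPU-detection builtins, the standalone sketch below shows the dispatch pattern the patch converts matmul to.  It is not libgfortran code; the kernel_* functions and the cached kernel_fn pointer are invented placeholders for the per-ISA matmul_<type>_<isa> variants and the static matmul_fn pointer in matmul.m4.  It should compile with GCC on an x86 target, e.g. "gcc -O2 dispatch.c".

/* Minimal sketch of runtime kernel selection with __builtin_cpu_is and
   __builtin_cpu_supports.  The builtins read the CPUID-derived data that
   libgcc initializes at program startup, so no <config/i386/cpuinfo.h>
   include and no direct access to __cpu_model is needed.  */

#include <stdio.h>

static void kernel_vanilla (void)  { puts ("vanilla kernel"); }
static void kernel_avx (void)      { puts ("AVX kernel"); }
static void kernel_avx2_fma (void) { puts ("AVX2+FMA kernel"); }
static void kernel_avx512f (void)  { puts ("AVX512F kernel"); }

/* Resolved once and cached, mirroring the static matmul_fn pointer.  */
static void (*kernel_fn) (void);

static void
pick_kernel (void)
{
  kernel_fn = kernel_vanilla;

  if (__builtin_cpu_is ("intel"))
    {
      /* Most capable ISA first, as in the patch.  */
      if (__builtin_cpu_supports ("avx512f"))
	kernel_fn = kernel_avx512f;
      else if (__builtin_cpu_supports ("avx2")
	       && __builtin_cpu_supports ("fma"))
	kernel_fn = kernel_avx2_fma;
      else if (__builtin_cpu_supports ("avx"))
	kernel_fn = kernel_avx;
    }
  else if (__builtin_cpu_is ("amd"))
    {
      if (__builtin_cpu_supports ("avx")
	  && __builtin_cpu_supports ("fma"))
	kernel_fn = kernel_avx2_fma;  /* stand-in for the 128-bit FMA3 path */
    }
}

int
main (void)
{
  pick_kernel ();
  kernel_fn ();
  return 0;
}

Using the builtins rather than peeking at __cpu_model.__cpu_vendor and __cpu_model.__cpu_features[] means libgfortran no longer depends on libgcc's internal cpuinfo.h header or on the exact layout of its feature-bit arrays.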