From: Jakub Jelinek
Date: Wed, 29 Apr 2020 15:31:26 +0000 (+0200)
Subject: x86: Fix -O0 remaining intrinsic macros [PR94832]
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=0c8217b16f307c3eedce8f22354714938613f701;p=gcc.git

x86: Fix -O0 remaining intrinsic macros [PR94832]

A few other macros seem to suffer from the same issue.  What I did was:

cat gcc/config/i386/*intrin.h | sed -e ':x /\\$/ { N; s/\\\n//g ; bx }' \
  | grep '^[[:blank:]]*#[[:blank:]]*define[[:blank:]].*(' | sed 's/[ ]\+/ /g' \
  > /tmp/macros

and then searched the resulting file for the regexps:

)[a-zA-Z]
) [a-zA-Z]
[a-zA-Z][-+*/%]
[a-zA-Z] [-+*/%]
[-+*/%][a-zA-Z]
[-+*/%] [a-zA-Z]

(A short example of the operator-precedence problem the added parentheses
avoid follows the patch.)

2020-04-29  Jakub Jelinek

        PR target/94832
        * config/i386/avx512bwintrin.h (_mm512_alignr_epi8,
        _mm512_mask_alignr_epi8, _mm512_maskz_alignr_epi8): Wrap macro operands
        used in casts into parens.
        * config/i386/avx512fintrin.h (_mm512_cvt_roundps_ph, _mm512_cvtps_ph,
        _mm512_mask_cvt_roundps_ph, _mm512_mask_cvtps_ph,
        _mm512_maskz_cvt_roundps_ph, _mm512_maskz_cvtps_ph,
        _mm512_mask_cmp_epi64_mask, _mm512_mask_cmp_epi32_mask,
        _mm512_mask_cmp_epu64_mask, _mm512_mask_cmp_epu32_mask,
        _mm512_mask_cmp_round_pd_mask, _mm512_mask_cmp_round_ps_mask,
        _mm512_mask_cmp_pd_mask, _mm512_mask_cmp_ps_mask): Likewise.
        * config/i386/avx512vlbwintrin.h (_mm256_mask_alignr_epi8,
        _mm256_maskz_alignr_epi8, _mm_mask_alignr_epi8, _mm_maskz_alignr_epi8,
        _mm256_mask_cmp_epu8_mask): Likewise.
        * config/i386/avx512vlintrin.h (_mm_mask_cvtps_ph, _mm_maskz_cvtps_ph,
        _mm256_mask_cvtps_ph, _mm256_maskz_cvtps_ph): Likewise.
        * config/i386/f16cintrin.h (_mm_cvtps_ph, _mm256_cvtps_ph): Likewise.
        * config/i386/shaintrin.h (_mm_sha1rnds4_epu32): Likewise.
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 16e05d1fa60..72e38d89308 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,24 @@
 2020-04-29  Jakub Jelinek
 
+        PR target/94832
+        * config/i386/avx512bwintrin.h (_mm512_alignr_epi8,
+        _mm512_mask_alignr_epi8, _mm512_maskz_alignr_epi8): Wrap macro operands
+        used in casts into parens.
+        * config/i386/avx512fintrin.h (_mm512_cvt_roundps_ph, _mm512_cvtps_ph,
+        _mm512_mask_cvt_roundps_ph, _mm512_mask_cvtps_ph,
+        _mm512_maskz_cvt_roundps_ph, _mm512_maskz_cvtps_ph,
+        _mm512_mask_cmp_epi64_mask, _mm512_mask_cmp_epi32_mask,
+        _mm512_mask_cmp_epu64_mask, _mm512_mask_cmp_epu32_mask,
+        _mm512_mask_cmp_round_pd_mask, _mm512_mask_cmp_round_ps_mask,
+        _mm512_mask_cmp_pd_mask, _mm512_mask_cmp_ps_mask): Likewise.
+        * config/i386/avx512vlbwintrin.h (_mm256_mask_alignr_epi8,
+        _mm256_maskz_alignr_epi8, _mm_mask_alignr_epi8, _mm_maskz_alignr_epi8,
+        _mm256_mask_cmp_epu8_mask): Likewise.
+        * config/i386/avx512vlintrin.h (_mm_mask_cvtps_ph, _mm_maskz_cvtps_ph,
+        _mm256_mask_cvtps_ph, _mm256_maskz_cvtps_ph): Likewise.
+        * config/i386/f16cintrin.h (_mm_cvtps_ph, _mm256_cvtps_ph): Likewise.
+        * config/i386/shaintrin.h (_mm_sha1rnds4_epu32): Likewise.
+
         PR target/94832
         * config/i386/avx2intrin.h (_mm_mask_i32gather_pd,
         _mm256_mask_i32gather_pd, _mm_mask_i64gather_pd,
diff --git a/gcc/config/i386/avx512bwintrin.h b/gcc/config/i386/avx512bwintrin.h
index c886e5a31e9..d19c1044471 100644
--- a/gcc/config/i386/avx512bwintrin.h
+++ b/gcc/config/i386/avx512bwintrin.h
@@ -3128,16 +3128,16 @@ _mm512_bsrli_epi128 (__m512i __A, const int __N)
 #define _mm512_alignr_epi8(X, Y, N) \
   ((__m512i) __builtin_ia32_palignr512 ((__v8di)(__m512i)(X), \
           (__v8di)(__m512i)(Y), \
-          (int)(N * 8)))
+          (int)((N) * 8)))
 
 #define _mm512_mask_alignr_epi8(W, U, X, Y, N) \
   ((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X), \
-          (__v8di)(__m512i)(Y), (int)(N * 8), \
+          (__v8di)(__m512i)(Y), (int)((N) * 8), \
           (__v8di)(__m512i)(W), (__mmask64)(U)))
 
 #define _mm512_maskz_alignr_epi8(U, X, Y, N) \
   ((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X), \
-          (__v8di)(__m512i)(Y), (int)(N * 8), \
+          (__v8di)(__m512i)(Y), (int)((N) * 8), \
           (__v8di)(__m512i) \
           _mm512_setzero_si512 (), \
           (__mmask64)(U)))
diff --git a/gcc/config/i386/avx512fintrin.h b/gcc/config/i386/avx512fintrin.h
index c86982ab9c8..012cf4eb31e 100644
--- a/gcc/config/i386/avx512fintrin.h
+++ b/gcc/config/i386/avx512fintrin.h
@@ -8570,22 +8570,22 @@ _mm512_maskz_cvtps_ph (__mmask16 __W, __m512 __A, const int __I)
 (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(A), (__v16sf)_mm512_setzero_ps(), U, B)
 
 #define _mm512_cvt_roundps_ph(A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)_mm256_undefined_si256 (), -1))
 #define _mm512_cvtps_ph(A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)_mm256_undefined_si256 (), -1))
 #define _mm512_mask_cvt_roundps_ph(U, W, A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)(__m256i)(U), (__mmask16) (W)))
 #define _mm512_mask_cvtps_ph(U, W, A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)(__m256i)(U), (__mmask16) (W)))
 #define _mm512_maskz_cvt_roundps_ph(W, A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)_mm256_setzero_si256 (), (__mmask16) (W)))
 #define _mm512_maskz_cvtps_ph(W, A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)_mm256_setzero_si256 (), (__mmask16) (W)))
 
 #endif
@@ -10081,32 +10081,32 @@ _mm_mask_cmp_round_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y,
 #define _mm512_mask_cmp_epi64_mask(M, X, Y, P) \
   ((__mmask8) __builtin_ia32_cmpq512_mask ((__v8di)(__m512i)(X), \
           (__v8di)(__m512i)(Y), (int)(P),\
-          (__mmask8)M))
+          (__mmask8)(M)))
 
 #define _mm512_mask_cmp_epi32_mask(M, X, Y, P) \
   ((__mmask16) __builtin_ia32_cmpd512_mask ((__v16si)(__m512i)(X), \
           (__v16si)(__m512i)(Y), (int)(P), \
-          (__mmask16)M))
+          (__mmask16)(M)))
 
 #define _mm512_mask_cmp_epu64_mask(M, X, Y, P) \
   ((__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di)(__m512i)(X), \
           (__v8di)(__m512i)(Y), (int)(P),\
-          (__mmask8)M))
+          (__mmask8)(M)))
 
 #define _mm512_mask_cmp_epu32_mask(M, X, Y, P) \
   ((__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si)(__m512i)(X), \
           (__v16si)(__m512i)(Y), (int)(P), \
-          (__mmask16)M))
+          (__mmask16)(M)))
 
 #define _mm512_mask_cmp_round_pd_mask(M, X, Y, P, R) \
   ((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \
           (__v8df)(__m512d)(Y), (int)(P),\
-          (__mmask8)M, R))
+          (__mmask8)(M), R))
 
 #define _mm512_mask_cmp_round_ps_mask(M, X, Y, P, R) \
   ((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X), \
           (__v16sf)(__m512)(Y), (int)(P),\
-          (__mmask16)M, R))
+          (__mmask16)(M), R))
 
 #define _mm_cmp_round_sd_mask(X, Y, P, R) \
   ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
@@ -15498,12 +15498,12 @@ _mm_mask_cmp_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y, const int __P)
 #define _mm512_mask_cmp_pd_mask(M, X, Y, P) \
   ((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \
           (__v8df)(__m512d)(Y), (int)(P),\
-          (__mmask8)M, _MM_FROUND_CUR_DIRECTION))
+          (__mmask8)(M), _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_cmp_ps_mask(M, X, Y, P) \
   ((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X), \
           (__v16sf)(__m512)(Y), (int)(P),\
-          (__mmask16)M,_MM_FROUND_CUR_DIRECTION))
+          (__mmask16)(M),_MM_FROUND_CUR_DIRECTION))
 
 #define _mm_cmp_sd_mask(X, Y, P) \
   ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
diff --git a/gcc/config/i386/avx512vlbwintrin.h b/gcc/config/i386/avx512vlbwintrin.h
index 19293e4d6f0..bee2639d60a 100644
--- a/gcc/config/i386/avx512vlbwintrin.h
+++ b/gcc/config/i386/avx512vlbwintrin.h
@@ -1787,7 +1787,7 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
 #else
 #define _mm256_mask_alignr_epi8(W, U, X, Y, N) \
   ((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X), \
-          (__v4di)(__m256i)(Y), (int)(N * 8), \
+          (__v4di)(__m256i)(Y), (int)((N) * 8), \
           (__v4di)(__m256i)(X), (__mmask32)(U)))
 
 #define _mm256_mask_srli_epi16(W, U, A, B) \
@@ -1864,18 +1864,18 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
 
 #define _mm256_maskz_alignr_epi8(U, X, Y, N) \
   ((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X), \
-          (__v4di)(__m256i)(Y), (int)(N * 8), \
+          (__v4di)(__m256i)(Y), (int)((N) * 8), \
           (__v4di)(__m256i)_mm256_setzero_si256 (), \
           (__mmask32)(U)))
 
 #define _mm_mask_alignr_epi8(W, U, X, Y, N) \
   ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X), \
-          (__v2di)(__m128i)(Y), (int)(N * 8), \
+          (__v2di)(__m128i)(Y), (int)((N) * 8), \
           (__v2di)(__m128i)(X), (__mmask16)(U)))
 
 #define _mm_maskz_alignr_epi8(U, X, Y, N) \
   ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X), \
-          (__v2di)(__m128i)(Y), (int)(N * 8), \
+          (__v2di)(__m128i)(Y), (int)((N) * 8), \
           (__v2di)(__m128i)_mm_setzero_si128 (), \
           (__mmask16)(U)))
 
@@ -2033,7 +2033,7 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
 #define _mm256_mask_cmp_epu8_mask(M, X, Y, P) \
   ((__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi)(__m256i)(X), \
           (__v32qi)(__m256i)(Y), (int)(P),\
-          (__mmask32)M))
+          (__mmask32)(M)))
 #endif
 
 extern __inline __mmask32
diff --git a/gcc/config/i386/avx512vlintrin.h b/gcc/config/i386/avx512vlintrin.h
index 7685bdfa391..cb6cc0ce782 100644
--- a/gcc/config/i386/avx512vlintrin.h
+++ b/gcc/config/i386/avx512vlintrin.h
@@ -13466,19 +13466,19 @@ _mm256_permutex_pd (__m256d __X, const int __M)
           (__mmask8)(U)))
 
 #define _mm_mask_cvtps_ph(W, U, A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) A, (int) (I), \
+  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I), \
     (__v8hi)(__m128i) (W), (__mmask8) (U)))
 
 #define _mm_maskz_cvtps_ph(U, A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) A, (int) (I), \
+  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I), \
     (__v8hi)(__m128i) _mm_setzero_si128 (), (__mmask8) (U)))
 
 #define _mm256_mask_cvtps_ph(W, U, A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) A, (int) (I), \
+  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I), \
    (__v8hi)(__m128i) (W), (__mmask8) (U)))
 
 #define _mm256_maskz_cvtps_ph(U, A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) A, (int) (I), \
+  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I), \
    (__v8hi)(__m128i) _mm_setzero_si128 (), (__mmask8) (U)))
 
 #define _mm256_mask_srai_epi32(W, U, A, B) \
diff --git a/gcc/config/i386/f16cintrin.h b/gcc/config/i386/f16cintrin.h
index a4033cf0e87..8276e8d6521 100644
--- a/gcc/config/i386/f16cintrin.h
+++ b/gcc/config/i386/f16cintrin.h
@@ -84,10 +84,10 @@ _mm256_cvtps_ph (__m256 __A, const int __I)
      }))
 
 #define _mm_cvtps_ph(A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph ((__v4sf)(__m128) A, (int) (I)))
+  ((__m128i) __builtin_ia32_vcvtps2ph ((__v4sf)(__m128) (A), (int) (I)))
 
 #define _mm256_cvtps_ph(A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf)(__m256) A, (int) (I)))
+  ((__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf)(__m256) (A), (int) (I)))
 #endif /* __OPTIMIZE */
 
 #ifdef __DISABLE_F16C__
diff --git a/gcc/config/i386/shaintrin.h b/gcc/config/i386/shaintrin.h
index 38011dda17a..13833b261c6 100644
--- a/gcc/config/i386/shaintrin.h
+++ b/gcc/config/i386/shaintrin.h
@@ -64,8 +64,8 @@ _mm_sha1rnds4_epu32 (__m128i __A, __m128i __B, const int __I)
 }
 #else
 #define _mm_sha1rnds4_epu32(A, B, I) \
-  ((__m128i) __builtin_ia32_sha1rnds4 ((__v4si)(__m128i)A, \
-          (__v4si)(__m128i)B, (int)I))
+  ((__m128i) __builtin_ia32_sha1rnds4 ((__v4si)(__m128i)(A), \
+          (__v4si)(__m128i)(B), (int)(I)))
 #endif
 
 extern __inline __m128i
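
For reference, here is a minimal, self-contained sketch of the operator-precedence
problem the added parentheses avoid.  The macro names below (BYTE_SHIFT_*, MASK8_*)
are illustrative stand-ins, not the real intrinsics; they mirror the (int)(N * 8)
and (__mmask8)M patterns fixed above.  At -O0 the intrinsics expand as macros rather
than inline functions, so an unparenthesized operand binds to the adjacent cast or
multiplication in the expansion instead of being evaluated as a whole expression.

/* Sketch of the precedence problem fixed by this patch; the macro names
   are illustrative only, not the real AVX-512 intrinsics.  */
#include <stdio.h>

/* Old style, as in (int)(N * 8): the operand N is not parenthesized.  */
#define BYTE_SHIFT_OLD(N) ((int) (N * 8))
/* Fixed style, as in (int)((N) * 8).  */
#define BYTE_SHIFT_NEW(N) ((int) ((N) * 8))

/* Old style, as in (__mmask8)M: the cast grabs only part of the operand.  */
#define MASK8_OLD(M) ((unsigned char) M)
/* Fixed style, as in (__mmask8)(M).  */
#define MASK8_NEW(M) ((unsigned char) (M))

int
main (void)
{
  /* BYTE_SHIFT_OLD (1 + 1) expands to (int) (1 + 1 * 8) == 9,
     not the intended (1 + 1) * 8 == 16.  */
  printf ("shift: old %d, new %d\n",
          BYTE_SHIFT_OLD (1 + 1), BYTE_SHIFT_NEW (1 + 1));

  /* MASK8_OLD (1 << 8) expands to ((unsigned char) 1) << 8 == 256,
     while the intended (unsigned char) (1 << 8) truncates to 0.  */
  printf ("mask:  old %d, new %d\n",
          MASK8_OLD (1 << 8), MASK8_NEW (1 << 8));
  return 0;
}

Compiled and run, this prints "shift: old 9, new 16" and "mask:  old 256, new 0";
the "new" values are the ones the caller intends, which is why the patch wraps
every macro operand used next to a cast or arithmetic operator in parentheses.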