2020-04-29 Jakub Jelinek <jakub@redhat.com>
+ PR target/94832
+ * config/i386/avx512bwintrin.h (_mm512_alignr_epi8,
+ _mm512_mask_alignr_epi8, _mm512_maskz_alignr_epi8): Wrap macro operands
+ used in casts into parens.
+ * config/i386/avx512fintrin.h (_mm512_cvt_roundps_ph, _mm512_cvtps_ph,
+ _mm512_mask_cvt_roundps_ph, _mm512_mask_cvtps_ph,
+ _mm512_maskz_cvt_roundps_ph, _mm512_maskz_cvtps_ph,
+ _mm512_mask_cmp_epi64_mask, _mm512_mask_cmp_epi32_mask,
+ _mm512_mask_cmp_epu64_mask, _mm512_mask_cmp_epu32_mask,
+ _mm512_mask_cmp_round_pd_mask, _mm512_mask_cmp_round_ps_mask,
+ _mm512_mask_cmp_pd_mask, _mm512_mask_cmp_ps_mask): Likewise.
+ * config/i386/avx512vlbwintrin.h (_mm256_mask_alignr_epi8,
+ _mm256_maskz_alignr_epi8, _mm_mask_alignr_epi8, _mm_maskz_alignr_epi8,
+ _mm256_mask_cmp_epu8_mask): Likewise.
+ * config/i386/avx512vlintrin.h (_mm_mask_cvtps_ph, _mm_maskz_cvtps_ph,
+ _mm256_mask_cvtps_ph, _mm256_maskz_cvtps_ph): Likewise.
+ * config/i386/f16cintrin.h (_mm_cvtps_ph, _mm256_cvtps_ph): Likewise.
+ * config/i386/shaintrin.h (_mm_sha1rnds4_epu32): Likewise.
+
PR target/94832
* config/i386/avx2intrin.h (_mm_mask_i32gather_pd,
_mm256_mask_i32gather_pd, _mm_mask_i64gather_pd,
#define _mm512_alignr_epi8(X, Y, N) \
((__m512i) __builtin_ia32_palignr512 ((__v8di)(__m512i)(X), \
(__v8di)(__m512i)(Y), \
- (int)(N * 8)))
+ (int)((N) * 8)))
#define _mm512_mask_alignr_epi8(W, U, X, Y, N) \
((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X), \
- (__v8di)(__m512i)(Y), (int)(N * 8), \
+ (__v8di)(__m512i)(Y), (int)((N) * 8), \
(__v8di)(__m512i)(W), (__mmask64)(U)))
#define _mm512_maskz_alignr_epi8(U, X, Y, N) \
((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X), \
- (__v8di)(__m512i)(Y), (int)(N * 8), \
+ (__v8di)(__m512i)(Y), (int)((N) * 8), \
(__v8di)(__m512i) \
_mm512_setzero_si512 (), \
(__mmask64)(U)))
(__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(A), (__v16sf)_mm512_setzero_ps(), U, B)
#define _mm512_cvt_roundps_ph(A, I) \
- ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+ ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
(__v16hi)_mm256_undefined_si256 (), -1))
#define _mm512_cvtps_ph(A, I) \
- ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+ ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
(__v16hi)_mm256_undefined_si256 (), -1))
#define _mm512_mask_cvt_roundps_ph(U, W, A, I) \
- ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+ ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
(__v16hi)(__m256i)(U), (__mmask16) (W)))
#define _mm512_mask_cvtps_ph(U, W, A, I) \
- ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+ ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
(__v16hi)(__m256i)(U), (__mmask16) (W)))
#define _mm512_maskz_cvt_roundps_ph(W, A, I) \
- ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+ ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
(__v16hi)_mm256_setzero_si256 (), (__mmask16) (W)))
#define _mm512_maskz_cvtps_ph(W, A, I) \
- ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+ ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
(__v16hi)_mm256_setzero_si256 (), (__mmask16) (W)))
#endif
#define _mm512_mask_cmp_epi64_mask(M, X, Y, P) \
((__mmask8) __builtin_ia32_cmpq512_mask ((__v8di)(__m512i)(X), \
(__v8di)(__m512i)(Y), (int)(P),\
- (__mmask8)M))
+ (__mmask8)(M)))
#define _mm512_mask_cmp_epi32_mask(M, X, Y, P) \
((__mmask16) __builtin_ia32_cmpd512_mask ((__v16si)(__m512i)(X), \
(__v16si)(__m512i)(Y), (int)(P), \
- (__mmask16)M))
+ (__mmask16)(M)))
#define _mm512_mask_cmp_epu64_mask(M, X, Y, P) \
((__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di)(__m512i)(X), \
(__v8di)(__m512i)(Y), (int)(P),\
- (__mmask8)M))
+ (__mmask8)(M)))
#define _mm512_mask_cmp_epu32_mask(M, X, Y, P) \
((__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si)(__m512i)(X), \
(__v16si)(__m512i)(Y), (int)(P), \
- (__mmask16)M))
+ (__mmask16)(M)))
#define _mm512_mask_cmp_round_pd_mask(M, X, Y, P, R) \
((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \
(__v8df)(__m512d)(Y), (int)(P),\
- (__mmask8)M, R))
+ (__mmask8)(M), R))
#define _mm512_mask_cmp_round_ps_mask(M, X, Y, P, R) \
((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X), \
(__v16sf)(__m512)(Y), (int)(P),\
- (__mmask16)M, R))
+ (__mmask16)(M), R))
#define _mm_cmp_round_sd_mask(X, Y, P, R) \
((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
#define _mm512_mask_cmp_pd_mask(M, X, Y, P) \
((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \
(__v8df)(__m512d)(Y), (int)(P),\
- (__mmask8)M, _MM_FROUND_CUR_DIRECTION))
+ (__mmask8)(M), _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_cmp_ps_mask(M, X, Y, P) \
((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X), \
(__v16sf)(__m512)(Y), (int)(P),\
- (__mmask16)M,_MM_FROUND_CUR_DIRECTION))
+ (__mmask16)(M),_MM_FROUND_CUR_DIRECTION))
#define _mm_cmp_sd_mask(X, Y, P) \
((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
#else
#define _mm256_mask_alignr_epi8(W, U, X, Y, N) \
((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X), \
- (__v4di)(__m256i)(Y), (int)(N * 8), \
+ (__v4di)(__m256i)(Y), (int)((N) * 8), \
(__v4di)(__m256i)(X), (__mmask32)(U)))
#define _mm256_mask_srli_epi16(W, U, A, B) \
#define _mm256_maskz_alignr_epi8(U, X, Y, N) \
((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X), \
- (__v4di)(__m256i)(Y), (int)(N * 8), \
+ (__v4di)(__m256i)(Y), (int)((N) * 8), \
(__v4di)(__m256i)_mm256_setzero_si256 (), \
(__mmask32)(U)))
#define _mm_mask_alignr_epi8(W, U, X, Y, N) \
((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X), \
- (__v2di)(__m128i)(Y), (int)(N * 8), \
+ (__v2di)(__m128i)(Y), (int)((N) * 8), \
(__v2di)(__m128i)(X), (__mmask16)(U)))
#define _mm_maskz_alignr_epi8(U, X, Y, N) \
((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X), \
- (__v2di)(__m128i)(Y), (int)(N * 8), \
+ (__v2di)(__m128i)(Y), (int)((N) * 8), \
(__v2di)(__m128i)_mm_setzero_si128 (), \
(__mmask16)(U)))
#define _mm256_mask_cmp_epu8_mask(M, X, Y, P) \
((__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi)(__m256i)(X), \
(__v32qi)(__m256i)(Y), (int)(P),\
- (__mmask32)M))
+ (__mmask32)(M)))
#endif
extern __inline __mmask32
(__mmask8)(U)))
#define _mm_mask_cvtps_ph(W, U, A, I) \
- ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) A, (int) (I), \
+ ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I), \
(__v8hi)(__m128i) (W), (__mmask8) (U)))
#define _mm_maskz_cvtps_ph(U, A, I) \
- ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) A, (int) (I), \
+ ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I), \
(__v8hi)(__m128i) _mm_setzero_si128 (), (__mmask8) (U)))
#define _mm256_mask_cvtps_ph(W, U, A, I) \
- ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) A, (int) (I), \
+ ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I), \
(__v8hi)(__m128i) (W), (__mmask8) (U)))
#define _mm256_maskz_cvtps_ph(U, A, I) \
- ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) A, (int) (I), \
+ ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I), \
(__v8hi)(__m128i) _mm_setzero_si128 (), (__mmask8) (U)))
#define _mm256_mask_srai_epi32(W, U, A, B) \
}))
#define _mm_cvtps_ph(A, I) \
- ((__m128i) __builtin_ia32_vcvtps2ph ((__v4sf)(__m128) A, (int) (I)))
+ ((__m128i) __builtin_ia32_vcvtps2ph ((__v4sf)(__m128) (A), (int) (I)))
#define _mm256_cvtps_ph(A, I) \
- ((__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf)(__m256) A, (int) (I)))
+ ((__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf)(__m256) (A), (int) (I)))
#endif /* __OPTIMIZE__ */
#ifdef __DISABLE_F16C__
}
#else
#define _mm_sha1rnds4_epu32(A, B, I) \
- ((__m128i) __builtin_ia32_sha1rnds4 ((__v4si)(__m128i)A, \
- (__v4si)(__m128i)B, (int)I))
+ ((__m128i) __builtin_ia32_sha1rnds4 ((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (int)(I)))
#endif
extern __inline __m128i