Tested on x86-64.
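
For reference, a minimal usage sketch of the new unaligned integer
load/store intrinsics (illustrative only, not part of the patch;
assumes -mavx512bw):

#include <immintrin.h>

/* Copy 32 shorts through one 512-bit register.  Unlike
   _mm512_load_si512, the new _mm512_loadu_epi16/_mm512_storeu_epi16
   place no alignment requirement on SRC or DST.  */
void
copy32 (short *dst, const short *src)
{
  __m512i v = _mm512_loadu_epi16 (src);
  _mm512_storeu_epi16 (dst, v);
}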
gcc/ChangeLog:
PR target/95483
* config/i386/avx2intrin.h (_mm_broadcastsi128_si256): New intrinsic.
(_mm_broadcastsd_pd): Ditto.
* config/i386/avx512bwintrin.h (_mm512_loadu_epi16): New intrinsic.
(_mm512_storeu_epi16): Ditto.
(_mm512_loadu_epi8): Ditto.
(_mm512_storeu_epi8): Ditto.
* config/i386/avx512dqintrin.h (_mm_reduce_round_sd): New intrinsic.
(_mm_mask_reduce_round_sd): Ditto.
(_mm_maskz_reduce_round_sd): Ditto.
(_mm_reduce_round_ss): Ditto.
(_mm_mask_reduce_round_ss): Ditto.
(_mm_maskz_reduce_round_ss): Ditto.
(_mm512_reduce_round_pd): Ditto.
(_mm512_mask_reduce_round_pd): Ditto.
(_mm512_maskz_reduce_round_pd): Ditto.
(_mm512_reduce_round_ps): Ditto.
(_mm512_mask_reduce_round_ps): Ditto.
(_mm512_maskz_reduce_round_ps): Ditto.
* config/i386/avx512erintrin.h
(_mm_mask_rcp28_round_sd): New intrinsic.
(_mm_maskz_rcp28_round_sd): Ditto.
(_mm_mask_rcp28_round_ss): Ditto.
(_mm_maskz_rcp28_round_ss): Ditto.
(_mm_mask_rsqrt28_round_sd): Ditto.
(_mm_maskz_rsqrt28_round_sd): Ditto.
(_mm_mask_rsqrt28_round_ss): Ditto.
(_mm_maskz_rsqrt28_round_ss): Ditto.
(_mm_mask_rcp28_sd): Ditto.
(_mm_maskz_rcp28_sd): Ditto.
(_mm_mask_rcp28_ss): Ditto.
(_mm_maskz_rcp28_ss): Ditto.
(_mm_mask_rsqrt28_sd): Ditto.
(_mm_maskz_rsqrt28_sd): Ditto.
(_mm_mask_rsqrt28_ss): Ditto.
(_mm_maskz_rsqrt28_ss): Ditto.
* config/i386/avx512fintrin.h (_mm_mask_sqrt_sd): New intrinsic.
(_mm_maskz_sqrt_sd): Ditto.
(_mm_mask_sqrt_ss): Ditto.
(_mm_maskz_sqrt_ss): Ditto.
(_mm_mask_scalef_sd): Ditto.
(_mm_maskz_scalef_sd): Ditto.
(_mm_mask_scalef_ss): Ditto.
(_mm_maskz_scalef_ss): Ditto.
(_mm_mask_cvt_roundsd_ss): Ditto.
(_mm_maskz_cvt_roundsd_ss): Ditto.
(_mm_mask_cvt_roundss_sd): Ditto.
(_mm_maskz_cvt_roundss_sd): Ditto.
(_mm_mask_cvtss_sd): Ditto.
(_mm_maskz_cvtss_sd): Ditto.
(_mm_mask_cvtsd_ss): Ditto.
(_mm_maskz_cvtsd_ss): Ditto.
(_mm512_cvtsi512_si32): Ditto.
(_mm_cvtsd_i32): Ditto.
(_mm_cvtss_i32): Ditto.
(_mm_cvti32_sd): Ditto.
(_mm_cvti32_ss): Ditto.
(_mm_cvtsd_i64): Ditto.
(_mm_cvtss_i64): Ditto.
(_mm_cvti64_sd): Ditto.
(_mm_cvti64_ss): Ditto.
* config/i386/avx512vlbwintrin.h (_mm256_storeu_epi8): New intrinsic.
(_mm_storeu_epi8): Ditto.
(_mm256_loadu_epi16): Ditto.
(_mm_loadu_epi16): Ditto.
(_mm256_loadu_epi8): Ditto.
(_mm_loadu_epi8): Ditto.
(_mm256_storeu_epi16): Ditto.
(_mm_storeu_epi16): Ditto.
* config/i386/avx512vlintrin.h (_mm256_load_epi64): New intrinsic.
(_mm_load_epi64): Ditto.
(_mm256_load_epi32): Ditto.
(_mm_load_epi32): Ditto.
(_mm256_store_epi32): Ditto.
(_mm_store_epi32): Ditto.
(_mm256_loadu_epi64): Ditto.
(_mm_loadu_epi64): Ditto.
(_mm256_loadu_epi32): Ditto.
(_mm_loadu_epi32): Ditto.
(_mm256_mask_cvt_roundps_ph): Ditto.
(_mm256_maskz_cvt_roundps_ph): Ditto.
(_mm_mask_cvt_roundps_ph): Ditto.
(_mm_maskz_cvt_roundps_ph): Ditto.
* config/i386/avxintrin.h (_mm256_cvtsi256_si32): New intrinsic.
* config/i386/emmintrin.h (_mm_loadu_si32): New intrinsic.
(_mm_loadu_si16): Ditto.
(_mm_storeu_si32): Ditto.
(_mm_storeu_si16): Ditto.
* config/i386/i386-builtin-types.def
(V8DF_FTYPE_V8DF_INT_V8DF_UQI_INT): Add new type.
(V16SF_FTYPE_V16SF_INT_V16SF_UHI_INT): Ditto.
(V4SF_FTYPE_V4SF_V2DF_V4SF_UQI_INT): Ditto.
(V2DF_FTYPE_V2DF_V4SF_V2DF_UQI_INT): Ditto.
* config/i386/i386-builtin.def
(__builtin_ia32_cvtsd2ss_mask_round): New builtin.
(__builtin_ia32_cvtss2sd_mask_round): Ditto.
(__builtin_ia32_rcp28sd_mask_round): Ditto.
(__builtin_ia32_rcp28ss_mask_round): Ditto.
(__builtin_ia32_rsqrt28sd_mask_round): Ditto.
(__builtin_ia32_rsqrt28ss_mask_round): Ditto.
(__builtin_ia32_reducepd512_mask_round): Ditto.
(__builtin_ia32_reduceps512_mask_round): Ditto.
(__builtin_ia32_reducesd_mask_round): Ditto.
(__builtin_ia32_reducess_mask_round): Ditto.
* config/i386/i386-expand.c (ix86_expand_round_builtin): Handle the
new types V8DF_FTYPE_V8DF_INT_V8DF_UQI_INT,
V16SF_FTYPE_V16SF_INT_V16SF_UHI_INT,
V4SF_FTYPE_V4SF_V2DF_V4SF_UQI_INT and
V2DF_FTYPE_V2DF_V4SF_V2DF_UQI_INT.
* config/i386/mmintrin.h (__m32, __m16): New datatypes.
(__m32_u, __m16_u): Ditto.
* config/i386/sse.md
(<mask_codefor>reducep<mode><mask_name><round_saeonly_name>): Adjust
pattern to support masking and embedded rounding.
(reduces<mode><mask_scalar_name><round_saeonly_scalar_name>): Ditto.
(sse2_cvtsd2ss<mask_name><round_name>): Ditto.
(sse2_cvtss2sd<mask_name><round_saeonly_name>): Ditto.
(avx512er_vmrcp28<mode><mask_name><round_saeonly_name>): Ditto.
(avx512er_vmrsqrt28<mode><mask_name><round_saeonly_name>): Ditto.
gcc/testsuite/ChangeLog:
PR target/95483
* gcc.target/i386/avx-1.c: Add test.
* gcc.target/i386/avx2-vbroadcastsi128-1.c: Ditto.
* gcc.target/i386/avx2-vbroadcastsi128-2.c: Ditto.
* gcc.target/i386/avx512bw-vmovdqu16-1.c: Ditto.
* gcc.target/i386/avx512bw-vmovdqu8-1.c: Ditto.
* gcc.target/i386/avx512dq-vreducesd-1.c: Ditto.
* gcc.target/i386/avx512dq-vreducesd-2.c: Ditto.
* gcc.target/i386/avx512dq-vreducess-1.c: Ditto.
* gcc.target/i386/avx512dq-vreducess-2.c: Ditto.
* gcc.target/i386/avx512er-vrcp28sd-1.c: Ditto.
* gcc.target/i386/avx512er-vrcp28sd-2.c: Ditto.
* gcc.target/i386/avx512er-vrcp28ss-1.c: Ditto.
* gcc.target/i386/avx512er-vrcp28ss-2.c: Ditto.
* gcc.target/i386/avx512er-vrsqrt28sd-1.c: Ditto.
* gcc.target/i386/avx512er-vrsqrt28sd-2.c: Ditto.
* gcc.target/i386/avx512er-vrsqrt28ss-1.c: Ditto.
* gcc.target/i386/avx512er-vrsqrt28ss-2.c: Ditto.
* gcc.target/i386/avx512f-vcvtsd2si-1.c: Ditto.
* gcc.target/i386/avx512f-vcvtsd2si64-1.c: Ditto.
* gcc.target/i386/avx512f-vcvtsd2ss-1.c: Ditto.
* gcc.target/i386/avx512f-vcvtsi2sd64-1.c: Ditto.
* gcc.target/i386/avx512f-vcvtsi2ss-1.c: Ditto.
* gcc.target/i386/avx512f-vcvtsi2ss64-1.c: Ditto.
* gcc.target/i386/avx512f-vcvtss2sd-1.c: Ditto.
* gcc.target/i386/avx512f-vcvtss2si-1.c: Ditto.
* gcc.target/i386/avx512f-vcvtss2si64-1.c: Ditto.
* gcc.target/i386/avx512f-vscalefsd-1.c: Ditto.
* gcc.target/i386/avx512f-vscalefsd-2.c: Ditto.
* gcc.target/i386/avx512f-vscalefss-1.c: Ditto.
* gcc.target/i386/avx512f-vscalefss-2.c: Ditto.
* gcc.target/i386/avx512f-vsqrtsd-1.c: Ditto.
* gcc.target/i386/avx512f-vsqrtsd-2.c: Ditto.
* gcc.target/i386/avx512f-vsqrtss-1.c: Ditto.
* gcc.target/i386/avx512f-vsqrtss-2.c: Ditto.
* gcc.target/i386/avx512vl-vmovdqa32-1.c: Ditto.
* gcc.target/i386/avx512vl-vmovdqa64-1.c: Ditto.
* gcc.target/i386/sse-13.c: Ditto.
* gcc.target/i386/sse-23.c: Ditto.
* gcc.target/i386/avx512dq-vreducepd-3.c: New test.
* gcc.target/i386/avx512dq-vreducepd-4.c: New test.
* gcc.target/i386/avx512dq-vreduceps-3.c: New test.
* gcc.target/i386/avx512dq-vreduceps-4.c: New test.
* gcc.target/i386/avx512f-vcvtsi2sd-1.c: New test.
* gcc.target/i386/pr95483-1.c: New test.
* gcc.target/i386/pr95483-2.c: New test.
* gcc.target/i386/pr95483-3.c: New test.
* gcc.target/i386/pr95483-4.c: New test.
* gcc.target/i386/pr95483-5.c: New test.
* gcc.target/i386/pr95483-6.c: New test.
* gcc.target/i386/pr95483-7.c: New test.
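
A short sketch of the new embedded-rounding reduce intrinsics, for
reviewers (illustrative only; assumes -mavx512dq and uses the same IMM
as the new run test):

#include <immintrin.h>

/* IMM 0x23 truncates to 2 fraction bits and returns the remainder,
   i.e. x - trunc (4 * x) / 4; the SAE operand suppresses exceptions.  */
__m512d
frac_quarter (__m512d x)
{
  return _mm512_reduce_round_pd (x, 0x23,
				 _MM_FROUND_TO_NEAREST_INT
				 | _MM_FROUND_NO_EXC);
}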
return (__m256i) __builtin_ia32_vbroadcastsi256 ((__v2di)__X);
}
+#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
+#define _mm_broadcastsd_pd(X) _mm_movedup_pd(X)
+
#ifdef __OPTIMIZE__
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
/* Internal data types for implementing the intrinsics. */
typedef short __v32hi __attribute__ ((__vector_size__ (64)));
+typedef short __v32hi_u __attribute__ ((__vector_size__ (64), \
+ __may_alias__, __aligned__ (1)));
typedef char __v64qi __attribute__ ((__vector_size__ (64)));
+typedef char __v64qi_u __attribute__ ((__vector_size__ (64), \
+ __may_alias__, __aligned__ (1)));
typedef unsigned long long __mmask64;
(__mmask32) __U);
}
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_loadu_epi16 (void const *__P)
+{
+ return (__m512i) (*(__v32hi_u *) __P);
+}
+
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
(__mmask32) __U);
}
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_storeu_epi16 (void *__P, __m512i __A)
+{
+ *(__v32hi_u *) __P = (__v32hi_u) __A;
+}
+
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
(__mmask64) __B);
}
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_loadu_epi8 (void const *__P)
+{
+ return (__m512i) (*(__v64qi_u *) __P);
+}
+
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
(__mmask64) __U);
}
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_storeu_epi8 (void *__P, __m512i __A)
+{
+ *(__v64qi_u *) __P = (__v64qi_u) __A;
+}
+
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A)
(__mmask8) -1);
}
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_reduce_round_sd (__m128d __A, __m128d __B, int __C, const int __R)
+{
+ return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1, __R);
+}
+
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_reduce_sd (__m128d __W, __mmask8 __U, __m128d __A,
(__mmask8) __U);
}
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_reduce_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, int __C, const int __R)
+{
+ return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df) __W,
+ __U, __R);
+}
+
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_reduce_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C)
(__mmask8) __U);
}
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_reduce_round_sd (__mmask8 __U, __m128d __A, __m128d __B,
+ int __C, const int __R)
+{
+ return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A,
+ (__v2df) __B, __C,
+ (__v2df)
+ _mm_setzero_pd (),
+ __U, __R);
+}
+
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_reduce_ss (__m128 __A, __m128 __B, int __C)
(__mmask8) -1);
}
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_reduce_round_ss (__m128 __A, __m128 __B, int __C, const int __R)
+{
+ return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1, __R);
+}
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
(__mmask8) __U);
}
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_reduce_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, int __C, const int __R)
+{
+ return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf) __W,
+ __U, __R);
+}
+
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_reduce_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C)
(__mmask8) __U);
}
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_reduce_round_ss (__mmask8 __U, __m128 __A, __m128 __B,
+ int __C, const int __R)
+{
+ return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A,
+ (__v4sf) __B, __C,
+ (__v4sf)
+ _mm_setzero_ps (),
+ __U, __R);
+}
+
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_range_sd (__m128d __A, __m128d __B, int __C)
(__mmask8) -1);
}
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_reduce_round_pd (__m512d __A, int __B, const int __R)
+{
+ return (__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df) __A,
+ __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) -1, __R);
+}
+
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_reduce_pd (__m512d __W, __mmask8 __U, __m512d __A, int __B)
(__mmask8) __U);
}
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_reduce_round_pd (__m512d __W, __mmask8 __U, __m512d __A,
+ int __B, const int __R)
+{
+ return (__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df) __A,
+ __B,
+ (__v8df) __W,
+ __U, __R);
+}
+
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_reduce_pd (__mmask8 __U, __m512d __A, int __B)
(__mmask8) __U);
}
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_reduce_round_pd (__mmask8 __U, __m512d __A, int __B,
+ const int __R)
+{
+ return (__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df) __A,
+ __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ __U, __R);
+}
+
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_reduce_ps (__m512 __A, int __B)
(__mmask16) -1);
}
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_reduce_round_ps (__m512 __A, int __B, const int __R)
+{
+ return (__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf) __A,
+ __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) -1, __R);
+}
+
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_reduce_ps (__m512 __W, __mmask16 __U, __m512 __A, int __B)
(__mmask16) __U);
}
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_reduce_round_ps (__m512 __W, __mmask16 __U, __m512 __A, int __B,
+ const int __R)
+{
+ return (__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf) __A,
+ __B,
+ (__v16sf) __W,
+ __U, __R);
+}
+
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_reduce_ps (__mmask16 __U, __m512 __A, int __B)
(__mmask16) __U);
}
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_reduce_round_ps (__mmask16 __U, __m512 __A, int __B,
+ const int __R)
+{
+ return (__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf) __A,
+ __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ __U, __R);
+}
+
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_extractf32x8_ps (__m512 __A, const int __imm)
((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A), \
(int)(B), (__v8df)_mm512_setzero_pd (), (__mmask8)-1))
+#define _mm512_reduce_round_pd(A, B, R) \
+ ((__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df)(__m512d)(A),\
+ (int)(B), (__v8df)_mm512_setzero_pd (), (__mmask8)-1, (R)))
+
#define _mm512_mask_reduce_pd(W, U, A, B) \
((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A), \
(int)(B), (__v8df)(__m512d)(W), (__mmask8)(U)))
+#define _mm512_mask_reduce_round_pd(W, U, A, B, R) \
+ ((__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df)(__m512d)(A),\
+ (int)(B), (__v8df)(__m512d)(W), (U), (R)))
+
#define _mm512_maskz_reduce_pd(U, A, B) \
((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A), \
(int)(B), (__v8df)_mm512_setzero_pd (), (__mmask8)(U)))
+#define _mm512_maskz_reduce_round_pd(U, A, B, R) \
+ ((__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df)(__m512d)(A),\
+ (int)(B), (__v8df)_mm512_setzero_pd (), (U), (R)))
+
#define _mm512_reduce_ps(A, B) \
((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A), \
(int)(B), (__v16sf)_mm512_setzero_ps (), (__mmask16)-1))
+#define _mm512_reduce_round_ps(A, B, R) \
+ ((__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf)(__m512)(A),\
+ (int)(B), (__v16sf)_mm512_setzero_ps (), (__mmask16)-1, (R)))
+
#define _mm512_mask_reduce_ps(W, U, A, B) \
((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A), \
(int)(B), (__v16sf)(__m512)(W), (__mmask16)(U)))
+#define _mm512_mask_reduce_round_ps(W, U, A, B, R) \
+ ((__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf)(__m512)(A),\
+ (int)(B), (__v16sf)(__m512)(W), (U), (R)))
+
#define _mm512_maskz_reduce_ps(U, A, B) \
((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A), \
(int)(B), (__v16sf)_mm512_setzero_ps (), (__mmask16)(U)))
+#define _mm512_maskz_reduce_round_ps(U, A, B, R) \
+ ((__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf)(__m512)(A),\
+ (int)(B), (__v16sf)_mm512_setzero_ps (), (__mmask16)(U), (R)))
+
#define _mm512_extractf32x8_ps(X, C) \
((__m256) __builtin_ia32_extractf32x8_mask ((__v16sf)(__m512) (X), \
(int) (C), (__v8sf)(__m256) _mm256_setzero_ps (), (__mmask8)-1))
(__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
(__mmask8)(U)))
+#define _mm_reduce_round_sd(A, B, C, R) \
+  ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
+    (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
+    (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \
+ ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_reduce_round_sd(U, A, B, C, R) \
+ ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \
+ (__mmask8)(U), (int)(R)))
+
#define _mm_reduce_ss(A, B, C) \
((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
(__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
(__mmask8)(U)))
+#define _mm_reduce_round_ss(A, B, C, R) \
+  ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A), \
+    (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
+    (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \
+ ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_reduce_round_ss(U, A, B, C, R) \
+  ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \
+ (__mmask8)(U), (int)(R)))
#endif
__R);
}
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rcp28_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, int __R)
+{
+ return (__m128d) __builtin_ia32_rcp28sd_mask_round ((__v2df) __B,
+ (__v2df) __A,
+ (__v2df) __W,
+ __U,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rcp28_round_sd (__mmask8 __U, __m128d __A, __m128d __B, int __R)
+{
+ return (__m128d) __builtin_ia32_rcp28sd_mask_round ((__v2df) __B,
+ (__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ __U,
+ __R);
+}
+
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp28_round_ss (__m128 __A, __m128 __B, int __R)
__R);
}
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rcp28_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, int __R)
+{
+ return (__m128) __builtin_ia32_rcp28ss_mask_round ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf) __W,
+ __U,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rcp28_round_ss (__mmask8 __U, __m128 __A, __m128 __B, int __R)
+{
+ return (__m128) __builtin_ia32_rcp28ss_mask_round ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ __U,
+ __R);
+}
+
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_rsqrt28_round_pd (__m512d __A, int __R)
__R);
}
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rsqrt28_round_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B, int __R)
+{
+ return (__m128d) __builtin_ia32_rsqrt28sd_mask_round ((__v2df) __B,
+ (__v2df) __A,
+ (__v2df) __W,
+ __U,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rsqrt28_round_sd (__mmask8 __U, __m128d __A, __m128d __B, int __R)
+{
+ return (__m128d) __builtin_ia32_rsqrt28sd_mask_round ((__v2df) __B,
+ (__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ __U,
+ __R);
+}
+
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt28_round_ss (__m128 __A, __m128 __B, int __R)
__R);
}
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rsqrt28_round_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128 __B, int __R)
+{
+ return (__m128) __builtin_ia32_rsqrt28ss_mask_round ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf) __W,
+ __U,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rsqrt28_round_ss (__mmask8 __U, __m128 __A, __m128 __B, int __R)
+{
+ return (__m128) __builtin_ia32_rsqrt28ss_mask_round ((__v4sf) __B,
+ (__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ __U,
+ __R);
+}
+
#else
#define _mm512_exp2a23_round_pd(A, C) \
__builtin_ia32_exp2pd_mask(A, (__v8df)_mm512_setzero_pd(), -1, C)
#define _mm_rcp28_round_sd(A, B, R) \
__builtin_ia32_rcp28sd_round(A, B, R)
+#define _mm_mask_rcp28_round_sd(W, U, A, B, R) \
+  __builtin_ia32_rcp28sd_mask_round ((B), (A), (W), (U), (R))
+
+#define _mm_maskz_rcp28_round_sd(U, A, B, R) \
+  __builtin_ia32_rcp28sd_mask_round ((B), (A), (__v2df) _mm_setzero_pd (), \
+				     (U), (R))
+
#define _mm_rcp28_round_ss(A, B, R) \
__builtin_ia32_rcp28ss_round(A, B, R)
+#define _mm_mask_rcp28_round_ss(W, U, A, B, R) \
+  __builtin_ia32_rcp28ss_mask_round ((B), (A), (W), (U), (R))
+
+#define _mm_maskz_rcp28_round_ss(U, A, B, R) \
+  __builtin_ia32_rcp28ss_mask_round ((B), (A), (__v4sf) _mm_setzero_ps (), \
+				     (U), (R))
+
#define _mm_rsqrt28_round_sd(A, B, R) \
__builtin_ia32_rsqrt28sd_round(A, B, R)
+#define _mm_mask_rsqrt28_round_sd(W, U, A, B, R) \
+  __builtin_ia32_rsqrt28sd_mask_round ((B), (A), (W), (U), (R))
+
+#define _mm_maskz_rsqrt28_round_sd(U, A, B, R) \
+  __builtin_ia32_rsqrt28sd_mask_round ((B), (A), (__v2df) _mm_setzero_pd (),\
+				       (U), (R))
+
#define _mm_rsqrt28_round_ss(A, B, R) \
__builtin_ia32_rsqrt28ss_round(A, B, R)
+#define _mm_mask_rsqrt28_round_ss(W, U, A, B, R) \
+  __builtin_ia32_rsqrt28ss_mask_round ((B), (A), (W), (U), (R))
+
+#define _mm_maskz_rsqrt28_round_ss(U, A, B, R) \
+  __builtin_ia32_rsqrt28ss_mask_round ((B), (A), (__v4sf) _mm_setzero_ps (),\
+				       (U), (R))
+
#endif
+#define _mm_mask_rcp28_sd(W, U, A, B)\
+ _mm_mask_rcp28_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rcp28_sd(U, A, B)\
+ _mm_maskz_rcp28_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rcp28_ss(W, U, A, B)\
+ _mm_mask_rcp28_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rcp28_ss(U, A, B)\
+ _mm_maskz_rcp28_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rsqrt28_sd(W, U, A, B)\
+ _mm_mask_rsqrt28_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rsqrt28_sd(U, A, B)\
+ _mm_maskz_rsqrt28_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rsqrt28_ss(W, U, A, B)\
+ _mm_mask_rsqrt28_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rsqrt28_ss(U, A, B)\
+ _mm_maskz_rsqrt28_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
#define _mm512_exp2a23_pd(A) \
_mm512_exp2a23_round_pd(A, _MM_FROUND_CUR_DIRECTION)
(__v4sf) _mm_setzero_ps (), U, C)
#endif
+#define _mm_mask_sqrt_sd(W, U, A, B) \
+ _mm_mask_sqrt_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_sqrt_sd(U, A, B) \
+ _mm_maskz_sqrt_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_sqrt_ss(W, U, A, B) \
+ _mm_mask_sqrt_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_sqrt_ss(U, A, B) \
+ _mm_maskz_sqrt_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtepi8_epi32 (__m128i __A)
(__v4sf)_mm_setzero_ps (), -1, C)
#endif
+#define _mm_mask_scalef_sd(W, U, A, B) \
+ _mm_mask_scalef_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_scalef_sd(U, A, B) \
+ _mm_maskz_scalef_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_scalef_ss(W, U, A, B) \
+ _mm_mask_scalef_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_scalef_ss(U, A, B) \
+ _mm_maskz_scalef_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
#ifdef __OPTIMIZE__
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
__R);
}
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvt_roundsd_ss (__m128 __W, __mmask8 __U, __m128 __A,
+ __m128d __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtsd2ss_mask_round ((__v4sf) __A,
+ (__v2df) __B,
+ (__v4sf) __W,
+ __U,
+ __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvt_roundsd_ss (__mmask8 __U, __m128 __A,
+ __m128d __B, const int __R)
+{
+ return (__m128) __builtin_ia32_cvtsd2ss_mask_round ((__v4sf) __A,
+ (__v2df) __B,
+ _mm_setzero_ps (),
+ __U,
+ __R);
+}
+
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_roundss_sd (__m128d __A, __m128 __B, const int __R)
(__v4sf) __B,
__R);
}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvt_roundss_sd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128 __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_cvtss2sd_mask_round ((__v2df) __A,
+ (__v4sf) __B,
+ (__v2df) __W,
+ __U,
+ __R);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvt_roundss_sd (__mmask8 __U, __m128d __A,
+ __m128 __B, const int __R)
+{
+ return (__m128d) __builtin_ia32_cvtss2sd_mask_round ((__v2df) __A,
+ (__v4sf) __B,
+ _mm_setzero_pd (),
+ __U,
+ __R);
+}
#else
#define _mm512_cvt_roundpd_ps(A, B) \
(__m256)__builtin_ia32_cvtpd2ps512_mask(A, (__v8sf)_mm256_undefined_ps(), -1, B)
#define _mm_cvt_roundsd_ss(A, B, C) \
(__m128)__builtin_ia32_cvtsd2ss_round(A, B, C)
+#define _mm_mask_cvt_roundsd_ss(W, U, A, B, C) \
+ (__m128)__builtin_ia32_cvtsd2ss_mask_round ((A), (B), (W), (U), (C))
+
+#define _mm_maskz_cvt_roundsd_ss(U, A, B, C) \
+ (__m128)__builtin_ia32_cvtsd2ss_mask_round ((A), (B), _mm_setzero_ps (), \
+ (U), (C))
+
#define _mm_cvt_roundss_sd(A, B, C) \
(__m128d)__builtin_ia32_cvtss2sd_round(A, B, C)
+
+#define _mm_mask_cvt_roundss_sd(W, U, A, B, C) \
+ (__m128d)__builtin_ia32_cvtss2sd_mask_round ((A), (B), (W), (U), (C))
+
+#define _mm_maskz_cvt_roundss_sd(U, A, B, C) \
+ (__m128d)__builtin_ia32_cvtss2sd_mask_round ((A), (B), _mm_setzero_pd (), \
+ (U), (C))
+
#endif
+#define _mm_mask_cvtss_sd(W, U, A, B) \
+ _mm_mask_cvt_roundss_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_cvtss_sd(U, A, B) \
+ _mm_maskz_cvt_roundss_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_cvtsd_ss(W, U, A, B) \
+ _mm_mask_cvt_roundsd_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_cvtsd_ss(U, A, B) \
+ _mm_maskz_cvt_roundsd_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_stream_si512 (__m512i * __P, __m512i __A)
}
#endif /* __x86_64__ */
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtsi512_si32 (__m512i __A)
+{
+ __v16si __B = (__v16si) __A;
+ return __B[0];
+}
+
extern __inline unsigned
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_u32 (__m128 __A)
_MM_FROUND_CUR_DIRECTION);
}
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_i32 (__m128d __A)
+{
+ return (int) __builtin_ia32_cvtsd2si ((__v2df) __A);
+}
+
+extern __inline int
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_i32 (__m128 __A)
+{
+ return (int) __builtin_ia32_cvtss2si ((__v4sf) __A);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvti32_sd (__m128d __A, int __B)
+{
+ return (__m128d) __builtin_ia32_cvtsi2sd ((__v2df) __A, __B);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvti32_ss (__m128 __A, int __B)
+{
+ return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
+}
+
#ifdef __x86_64__
extern __inline unsigned long long
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
_MM_FROUND_CUR_DIRECTION);
}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_i64 (__m128d __A)
+{
+ return (long long) __builtin_ia32_cvtsd2si64 ((__v2df) __A);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtss_i64 (__m128 __A)
+{
+ return (long long) __builtin_ia32_cvtss2si64 ((__v4sf) __A);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvti64_sd (__m128d __A, long long __B)
+{
+ return (__m128d) __builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvti64_ss (__m128 __A, long long __B)
+{
+ return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
+}
#endif /* __x86_64__ */
extern __inline unsigned
#define __DISABLE_AVX512VLBW__
#endif /* __AVX512VLBW__ */
+/* Internal data types for implementing the intrinsics. */
+typedef short __v16hi_u __attribute__ ((__vector_size__ (32), \
+ __may_alias__, __aligned__ (1)));
+typedef short __v8hi_u __attribute__ ((__vector_size__ (16), \
+ __may_alias__, __aligned__ (1)));
+typedef char __v32qi_u __attribute__ ((__vector_size__ (32), \
+ __may_alias__, __aligned__ (1)));
+typedef char __v16qi_u __attribute__ ((__vector_size__ (16), \
+ __may_alias__, __aligned__ (1)));
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
(__mmask16) __U);
}
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_storeu_epi8 (void *__P, __m256i __A)
+{
+ *(__v32qi_u *) __P = (__v32qi_u) __A;
+}
+
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
(__mmask32) __U);
}
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeu_epi8 (void *__P, __m128i __A)
+{
+ *(__v16qi_u *) __P = (__v16qi_u) __A;
+}
+
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
(__mmask16) __U);
}
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_loadu_epi16 (void const *__P)
+{
+ return (__m256i) (*(__v16hi_u *) __P);
+}
+
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
(__mmask16) __U);
}
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_epi16 (void const *__P)
+{
+ return (__m128i) (*(__v8hi_u *) __P);
+}
+
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_loadu_epi8 (void const *__P)
+{
+ return (__m256i) (*(__v32qi_u *) __P);
+}
+
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
(__mmask32) __U);
}
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_epi8 (void const *__P)
+{
+ return (__m128i) (*(__v16qi_u *) __P);
+}
+
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
(__mmask16) -1);
}
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_storeu_epi16 (void *__P, __m256i __A)
+{
+ *(__v16hi_u *) __P = (__v16hi_u) __A;
+}
+
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
(__mmask16) __U);
}
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeu_epi16 (void *__P, __m128i __A)
+{
+ *(__v8hi_u *) __P = (__v8hi_u) __A;
+}
+
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
/* Internal data types for implementing the intrinsics. */
typedef unsigned int __mmask32;
+typedef int __v4si_u __attribute__ ((__vector_size__ (16), \
+ __may_alias__, __aligned__ (1)));
+typedef int __v8si_u __attribute__ ((__vector_size__ (32), \
+ __may_alias__, __aligned__ (1)));
+typedef long long __v2di_u __attribute__ ((__vector_size__ (16), \
+ __may_alias__, __aligned__ (1)));
+typedef long long __v4di_u __attribute__ ((__vector_size__ (32), \
+ __may_alias__, __aligned__ (1)));
extern __inline __m256d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
(__mmask8) __U);
}
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_load_epi64 (void const *__P)
+{
+ return (__m256i) (*(__v4di *) __P);
+}
+
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
__U);
}
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_epi64 (void const *__P)
+{
+ return (__m128i) (*(__v2di *) __P);
+}
+
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_load_epi32 (void const *__P)
+{
+ return (__m256i) (*(__v8si *) __P);
+}
+
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
__U);
}
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_epi32 (void const *__P)
+{
+ return (__m128i) (*(__v4si *) __P);
+}
+
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
__U);
}
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_store_epi32 (void *__P, __m256i __A)
+{
+ *(__v8si *) __P = (__v8si) __A;
+}
+
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
(__mmask8) __U);
}
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_epi32 (void *__P, __m128i __A)
+{
+ *(__v4si *) __P = (__v4si) __A;
+}
+
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_loadu_epi64 (void const *__P)
+{
+ return (__m256i) (*(__v4di_u *) __P);
+}
+
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_epi64 (void const *__P)
+{
+ return (__m128i) (*(__v2di_u *) __P);
+}
+
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_loadu_epi32 (void const *__P)
+{
+ return (__m256i) (*(__v8si_u *) __P);
+}
+
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_epi32 (void const *__P)
+{
+ return (__m128i) (*(__v4si_u *) __P);
+}
+
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
#endif
#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps ((B), (A))
+#define _mm256_mask_cvt_roundps_ph(A, B, C, D) \
+ _mm256_mask_cvtps_ph ((A), (B), (C), (D))
+#define _mm256_maskz_cvt_roundps_ph(A, B, C) \
+ _mm256_maskz_cvtps_ph ((A), (B), (C))
+#define _mm_mask_cvt_roundps_ph(A, B, C, D) \
+ _mm_mask_cvtps_ph ((A), (B), (C), (D))
+#define _mm_maskz_cvt_roundps_ph(A, B, C) _mm_maskz_cvtps_ph ((A), (B), (C))
#ifdef __DISABLE_AVX512VL__
#undef __DISABLE_AVX512VL__
(__v4sf)(__m128)(Y), (int)(P)))
#endif
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtsi256_si32 (__m256i __A)
+{
+ __v8si __B = (__v8si) __A;
+ return __B[0];
+}
+
extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_cvtepi32_pd (__m128i __A)
{
return _mm_loadl_epi64 ((__m128i_u *)__P);
}
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_si32 (void const *__P)
+{
+  return _mm_set_epi32 (0, 0, 0, (*(__m32_u *)__P)[0]);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loadu_si16 (void const *__P)
+{
+  return _mm_set_epi16 (0, 0, 0, 0, 0, 0, 0, (*(__m16_u *)__P)[0]);
+}
+
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_si128 (__m128i *__P, __m128i __B)
{
_mm_storel_epi64 ((__m128i_u *)__P, __B);
}
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeu_si32 (void *__P, __m128i __B)
+{
+ *(__m32_u *)__P = (__m32) ((__v4si)__B)[0];
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_storeu_si16 (void *__P, __m128i __B)
+{
+ *(__m16_u *)__P = (__m16) ((__v8hi)__B)[0];
+}
+
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movepi64_pi64 (__m128i __B)
{
DEF_FUNCTION_TYPE (V8DF, V8DF, V8DF, INT, V8DF, UQI)
DEF_FUNCTION_TYPE (V8DF, V8DF, V8DF, INT, V8DF, QI, INT)
DEF_FUNCTION_TYPE (V8DF, V8DF, INT, V8DF, UQI)
+DEF_FUNCTION_TYPE (V8DF, V8DF, INT, V8DF, UQI, INT)
DEF_FUNCTION_TYPE (V8DF, V8DF, V8DF, V8DI, INT)
DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DI, INT, UQI)
DEF_FUNCTION_TYPE (V2DF, V2DF, V2DF, V2DI, INT, UQI)
DEF_FUNCTION_TYPE (V16SF, V16SF, V16SF, INT, V16SF, UHI)
DEF_FUNCTION_TYPE (V16SF, V16SF, V16SF, INT, V16SF, HI, INT)
DEF_FUNCTION_TYPE (V16SF, V16SF, INT, V16SF, UHI)
+DEF_FUNCTION_TYPE (V16SF, V16SF, INT, V16SF, UHI, INT)
DEF_FUNCTION_TYPE (V16SI, V16SI, V4SI, INT, V16SI, UHI)
DEF_FUNCTION_TYPE (V16SF, V16SF, V16SF, V16SI, INT)
DEF_FUNCTION_TYPE (V16SF, V16SF, V16SF, V16SI, INT, HI, INT)
DEF_FUNCTION_TYPE (V4SF, V4SF, V4SF, V4SF, UQI, INT)
DEF_FUNCTION_TYPE (V4SF, V4SF, V4SF, V4SF, QI, INT)
DEF_FUNCTION_TYPE (V4SF, V4SF, V2DF, V4SF, QI, INT)
+DEF_FUNCTION_TYPE (V4SF, V4SF, V2DF, V4SF, UQI, INT)
DEF_FUNCTION_TYPE (V2DF, V2DF, V2DF, V2DF, QI, INT)
DEF_FUNCTION_TYPE (V2DF, V2DF, V4SF, V2DF, QI, INT)
+DEF_FUNCTION_TYPE (V2DF, V2DF, V4SF, V2DF, UQI, INT)
DEF_FUNCTION_TYPE (V2DF, V2DF, V2DF, V2DF, INT)
DEF_FUNCTION_TYPE (V4SF, V4SF, V4SF, V4SF, INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_cvtps2pd512_mask_round, "__builtin_ia32_cvtps2pd512_mask", IX86_BUILTIN_CVTPS2PD512, UNKNOWN, (int) V8DF_FTYPE_V8SF_V8DF_QI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_avx512f_ufix_notruncv16sfv16si_mask_round, "__builtin_ia32_cvtps2udq512_mask", IX86_BUILTIN_CVTPS2UDQ512, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_cvtsd2ss_round, "__builtin_ia32_cvtsd2ss_round", IX86_BUILTIN_CVTSD2SS_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_cvtsd2ss_mask_round, "__builtin_ia32_cvtsd2ss_mask_round", IX86_BUILTIN_CVTSD2SS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF_V4SF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F | OPTION_MASK_ISA_64BIT, 0, CODE_FOR_sse2_cvtsi2sdq_round, "__builtin_ia32_cvtsi2sd64", IX86_BUILTIN_CVTSI2SD64, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT64_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse_cvtsi2ss_round, "__builtin_ia32_cvtsi2ss32", IX86_BUILTIN_CVTSI2SS32, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT_INT)
BDESC (OPTION_MASK_ISA_AVX512F | OPTION_MASK_ISA_64BIT, 0, CODE_FOR_sse_cvtsi2ssq_round, "__builtin_ia32_cvtsi2ss64", IX86_BUILTIN_CVTSI2SS64, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT64_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_cvtss2sd_round, "__builtin_ia32_cvtss2sd_round", IX86_BUILTIN_CVTSS2SD_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF_INT)
+BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_sse2_cvtss2sd_mask_round, "__builtin_ia32_cvtss2sd_mask_round", IX86_BUILTIN_CVTSS2SD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_fix_truncv8dfv8si2_mask_round, "__builtin_ia32_cvttpd2dq512_mask", IX86_BUILTIN_CVTTPD2DQ512, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_QI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_fixuns_truncv8dfv8si2_mask_round, "__builtin_ia32_cvttpd2udq512_mask", IX86_BUILTIN_CVTTPD2UDQ512, UNKNOWN, (int) V8SI_FTYPE_V8DF_V8SI_QI_INT)
BDESC (OPTION_MASK_ISA_AVX512F, 0, CODE_FOR_fix_truncv16sfv16si2_mask_round, "__builtin_ia32_cvttps2dq512_mask", IX86_BUILTIN_CVTTPS2DQ512, UNKNOWN, (int) V16SI_FTYPE_V16SF_V16SI_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_rcp28v8df_mask_round, "__builtin_ia32_rcp28pd_mask", IX86_BUILTIN_RCP28PD, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_QI_INT)
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_rcp28v16sf_mask_round, "__builtin_ia32_rcp28ps_mask", IX86_BUILTIN_RCP28PS, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_vmrcp28v2df_round, "__builtin_ia32_rcp28sd_round", IX86_BUILTIN_RCP28SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT)
+BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_vmrcp28v2df_mask_round, "__builtin_ia32_rcp28sd_mask_round", IX86_BUILTIN_RCP28SD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_vmrcp28v4sf_round, "__builtin_ia32_rcp28ss_round", IX86_BUILTIN_RCP28SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT)
+BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_vmrcp28v4sf_mask_round, "__builtin_ia32_rcp28ss_mask_round", IX86_BUILTIN_RCP28SS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_rsqrt28v8df_mask_round, "__builtin_ia32_rsqrt28pd_mask", IX86_BUILTIN_RSQRT28PD, UNKNOWN, (int) V8DF_FTYPE_V8DF_V8DF_QI_INT)
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_rsqrt28v16sf_mask_round, "__builtin_ia32_rsqrt28ps_mask", IX86_BUILTIN_RSQRT28PS, UNKNOWN, (int) V16SF_FTYPE_V16SF_V16SF_HI_INT)
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_vmrsqrt28v2df_round, "__builtin_ia32_rsqrt28sd_round", IX86_BUILTIN_RSQRT28SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT)
+BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_vmrsqrt28v2df_mask_round, "__builtin_ia32_rsqrt28sd_mask_round", IX86_BUILTIN_RSQRT28SD_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_vmrsqrt28v4sf_round, "__builtin_ia32_rsqrt28ss_round", IX86_BUILTIN_RSQRT28SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT)
+BDESC (OPTION_MASK_ISA_AVX512ER, 0, CODE_FOR_avx512er_vmrsqrt28v4sf_mask_round, "__builtin_ia32_rsqrt28ss_mask_round", IX86_BUILTIN_RSQRT28SS_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT)
/* AVX512DQ. */
+BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_reducepv8df_mask_round, "__builtin_ia32_reducepd512_mask_round", IX86_BUILTIN_REDUCEPD512_MASK_ROUND, UNKNOWN, (int) V8DF_FTYPE_V8DF_INT_V8DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_reducepv16sf_mask_round, "__builtin_ia32_reduceps512_mask_round", IX86_BUILTIN_REDUCEPS512_MASK_ROUND, UNKNOWN, (int) V16SF_FTYPE_V16SF_INT_V16SF_UHI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_reducesv2df_mask_round, "__builtin_ia32_reducesd_mask_round", IX86_BUILTIN_REDUCESD128_MASK_ROUND, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI_INT)
+BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_reducesv4sf_mask_round, "__builtin_ia32_reducess_mask_round", IX86_BUILTIN_REDUCESS128_MASK_ROUND, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_rangesv2df_mask_round, "__builtin_ia32_rangesd128_mask_round", IX86_BUILTIN_RANGESD128, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_avx512dq_rangesv4sf_mask_round, "__builtin_ia32_rangess128_mask_round", IX86_BUILTIN_RANGESS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI_INT)
BDESC (OPTION_MASK_ISA_AVX512DQ, 0, CODE_FOR_fix_notruncv8dfv8di2_mask_round, "__builtin_ia32_cvtpd2qq512_mask", IX86_BUILTIN_CVTPD2QQ512, UNKNOWN, (int) V8DI_FTYPE_V8DF_V8DI_QI_INT)
case V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT:
case V2DF_FTYPE_V2DF_V2DF_V2DF_QI_INT:
case V2DF_FTYPE_V2DF_V4SF_V2DF_QI_INT:
+ case V2DF_FTYPE_V2DF_V4SF_V2DF_UQI_INT:
case V4SF_FTYPE_V4SF_V4SF_V4SF_QI_INT:
case V4SF_FTYPE_V4SF_V2DF_V4SF_QI_INT:
+ case V4SF_FTYPE_V4SF_V2DF_V4SF_UQI_INT:
nargs = 5;
break;
case V16SF_FTYPE_V16SF_INT_V16SF_HI_INT:
case V8DF_FTYPE_V8DF_INT_V8DF_QI_INT:
+ case V8DF_FTYPE_V8DF_INT_V8DF_UQI_INT:
+ case V16SF_FTYPE_V16SF_INT_V16SF_UHI_INT:
nargs_constant = 4;
nargs = 5;
break;
/* The Intel API is flexible enough that we must allow aliasing with other
vector types, and their scalar components. */
typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__));
+typedef int __m32 __attribute__ ((__vector_size__ (4), __may_alias__));
+typedef short __m16 __attribute__ ((__vector_size__ (2), __may_alias__));
/* Unaligned version of the same type */
typedef int __m64_u __attribute__ ((__vector_size__ (8), __may_alias__, __aligned__ (1)));
+typedef int __m32_u __attribute__ ((__vector_size__ (4), \
+ __may_alias__, __aligned__ (1)));
+typedef short __m16_u __attribute__ ((__vector_size__ (2), \
+ __may_alias__, __aligned__ (1)));
/* Internal data types for implementing the intrinsics. */
typedef int __v2si __attribute__ ((__vector_size__ (8)));
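
A quick sketch of how the new __m32/__m16-based intrinsics are meant
to be used (illustrative only; plain SSE2 suffices):

#include <emmintrin.h>

/* Load one int into the low element (upper lanes zeroed), then store
   only the low 32 bits back; neither access requires alignment.  */
void
roundtrip32 (int *dst, const int *src)
{
  __m128i v = _mm_loadu_si32 (src);
  _mm_storeu_si32 (dst, v);
}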
DONE;
})
-(define_insn "<mask_codefor>reducep<mode><mask_name>"
+(define_insn "<mask_codefor>reducep<mode><mask_name><round_saeonly_name>"
[(set (match_operand:VF_AVX512VL 0 "register_operand" "=v")
(unspec:VF_AVX512VL
- [(match_operand:VF_AVX512VL 1 "nonimmediate_operand" "vm")
+ [(match_operand:VF_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_REDUCE))]
"TARGET_AVX512DQ"
- "vreduce<ssemodesuffix>\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
+ "vreduce<ssemodesuffix>\t{%2, <round_saeonly_mask_op3>%1, %0<mask_operand3>|%0<mask_operand3>, %1<round_saeonly_mask_op3>, %2}"
[(set_attr "type" "sse")
(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
-(define_insn "reduces<mode><mask_scalar_name>"
+(define_insn "reduces<mode><mask_scalar_name><round_saeonly_scalar_name>"
[(set (match_operand:VF_128 0 "register_operand" "=v")
(vec_merge:VF_128
(unspec:VF_128
[(match_operand:VF_128 1 "register_operand" "v")
- (match_operand:VF_128 2 "nonimmediate_operand" "vm")
+ (match_operand:VF_128 2 "<round_saeonly_scalar_nimm_predicate>" "<round_saeonly_scalar_constraint>")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_REDUCE)
(match_dup 1)
(const_int 1)))]
"TARGET_AVX512DQ"
- "vreduce<ssescalarmodesuffix>\t{%3, %2, %1, %0<mask_scalar_operand4>|%0<mask_scalar_operand4>, %1, %<iptr>2, %3}"
+ "vreduce<ssescalarmodesuffix>\t{%3, <round_saeonly_scalar_mask_op4>%2, %1, %0<mask_scalar_operand4>|%0<mask_scalar_operand4>, %1, %<iptr>2<round_saeonly_scalar_mask_op4>, %3}"
[(set_attr "type" "sse")
(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
(set_attr "prefix" "evex")
(set_attr "mode" "TI")])
-(define_insn "sse2_cvtsd2ss<round_name>"
+(define_insn "sse2_cvtsd2ss<mask_name><round_name>"
[(set (match_operand:V4SF 0 "register_operand" "=x,x,v")
(vec_merge:V4SF
(vec_duplicate:V4SF
"@
cvtsd2ss\t{%2, %0|%0, %2}
cvtsd2ss\t{%2, %0|%0, %q2}
- vcvtsd2ss\t{<round_op3>%2, %1, %0|%0, %1, %q2<round_op3>}"
+ vcvtsd2ss\t{<round_mask_op3>%2, %1, %0<mask_operand3>|<mask_operand3>%0, %1, %q2<round_mask_op3>}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssecvt")
(set_attr "athlon_decode" "vector,double,*")
(set_attr "prefix" "orig,orig,vex")
(set_attr "mode" "SF")])
-(define_insn "sse2_cvtss2sd<round_saeonly_name>"
+(define_insn "sse2_cvtss2sd<mask_name><round_saeonly_name>"
[(set (match_operand:V2DF 0 "register_operand" "=x,x,v")
(vec_merge:V2DF
(float_extend:V2DF
"@
cvtss2sd\t{%2, %0|%0, %2}
cvtss2sd\t{%2, %0|%0, %k2}
- vcvtss2sd\t{<round_saeonly_op3>%2, %1, %0|%0, %1, %k2<round_saeonly_op3>}"
+ vcvtss2sd\t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|<mask_operand3>%0, %1, %k2<round_saeonly_mask_op3>}"
[(set_attr "isa" "noavx,noavx,avx")
(set_attr "type" "ssecvt")
(set_attr "amdfam10_decode" "vector,double,*")
(set_attr "type" "sse")
(set_attr "mode" "<MODE>")])
-(define_insn "avx512er_vmrcp28<mode><round_saeonly_name>"
+(define_insn "avx512er_vmrcp28<mode><mask_name><round_saeonly_name>"
[(set (match_operand:VF_128 0 "register_operand" "=v")
(vec_merge:VF_128
(unspec:VF_128
(match_operand:VF_128 2 "register_operand" "v")
(const_int 1)))]
"TARGET_AVX512ER"
- "vrcp28<ssescalarmodesuffix>\t{<round_saeonly_op3>%1, %2, %0|%0, %2, %<iptr>1<round_saeonly_op3>}"
+ "vrcp28<ssescalarmodesuffix>\t{<round_saeonly_mask_op3>%1, %2, %0<mask_operand3>|<mask_opernad3>%0, %2, %<iptr>1<round_saeonly_mask_op3>}"
[(set_attr "length_immediate" "1")
(set_attr "prefix" "evex")
(set_attr "type" "sse")
(set_attr "type" "sse")
(set_attr "mode" "<MODE>")])
-(define_insn "avx512er_vmrsqrt28<mode><round_saeonly_name>"
+(define_insn "avx512er_vmrsqrt28<mode><mask_name><round_saeonly_name>"
[(set (match_operand:VF_128 0 "register_operand" "=v")
(vec_merge:VF_128
(unspec:VF_128
(match_operand:VF_128 2 "register_operand" "v")
(const_int 1)))]
"TARGET_AVX512ER"
- "vrsqrt28<ssescalarmodesuffix>\t{<round_saeonly_op3>%1, %2, %0|%0, %2, %<iptr>1<round_saeonly_op3>}"
+ "vrsqrt28<ssescalarmodesuffix>\t{<round_saeonly_mask_op3>%1, %2, %0<mask_operand3>|<mask_operand3>%0, %2, %<iptr>1<round_saeonly_mask_op3>}"
[(set_attr "length_immediate" "1")
(set_attr "type" "sse")
(set_attr "prefix" "evex")
#define __builtin_ia32_vfmaddss3_mask3(A, B, C, D, E) __builtin_ia32_vfmaddss3_mask3(A, B, C, D, 8)
#define __builtin_ia32_vfmaddss3_maskz(A, B, C, D, E) __builtin_ia32_vfmaddss3_maskz(A, B, C, D, 8)
#define __builtin_ia32_vfmsubss3_mask3(A, B, C, D, E) __builtin_ia32_vfmsubss3_mask3(A, B, C, D, 8)
+#define __builtin_ia32_cvtsd2ss_mask_round(A, B, C, D, E) __builtin_ia32_cvtsd2ss_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_cvtss2sd_mask_round(A, B, C, D, E) __builtin_ia32_cvtss2sd_mask_round(A, B, C, D, 8)
/* avx512erintrin.h */
#define __builtin_ia32_exp2ps_mask(A, B, C, D) __builtin_ia32_exp2ps_mask(A, B, C, 8)
#define __builtin_ia32_rcp28sd_round(A, B, C) __builtin_ia32_rcp28sd_round(A, B, 8)
#define __builtin_ia32_rsqrt28ss_round(A, B, C) __builtin_ia32_rsqrt28ss_round(A, B, 8)
#define __builtin_ia32_rsqrt28sd_round(A, B, C) __builtin_ia32_rsqrt28sd_round(A, B, 8)
+#define __builtin_ia32_rcp28sd_mask_round(A, B, C, D, E) __builtin_ia32_rcp28sd_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_rcp28ss_mask_round(A, B, C, D, E) __builtin_ia32_rcp28ss_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_rsqrt28sd_mask_round(A, B, C, D, E) __builtin_ia32_rsqrt28sd_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_rsqrt28ss_mask_round(A, B, C, D, E) __builtin_ia32_rsqrt28ss_mask_round(A, B, C, D, 8)
/* avx512pfintrin.h */
#define __builtin_ia32_gatherpfdps(A, B, C, D, E) __builtin_ia32_gatherpfdps(A, B, C, 1, _MM_HINT_T0)
#define __builtin_ia32_cvtps2qq512_mask(A, B, C, D) __builtin_ia32_cvtps2qq512_mask(A, B, C, 8)
#define __builtin_ia32_cvtpd2uqq512_mask(A, B, C, D) __builtin_ia32_cvtpd2uqq512_mask(A, B, C, 8)
#define __builtin_ia32_cvtpd2qq512_mask(A, B, C, D) __builtin_ia32_cvtpd2qq512_mask(A, B, C, 8)
+#define __builtin_ia32_reducesd_mask_round(A, B, C, D, E, F) __builtin_ia32_reducesd_mask_round(A, B, 8, D, E, 8)
+#define __builtin_ia32_reducess_mask_round(A, B, C, D, E, F) __builtin_ia32_reducess_mask_round(A, B, 8, D, E, 8)
+#define __builtin_ia32_reducepd512_mask_round(A, B, C, D, E) __builtin_ia32_reducepd512_mask_round(A, 8, C, D, 8)
+#define __builtin_ia32_reduceps512_mask_round(A, B, C, D, E) __builtin_ia32_reduceps512_mask_round(A, 8, C, D, 8)
/* avx512vlintrin.h */
#define __builtin_ia32_vpermilps_mask(A, E, C, D) __builtin_ia32_vpermilps_mask(A, 1, C, D)
/* { dg-do compile } */
/* { dg-options "-mavx2 -O2" } */
-/* { dg-final { scan-assembler "vbroadcasti128\[ \\t\]+\[^\n\]*%ymm\[0-9\]" } } */
+/* { dg-final { scan-assembler-times "vbroadcasti128\[ \\t\]+\[^\n\]*%ymm\[0-9\]+" 2 } } */
#include <immintrin.h>
-volatile __m256i x;
-__m128i y;
+volatile __m256i x, xx;
+__m128i y, yy;
void extern
avx2_test (void)
{
x = _mm256_broadcastsi128_si256 (y);
+ xx = _mm_broadcastsi128_si256 (yy);
}
avx2_test (void)
{
union128i_q s1;
- union256i_q res;
+ union256i_q res, res1;
long long int res_ref[4];
int i, j;
int fail = 0;
s1.a[j] = j * i;
res.x = _mm256_broadcastsi128_si256 (s1.x);
+ res1.x = _mm_broadcastsi128_si256 (s1.x);
memcpy (res_ref, s1.a, 16);
memcpy (res_ref + 2, s1.a, 16);
fail += check_union256i_q (res, res_ref);
+ fail += check_union256i_q (res1, res_ref);
}
if (fail != 0)
/* { dg-final { scan-assembler-times "vmovdqu16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\]*\\)\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqu16\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\]*\\)\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqu16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*\\)\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqu16\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "(?:vmovdqu16|vinserti128)\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqu16\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqu16\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\]*\\)(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "(?:vmovdqu16|vextracti128)\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\]*\\)(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqu16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*\\)(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-short *p;
-volatile __m512i x1, yy;
-volatile __m256i x2, y2;
-volatile __m128i x3, y3;
+short *p, *p1, *p2, *p3, *p4, *p5, *p6;
+volatile __m512i x1, yy, zzz;
+volatile __m256i x2, y2, yyy;
+volatile __m128i x3, y3, xxx;
volatile __mmask32 m32;
volatile __mmask16 m16;
volatile __mmask8 m8;
x2 = _mm256_maskz_loadu_epi16 (m16, p);
x3 = _mm_maskz_loadu_epi16 (m8, p);
+ zzz = _mm512_loadu_epi16 (p5);
+ yyy = _mm256_loadu_epi16 (p3);
+ xxx = _mm_loadu_epi16 (p1);
+
_mm512_mask_storeu_epi16 (p, m32, x1);
_mm256_mask_storeu_epi16 (p, m16, x2);
_mm_mask_storeu_epi16 (p, m8, x3);
+
+ _mm512_storeu_epi16 (p6, zzz);
+ _mm256_storeu_epi16 (p4, yyy);
+ _mm_storeu_epi16 (p2, xxx);
}
/* { dg-final { scan-assembler-times "vmovdqu8\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\]*\\)\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqu8\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\]*\\)\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqu8\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*\\)\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqu8\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqu8\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqu8\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\[^\n\]*\\)(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqu8\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*\\)(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-char *p;
-volatile __m512i x1, yy;
+char *p, *p1, *p2, *p3, *p4;
+volatile __m512i x1, yy, zzz;
volatile __m256i x2, y2;
-volatile __m128i x3, y3;
+volatile __m128i x3, y3, xxx;
volatile __mmask64 m64;
volatile __mmask32 m32;
volatile __mmask16 m16;
x2 = _mm256_maskz_loadu_epi8 (m32, p);
x3 = _mm_maskz_loadu_epi8 (m16, p);
+ zzz = _mm512_loadu_epi8 (p3);
+ xxx = _mm_loadu_epi8 (p1);
+
_mm512_mask_storeu_epi8 (p, m64, x1);
_mm256_mask_storeu_epi8 (p, m32, x2);
_mm_mask_storeu_epi8 (p, m16, x3);
+
+ _mm512_storeu_epi8 (p4, zzz);
+ _mm_storeu_epi8 (p2, xxx);
}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavx512dq -O2" } */
+/* { dg-final { scan-assembler-times "vreducepd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vreducepd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%zmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vreducepd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%zmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+#define IMM 123
+
+volatile __m512d xx1;
+volatile __mmask8 m;
+
+void extern
+avx512dq_test (void)
+{
+  xx1 = _mm512_reduce_round_pd (xx1, IMM, _MM_FROUND_NO_EXC);
+
+ xx1 = _mm512_mask_reduce_round_pd (xx1, m, xx1, IMM, _MM_FROUND_NO_EXC);
+
+ xx1 = _mm512_maskz_reduce_round_pd (m, xx1, IMM, _MM_FROUND_NO_EXC);
+}
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -mavx512dq" } */
+/* { dg-require-effective-target avx512dq } */
+
+#define AVX512DQ
+#include "avx512f-helper.h"
+
+#define SIZE (AVX512F_LEN / 64)
+#include "avx512f-mask-type.h"
+
+#define IMM 0x23
+
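+/* IMM 0x23: imm8[7:4] = 2 keeps two fraction bits and imm8[1:0] = 3
+   rounds toward zero (assuming the usual VREDUCE imm8 layout), so the
+   reference below is s - trunc (4 * s) / 4.  */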
+void
+CALC (double *s, double *r)
+{
+ int i;
+
+ for (i = 0; i < SIZE; i++)
+ {
+ double tmp = (int) (4 * s[i]) / 4.0;
+ r[i] = s[i] - tmp;
+ }
+}
+
+void
+TEST (void)
+{
+ UNION_TYPE (AVX512F_LEN, d) s, res1, res2, res3;
+ MASK_TYPE mask = MASK_VALUE;
+ double res_ref[SIZE];
+ int i, sign = 1;
+
+ for (i = 0; i < SIZE; i++)
+ {
+ s.a[i] = 123.456 * (i + 2000) * sign;
+ res2.a[i] = DEFAULT_VALUE;
+ sign = -sign;
+ }
+
+ res1.x = INTRINSIC (_reduce_round_pd) (s.x, IMM, _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
+ res2.x = INTRINSIC (_mask_reduce_round_pd) (res2.x, mask, s.x,
+ IMM, _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
+ res3.x = INTRINSIC (_maskz_reduce_round_pd) (mask, s.x, IMM,
+ _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
+
+ CALC (s.a, res_ref);
+
+ if (UNION_FP_CHECK (AVX512F_LEN, d) (res1, res_ref))
+ abort ();
+
+ MASK_MERGE (d) (res_ref, mask, SIZE);
+ if (UNION_FP_CHECK (AVX512F_LEN, d) (res2, res_ref))
+ abort ();
+
+ MASK_ZERO (d) (res_ref, mask, SIZE);
+ if (UNION_FP_CHECK (AVX512F_LEN, d) (res3, res_ref))
+ abort ();
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavx512dq -O2" } */
+/* { dg-final { scan-assembler-times "vreduceps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vreduceps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%zmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vreduceps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%zmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+#define IMM 123
+
+volatile __m512 xx1;
+volatile __mmask16 m16;
+
+void extern
+avx512dq_test (void)
+{
+ xx1 = _mm512_reduce_round_ps (xx1, IMM, _MM_FROUND_NO_EXC);
+
+ xx1 = _mm512_mask_reduce_round_ps (xx1, m16, xx1, IMM, _MM_FROUND_NO_EXC);
+
+ xx1 = _mm512_maskz_reduce_round_ps (m16, xx1, IMM, _MM_FROUND_NO_EXC);
+}
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -mavx512dq" } */
+/* { dg-require-effective-target avx512dq } */
+
+#define AVX512DQ
+#include "avx512f-helper.h"
+
+#define SIZE (AVX512F_LEN / 32)
+#include "avx512f-mask-type.h"
+
+#define IMM 0x23
+
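+/* Same IMM 0x23 encoding as the pd variant: two fraction bits, truncate.  */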
+void
+CALC (float *s, float *r)
+{
+ int i;
+
+ for (i = 0; i < SIZE; i++)
+ {
+ float tmp = (int) (4 * s[i]) / 4.0;
+ r[i] = s[i] - tmp;
+ }
+}
+
+void
+TEST (void)
+{
+ UNION_TYPE (AVX512F_LEN,) s, res1, res2, res3;
+ MASK_TYPE mask = MASK_VALUE;
+ float res_ref[SIZE];
+ int i, sign = 1;
+
+ for (i = 0; i < SIZE; i++)
+ {
+ s.a[i] = 123.456 * (i + 2000) * sign;
+ res2.a[i] = DEFAULT_VALUE;
+ sign = -sign;
+ }
+
+ res1.x = INTRINSIC (_reduce_round_ps) (s.x, IMM, _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
+ res2.x = INTRINSIC (_mask_reduce_round_ps) (res2.x, mask, s.x,
+ IMM, _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
+ res3.x = INTRINSIC (_maskz_reduce_round_ps) (mask, s.x, IMM,
+ _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
+
+ CALC (s.a, res_ref);
+
+ if (UNION_FP_CHECK (AVX512F_LEN,) (res1, res_ref))
+ abort ();
+
+ MASK_MERGE () (res_ref, mask, SIZE);
+ if (UNION_FP_CHECK (AVX512F_LEN,) (res2, res_ref))
+ abort ();
+
+ MASK_ZERO () (res_ref, mask, SIZE);
+ if (UNION_FP_CHECK (AVX512F_LEN,) (res3, res_ref))
+ abort ();
+}
/* { dg-options "-mavx512dq -O2" } */
/* { dg-final { scan-assembler-times "vreducesd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vreducesd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vreducesd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vreducesd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vreducesd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vreducesd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vreducesd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#define IMM 123
-volatile __m128d x1, x2;
+volatile __m128d x1, x2, xx1, xx2;
volatile __mmask8 m;
void extern
avx512dq_test (void)
{
+ xx1 = _mm_reduce_round_sd (xx1, xx2, IMM, _MM_FROUND_NO_EXC);
x1 = _mm_reduce_sd (x1, x2, IMM);
+  xx1 = _mm_mask_reduce_round_sd (xx1, m, xx1, xx2, IMM, _MM_FROUND_NO_EXC);
x1 = _mm_mask_reduce_sd(x1, m, x1, x2, IMM);
+  xx1 = _mm_maskz_reduce_round_sd (m, xx1, xx2, IMM, _MM_FROUND_NO_EXC);
x1 = _mm_maskz_reduce_sd(m, x1, x2, IMM);
}
void
TEST (void)
{
- union128d res1, res2, res3;
+ union128d res1, res2, res3, res4, res5, res6;
union128d s1, s2, src;
double res_ref[2];
MASK_TYPE mask = MASK_VALUE;
res1.a[j] = DEFAULT_VALUE;
res2.a[j] = DEFAULT_VALUE;
res3.a[j] = DEFAULT_VALUE;
+ res4.a[j] = DEFAULT_VALUE;
+ res5.a[j] = DEFAULT_VALUE;
+ res6.a[j] = DEFAULT_VALUE;
}
res1.x = _mm_reduce_sd (s1.x, s2.x, IMM);
res2.x = _mm_mask_reduce_sd (s1.x, mask, s1.x, s2.x, IMM);
res3.x = _mm_maskz_reduce_sd (mask, s1.x, s2.x, IMM);
+  res4.x = _mm_reduce_round_sd (s1.x, s2.x, IMM, _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
+ res5.x = _mm_mask_reduce_round_sd (s1.x, mask, s1.x, s2.x, IMM,
+ _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
+ res6.x = _mm_maskz_reduce_round_sd (mask, s1.x, s2.x, IMM,
+ _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
CALC (res_ref, s2.a);
if (check_union128d (res1, res_ref))
abort ();
+ if (check_union128d (res4, res_ref))
+ abort ();
+
MASK_MERGE (d) (res_ref, mask, 1);
if (check_union128d (res2, res_ref))
abort ();
+ if (check_union128d (res5, res_ref))
+ abort ();
+
MASK_ZERO (d) (res_ref, mask, 1);
if (check_union128d (res3, res_ref))
abort ();
+ if (check_union128d (res6, res_ref))
+ abort ();
+
}
/* { dg-options "-mavx512dq -O2" } */
/* { dg-final { scan-assembler-times "vreducess\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vreducess\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vreducess\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vreducess\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vreducess\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vreducess\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vreducess\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
#define IMM 123
-volatile __m128 x1, x2;
+volatile __m128 x1, x2, xx1, xx2;
volatile __mmask8 m;
void extern
avx512dq_test (void)
{
+ xx1 = _mm_reduce_round_ss (xx1, xx2, IMM, _MM_FROUND_NO_EXC);
x1 = _mm_reduce_ss (x1, x2, IMM);
+ xx1 = _mm_mask_reduce_round_ss (xx1, m, xx1, xx2, IMM, _MM_FROUND_NO_EXC);
x1 = _mm_mask_reduce_ss (x1, m, x1, x2, IMM);
+ xx1 = _mm_maskz_reduce_round_ss (m, xx1, xx2, IMM, _MM_FROUND_NO_EXC);
x1 = _mm_maskz_reduce_ss (m, x1, x2, IMM);
}
{
printf("\nsize = %d\n\n", SIZE);
- union128 res1, res2, res3;
+ union128 res1, res2, res3, res4, res5, res6;
union128 s1, s2, src;
float res_ref[4];
MASK_TYPE mask = MASK_VALUE;
res1.a[j] = DEFAULT_VALUE;
res2.a[j] = DEFAULT_VALUE;
res3.a[j] = DEFAULT_VALUE;
+ res4.a[j] = DEFAULT_VALUE;
+ res5.a[j] = DEFAULT_VALUE;
+ res6.a[j] = DEFAULT_VALUE;
}
res1.x = _mm_reduce_ss (s1.x, s2.x, IMM);
res2.x = _mm_mask_reduce_ss (s1.x, mask, s1.x, s2.x, IMM);
res3.x = _mm_maskz_reduce_ss (mask, s1.x, s2.x, IMM);
+ res4.x = _mm_reduce_round_ss (s1.x, s2.x, IMM, _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
+ res5.x = _mm_mask_reduce_round_ss (s1.x, mask, s1.x, s2.x,
+ IMM, _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
+ res6.x = _mm_maskz_reduce_round_ss (mask, s1.x, s2.x, IMM,
+ _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC);
CALC (res_ref, s2.a);
if (check_union128 (res1, res_ref))
abort ();
+ if (check_union128 (res4, res_ref))
+ abort ();
+
MASK_MERGE () (res_ref, mask, 1);
if (check_union128 (res2, res_ref))
abort ();
+ if (check_union128 (res5, res_ref))
+ abort ();
+
MASK_ZERO () (res_ref, mask, 1);
if (check_union128 (res3, res_ref))
abort ();
+ if (check_union128 (res6, res_ref))
+ abort ();
+
}
/* { dg-options "-mavx512er -O2" } */
/* { dg-final { scan-assembler-times "vrcp28sd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vrcp28sd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\{\]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcp28sd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcp28sd\[ \\t\]+\[^\n\]*\{sae\}\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcp28sd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcp28sd\[ \\t\]+\[^\n\]*\{sae\}\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128d x, y;
+volatile __m128d x, y, z;
+volatile __mmask8 m;
void extern
avx512er_test (void)
{
x = _mm_rcp28_sd (x, y);
x = _mm_rcp28_round_sd (x, y, _MM_FROUND_NO_EXC);
+ x = _mm_mask_rcp28_sd (z, m, x, y);
+ x = _mm_mask_rcp28_round_sd (z, m, x, y, _MM_FROUND_NO_EXC);
+ x = _mm_maskz_rcp28_sd (m, x, y);
+ x = _mm_maskz_rcp28_round_sd (m, x, y, _MM_FROUND_NO_EXC);
}
#include "avx512f-helper.h"
#include <math.h>
void static
avx512er_test (void)
{
- union128d src1, src2, res;
+ union128d src1, src2, res, res1, res2, res3, res4;
double res_ref[2];
+ MASK_TYPE mask = MASK_VALUE;
int i;
for (i = 0; i < 2; i++)
res_ref[0] = 1.0 / src2.a[0];
res.x = _mm_rcp28_round_sd (src1.x, src2.x, _MM_FROUND_NO_EXC);
+  res1.x = _mm_mask_rcp28_sd (src1.x, mask, src1.x, src2.x);
+  res2.x = _mm_mask_rcp28_round_sd (src1.x, mask, src1.x, src2.x,
+				     _MM_FROUND_TO_NEAREST_INT
+				     | _MM_FROUND_NO_EXC);
+  res3.x = _mm_maskz_rcp28_sd (mask, src1.x, src2.x);
+  res4.x = _mm_maskz_rcp28_round_sd (mask, src1.x, src2.x,
+				      _MM_FROUND_TO_NEAREST_INT
+				      | _MM_FROUND_NO_EXC);
+
if (checkVd (res.a, res_ref, 2))
abort ();
+
+ MASK_MERGE (d) (res_ref, mask, 1);
+
+ if (checkVd (res1.a, res_ref, 2))
+ abort ();
+
+ if (checkVd (res2.a, res_ref, 2))
+ abort ();
+
+ MASK_ZERO (d) (res_ref, mask, 1);
+
+ if (checkVd (res3.a, res_ref, 2))
+ abort ();
+
+ if (checkVd (res4.a, res_ref, 2))
+ abort ();
}
/* { dg-options "-mavx512er -O2" } */
/* { dg-final { scan-assembler-times "vrcp28ss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vrcp28ss\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\{\]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcp28ss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcp28ss\[ \\t\]+\[^\n\]*\{sae\}\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcp28ss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrcp28ss\[ \\t\]+\[^\n\]*\{sae\}\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128 x, y;
+volatile __m128 x, y, z;
+volatile __mmask8 m;
void extern
avx512er_test (void)
{
x = _mm_rcp28_ss (x, y);
x = _mm_rcp28_round_ss (x, y, _MM_FROUND_NO_EXC);
+ x = _mm_mask_rcp28_ss (z, m, x, y);
+ x = _mm_mask_rcp28_round_ss (z, m, x, y, _MM_FROUND_NO_EXC);
+ x = _mm_maskz_rcp28_ss (m, x, y);
+ x = _mm_maskz_rcp28_round_ss (m, x, y, _MM_FROUND_NO_EXC);
}
#include "avx512f-helper.h"
#include <math.h>
void static
avx512er_test (void)
{
- union128 src1, src2, res;
+ union128 src1, src2, res, res1, res2, res3, res4;
float res_ref[4];
+ MASK_TYPE mask = MASK_VALUE;
int i;
for (i = 0; i < 4; i++)
res_ref[0] = 1.0 / src2.a[0];
res.x = _mm_rcp28_round_ss (src1.x, src2.x, _MM_FROUND_NO_EXC);
+  res1.x = _mm_mask_rcp28_ss (src1.x, mask, src1.x, src2.x);
+  res2.x = _mm_mask_rcp28_round_ss (src1.x, mask, src1.x, src2.x,
+				     _MM_FROUND_TO_NEAREST_INT
+				     | _MM_FROUND_NO_EXC);
+  res3.x = _mm_maskz_rcp28_ss (mask, src1.x, src2.x);
+  res4.x = _mm_maskz_rcp28_round_ss (mask, src1.x, src2.x,
+				      _MM_FROUND_TO_NEAREST_INT
+				      | _MM_FROUND_NO_EXC);
if (checkVf (res.a, res_ref, 4))
abort ();
+
+ MASK_MERGE () (res_ref, mask, 1);
+
+  if (checkVf (res1.a, res_ref, 4))
+ abort ();
+
+  if (checkVf (res2.a, res_ref, 4))
+ abort ();
+
+ MASK_ZERO () (res_ref, mask, 1);
+
+  if (checkVf (res3.a, res_ref, 4))
+ abort ();
+
+  if (checkVf (res4.a, res_ref, 4))
+ abort ();
}
/* { dg-options "-mavx512er -O2" } */
/* { dg-final { scan-assembler-times "vrsqrt28sd\[ \\t\]+\[^\{^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vrsqrt28sd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrsqrt28sd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrsqrt28sd\[ \\t\]+\[^\n\]*\{sae\}\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrsqrt28sd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrsqrt28sd\[ \\t\]+\[^\n\]*\{sae\}\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128d x, y;
+volatile __m128d x, y, z;
+volatile __mmask8 m;
void extern
avx512er_test (void)
{
x = _mm_rsqrt28_sd (x, y);
x = _mm_rsqrt28_round_sd (x, y, _MM_FROUND_NO_EXC);
+ x = _mm_mask_rsqrt28_sd (z, m, x, y);
+ x = _mm_mask_rsqrt28_round_sd (z, m, x, y, _MM_FROUND_NO_EXC);
+ x = _mm_maskz_rsqrt28_sd (m, x, y);
+ x = _mm_maskz_rsqrt28_round_sd (m, x, y, _MM_FROUND_NO_EXC);
}
#include "avx512f-helper.h"
#include <math.h>
void static
avx512er_test (void)
{
- union128d src1, src2, res;
+ union128d src1, src2, res, res1, res2, res3, res4;
double res_ref[2];
+ MASK_TYPE mask = MASK_VALUE;
int i;
for (i = 0; i < 2; i++)
res_ref[0] = 1.0 / sqrt (src2.a[0]);
res.x = _mm_rsqrt28_round_sd (src1.x, src2.x, _MM_FROUND_NO_EXC);
+  res1.x = _mm_mask_rsqrt28_sd (src1.x, mask, src1.x, src2.x);
+  res2.x = _mm_mask_rsqrt28_round_sd (src1.x, mask, src1.x, src2.x,
+				       _MM_FROUND_TO_NEAREST_INT
+				       | _MM_FROUND_NO_EXC);
+  res3.x = _mm_maskz_rsqrt28_sd (mask, src1.x, src2.x);
+  res4.x = _mm_maskz_rsqrt28_round_sd (mask, src1.x, src2.x,
+					_MM_FROUND_TO_NEAREST_INT
+					| _MM_FROUND_NO_EXC);
if (checkVd (res.a, res_ref, 2))
abort ();
+
+ MASK_MERGE (d) (res_ref, mask, 1);
+
+ if (checkVd (res1.a, res_ref, 2))
+ abort ();
+
+ if (checkVd (res2.a, res_ref, 2))
+ abort ();
+
+ MASK_ZERO (d) (res_ref, mask, 1);
+
+ if (checkVd (res3.a, res_ref, 2))
+ abort ();
+
+ if (checkVd (res4.a, res_ref, 2))
+ abort ();
}
/* { dg-options "-mavx512er -O2" } */
/* { dg-final { scan-assembler-times "vrsqrt28ss\[ \\t\]+\[^\{^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vrsqrt28ss\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrsqrt28ss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrsqrt28ss\[ \\t\]+\[^\n\]*\{sae\}\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrsqrt28ss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vrsqrt28ss\[ \\t\]+\[^\n\]*\{sae\}\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128 x, y;
+volatile __m128 x, y, z;
+volatile __mmask8 m;
void extern
avx512er_test (void)
{
x = _mm_rsqrt28_ss (x, y);
x = _mm_rsqrt28_round_ss (x, y, _MM_FROUND_NO_EXC);
+ x = _mm_mask_rsqrt28_ss (z, m, x, y);
+ x = _mm_mask_rsqrt28_round_ss (z, m, x, y, _MM_FROUND_NO_EXC);
+ x = _mm_maskz_rsqrt28_ss (m, x, y);
+ x = _mm_maskz_rsqrt28_round_ss (m, x, y, _MM_FROUND_NO_EXC);
}
#include "avx512f-helper.h"
#include <math.h>
void static
avx512er_test (void)
{
- union128 src1, src2, res;
+ union128 src1, src2, res, res1, res2, res3, res4;
float res_ref[4];
+ MASK_TYPE mask = MASK_VALUE;
int i;
for (i = 0; i < 4; i++)
res_ref[0] = 1.0 / sqrt (src2.a[0]);
res.x = _mm_rsqrt28_round_ss (src1.x, src2.x, _MM_FROUND_NO_EXC);
+  res1.x = _mm_mask_rsqrt28_ss (src1.x, mask, src1.x, src2.x);
+  res2.x = _mm_mask_rsqrt28_round_ss (src1.x, mask, src1.x, src2.x,
+				       _MM_FROUND_TO_NEAREST_INT
+				       | _MM_FROUND_NO_EXC);
+  res3.x = _mm_maskz_rsqrt28_ss (mask, src1.x, src2.x);
+  res4.x = _mm_maskz_rsqrt28_round_ss (mask, src1.x, src2.x,
+					_MM_FROUND_TO_NEAREST_INT
+					| _MM_FROUND_NO_EXC);
if (checkVf (res.a, res_ref, 4))
abort ();
+
+ MASK_MERGE () (res_ref, mask, 1);
+
+  if (checkVf (res1.a, res_ref, 4))
+ abort ();
+
+  if (checkVf (res2.a, res_ref, 4))
+ abort ();
+
+ MASK_ZERO () (res_ref, mask, 1);
+
+  if (checkVf (res3.a, res_ref, 4))
+ abort ();
+
+  if (checkVf (res4.a, res_ref, 4))
+ abort ();
}
/* { dg-do compile } */
/* { dg-options "-O2 -mavx512f" } */
/* { dg-final { scan-assembler-times "vcvtsd2sil?\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\n\]*%xmm\[0-9\]+.{6}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtsd2sil?\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+.{6}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
volatile __m128d x;
-volatile unsigned y;
+volatile unsigned y, z;
void extern
avx512f_test (void)
{
y = _mm_cvt_roundsd_i32 (x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ z = _mm_cvtsd_i32 (x);
}
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-O2 -mavx512f" } */
/* { dg-final { scan-assembler-times "vcvtsd2siq\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+.{6}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtsd2siq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+.{6}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
volatile __m128d x;
-volatile unsigned long long y;
+volatile unsigned long long y, z;
void extern
avx512f_test (void)
{
y = _mm_cvt_roundsd_i64 (x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ z = _mm_cvtsd_i64 (x);
}
/* { dg-do compile } */
/* { dg-options "-mavx512f -O2" } */
/* { dg-final { scan-assembler-times "vcvtsd2ss\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\n\]*%xmm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtsd2ss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtsd2ss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtsd2ss\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\n\]*%xmm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtsd2ss\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\n\]*%xmm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128 s1, r;
+volatile __m128 s1, r, s3;
volatile __m128d s2;
+volatile __mmask8 m;
void extern
avx512f_test (void)
{
r = _mm_cvt_roundsd_ss (s1, s2, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ r = _mm_mask_cvtsd_ss (s3, m, s1, s2);
+ r = _mm_maskz_cvtsd_ss (m, s1, s2);
+ r = _mm_mask_cvt_roundsd_ss (s3, m, s1, s2, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ r = _mm_maskz_cvt_roundsd_ss (m, s1, s2, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
--- /dev/null
+/* { dg-options "-mavx512f -O2" } */
+/* { dg-final { scan-assembler-times "vcvtsi2sdl\[ \\t\]+\[^%\n\]*%e\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+volatile __m128d x;
+volatile int n;
+
+void extern
+avx512f_test (void)
+{
+ x = _mm_cvti32_sd (x, n);
+}
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-mavx512f -O2" } */
/* { dg-final { scan-assembler-times "vcvtsi2sdq\[ \\t\]+\[^%\n\]*%r\[^\{\n\]*\{ru-sae\}\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtsi2sdq\[ \\t\]+\[^%\n\]*%r\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128d x;
+volatile __m128d x, y;
volatile long long n;
void extern
avx512f_test (void)
{
x = _mm_cvt_roundi64_sd (x, n, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ y = _mm_cvti64_sd (x, n);
}
/* { dg-do compile } */
/* { dg-options "-mavx512f -O2" } */
/* { dg-final { scan-assembler-times "vcvtsi2ssl\[ \\t\]+\[^%\n\]*%e\[^\{\n\]*\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtsi2ssl\[ \\t\]+\[^%\n\]*%e\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128 x;
+volatile __m128 x, y;
volatile int n;
void extern
avx512f_test (void)
{
x = _mm_cvt_roundi32_ss (x, n, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ y = _mm_cvti32_ss (x, n);
}
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-mavx512f -O2" } */
/* { dg-final { scan-assembler-times "vcvtsi2ssq\[ \\t\]+\[^%\n\]*%r\[^\{\n\]*\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtsi2ssq\[ \\t\]+\[^%\n\]*%r\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128 x;
+volatile __m128 x, y;
volatile long long n;
void extern
avx512f_test (void)
{
x = _mm_cvt_roundi64_ss (x, n, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ y = _mm_cvti64_ss (x, n);
}
/* { dg-do compile } */
/* { dg-options "-mavx512f -O2" } */
/* { dg-final { scan-assembler-times "vcvtss2sd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtss2sd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtss2sd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtss2sd\[ \\t\]+\[^\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtss2sd\[ \\t\]+\[^\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128d s1, r;
+volatile __m128d s1, r, s3;
volatile __m128 s2;
+volatile __mmask8 m;
void extern
avx512f_test (void)
{
r = _mm_cvt_roundss_sd (s1, s2, _MM_FROUND_NO_EXC);
+ r = _mm_mask_cvtss_sd (s3, m, s1, s2);
+ r = _mm_maskz_cvtss_sd (m, s1, s2);
+ r = _mm_mask_cvt_roundss_sd (s3, m, s1, s2, _MM_FROUND_NO_EXC);
+ r = _mm_maskz_cvt_roundss_sd (m, s1, s2, _MM_FROUND_NO_EXC);
}
/* { dg-do compile } */
/* { dg-options "-O2 -mavx512f" } */
/* { dg-final { scan-assembler-times "vcvtss2sil?\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+.{6}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtss2sil?\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+.{6}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
volatile __m128 x;
-volatile unsigned y;
+volatile unsigned y, z;
void extern
avx512f_test (void)
{
y = _mm_cvt_roundss_i32 (x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ z = _mm_cvtss_i32 (x);
}
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-O2 -mavx512f" } */
/* { dg-final { scan-assembler-times "vcvtss2siq\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+.{6}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vcvtss2siq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+.{6}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
volatile __m128 x;
-volatile unsigned long long y;
+volatile unsigned long long y, z;
void extern
avx512f_test (void)
{
y = _mm_cvt_roundss_i64 (x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ z = _mm_cvtss_i64 (x);
}
/* { dg-final { scan-assembler-times "vscalefsd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vscalefsd\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vscalefsd\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefsd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefsd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
x = _mm_scalef_round_sd (x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
x = _mm_mask_scalef_round_sd (x, m, x, x, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
x = _mm_maskz_scalef_round_sd (m, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ x = _mm_mask_scalef_sd (x, m, x, x);
+ x = _mm_maskz_scalef_sd (m, x, x);
}
void static
avx512f_test (void)
{
- union128d res1, res2, res3, res4;
+ union128d res1, res2, res3, res4, res5, res6;
union128d s1, s2;
double res_ref[SIZE];
MASK_TYPE mask = MASK_VALUE;
res2.a[i] = DEFAULT_VALUE;
res3.a[i] = DEFAULT_VALUE;
res4.a[i] = DEFAULT_VALUE;
+ res5.a[i] = DEFAULT_VALUE;
+ res6.a[i] = DEFAULT_VALUE;
}
res1.x = _mm_scalef_sd (s1.x, s2.x);
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
res4.x = _mm_maskz_scalef_round_sd (mask, s1.x, s2.x,
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ res5.x = _mm_mask_scalef_sd (s1.x, mask, s1.x, s2.x);
+ res6.x = _mm_maskz_scalef_sd (mask, s1.x, s2.x);
compute_scalefsd (s1.a, s2.a, res_ref);
if (check_union128d (res3, res_ref))
abort ();
+ if (check_union128d (res5, res_ref))
+ abort ();
+
MASK_ZERO (d) (res_ref, mask, 1);
if (check_union128d (res4, res_ref))
abort ();
+
+ if (check_union128d (res6, res_ref))
+ abort ();
}
/* { dg-final { scan-assembler-times "vscalefss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vscalefss\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vscalefss\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vscalefss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
x = _mm_scalef_round_ss (x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
x = _mm_mask_scalef_round_ss (x, m, x, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
x = _mm_maskz_scalef_round_ss (m, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ x = _mm_mask_scalef_ss (x, m, x, x);
+ x = _mm_maskz_scalef_ss (m, x, x);
}
static void
avx512f_test (void)
{
- union128 res1, res2, res3, res4;
+ union128 res1, res2, res3, res4, res5, res6;
union128 s1, s2;
float res_ref[SIZE];
MASK_TYPE mask = MASK_VALUE;
res2.a[i] = DEFAULT_VALUE;
res3.a[i] = DEFAULT_VALUE;
res4.a[i] = DEFAULT_VALUE;
+ res5.a[i] = DEFAULT_VALUE;
+ res6.a[i] = DEFAULT_VALUE;
}
res1.x = _mm_scalef_ss (s1.x, s2.x);
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
res4.x = _mm_maskz_scalef_round_ss (mask, s1.x, s2.x,
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ res5.x = _mm_mask_scalef_ss (s1.x, mask, s1.x, s2.x);
+ res6.x = _mm_maskz_scalef_ss (mask, s1.x, s2.x);
compute_scalefss (s1.a, s2.a, res_ref);
if (check_union128 (res3, res_ref))
abort ();
+ if (check_union128 (res5, res_ref))
+ abort ();
+
MASK_ZERO () (res_ref, mask, 1);
if (check_union128 (res4, res_ref))
abort ();
+
+ if (check_union128 (res6, res_ref))
+ abort ();
}
/* { dg-final { scan-assembler-times "vsqrtsd\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtsd\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtsd\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsqrtsd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsqrtsd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128d x1, x2;
+volatile __m128d x1, x2, x3;
volatile __mmask8 m;
void extern
x1 = _mm_sqrt_round_sd (x1, x2, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
x1 = _mm_mask_sqrt_round_sd (x1, m, x1, x2, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
x1 = _mm_maskz_sqrt_round_sd (m, x1, x2, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ x1 = _mm_mask_sqrt_sd (x3, m, x1, x2);
+ x1 = _mm_maskz_sqrt_sd (m, x1, x2);
}
void static
avx512f_test (void)
{
- union128d res1, res2, res3;
+ union128d res1, res2, res3, res4, res5;
union128d s1, s2;
double res_ref[SIZE];
MASK_TYPE mask = MASK_VALUE;
res1.a[i] = DEFAULT_VALUE;
res2.a[i] = DEFAULT_VALUE;
res3.a[i] = DEFAULT_VALUE;
+ res4.a[i] = DEFAULT_VALUE;
+ res5.a[i] = DEFAULT_VALUE;
}
res1.x = _mm_sqrt_round_sd (s1.x, s2.x,
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
res3.x = _mm_maskz_sqrt_round_sd (mask, s1.x, s2.x,
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ res4.x = _mm_mask_sqrt_sd (s1.x, mask, s1.x, s2.x);
+ res5.x = _mm_maskz_sqrt_sd (mask, s1.x, s2.x);
compute_sqrtsd (s1.a, s2.a, res_ref);
if (check_union128d (res2, res_ref))
abort ();
+ if (check_union128d (res4, res_ref))
+ abort ();
+
MASK_ZERO (d) (res_ref, mask, 1);
if (check_union128d (res3, res_ref))
abort ();
+
+ if (check_union128d (res5, res_ref))
+ abort ();
}
/* { dg-final { scan-assembler-times "vsqrtss\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtss\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vsqrtss\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsqrtss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vsqrtss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-volatile __m128 x1, x2;
+volatile __m128 x1, x2, x3;
volatile __mmask8 m;
void extern
x1 = _mm_sqrt_round_ss (x1, x2, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
x1 = _mm_mask_sqrt_round_ss (x1, m, x1, x2, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
x1 = _mm_maskz_sqrt_round_ss (m, x1, x2, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ x1 = _mm_mask_sqrt_ss (x3, m, x1, x2);
+ x1 = _mm_maskz_sqrt_ss (m, x1, x2);
}
static void
avx512f_test (void)
{
- union128 res1, res2, res3;
+ union128 res1, res2, res3, res4, res5;
union128 s1, s2;
float res_ref[SIZE];
MASK_TYPE mask = MASK_VALUE;
res1.a[i] = DEFAULT_VALUE;
res2.a[i] = DEFAULT_VALUE;
res3.a[i] = DEFAULT_VALUE;
+ res4.a[i] = DEFAULT_VALUE;
+ res5.a[i] = DEFAULT_VALUE;
}
res1.x = _mm_sqrt_round_ss (s1.x, s2.x,
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
res3.x = _mm_maskz_sqrt_round_ss (mask, s1.x, s2.x,
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ res4.x = _mm_mask_sqrt_ss (s1.x, mask, s1.x, s2.x);
+ res5.x = _mm_maskz_sqrt_ss (mask, s1.x, s2.x);
compute_sqrtss (s1.a, s2.a, res_ref);
if (check_union128 (res2, res_ref))
abort ();
+ if (check_union128 (res4, res_ref))
+ abort ();
+
MASK_ZERO () (res_ref, mask, 1);
if (check_union128 (res3, res_ref))
abort ();
+
+ if (check_union128 (res5, res_ref))
+ abort ();
}
/* { dg-final { scan-assembler-times "vmovdqa32\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqa32\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\]*\\)\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqa32\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*\\)\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqa\[ \\t\]+\\(\[^\{\n\]*\\)\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqa\[ \\t\]+\\(\[^\{\n\]*\\)\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqa\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\]*\[ \\t\]+\\(\[^\n\]*\\)(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmovdqa\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*\[ \\t\]+\\(\[^\n\]*\\)(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
-int *p;
-volatile __m256i yy, y2;
-volatile __m128i xx, x2;
+int *p, *p1, *p2;
+volatile __m256i yy, y2, yyy;
+volatile __m128i xx, x2, xxx;
volatile __mmask8 m;
void extern
yy = _mm256_mask_load_epi32 (yy, m, p);
xx = _mm_mask_load_epi32 (xx, m, p);
+ yyy = _mm256_load_epi32 (p2);
+ xxx = _mm_load_epi32 (p1);
+
yy = _mm256_maskz_load_epi32 (m, p);
xx = _mm_maskz_load_epi32 (m, p);
_mm256_mask_store_epi32 (p, m, yy);
_mm_mask_store_epi32 (p, m, xx);
+
+ _mm256_store_epi32 (p2, yyy);
+  _mm_store_epi32 (p1, xxx);
}
/* { dg-final { scan-assembler-times "(?:vmovdqa64|vpblendmq)\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqa64\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqa64\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
-/* { dg-final { scan-assembler-times "vmovdqa\[ \\t\]+\\(\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 { target nonpic } } } */
-/* { dg-final { scan-assembler-times "vmovdqa\[ \\t\]+\\(\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 { target nonpic } } } */
+/* { dg-final { scan-assembler-times "vmovdqa\[ \\t\]+\\(\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 { target nonpic } } } */
+/* { dg-final { scan-assembler-times "vmovdqa\[ \\t\]+\\(\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 { target nonpic } } } */
/* { dg-final { scan-assembler-times "vmovdqa64\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqa64\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vmovdqa64\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
yy = _mm256_load_si256 (p1);
xx = _mm_load_si128 (p2);
+ yy = _mm256_load_epi64 (p);
+ xx = _mm_load_epi64 (p);
+
yy = _mm256_mask_load_epi64 (yy, m, p);
xx = _mm_mask_load_epi64 (xx, m, p);
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -msse" } */
+/* { dg-final { scan-assembler-times "pxor\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "pinsrw\[ \\t\]+\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "pextrw\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <emmintrin.h>
+unsigned short *p1, *p2;
+volatile __m128i x1, x2;
+
+void foo (void)
+{
+  x1 = _mm_loadu_si16 (p1);
+  _mm_storeu_si16 (p2, x2);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -msse2" } */
+/* { dg-final { scan-assembler-times "(?:vpinsrd|movd)\[ \\t\]+\[^\n\]*\\)\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "movd\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*\\)(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <emmintrin.h>
+unsigned int *p1, *p2;
+volatile __m128i x1, x2;
+
+void foo (void)
+{
+  x1 = _mm_loadu_si32 (p1);
+  _mm_storeu_si32 (p2, x2);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx" } */
+/* { dg-final { scan-assembler-times "vmovd\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+volatile __m256i x1;
+
+int foo (void)
+{
+ return _mm256_cvtsi256_si32 (x1);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx512f" } */
+/* { dg-final { scan-assembler-times "vmovd\[ \\t\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+volatile __m512i x1;
+
+int foo (void)
+{
+ return _mm512_cvtsi512_si32 (x1);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavx512bw -mavx512vl -O2" } */
+/* { dg-final { scan-assembler-times "(?:vmovdqu8|vinserti128)\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "(?:vmovdqu8|vextracti128)\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\[^\n\]*\\)(?:\n|\[ \\t\]+#)" 1 } } */
+
+#include <immintrin.h>
+
+char *p, *p1;
+volatile __m256i yyy;
+
+void extern
+avx512bw_test (void)
+{
+ yyy = _mm256_loadu_epi8 (p);
+ _mm256_storeu_epi8 (p1, yyy);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavx512vl -O2" } */
+/* { dg-final { scan-assembler-times "(?:vinserti128|vmovdqu)\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+
+#include <immintrin.h>
+
+int *p;
+long long *p1;
+volatile __m256i x1, x2;
+
+void extern
+avx512vl_test (void)
+{
+ x1 = _mm256_loadu_epi32 (p);
+ x2 = _mm256_loadu_epi64 (p1);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavx512vl -O2" } */
+/* { dg-final { scan-assembler-times "vmovdqu\[ \\t\]+\[^\{\n\]*\\)\[^\n\]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+
+#include <immintrin.h>
+
+int *p;
+long long *p1;
+volatile __m128i x1, x2;
+
+void extern
+avx512vl_test (void)
+{
+ x1 = _mm_loadu_epi32 (p);
+ x2 = _mm_loadu_epi64 (p1);
+}
#define __builtin_ia32_vfmaddss3_mask3(A, B, C, D, E) __builtin_ia32_vfmaddss3_mask3(A, B, C, D, 8)
#define __builtin_ia32_vfmaddss3_maskz(A, B, C, D, E) __builtin_ia32_vfmaddss3_maskz(A, B, C, D, 8)
#define __builtin_ia32_vfmsubss3_mask3(A, B, C, D, E) __builtin_ia32_vfmsubss3_mask3(A, B, C, D, 8)
+#define __builtin_ia32_cvtsd2ss_mask_round(A, B, C, D, E) __builtin_ia32_cvtsd2ss_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_cvtss2sd_mask_round(A, B, C, D, E) __builtin_ia32_cvtss2sd_mask_round(A, B, C, D, 8)
/* avx512erintrin.h */
#define __builtin_ia32_exp2ps_mask(A, B, C, D) __builtin_ia32_exp2ps_mask(A, B, C, 8)
#define __builtin_ia32_rcp28sd_round(A, B, C) __builtin_ia32_rcp28sd_round(A, B, 8)
#define __builtin_ia32_rsqrt28ss_round(A, B, C) __builtin_ia32_rsqrt28ss_round(A, B, 8)
#define __builtin_ia32_rsqrt28sd_round(A, B, C) __builtin_ia32_rsqrt28sd_round(A, B, 8)
+#define __builtin_ia32_rcp28sd_mask_round(A, B, C, D, E) __builtin_ia32_rcp28sd_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_rcp28ss_mask_round(A, B, C, D, E) __builtin_ia32_rcp28ss_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_rsqrt28sd_mask_round(A, B, C, D, E) __builtin_ia32_rsqrt28sd_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_rsqrt28ss_mask_round(A, B, C, D, E) __builtin_ia32_rsqrt28ss_mask_round(A, B, C, D, 8)
/* avx512pfintrin.h */
#define __builtin_ia32_gatherpfdps(A, B, C, D, E) __builtin_ia32_gatherpfdps(A, B, C, 1, _MM_HINT_T0)
#define __builtin_ia32_cmpw128_mask(A, B, E, D) __builtin_ia32_cmpw128_mask(A, B, 1, D)
#define __builtin_ia32_cmpb256_mask(A, B, E, D) __builtin_ia32_cmpb256_mask(A, B, 1, D)
#define __builtin_ia32_cmpb128_mask(A, B, E, D) __builtin_ia32_cmpb128_mask(A, B, 1, D)
+#define __builtin_ia32_reducepd512_mask_round(A,B,C,D,E) __builtin_ia32_reducepd512_mask_round(A,1,C,D,8)
+#define __builtin_ia32_reduceps512_mask_round(A,B,C,D,E) __builtin_ia32_reduceps512_mask_round(A,1,C,D,8)
+#define __builtin_ia32_reducesd_mask_round(A, B, F, W, U, E) __builtin_ia32_reducesd_mask_round(A, B, 1, W, U, 8)
+#define __builtin_ia32_reducess_mask_round(A, B, F, W, U, E) __builtin_ia32_reducess_mask_round(A, B, 1, W, U, 8)
/* avx512vldqintrin.h */
#define __builtin_ia32_reduceps256_mask(A, E, C, D) __builtin_ia32_reduceps256_mask(A, 1, C, D)
#define __builtin_ia32_vfmaddss3_mask3(A, B, C, D, E) __builtin_ia32_vfmaddss3_mask3(A, B, C, D, 8)
#define __builtin_ia32_vfmaddss3_maskz(A, B, C, D, E) __builtin_ia32_vfmaddss3_maskz(A, B, C, D, 8)
#define __builtin_ia32_vfmsubss3_mask3(A, B, C, D, E) __builtin_ia32_vfmsubss3_mask3(A, B, C, D, 8)
+#define __builtin_ia32_cvtsd2ss_mask_round(A, B, C, D, E) __builtin_ia32_cvtsd2ss_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_cvtss2sd_mask_round(A, B, C, D, E) __builtin_ia32_cvtss2sd_mask_round(A, B, C, D, 8)
/* avx512pfintrin.h */
#define __builtin_ia32_gatherpfdps(A, B, C, D, E) __builtin_ia32_gatherpfdps(A, B, C, 1, _MM_HINT_T0)
#define __builtin_ia32_rcp28ss_round(A, B, C) __builtin_ia32_rcp28ss_round(A, B, 8)
#define __builtin_ia32_rsqrt28sd_round(A, B, C) __builtin_ia32_rsqrt28sd_round(A, B, 8)
#define __builtin_ia32_rsqrt28ss_round(A, B, C) __builtin_ia32_rsqrt28ss_round(A, B, 8)
+#define __builtin_ia32_rcp28sd_mask_round(A, B, C, D, E) __builtin_ia32_rcp28sd_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_rcp28ss_mask_round(A, B, C, D, E) __builtin_ia32_rcp28ss_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_rsqrt28sd_mask_round(A, B, C, D, E) __builtin_ia32_rsqrt28sd_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_rsqrt28ss_mask_round(A, B, C, D, E) __builtin_ia32_rsqrt28ss_mask_round(A, B, C, D, 8)
/* shaintrin.h */
#define __builtin_ia32_sha1rnds4(A, B, C) __builtin_ia32_sha1rnds4(A, B, 1)
#define __builtin_ia32_cvtps2qq512_mask(A, B, C, D) __builtin_ia32_cvtps2qq512_mask(A, B, C, 8)
#define __builtin_ia32_cvtpd2uqq512_mask(A, B, C, D) __builtin_ia32_cvtpd2uqq512_mask(A, B, C, 8)
#define __builtin_ia32_cvtpd2qq512_mask(A, B, C, D) __builtin_ia32_cvtpd2qq512_mask(A, B, C, 8)
+#define __builtin_ia32_reducesd_mask_round(A, B, C, D, E, F) __builtin_ia32_reducesd_mask_round(A, B, 8, D, E, 8)
+#define __builtin_ia32_reducess_mask_round(A, B, C, D, E, F) __builtin_ia32_reducess_mask_round(A, B, 8, D, E, 8)
+#define __builtin_ia32_reducepd512_mask_round(A, B, C, D, E) __builtin_ia32_reducepd512_mask_round(A, 8, C, D, 8)
+#define __builtin_ia32_reduceps512_mask_round(A, B, C, D, E) __builtin_ia32_reduceps512_mask_round(A, 8, C, D, 8)
/* avx512vlintrin.h */
#define __builtin_ia32_vpermilps_mask(A, E, C, D) __builtin_ia32_vpermilps_mask(A, 1, C, D)