This patch adds support for the following MVE ACLE store intrinsics, which store a byte, halfword, or word to memory.
vst1q_f32, vst1q_f16, vst1q_s8, vst1q_s32, vst1q_s16, vst1q_u8, vst1q_u32,
vst1q_u16, vstrhq_f16, vstrhq_scatter_offset_s32, vstrhq_scatter_offset_s16,
vstrhq_scatter_offset_u32, vstrhq_scatter_offset_u16,
vstrhq_scatter_offset_p_s32, vstrhq_scatter_offset_p_s16,
vstrhq_scatter_offset_p_u32, vstrhq_scatter_offset_p_u16,
vstrhq_scatter_shifted_offset_s32, vstrhq_scatter_shifted_offset_s16,
vstrhq_scatter_shifted_offset_u32, vstrhq_scatter_shifted_offset_u16,
vstrhq_scatter_shifted_offset_p_s32, vstrhq_scatter_shifted_offset_p_s16,
vstrhq_scatter_shifted_offset_p_u32, vstrhq_scatter_shifted_offset_p_u16,
vstrhq_s32, vstrhq_s16, vstrhq_u32, vstrhq_u16, vstrhq_p_f16, vstrhq_p_s32,
vstrhq_p_s16, vstrhq_p_u32, vstrhq_p_u16, vstrwq_f32, vstrwq_s32, vstrwq_u32,
vstrwq_p_f32, vstrwq_p_s32, vstrwq_p_u32.
Please refer to the M-profile Vector Extension (MVE) intrinsics documentation [1] for more details.
[1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics
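For illustration, here is a minimal, hypothetical usage sketch of a few of the new intrinsics (the function and variable names are invented for this example; build with MVE enabled, e.g. -march=armv8.1-m.main+mve -mfloat-abi=hard):

#include "arm_mve.h"

void
copy_out (int16_t *dst, int16x8_t v, uint32_t *wdst, uint32x4_t w,
          mve_pred16_t p)
{
  vstrhq_s16 (dst, v);       /* Plain halfword store, emits vstrh.16.  */
  vstrhq_p_s16 (dst, v, p);  /* Predicated store, emits vpst; vstrht.16.  */
  vst1q (wdst, w);           /* Polymorphic variant, resolves to vst1q_u32.  */
}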
gcc/ChangeLog:
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
* config/arm/arm_mve.h (vst1q_f32): Define macro.
(vst1q_f16): Likewise.
(vst1q_s8): Likewise.
(vst1q_s32): Likewise.
(vst1q_s16): Likewise.
(vst1q_u8): Likewise.
(vst1q_u32): Likewise.
(vst1q_u16): Likewise.
(vstrhq_f16): Likewise.
(vstrhq_scatter_offset_s32): Likewise.
(vstrhq_scatter_offset_s16): Likewise.
(vstrhq_scatter_offset_u32): Likewise.
(vstrhq_scatter_offset_u16): Likewise.
(vstrhq_scatter_offset_p_s32): Likewise.
(vstrhq_scatter_offset_p_s16): Likewise.
(vstrhq_scatter_offset_p_u32): Likewise.
(vstrhq_scatter_offset_p_u16): Likewise.
(vstrhq_scatter_shifted_offset_s32): Likewise.
(vstrhq_scatter_shifted_offset_s16): Likewise.
(vstrhq_scatter_shifted_offset_u32): Likewise.
(vstrhq_scatter_shifted_offset_u16): Likewise.
(vstrhq_scatter_shifted_offset_p_s32): Likewise.
(vstrhq_scatter_shifted_offset_p_s16): Likewise.
(vstrhq_scatter_shifted_offset_p_u32): Likewise.
(vstrhq_scatter_shifted_offset_p_u16): Likewise.
(vstrhq_s32): Likewise.
(vstrhq_s16): Likewise.
(vstrhq_u32): Likewise.
(vstrhq_u16): Likewise.
(vstrhq_p_f16): Likewise.
(vstrhq_p_s32): Likewise.
(vstrhq_p_s16): Likewise.
(vstrhq_p_u32): Likewise.
(vstrhq_p_u16): Likewise.
(vstrwq_f32): Likewise.
(vstrwq_s32): Likewise.
(vstrwq_u32): Likewise.
(vstrwq_p_f32): Likewise.
(vstrwq_p_s32): Likewise.
(vstrwq_p_u32): Likewise.
(__arm_vst1q_s8): Define intrinsic.
(__arm_vst1q_s32): Likewise.
(__arm_vst1q_s16): Likewise.
(__arm_vst1q_u8): Likewise.
(__arm_vst1q_u32): Likewise.
(__arm_vst1q_u16): Likewise.
(__arm_vstrhq_scatter_offset_s32): Likewise.
(__arm_vstrhq_scatter_offset_s16): Likewise.
(__arm_vstrhq_scatter_offset_u32): Likewise.
(__arm_vstrhq_scatter_offset_u16): Likewise.
(__arm_vstrhq_scatter_offset_p_s32): Likewise.
(__arm_vstrhq_scatter_offset_p_s16): Likewise.
(__arm_vstrhq_scatter_offset_p_u32): Likewise.
(__arm_vstrhq_scatter_offset_p_u16): Likewise.
(__arm_vstrhq_scatter_shifted_offset_s32): Likewise.
(__arm_vstrhq_scatter_shifted_offset_s16): Likewise.
(__arm_vstrhq_scatter_shifted_offset_u32): Likewise.
(__arm_vstrhq_scatter_shifted_offset_u16): Likewise.
(__arm_vstrhq_scatter_shifted_offset_p_s32): Likewise.
(__arm_vstrhq_scatter_shifted_offset_p_s16): Likewise.
(__arm_vstrhq_scatter_shifted_offset_p_u32): Likewise.
(__arm_vstrhq_scatter_shifted_offset_p_u16): Likewise.
(__arm_vstrhq_s32): Likewise.
(__arm_vstrhq_s16): Likewise.
(__arm_vstrhq_u32): Likewise.
(__arm_vstrhq_u16): Likewise.
(__arm_vstrhq_p_s32): Likewise.
(__arm_vstrhq_p_s16): Likewise.
(__arm_vstrhq_p_u32): Likewise.
(__arm_vstrhq_p_u16): Likewise.
(__arm_vstrwq_s32): Likewise.
(__arm_vstrwq_u32): Likewise.
(__arm_vstrwq_p_s32): Likewise.
(__arm_vstrwq_p_u32): Likewise.
(__arm_vstrwq_p_f32): Likewise.
(__arm_vstrwq_f32): Likewise.
(__arm_vst1q_f32): Likewise.
(__arm_vst1q_f16): Likewise.
(__arm_vstrhq_f16): Likewise.
(__arm_vstrhq_p_f16): Likewise.
(vst1q): Define polymorphic variant.
(vstrhq): Likewise.
(vstrhq_p): Likewise.
(vstrhq_scatter_offset_p): Likewise.
(vstrhq_scatter_offset): Likewise.
(vstrhq_scatter_shifted_offset_p): Likewise.
(vstrhq_scatter_shifted_offset): Likewise.
(vstrwq_p): Likewise.
(vstrwq): Likewise.
* config/arm/arm_mve_builtins.def (STRS): Use builtin qualifier.
(STRS_P): Likewise.
(STRSS): Likewise.
(STRSS_P): Likewise.
(STRSU): Likewise.
(STRSU_P): Likewise.
(STRU): Likewise.
(STRU_P): Likewise.
* config/arm/mve.md (VST1Q): Define iterator.
(VSTRHSOQ): Likewise.
(VSTRHSSOQ): Likewise.
(VSTRHQ): Likewise.
(VSTRWQ): Likewise.
(mve_vstrhq_fv8hf): Define RTL pattern.
(mve_vstrhq_p_fv8hf): Likewise.
(mve_vstrhq_p_<supf><mode>): Likewise.
(mve_vstrhq_scatter_offset_p_<supf><mode>): Likewise.
(mve_vstrhq_scatter_offset_<supf><mode>): Likewise.
(mve_vstrhq_scatter_shifted_offset_p_<supf><mode>): Likewise.
(mve_vstrhq_scatter_shifted_offset_<supf><mode>): Likewise.
(mve_vstrhq_<supf><mode>): Likewise.
(mve_vstrwq_fv4sf): Likewise.
(mve_vstrwq_p_fv4sf): Likewise.
(mve_vstrwq_p_<supf>v4si): Likewise.
(mve_vstrwq_<supf>v4si): Likewise.
(mve_vst1q_f<mode>): Define expand.
(mve_vst1q_<supf><mode>): Likewise.
gcc/testsuite/ChangeLog:
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
* gcc.target/arm/mve/intrinsics/vst1q_f16.c: New test.
* gcc.target/arm/mve/intrinsics/vst1q_f32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vst1q_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vst1q_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vst1q_s8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vst1q_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vst1q_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vst1q_u8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_f16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_p_f16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_p_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_p_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_p_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_p_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_s16.c:
Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_s32.c:
Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_u16.c:
Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_u32.c:
Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_s16.c:
Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_s32.c:
Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_u16.c:
Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_u32.c:
Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrhq_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrwq_f32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrwq_p_f32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrwq_p_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrwq_p_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrwq_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vstrwq_u32.c: Likewise.
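For reference, each new test follows the existing MVE intrinsics testsuite pattern; a representative sketch for vstrhq_s16 (the exact dg- directives and function bodies in the committed files may differ slightly):

/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

void
foo (int16_t *addr, int16x8_t value)
{
  vstrhq_s16 (addr, value);
}

/* { dg-final { scan-assembler "vstrh.16" } } */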
+2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Mihail Ionescu <mihail.ionescu@arm.com>
+ Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+
+ * config/arm/arm_mve.h (vst1q_f32): Define macro.
+ (vst1q_f16): Likewise.
+ (vst1q_s8): Likewise.
+ (vst1q_s32): Likewise.
+ (vst1q_s16): Likewise.
+ (vst1q_u8): Likewise.
+ (vst1q_u32): Likewise.
+ (vst1q_u16): Likewise.
+ (vstrhq_f16): Likewise.
+ (vstrhq_scatter_offset_s32): Likewise.
+ (vstrhq_scatter_offset_s16): Likewise.
+ (vstrhq_scatter_offset_u32): Likewise.
+ (vstrhq_scatter_offset_u16): Likewise.
+ (vstrhq_scatter_offset_p_s32): Likewise.
+ (vstrhq_scatter_offset_p_s16): Likewise.
+ (vstrhq_scatter_offset_p_u32): Likewise.
+ (vstrhq_scatter_offset_p_u16): Likewise.
+ (vstrhq_scatter_shifted_offset_s32): Likewise.
+ (vstrhq_scatter_shifted_offset_s16): Likewise.
+ (vstrhq_scatter_shifted_offset_u32): Likewise.
+ (vstrhq_scatter_shifted_offset_u16): Likewise.
+ (vstrhq_scatter_shifted_offset_p_s32): Likewise.
+ (vstrhq_scatter_shifted_offset_p_s16): Likewise.
+ (vstrhq_scatter_shifted_offset_p_u32): Likewise.
+ (vstrhq_scatter_shifted_offset_p_u16): Likewise.
+ (vstrhq_s32): Likewise.
+ (vstrhq_s16): Likewise.
+ (vstrhq_u32): Likewise.
+ (vstrhq_u16): Likewise.
+ (vstrhq_p_f16): Likewise.
+ (vstrhq_p_s32): Likewise.
+ (vstrhq_p_s16): Likewise.
+ (vstrhq_p_u32): Likewise.
+ (vstrhq_p_u16): Likewise.
+ (vstrwq_f32): Likewise.
+ (vstrwq_s32): Likewise.
+ (vstrwq_u32): Likewise.
+ (vstrwq_p_f32): Likewise.
+ (vstrwq_p_s32): Likewise.
+ (vstrwq_p_u32): Likewise.
+ (__arm_vst1q_s8): Define intrinsic.
+ (__arm_vst1q_s32): Likewise.
+ (__arm_vst1q_s16): Likewise.
+ (__arm_vst1q_u8): Likewise.
+ (__arm_vst1q_u32): Likewise.
+ (__arm_vst1q_u16): Likewise.
+ (__arm_vstrhq_scatter_offset_s32): Likewise.
+ (__arm_vstrhq_scatter_offset_s16): Likewise.
+ (__arm_vstrhq_scatter_offset_u32): Likewise.
+ (__arm_vstrhq_scatter_offset_u16): Likewise.
+ (__arm_vstrhq_scatter_offset_p_s32): Likewise.
+ (__arm_vstrhq_scatter_offset_p_s16): Likewise.
+ (__arm_vstrhq_scatter_offset_p_u32): Likewise.
+ (__arm_vstrhq_scatter_offset_p_u16): Likewise.
+ (__arm_vstrhq_scatter_shifted_offset_s32): Likewise.
+ (__arm_vstrhq_scatter_shifted_offset_s16): Likewise.
+ (__arm_vstrhq_scatter_shifted_offset_u32): Likewise.
+ (__arm_vstrhq_scatter_shifted_offset_u16): Likewise.
+ (__arm_vstrhq_scatter_shifted_offset_p_s32): Likewise.
+ (__arm_vstrhq_scatter_shifted_offset_p_s16): Likewise.
+ (__arm_vstrhq_scatter_shifted_offset_p_u32): Likewise.
+ (__arm_vstrhq_scatter_shifted_offset_p_u16): Likewise.
+ (__arm_vstrhq_s32): Likewise.
+ (__arm_vstrhq_s16): Likewise.
+ (__arm_vstrhq_u32): Likewise.
+ (__arm_vstrhq_u16): Likewise.
+ (__arm_vstrhq_p_s32): Likewise.
+ (__arm_vstrhq_p_s16): Likewise.
+ (__arm_vstrhq_p_u32): Likewise.
+ (__arm_vstrhq_p_u16): Likewise.
+ (__arm_vstrwq_s32): Likewise.
+ (__arm_vstrwq_u32): Likewise.
+ (__arm_vstrwq_p_s32): Likewise.
+ (__arm_vstrwq_p_u32): Likewise.
+ (__arm_vstrwq_p_f32): Likewise.
+ (__arm_vstrwq_f32): Likewise.
+ (__arm_vst1q_f32): Likewise.
+ (__arm_vst1q_f16): Likewise.
+ (__arm_vstrhq_f16): Likewise.
+ (__arm_vstrhq_p_f16): Likewise.
+ (vst1q): Define polymorphic variant.
+ (vstrhq): Likewise.
+ (vstrhq_p): Likewise.
+ (vstrhq_scatter_offset_p): Likewise.
+ (vstrhq_scatter_offset): Likewise.
+ (vstrhq_scatter_shifted_offset_p): Likewise.
+ (vstrhq_scatter_shifted_offset): Likewise.
+ (vstrwq_p): Likewise.
+ (vstrwq): Likewise.
+ * config/arm/arm_mve_builtins.def (STRS): Use builtin qualifier.
+ (STRS_P): Likewise.
+ (STRSS): Likewise.
+ (STRSS_P): Likewise.
+ (STRSU): Likewise.
+ (STRSU_P): Likewise.
+ (STRU): Likewise.
+ (STRU_P): Likewise.
+ * config/arm/mve.md (VST1Q): Define iterator.
+ (VSTRHSOQ): Likewise.
+ (VSTRHSSOQ): Likewise.
+ (VSTRHQ): Likewise.
+ (VSTRWQ): Likewise.
+ (mve_vstrhq_fv8hf): Define RTL pattern.
+ (mve_vstrhq_p_fv8hf): Likewise.
+ (mve_vstrhq_p_<supf><mode>): Likewise.
+ (mve_vstrhq_scatter_offset_p_<supf><mode>): Likewise.
+ (mve_vstrhq_scatter_offset_<supf><mode>): Likewise.
+ (mve_vstrhq_scatter_shifted_offset_p_<supf><mode>): Likewise.
+ (mve_vstrhq_scatter_shifted_offset_<supf><mode>): Likewise.
+ (mve_vstrhq_<supf><mode>): Likewise.
+ (mve_vstrwq_fv4sf): Likewise.
+ (mve_vstrwq_p_fv4sf): Likewise.
+ (mve_vstrwq_p_<supf>v4si): Likewise.
+ (mve_vstrwq_<supf>v4si): Likewise.
+ (mve_vst1q_f<mode>): Define expand.
+ (mve_vst1q_<supf><mode>): Likewise.
+
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
#define vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p)
#define vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p)
+#define vst1q_f32(__addr, __value) __arm_vst1q_f32(__addr, __value)
+#define vst1q_f16(__addr, __value) __arm_vst1q_f16(__addr, __value)
+#define vst1q_s8(__addr, __value) __arm_vst1q_s8(__addr, __value)
+#define vst1q_s32(__addr, __value) __arm_vst1q_s32(__addr, __value)
+#define vst1q_s16(__addr, __value) __arm_vst1q_s16(__addr, __value)
+#define vst1q_u8(__addr, __value) __arm_vst1q_u8(__addr, __value)
+#define vst1q_u32(__addr, __value) __arm_vst1q_u32(__addr, __value)
+#define vst1q_u16(__addr, __value) __arm_vst1q_u16(__addr, __value)
+#define vstrhq_f16(__addr, __value) __arm_vstrhq_f16(__addr, __value)
+#define vstrhq_scatter_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_offset_s32( __base, __offset, __value)
+#define vstrhq_scatter_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_offset_s16( __base, __offset, __value)
+#define vstrhq_scatter_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_offset_u32( __base, __offset, __value)
+#define vstrhq_scatter_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_offset_u16( __base, __offset, __value)
+#define vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p)
+#define vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p)
+#define vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p)
+#define vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p)
+#define vstrhq_scatter_shifted_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s32( __base, __offset, __value)
+#define vstrhq_scatter_shifted_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s16( __base, __offset, __value)
+#define vstrhq_scatter_shifted_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u32( __base, __offset, __value)
+#define vstrhq_scatter_shifted_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u16( __base, __offset, __value)
+#define vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p)
+#define vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p)
+#define vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p)
+#define vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p)
+#define vstrhq_s32(__addr, __value) __arm_vstrhq_s32(__addr, __value)
+#define vstrhq_s16(__addr, __value) __arm_vstrhq_s16(__addr, __value)
+#define vstrhq_u32(__addr, __value) __arm_vstrhq_u32(__addr, __value)
+#define vstrhq_u16(__addr, __value) __arm_vstrhq_u16(__addr, __value)
+#define vstrhq_p_f16(__addr, __value, __p) __arm_vstrhq_p_f16(__addr, __value, __p)
+#define vstrhq_p_s32(__addr, __value, __p) __arm_vstrhq_p_s32(__addr, __value, __p)
+#define vstrhq_p_s16(__addr, __value, __p) __arm_vstrhq_p_s16(__addr, __value, __p)
+#define vstrhq_p_u32(__addr, __value, __p) __arm_vstrhq_p_u32(__addr, __value, __p)
+#define vstrhq_p_u16(__addr, __value, __p) __arm_vstrhq_p_u16(__addr, __value, __p)
+#define vstrwq_f32(__addr, __value) __arm_vstrwq_f32(__addr, __value)
+#define vstrwq_s32(__addr, __value) __arm_vstrwq_s32(__addr, __value)
+#define vstrwq_u32(__addr, __value) __arm_vstrwq_u32(__addr, __value)
+#define vstrwq_p_f32(__addr, __value, __p) __arm_vstrwq_p_f32(__addr, __value, __p)
+#define vstrwq_p_s32(__addr, __value, __p) __arm_vstrwq_p_s32(__addr, __value, __p)
+#define vstrwq_p_u32(__addr, __value, __p) __arm_vstrwq_p_u32(__addr, __value, __p)
#endif
__extension__ extern __inline void
return __builtin_mve_vldrwq_gather_shifted_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
}
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_s8 (int8_t * __addr, int8x16_t __value)
+{
+ __builtin_mve_vst1q_sv16qi ((__builtin_neon_qi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_s32 (int32_t * __addr, int32x4_t __value)
+{
+ __builtin_mve_vst1q_sv4si ((__builtin_neon_si *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_s16 (int16_t * __addr, int16x8_t __value)
+{
+ __builtin_mve_vst1q_sv8hi ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_u8 (uint8_t * __addr, uint8x16_t __value)
+{
+ __builtin_mve_vst1q_uv16qi ((__builtin_neon_qi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_u32 (uint32_t * __addr, uint32x4_t __value)
+{
+ __builtin_mve_vst1q_uv4si ((__builtin_neon_si *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_u16 (uint16_t * __addr, uint16x8_t __value)
+{
+ __builtin_mve_vst1q_uv8hi ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __builtin_mve_vstrhq_scatter_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
+{
+ __builtin_mve_vstrhq_scatter_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __builtin_mve_vstrhq_scatter_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
+{
+ __builtin_mve_vstrhq_scatter_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_s32 (int16_t * __addr, int32x4_t __value)
+{
+ __builtin_mve_vstrhq_sv4si ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_s16 (int16_t * __addr, int16x8_t __value)
+{
+ __builtin_mve_vstrhq_sv8hi ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_u32 (uint16_t * __addr, uint32x4_t __value)
+{
+ __builtin_mve_vstrhq_uv4si ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_u16 (uint16_t * __addr, uint16x8_t __value)
+{
+ __builtin_mve_vstrhq_uv8hi ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p_s32 (int16_t * __addr, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_p_sv4si ((__builtin_neon_hi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_p_sv8hi ((__builtin_neon_hi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p_u32 (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_p_uv4si ((__builtin_neon_hi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_p_uv8hi ((__builtin_neon_hi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_s32 (int32_t * __addr, int32x4_t __value)
+{
+ __builtin_mve_vstrwq_sv4si ((__builtin_neon_si *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_u32 (uint32_t * __addr, uint32x4_t __value)
+{
+ __builtin_mve_vstrwq_uv4si ((__builtin_neon_si *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_p_sv4si ((__builtin_neon_si *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_p_uv4si ((__builtin_neon_si *) __addr, __value, __p);
+}
+
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
__extension__ extern __inline void
return __builtin_mve_vldrwq_gather_shifted_offset_z_fv4sf (__base, __offset, __p);
}
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_p_fv4sf (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_f32 (float32_t * __addr, float32x4_t __value)
+{
+ __builtin_mve_vstrwq_fv4sf (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_f32 (float32_t * __addr, float32x4_t __value)
+{
+ __builtin_mve_vst1q_fv4sf (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_f16 (float16_t * __addr, float16x8_t __value)
+{
+ __builtin_mve_vst1q_fv8hf (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_f16 (float16_t * __addr, float16x8_t __value)
+{
+ __builtin_mve_vstrhq_fv8hf (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_p_fv8hf (__addr, __value, __p);
+}
+
#endif
enum {
int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2), \
int (*)[__ARM_mve_type_float32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce(__p0, float32_t const *), p1, p2));})
+#define vst1q(p0,p1) __arm_vst1q(p0,p1)
+#define __arm_vst1q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define vstrhq(p0,p1) __arm_vstrhq(p0,p1)
+#define __arm_vstrhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)));})
+
+#define vstrhq_p(p0,p1,p2) __arm_vstrhq_p(p0,p1,p2)
+#define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2));})
+
+#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3)
+#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
+
+#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2)
+#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
+
+#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3)
+#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
+
+#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2)
+#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
+
+#define vstrwq_p(p0,p1,p2) __arm_vstrwq_p(p0,p1,p2)
+#define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define vstrwq(p0,p1) __arm_vstrwq(p0,p1)
+#define __arm_vstrwq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
+
#else /* MVE Integer. */
#define vst4q(p0,p1) __arm_vst4q(p0,p1)
int (*)[__ARM_mve_type_int32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \
int (*)[__ARM_mve_type_uint32_t_const_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2));})
+#define vst1q(p0,p1) __arm_vst1q(p0,p1)
+#define __arm_vst1q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vstrhq(p0,p1) __arm_vstrhq(p0,p1)
+#define __arm_vstrhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vstrhq_p(p0,p1,p2) __arm_vstrhq_p(p0,p1,p2)
+#define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3)
+#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2)
+#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3)
+#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2)
+#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+
+#define vstrwq(p0,p1) __arm_vstrwq(p0,p1)
+#define __arm_vstrwq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vstrwq_p(p0,p1,p2) __arm_vstrwq_p(p0,p1,p2)
+#define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
#endif /* MVE Integer. */
#define vldrdq_gather_offset(p0,p1) __arm_vldrdq_gather_offset(p0,p1)
VAR1 (LDRGU_Z, vldrdq_gather_shifted_offset_z_u, v2di)
VAR1 (LDRGU_Z, vldrwq_gather_offset_z_u, v4si)
VAR1 (LDRGU_Z, vldrwq_gather_shifted_offset_z_u, v4si)
+VAR3 (STRU, vst1q_u, v16qi, v8hi, v4si)
+VAR3 (STRS, vst1q_s, v16qi, v8hi, v4si)
+VAR2 (STRU_P, vstrhq_p_u, v8hi, v4si)
+VAR2 (STRU, vstrhq_u, v8hi, v4si)
+VAR2 (STRS_P, vstrhq_p_s, v8hi, v4si)
+VAR2 (STRS, vstrhq_s, v8hi, v4si)
+VAR2 (STRS, vst1q_f, v8hf, v4sf)
+VAR2 (STRSU_P, vstrhq_scatter_shifted_offset_p_u, v8hi, v4si)
+VAR2 (STRSU_P, vstrhq_scatter_offset_p_u, v8hi, v4si)
+VAR2 (STRSU, vstrhq_scatter_shifted_offset_u, v8hi, v4si)
+VAR2 (STRSU, vstrhq_scatter_offset_u, v8hi, v4si)
+VAR2 (STRSS_P, vstrhq_scatter_shifted_offset_p_s, v8hi, v4si)
+VAR2 (STRSS_P, vstrhq_scatter_offset_p_s, v8hi, v4si)
+VAR2 (STRSS, vstrhq_scatter_shifted_offset_s, v8hi, v4si)
+VAR2 (STRSS, vstrhq_scatter_offset_s, v8hi, v4si)
+VAR1 (STRS, vstrhq_f, v8hf)
+VAR1 (STRS_P, vstrhq_p_f, v8hf)
+VAR1 (STRS, vstrwq_f, v4sf)
+VAR1 (STRS, vstrwq_s, v4si)
+VAR1 (STRU, vstrwq_u, v4si)
+VAR1 (STRS_P, vstrwq_p_f, v4sf)
+VAR1 (STRS_P, vstrwq_p_s, v4si)
+VAR1 (STRU_P, vstrwq_p_u, v4si)
VLDRDQGO_S VLDRDQGO_U VLDRDQGSO_S VLDRDQGSO_U
VLDRHQGO_F VLDRHQGSO_F VLDRWQGB_F VLDRWQGO_F
VLDRWQGO_S VLDRWQGO_U VLDRWQGSO_F VLDRWQGSO_S
- VLDRWQGSO_U])
+ VLDRWQGSO_U VSTRHQ_F VST1Q_S VST1Q_U VSTRHQSO_S
+ VSTRHQSO_U VSTRHQSSO_S VSTRHQSSO_U VSTRHQ_S
+ VSTRHQ_U VSTRWQ_S VSTRWQ_U VSTRWQ_F VST1Q_F])
(define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF") (V8HF "V8HI")
(V4SF "V4SI")])
(VLDRWQ_U "u") (VLDRDQGB_S "s") (VLDRDQGB_U "u")
(VLDRDQGO_S "s") (VLDRDQGO_U "u") (VLDRDQGSO_S "s")
(VLDRDQGSO_U "u") (VLDRWQGO_S "s") (VLDRWQGO_U "u")
- (VLDRWQGSO_S "s") (VLDRWQGSO_U "u")])
+ (VLDRWQGSO_S "s") (VLDRWQGSO_U "u") (VST1Q_S "s")
+ (VST1Q_U "u") (VSTRHQSO_S "s") (VSTRHQSO_U "u")
+ (VSTRHQSSO_S "s") (VSTRHQSSO_U "u") (VSTRHQ_S "s")
+ (VSTRHQ_U "u") (VSTRWQ_S "s") (VSTRWQ_U "u")])
(define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32")
(VCTP64Q "64") (VCTP8Q_M "8") (VCTP16Q_M "16")
(define_int_iterator VLDRDGSOQ [VLDRDQGSO_S VLDRDQGSO_U])
(define_int_iterator VLDRWGOQ [VLDRWQGO_S VLDRWQGO_U])
(define_int_iterator VLDRWGSOQ [VLDRWQGSO_S VLDRWQGSO_U])
+(define_int_iterator VST1Q [VST1Q_S VST1Q_U])
+(define_int_iterator VSTRHSOQ [VSTRHQSO_S VSTRHQSO_U])
+(define_int_iterator VSTRHSSOQ [VSTRHQSSO_S VSTRHQSSO_U])
+(define_int_iterator VSTRHQ [VSTRHQ_S VSTRHQ_U])
+(define_int_iterator VSTRWQ [VSTRWQ_S VSTRWQ_U])
(define_insn "*mve_mov<mode>"
[(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w,w,r,w,Us")
return "";
}
[(set_attr "length" "8")])
+
+;;
+;; [vstrhq_f]
+;;
+(define_insn "mve_vstrhq_fv8hf"
+ [(set (match_operand:V8HI 0 "memory_operand" "=Us")
+ (unspec:V8HI [(match_operand:V8HF 1 "s_register_operand" "w")]
+ VSTRHQ_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+{
+ rtx ops[2];
+ int regno = REGNO (operands[1]);
+ ops[1] = gen_rtx_REG (TImode, regno);
+ ops[0] = operands[0];
+ output_asm_insn ("vstrh.16\t%q1, %E0",ops);
+ return "";
+}
+ [(set_attr "length" "4")])
+
+;;
+;; [vstrhq_p_f]
+;;
+(define_insn "mve_vstrhq_p_fv8hf"
+ [(set (match_operand:V8HI 0 "memory_operand" "=Us")
+ (unspec:V8HI [(match_operand:V8HF 1 "s_register_operand" "w")
+ (match_operand:HI 2 "vpr_register_operand" "Up")]
+ VSTRHQ_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+{
+ rtx ops[2];
+ int regno = REGNO (operands[1]);
+ ops[1] = gen_rtx_REG (TImode, regno);
+ ops[0] = operands[0];
+ output_asm_insn ("vpst\n\tvstrht.16\t%q1, %E0",ops);
+ return "";
+}
+ [(set_attr "length" "8")])
+
+;;
+;; [vstrhq_p_s vstrhq_p_u]
+;;
+(define_insn "mve_vstrhq_p_<supf><mode>"
+ [(set (match_operand:<MVE_H_ELEM> 0 "memory_operand" "=Us")
+ (unspec:<MVE_H_ELEM> [(match_operand:MVE_6 1 "s_register_operand" "w")
+ (match_operand:HI 2 "vpr_register_operand" "Up")]
+ VSTRHQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[2];
+ int regno = REGNO (operands[1]);
+ ops[1] = gen_rtx_REG (TImode, regno);
+ ops[0] = operands[0];
+ output_asm_insn ("vpst\n\tvstrht.<V_sz_elem>\t%q1, %E0",ops);
+ return "";
+}
+ [(set_attr "length" "8")])
+
+;;
+;; [vstrhq_scatter_offset_p_s vstrhq_scatter_offset_p_u]
+;;
+(define_insn "mve_vstrhq_scatter_offset_p_<supf><mode>"
+ [(set (match_operand:<MVE_H_ELEM> 0 "memory_operand" "=Us")
+ (unspec:<MVE_H_ELEM>
+ [(match_operand:MVE_6 1 "s_register_operand" "w")
+ (match_operand:MVE_6 2 "s_register_operand" "w")
+ (match_operand:HI 3 "vpr_register_operand" "Up")]
+ VSTRHSOQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[3];
+ ops[0] = operands[0];
+ ops[1] = operands[1];
+ ops[2] = operands[2];
+ output_asm_insn ("vpst\n\tvstrht.<V_sz_elem>\t%q2, [%m0, %q1]",ops);
+ return "";
+}
+ [(set_attr "length" "8")])
+
+;;
+;; [vstrhq_scatter_offset_s vstrhq_scatter_offset_u]
+;;
+(define_insn "mve_vstrhq_scatter_offset_<supf><mode>"
+ [(set (match_operand:<MVE_H_ELEM> 0 "memory_operand" "=Us")
+ (unspec:<MVE_H_ELEM>
+ [(match_operand:MVE_6 1 "s_register_operand" "w")
+ (match_operand:MVE_6 2 "s_register_operand" "w")]
+ VSTRHSOQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[3];
+ ops[0] = operands[0];
+ ops[1] = operands[1];
+ ops[2] = operands[2];
+ output_asm_insn ("vstrh.<V_sz_elem>\t%q2, [%m0, %q1]",ops);
+ return "";
+}
+ [(set_attr "length" "4")])
+
+;;
+;; [vstrhq_scatter_shifted_offset_p_s vstrhq_scatter_shifted_offset_p_u]
+;;
+(define_insn "mve_vstrhq_scatter_shifted_offset_p_<supf><mode>"
+ [(set (match_operand:<MVE_H_ELEM> 0 "memory_operand" "=Us")
+ (unspec:<MVE_H_ELEM>
+ [(match_operand:MVE_6 1 "s_register_operand" "w")
+ (match_operand:MVE_6 2 "s_register_operand" "w")
+ (match_operand:HI 3 "vpr_register_operand" "Up")]
+ VSTRHSSOQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[3];
+ ops[0] = operands[0];
+ ops[1] = operands[1];
+ ops[2] = operands[2];
+ output_asm_insn ("vpst\n\tvstrht.<V_sz_elem>\t%q2, [%m0, %q1, uxtw #1]",ops);
+ return "";
+}
+ [(set_attr "length" "8")])
+
+;;
+;; [vstrhq_scatter_shifted_offset_s vstrhq_scatter_shifted_offset_u]
+;;
+(define_insn "mve_vstrhq_scatter_shifted_offset_<supf><mode>"
+ [(set (match_operand:<MVE_H_ELEM> 0 "memory_operand" "=Us")
+ (unspec:<MVE_H_ELEM>
+ [(match_operand:MVE_6 1 "s_register_operand" "w")
+ (match_operand:MVE_6 2 "s_register_operand" "w")]
+ VSTRHSSOQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[3];
+ ops[0] = operands[0];
+ ops[1] = operands[1];
+ ops[2] = operands[2];
+ output_asm_insn ("vstrh.<V_sz_elem>\t%q2, [%m0, %q1, uxtw #1]",ops);
+ return "";
+}
+ [(set_attr "length" "4")])
+
+;;
+;; [vstrhq_s, vstrhq_u]
+;;
+(define_insn "mve_vstrhq_<supf><mode>"
+ [(set (match_operand:<MVE_H_ELEM> 0 "memory_operand" "=Us")
+ (unspec:<MVE_H_ELEM> [(match_operand:MVE_6 1 "s_register_operand" "w")]
+ VSTRHQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[2];
+ int regno = REGNO (operands[1]);
+ ops[1] = gen_rtx_REG (TImode, regno);
+ ops[0] = operands[0];
+ output_asm_insn ("vstrh.<V_sz_elem>\t%q1, %E0",ops);
+ return "";
+}
+ [(set_attr "length" "4")])
+
+;;
+;; [vstrwq_f]
+;;
+(define_insn "mve_vstrwq_fv4sf"
+ [(set (match_operand:V4SI 0 "memory_operand" "=Us")
+ (unspec:V4SI [(match_operand:V4SF 1 "s_register_operand" "w")]
+ VSTRWQ_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+{
+ rtx ops[2];
+ int regno = REGNO (operands[1]);
+ ops[1] = gen_rtx_REG (TImode, regno);
+ ops[0] = operands[0];
+ output_asm_insn ("vstrw.32\t%q1, %E0",ops);
+ return "";
+}
+ [(set_attr "length" "4")])
+
+;;
+;; [vstrwq_p_f]
+;;
+(define_insn "mve_vstrwq_p_fv4sf"
+ [(set (match_operand:V4SI 0 "memory_operand" "=Us")
+ (unspec:V4SI [(match_operand:V4SF 1 "s_register_operand" "w")
+ (match_operand:HI 2 "vpr_register_operand" "Up")]
+ VSTRWQ_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+{
+ rtx ops[2];
+ int regno = REGNO (operands[1]);
+ ops[1] = gen_rtx_REG (TImode, regno);
+ ops[0] = operands[0];
+ output_asm_insn ("vpst\n\tvstrwt.32\t%q1, %E0",ops);
+ return "";
+}
+ [(set_attr "length" "8")])
+
+;;
+;; [vstrwq_p_s vstrwq_p_u]
+;;
+(define_insn "mve_vstrwq_p_<supf>v4si"
+ [(set (match_operand:V4SI 0 "memory_operand" "=Us")
+ (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w")
+ (match_operand:HI 2 "vpr_register_operand" "Up")]
+ VSTRWQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[2];
+ int regno = REGNO (operands[1]);
+ ops[1] = gen_rtx_REG (TImode, regno);
+ ops[0] = operands[0];
+ output_asm_insn ("vpst\n\tvstrwt.32\t%q1, %E0",ops);
+ return "";
+}
+ [(set_attr "length" "8")])
+
+;;
+;; [vstrwq_s vstrwq_u]
+;;
+(define_insn "mve_vstrwq_<supf>v4si"
+ [(set (match_operand:V4SI 0 "memory_operand" "=Us")
+ (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w")]
+ VSTRWQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[2];
+ int regno = REGNO (operands[1]);
+ ops[1] = gen_rtx_REG (TImode, regno);
+ ops[0] = operands[0];
+ output_asm_insn ("vstrw.32\t%q1, %E0",ops);
+ return "";
+}
+ [(set_attr "length" "4")])
+
+(define_expand "mve_vst1q_f<mode>"
+ [(match_operand:<MVE_CNVT> 0 "memory_operand")
+ (unspec:<MVE_CNVT> [(match_operand:MVE_0 1 "s_register_operand")] VST1Q_F)
+ ]
+ "TARGET_HAVE_MVE || TARGET_HAVE_MVE_FLOAT"
+{
+ emit_insn (gen_mve_vstr<V_sz_elem1>q_f<mode> (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "mve_vst1q_<supf><mode>"
+ [(match_operand:MVE_2 0 "memory_operand")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand")] VST1Q)
+ ]
+ "TARGET_HAVE_MVE"
+{
+ emit_insn (gen_mve_vstr<V_sz_elem1>q_<supf><mode> (operands[0], operands[1]));
+ DONE;
+})
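The two vst1q expanders simply forward to the element-sized vstr patterns defined above, so a vst1q_* intrinsic and the corresponding vstr*q_* intrinsic are expected to produce the same contiguous store; the tests added below scan for exactly those vstrb/vstrh/vstrw opcodes. A small hand-written example (not part of the patch) of what this means at the source level:

#include "arm_mve.h"

/* Both calls go through the same mve_vstrhq pattern and should each
   emit a single vstrh.16 contiguous store.  */
void
store_both_ways (int16_t *addr, int16x8_t value)
{
  vst1q_s16 (addr, value);
  vstrhq_s16 (addr, value);
}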
+2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Mihail Ionescu <mihail.ionescu@arm.com>
+ Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+
+ * gcc.target/arm/mve/intrinsics/vst1q_f16.c: New test.
+ * gcc.target/arm/mve/intrinsics/vst1q_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vst1q_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vst1q_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vst1q_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vst1q_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vst1q_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vst1q_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_p_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_p_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_p_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_p_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_p_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_s16.c:
+ Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_s32.c:
+ Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_u16.c:
+ Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_u32.c:
+ Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_s16.c:
+ Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_s32.c:
+ Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_u16.c:
+ Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_u32.c:
+ Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrhq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrwq_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrwq_p_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrwq_p_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrwq_p_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrwq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vstrwq_u32.c: Likewise.
+
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* { dg-do compile } */
-/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" } */
-/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (float16_t * addr, float16x8_t value)
+{
+ vst1q_f16 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
+
+void
+foo1 (float16_t * addr, float16x8_t value)
+{
+ vst1q (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (float32_t * addr, float32x4_t value)
+{
+ vst1q_f32 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
+
+void
+foo1 (float32_t * addr, float32x4_t value)
+{
+ vst1q (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * addr, int16x8_t value)
+{
+ vst1q_s16 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
+
+void
+foo1 (int16_t * addr, int16x8_t value)
+{
+ vst1q (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int32_t * addr, int32x4_t value)
+{
+ vst1q_s32 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
+
+void
+foo1 (int32_t * addr, int32x4_t value)
+{
+ vst1q (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int8_t * addr, int8x16_t value)
+{
+ vst1q_s8 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrb.8" } } */
+
+void
+foo1 (int8_t * addr, int8x16_t value)
+{
+ vst1q (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrb.8" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * addr, uint16x8_t value)
+{
+ vst1q_u16 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
+
+void
+foo1 (uint16_t * addr, uint16x8_t value)
+{
+ vst1q (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint32_t * addr, uint32x4_t value)
+{
+ vst1q_u32 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
+
+void
+foo1 (uint32_t * addr, uint32x4_t value)
+{
+ vst1q (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint8_t * addr, uint8x16_t value)
+{
+ vst1q_u8 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrb.8" } } */
+
+void
+foo1 (uint8_t * addr, uint8x16_t value)
+{
+ vst1q (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrb.8" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (float16_t * addr, float16x8_t value)
+{
+ vstrhq_f16 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
+
+void
+foo1 (float16_t * addr, float16x8_t value)
+{
+ vstrhq (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (float16_t * addr, float16x8_t value, mve_pred16_t p)
+{
+ vstrhq_p_f16 (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
+
+void
+foo1 (float16_t * addr, float16x8_t value, mve_pred16_t p)
+{
+ vstrhq_p (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * addr, int16x8_t value, mve_pred16_t p)
+{
+ vstrhq_p_s16 (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
+
+void
+foo1 (int16_t * addr, int16x8_t value, mve_pred16_t p)
+{
+ vstrhq_p (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * addr, int32x4_t value, mve_pred16_t p)
+{
+ vstrhq_p_s32 (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
+
+void
+foo1 (int16_t * addr, int32x4_t value, mve_pred16_t p)
+{
+ vstrhq_p (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * addr, uint16x8_t value, mve_pred16_t p)
+{
+ vstrhq_p_u16 (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
+
+void
+foo1 (uint16_t * addr, uint16x8_t value, mve_pred16_t p)
+{
+ vstrhq_p (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * addr, uint32x4_t value, mve_pred16_t p)
+{
+ vstrhq_p_u32 (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
+
+void
+foo1 (uint16_t * addr, uint32x4_t value, mve_pred16_t p)
+{
+ vstrhq_p (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * addr, int16x8_t value)
+{
+ vstrhq_s16 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
+
+void
+foo1 (int16_t * addr, int16x8_t value)
+{
+ vstrhq (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * addr, int32x4_t value)
+{
+ vstrhq_s32 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
+
+void
+foo1 (int16_t * addr, int32x4_t value)
+{
+ vstrhq (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_offset_p_s16 (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
+
+void
+foo1 (int16_t * base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_offset_p (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_offset_p_s32 (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
+
+void
+foo1 (int16_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_offset_p (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_offset_p_u16 (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
+
+void
+foo1 (uint16_t * base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_offset_p (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_offset_p_u32 (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
+
+void
+foo1 (uint16_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_offset_p (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * base, uint16x8_t offset, int16x8_t value)
+{
+ vstrhq_scatter_offset_s16 (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
+
+void
+foo1 (int16_t * base, uint16x8_t offset, int16x8_t value)
+{
+ vstrhq_scatter_offset (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * base, uint32x4_t offset, int32x4_t value)
+{
+ vstrhq_scatter_offset_s32 (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
+
+void
+foo1 (int16_t * base, uint32x4_t offset, int32x4_t value)
+{
+ vstrhq_scatter_offset (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * base, uint16x8_t offset, uint16x8_t value)
+{
+ vstrhq_scatter_offset_u16 (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
+
+void
+foo1 (uint16_t * base, uint16x8_t offset, uint16x8_t value)
+{
+ vstrhq_scatter_offset (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * base, uint32x4_t offset, uint32x4_t value)
+{
+ vstrhq_scatter_offset_u32 (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
+
+void
+foo1 (uint16_t * base, uint32x4_t offset, uint32x4_t value)
+{
+ vstrhq_scatter_offset (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_shifted_offset_p_s16 (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
+
+void
+foo1 (int16_t * base, uint16x8_t offset, int16x8_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_shifted_offset_p (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_shifted_offset_p_s32 (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
+
+void
+foo1 (int16_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_shifted_offset_p (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_shifted_offset_p_u16 (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
+
+void
+foo1 (uint16_t * base, uint16x8_t offset, uint16x8_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_shifted_offset_p (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_shifted_offset_p_u32 (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
+
+void
+foo1 (uint16_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p)
+{
+ vstrhq_scatter_shifted_offset_p (base, offset, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrht.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * base, uint16x8_t offset, int16x8_t value)
+{
+ vstrhq_scatter_shifted_offset_s16 (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
+
+void
+foo1 (int16_t * base, uint16x8_t offset, int16x8_t value)
+{
+ vstrhq_scatter_shifted_offset (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int16_t * base, uint32x4_t offset, int32x4_t value)
+{
+ vstrhq_scatter_shifted_offset_s32 (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
+
+void
+foo1 (int16_t * base, uint32x4_t offset, int32x4_t value)
+{
+ vstrhq_scatter_shifted_offset (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * base, uint16x8_t offset, uint16x8_t value)
+{
+ vstrhq_scatter_shifted_offset_u16 (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
+
+void
+foo1 (uint16_t * base, uint16x8_t offset, uint16x8_t value)
+{
+ vstrhq_scatter_shifted_offset (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * base, uint32x4_t offset, uint32x4_t value)
+{
+ vstrhq_scatter_shifted_offset_u32 (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
+
+void
+foo1 (uint16_t * base, uint32x4_t offset, uint32x4_t value)
+{
+ vstrhq_scatter_shifted_offset (base, offset, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * addr, uint16x8_t value)
+{
+ vstrhq_u16 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
+
+void
+foo1 (uint16_t * addr, uint16x8_t value)
+{
+ vstrhq (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint16_t * addr, uint32x4_t value)
+{
+ vstrhq_u32 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
+
+void
+foo1 (uint16_t * addr, uint32x4_t value)
+{
+ vstrhq (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrh.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (float32_t * addr, float32x4_t value)
+{
+ vstrwq_f32 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
+
+void
+foo1 (float32_t * addr, float32x4_t value)
+{
+ vstrwq (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (float32_t * addr, float32x4_t value, mve_pred16_t p)
+{
+ vstrwq_p_f32 (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrwt.32" } } */
+
+void
+foo1 (float32_t * addr, float32x4_t value, mve_pred16_t p)
+{
+ vstrwq_p (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrwt.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int32_t * addr, int32x4_t value, mve_pred16_t p)
+{
+ vstrwq_p_s32 (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrwt.32" } } */
+
+void
+foo1 (int32_t * addr, int32x4_t value, mve_pred16_t p)
+{
+ vstrwq_p (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrwt.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint32_t * addr, uint32x4_t value, mve_pred16_t p)
+{
+ vstrwq_p_u32 (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrwt.32" } } */
+
+void
+foo1 (uint32_t * addr, uint32x4_t value, mve_pred16_t p)
+{
+ vstrwq_p (addr, value, p);
+}
+
+/* { dg-final { scan-assembler "vstrwt.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (int32_t * addr, int32x4_t value)
+{
+ vstrwq_s32 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
+
+void
+foo1 (int32_t * addr, int32x4_t value)
+{
+ vstrwq (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+void
+foo (uint32_t * addr, uint32x4_t value)
+{
+ vstrwq_u32 (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */
+
+void
+foo1 (uint32_t * addr, uint32x4_t value)
+{
+ vstrwq (addr, value);
+}
+
+/* { dg-final { scan-assembler "vstrw.32" } } */