This patch adds support for the following MVE ACLE intrinsics with quaternary operands:
vsriq_m_n_s8, vsubq_m_s8, vsubq_x_s8, vcvtq_m_n_f16_u16, vcvtq_x_n_f16_u16,
vqshluq_m_n_s8, vabavq_p_s8, vsriq_m_n_u8, vshlq_m_u8, vshlq_x_u8, vsubq_m_u8,
vsubq_x_u8, vabavq_p_u8, vshlq_m_s8, vshlq_x_s8, vcvtq_m_n_f16_s16,
vcvtq_x_n_f16_s16, vsriq_m_n_s16, vsubq_m_s16, vsubq_x_s16, vcvtq_m_n_f32_u32,
vcvtq_x_n_f32_u32, vqshluq_m_n_s16, vabavq_p_s16, vsriq_m_n_u16,
vshlq_m_u16, vshlq_x_u16, vsubq_m_u16, vsubq_x_u16, vabavq_p_u16, vshlq_m_s16,
vshlq_x_s16, vcvtq_m_n_f32_s32, vcvtq_x_n_f32_s32, vsriq_m_n_s32, vsubq_m_s32,
vsubq_x_s32, vqshluq_m_n_s32, vabavq_p_s32, vsriq_m_n_u32, vshlq_m_u32,
vshlq_x_u32, vsubq_m_u32, vsubq_x_u32, vabavq_p_u32, vshlq_m_s32, vshlq_x_s32.
Please refer to the M-profile Vector Extension (MVE) intrinsics documentation [1] for more details.
[1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics
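
For illustration, here is a minimal usage sketch (not part of the patch; the function names are made up) showing the quaternary form: besides the vector operands, each intrinsic takes an inactive/accumulator operand and an mve_pred16_t predicate. It assumes the arm_mve.h from this series and compiles with -march=armv8.1-m.main+mve -mfloat-abi=hard -O2:

#include "arm_mve.h"

int16x8_t
predicated_sub (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
{
  /* Lanes whose predicate bits are set compute a - b; the remaining
     lanes are taken from INACTIVE.  The polymorphic variant
     vsubq_m (inactive, a, b, p) resolves to the same builtin.  */
  return vsubq_m_s16 (inactive, a, b, p);
}

uint32_t
predicated_abd_acc (uint32_t acc, int16x8_t a, int16x8_t b, mve_pred16_t p)
{
  /* Accumulates the absolute differences |a[i] - b[i]| of the
     predicated lanes into ACC.  */
  return vabavq_p_s16 (acc, a, b, p);
}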
gcc/ChangeLog:

2020-03-18  Andre Vieira  <andre.simoesdiasvieira@arm.com>
	    Mihail Ionescu  <mihail.ionescu@arm.com>
	    Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
* config/arm/arm-builtins.c (QUADOP_UNONE_UNONE_NONE_NONE_UNONE_QUALIFIERS):
Define builtin qualifier.
(QUADOP_NONE_NONE_NONE_NONE_UNONE_QUALIFIERS): Likewise.
(QUADOP_NONE_NONE_NONE_IMM_UNONE_QUALIFIERS): Likewise.
(QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE_QUALIFIERS): Likewise.
(QUADOP_UNONE_UNONE_NONE_IMM_UNONE_QUALIFIERS): Likewise.
(QUADOP_NONE_NONE_UNONE_IMM_UNONE_QUALIFIERS): Likewise.
(QUADOP_UNONE_UNONE_UNONE_IMM_UNONE_QUALIFIERS): Likewise.
(QUADOP_UNONE_UNONE_UNONE_NONE_UNONE_QUALIFIERS): Likewise.
* config/arm/arm_mve.h (vsriq_m_n_s8): Define macro.
(vsubq_m_s8): Likewise.
(vcvtq_m_n_f16_u16): Likewise.
(vqshluq_m_n_s8): Likewise.
(vabavq_p_s8): Likewise.
(vsriq_m_n_u8): Likewise.
(vshlq_m_u8): Likewise.
(vsubq_m_u8): Likewise.
(vabavq_p_u8): Likewise.
(vshlq_m_s8): Likewise.
(vcvtq_m_n_f16_s16): Likewise.
(vsriq_m_n_s16): Likewise.
(vsubq_m_s16): Likewise.
(vcvtq_m_n_f32_u32): Likewise.
(vqshluq_m_n_s16): Likewise.
(vabavq_p_s16): Likewise.
(vsriq_m_n_u16): Likewise.
(vshlq_m_u16): Likewise.
(vsubq_m_u16): Likewise.
(vabavq_p_u16): Likewise.
(vshlq_m_s16): Likewise.
(vcvtq_m_n_f32_s32): Likewise.
(vsriq_m_n_s32): Likewise.
(vsubq_m_s32): Likewise.
(vqshluq_m_n_s32): Likewise.
(vabavq_p_s32): Likewise.
(vsriq_m_n_u32): Likewise.
(vshlq_m_u32): Likewise.
(vsubq_m_u32): Likewise.
(vabavq_p_u32): Likewise.
(vshlq_m_s32): Likewise.
(__arm_vsriq_m_n_s8): Define intrinsic.
(__arm_vsubq_m_s8): Likewise.
(__arm_vqshluq_m_n_s8): Likewise.
(__arm_vabavq_p_s8): Likewise.
(__arm_vsriq_m_n_u8): Likewise.
(__arm_vshlq_m_u8): Likewise.
(__arm_vsubq_m_u8): Likewise.
(__arm_vabavq_p_u8): Likewise.
(__arm_vshlq_m_s8): Likewise.
(__arm_vsriq_m_n_s16): Likewise.
(__arm_vsubq_m_s16): Likewise.
(__arm_vqshluq_m_n_s16): Likewise.
(__arm_vabavq_p_s16): Likewise.
(__arm_vsriq_m_n_u16): Likewise.
(__arm_vshlq_m_u16): Likewise.
(__arm_vsubq_m_u16): Likewise.
(__arm_vabavq_p_u16): Likewise.
(__arm_vshlq_m_s16): Likewise.
(__arm_vsriq_m_n_s32): Likewise.
(__arm_vsubq_m_s32): Likewise.
(__arm_vqshluq_m_n_s32): Likewise.
(__arm_vabavq_p_s32): Likewise.
(__arm_vsriq_m_n_u32): Likewise.
(__arm_vshlq_m_u32): Likewise.
(__arm_vsubq_m_u32): Likewise.
(__arm_vabavq_p_u32): Likewise.
(__arm_vshlq_m_s32): Likewise.
(__arm_vcvtq_m_n_f16_u16): Likewise.
(__arm_vcvtq_m_n_f16_s16): Likewise.
(__arm_vcvtq_m_n_f32_u32): Likewise.
(__arm_vcvtq_m_n_f32_s32): Likewise.
(vcvtq_m_n): Define polymorphic variant.
(vqshluq_m): Likewise.
(vshlq_m): Likewise.
(vsriq_m): Likewise.
(vsubq_m): Likewise.
(vabavq_p): Likewise.
* config/arm/arm_mve_builtins.def
(QUADOP_UNONE_UNONE_NONE_NONE_UNONE_QUALIFIERS): Use builtin qualifier.
(QUADOP_NONE_NONE_NONE_NONE_UNONE_QUALIFIERS): Likewise.
(QUADOP_NONE_NONE_NONE_IMM_UNONE_QUALIFIERS): Likewise.
(QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE_QUALIFIERS): Likewise.
(QUADOP_UNONE_UNONE_NONE_IMM_UNONE_QUALIFIERS): Likewise.
(QUADOP_NONE_NONE_UNONE_IMM_UNONE_QUALIFIERS): Likewise.
(QUADOP_UNONE_UNONE_UNONE_IMM_UNONE_QUALIFIERS): Likewise.
(QUADOP_UNONE_UNONE_UNONE_NONE_UNONE_QUALIFIERS): Likewise.
* config/arm/mve.md (VABAVQ_P): Define iterator.
(VSHLQ_M): Likewise.
(VSRIQ_M_N): Likewise.
(VSUBQ_M): Likewise.
(VCVTQ_M_N_TO_F): Likewise.
(mve_vabavq_p_<supf><mode>): Define RTL pattern.
(mve_vqshluq_m_n_s<mode>): Likewise.
(mve_vshlq_m_<supf><mode>): Likewise.
(mve_vsriq_m_n_<supf><mode>): Likewise.
(mve_vsubq_m_<supf><mode>): Likewise.
(mve_vcvtq_m_n_to_f_<supf><mode>): Likewise.
gcc/testsuite/ChangeLog:

2020-03-18  Andre Vieira  <andre.simoesdiasvieira@arm.com>
	    Mihail Ionescu  <mihail.ionescu@arm.com>
	    Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
* gcc.target/arm/mve/intrinsics/vabavq_p_s16.c: New test.
* gcc.target/arm/mve/intrinsics/vabavq_p_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vabavq_p_s8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vabavq_p_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vabavq_p_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vabavq_p_u8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vcvtq_m_n_f16_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vcvtq_m_n_f16_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vcvtq_m_n_f32_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vcvtq_m_n_f32_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vqshluq_m_n_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vqshluq_m_n_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vqshluq_m_n_s8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vshlq_m_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vshlq_m_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vshlq_m_s8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vshlq_m_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vshlq_m_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vshlq_m_u8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsriq_m_n_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsriq_m_n_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsriq_m_n_s8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsriq_m_n_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsriq_m_n_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsriq_m_n_u8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsubq_m_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsubq_m_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsubq_m_s8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsubq_m_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsubq_m_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vsubq_m_u8.c: Likewise.
+2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Mihail Ionescu <mihail.ionescu@arm.com>
+ Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+
+ * config/arm/arm-builtins.c (QUADOP_UNONE_UNONE_NONE_NONE_UNONE_QUALIFIERS):
+ Define builtin qualifier.
+ (QUADOP_NONE_NONE_NONE_NONE_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_NONE_NONE_NONE_IMM_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_UNONE_UNONE_NONE_IMM_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_NONE_NONE_UNONE_IMM_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_UNONE_UNONE_UNONE_NONE_UNONE_QUALIFIERS): Likewise.
+ * config/arm/arm_mve.h (vsriq_m_n_s8): Define macro.
+ (vsubq_m_s8): Likewise.
+ (vcvtq_m_n_f16_u16): Likewise.
+ (vqshluq_m_n_s8): Likewise.
+ (vabavq_p_s8): Likewise.
+ (vsriq_m_n_u8): Likewise.
+ (vshlq_m_u8): Likewise.
+ (vsubq_m_u8): Likewise.
+ (vabavq_p_u8): Likewise.
+ (vshlq_m_s8): Likewise.
+ (vcvtq_m_n_f16_s16): Likewise.
+ (vsriq_m_n_s16): Likewise.
+ (vsubq_m_s16): Likewise.
+ (vcvtq_m_n_f32_u32): Likewise.
+ (vqshluq_m_n_s16): Likewise.
+ (vabavq_p_s16): Likewise.
+ (vsriq_m_n_u16): Likewise.
+ (vshlq_m_u16): Likewise.
+ (vsubq_m_u16): Likewise.
+ (vabavq_p_u16): Likewise.
+ (vshlq_m_s16): Likewise.
+ (vcvtq_m_n_f32_s32): Likewise.
+ (vsriq_m_n_s32): Likewise.
+ (vsubq_m_s32): Likewise.
+ (vqshluq_m_n_s32): Likewise.
+ (vabavq_p_s32): Likewise.
+ (vsriq_m_n_u32): Likewise.
+ (vshlq_m_u32): Likewise.
+ (vsubq_m_u32): Likewise.
+ (vabavq_p_u32): Likewise.
+ (vshlq_m_s32): Likewise.
+ (__arm_vsriq_m_n_s8): Define intrinsic.
+ (__arm_vsubq_m_s8): Likewise.
+ (__arm_vqshluq_m_n_s8): Likewise.
+ (__arm_vabavq_p_s8): Likewise.
+ (__arm_vsriq_m_n_u8): Likewise.
+ (__arm_vshlq_m_u8): Likewise.
+ (__arm_vsubq_m_u8): Likewise.
+ (__arm_vabavq_p_u8): Likewise.
+ (__arm_vshlq_m_s8): Likewise.
+ (__arm_vsriq_m_n_s16): Likewise.
+ (__arm_vsubq_m_s16): Likewise.
+ (__arm_vqshluq_m_n_s16): Likewise.
+ (__arm_vabavq_p_s16): Likewise.
+ (__arm_vsriq_m_n_u16): Likewise.
+ (__arm_vshlq_m_u16): Likewise.
+ (__arm_vsubq_m_u16): Likewise.
+ (__arm_vabavq_p_u16): Likewise.
+ (__arm_vshlq_m_s16): Likewise.
+ (__arm_vsriq_m_n_s32): Likewise.
+ (__arm_vsubq_m_s32): Likewise.
+ (__arm_vqshluq_m_n_s32): Likewise.
+ (__arm_vabavq_p_s32): Likewise.
+ (__arm_vsriq_m_n_u32): Likewise.
+ (__arm_vshlq_m_u32): Likewise.
+ (__arm_vsubq_m_u32): Likewise.
+ (__arm_vabavq_p_u32): Likewise.
+ (__arm_vshlq_m_s32): Likewise.
+ (__arm_vcvtq_m_n_f16_u16): Likewise.
+ (__arm_vcvtq_m_n_f16_s16): Likewise.
+ (__arm_vcvtq_m_n_f32_u32): Likewise.
+ (__arm_vcvtq_m_n_f32_s32): Likewise.
+ (vcvtq_m_n): Define polymorphic variant.
+ (vqshluq_m): Likewise.
+ (vshlq_m): Likewise.
+ (vsriq_m): Likewise.
+ (vsubq_m): Likewise.
+ (vabavq_p): Likewise.
+ * config/arm/arm_mve_builtins.def
+ (QUADOP_UNONE_UNONE_NONE_NONE_UNONE_QUALIFIERS): Use builtin qualifier.
+ (QUADOP_NONE_NONE_NONE_NONE_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_NONE_NONE_NONE_IMM_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_UNONE_UNONE_NONE_IMM_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_NONE_NONE_UNONE_IMM_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE_QUALIFIERS): Likewise.
+ (QUADOP_UNONE_UNONE_UNONE_NONE_UNONE_QUALIFIERS): Likewise.
+ * config/arm/mve.md (VABAVQ_P): Define iterator.
+ (VSHLQ_M): Likewise.
+ (VSRIQ_M_N): Likewise.
+ (VSUBQ_M): Likewise.
+ (VCVTQ_M_N_TO_F): Likewise.
+ (mve_vabavq_p_<supf><mode>): Define RTL pattern.
+ (mve_vqshluq_m_n_s<mode>): Likewise.
+ (mve_vshlq_m_<supf><mode>): Likewise.
+ (mve_vsriq_m_n_<supf><mode>): Likewise.
+ (mve_vsubq_m_<supf><mode>): Likewise.
+ (mve_vcvtq_m_n_to_f_<supf><mode>): Likewise.
+
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
#define TERNOP_NONE_NONE_NONE_NONE_QUALIFIERS \
(arm_ternop_none_none_none_none_qualifiers)
+static enum arm_type_qualifiers
+arm_quadop_unone_unone_none_none_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_none, qualifier_none,
+ qualifier_unsigned };
+#define QUADOP_UNONE_UNONE_NONE_NONE_UNONE_QUALIFIERS \
+ (arm_quadop_unone_unone_none_none_unone_qualifiers)
+
+static enum arm_type_qualifiers
+arm_quadop_none_none_none_none_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none, qualifier_none,
+ qualifier_unsigned };
+#define QUADOP_NONE_NONE_NONE_NONE_UNONE_QUALIFIERS \
+ (arm_quadop_none_none_none_none_unone_qualifiers)
+
+static enum arm_type_qualifiers
+arm_quadop_none_none_none_imm_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none, qualifier_immediate,
+ qualifier_unsigned };
+#define QUADOP_NONE_NONE_NONE_IMM_UNONE_QUALIFIERS \
+ (arm_quadop_none_none_none_imm_unone_qualifiers)
+
+static enum arm_type_qualifiers
+arm_quadop_unone_unone_unone_unone_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned,
+ qualifier_unsigned, qualifier_unsigned };
+#define QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE_QUALIFIERS \
+ (arm_quadop_unone_unone_unone_unone_unone_qualifiers)
+
+static enum arm_type_qualifiers
+arm_quadop_unone_unone_none_imm_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_none,
+ qualifier_immediate, qualifier_unsigned };
+#define QUADOP_UNONE_UNONE_NONE_IMM_UNONE_QUALIFIERS \
+ (arm_quadop_unone_unone_none_imm_unone_qualifiers)
+
+static enum arm_type_qualifiers
+arm_quadop_none_none_unone_imm_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_unsigned, qualifier_immediate,
+ qualifier_unsigned };
+#define QUADOP_NONE_NONE_UNONE_IMM_UNONE_QUALIFIERS \
+ (arm_quadop_none_none_unone_imm_unone_qualifiers)
+
+static enum arm_type_qualifiers
+arm_quadop_unone_unone_unone_imm_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned,
+ qualifier_immediate, qualifier_unsigned };
+#define QUADOP_UNONE_UNONE_UNONE_IMM_UNONE_QUALIFIERS \
+ (arm_quadop_unone_unone_unone_imm_unone_qualifiers)
+
+static enum arm_type_qualifiers
+arm_quadop_unone_unone_unone_none_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned,
+ qualifier_none, qualifier_unsigned };
+#define QUADOP_UNONE_UNONE_UNONE_NONE_UNONE_QUALIFIERS \
+ (arm_quadop_unone_unone_unone_none_unone_qualifiers)
+
/* End of Qualifier for MVE builtins. */
/* void ([T element type] *, T, immediate). */
#define vqmovnbq_m_u32(__a, __b, __p) __arm_vqmovnbq_m_u32(__a, __b, __p)
#define vqmovntq_m_u32(__a, __b, __p) __arm_vqmovntq_m_u32(__a, __b, __p)
#define vrev32q_m_u16(__inactive, __a, __p) __arm_vrev32q_m_u16(__inactive, __a, __p)
+#define vsriq_m_n_s8(__a, __b, __imm, __p) __arm_vsriq_m_n_s8(__a, __b, __imm, __p)
+#define vsubq_m_s8(__inactive, __a, __b, __p) __arm_vsubq_m_s8(__inactive, __a, __b, __p)
+#define vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p)
+#define vqshluq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s8(__inactive, __a, __imm, __p)
+#define vabavq_p_s8(__a, __b, __c, __p) __arm_vabavq_p_s8(__a, __b, __c, __p)
+#define vsriq_m_n_u8(__a, __b, __imm, __p) __arm_vsriq_m_n_u8(__a, __b, __imm, __p)
+#define vshlq_m_u8(__inactive, __a, __b, __p) __arm_vshlq_m_u8(__inactive, __a, __b, __p)
+#define vsubq_m_u8(__inactive, __a, __b, __p) __arm_vsubq_m_u8(__inactive, __a, __b, __p)
+#define vabavq_p_u8(__a, __b, __c, __p) __arm_vabavq_p_u8(__a, __b, __c, __p)
+#define vshlq_m_s8(__inactive, __a, __b, __p) __arm_vshlq_m_s8(__inactive, __a, __b, __p)
+#define vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p)
+#define vsriq_m_n_s16(__a, __b, __imm, __p) __arm_vsriq_m_n_s16(__a, __b, __imm, __p)
+#define vsubq_m_s16(__inactive, __a, __b, __p) __arm_vsubq_m_s16(__inactive, __a, __b, __p)
+#define vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p)
+#define vqshluq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s16(__inactive, __a, __imm, __p)
+#define vabavq_p_s16(__a, __b, __c, __p) __arm_vabavq_p_s16(__a, __b, __c, __p)
+#define vsriq_m_n_u16(__a, __b, __imm, __p) __arm_vsriq_m_n_u16(__a, __b, __imm, __p)
+#define vshlq_m_u16(__inactive, __a, __b, __p) __arm_vshlq_m_u16(__inactive, __a, __b, __p)
+#define vsubq_m_u16(__inactive, __a, __b, __p) __arm_vsubq_m_u16(__inactive, __a, __b, __p)
+#define vabavq_p_u16(__a, __b, __c, __p) __arm_vabavq_p_u16(__a, __b, __c, __p)
+#define vshlq_m_s16(__inactive, __a, __b, __p) __arm_vshlq_m_s16(__inactive, __a, __b, __p)
+#define vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p)
+#define vsriq_m_n_s32(__a, __b, __imm, __p) __arm_vsriq_m_n_s32(__a, __b, __imm, __p)
+#define vsubq_m_s32(__inactive, __a, __b, __p) __arm_vsubq_m_s32(__inactive, __a, __b, __p)
+#define vqshluq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s32(__inactive, __a, __imm, __p)
+#define vabavq_p_s32(__a, __b, __c, __p) __arm_vabavq_p_s32(__a, __b, __c, __p)
+#define vsriq_m_n_u32(__a, __b, __imm, __p) __arm_vsriq_m_n_u32(__a, __b, __imm, __p)
+#define vshlq_m_u32(__inactive, __a, __b, __p) __arm_vshlq_m_u32(__inactive, __a, __b, __p)
+#define vsubq_m_u32(__inactive, __a, __b, __p) __arm_vsubq_m_u32(__inactive, __a, __b, __p)
+#define vabavq_p_u32(__a, __b, __c, __p) __arm_vabavq_p_u32(__a, __b, __c, __p)
+#define vshlq_m_s32(__inactive, __a, __b, __p) __arm_vshlq_m_s32(__inactive, __a, __b, __p)
#endif
__extension__ extern __inline void
{
return __builtin_mve_vrev32q_m_uv8hi (__inactive, __a, __p);
}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_sv16qi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_m_n_s8 (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshluq_m_n_sv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_uv16qi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_uv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_m_n_s16 (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshluq_m_n_sv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_uv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_m_n_s32 (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshluq_m_n_sv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_uv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv4si (__inactive, __a, __b, __p);
+}
+
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
__extension__ extern __inline void
return __builtin_mve_vcvtq_m_from_f_uv4si (__inactive, __a, __p);
}
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_f16_u16 (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_f16_s16 (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_f32_u32 (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_f32_s32 (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__inactive, __a, __imm6, __p);
+}
+
#endif
enum {
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+#define vcvtq_m_n(p0,p1,p2,p3) __arm_vcvtq_m_n(p0,p1,p2,p3)
+#define __arm_vcvtq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_n_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_n_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_n_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_n_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
#define vabsq_m(p0,p1,p2) __arm_vabsq_m(p0,p1,p2)
#define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
-#define vcmpeqq_m_n(p0,p1,p2) __arm_vcmpeqq_m_n(p0,p1,p2)
-#define __arm_vcmpeqq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
- int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
- int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
- int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
- int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
- int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})
-
#define vrndxq_m(p0,p1,p2) __arm_vrndxq_m(p0,p1,p2)
#define __arm_vrndxq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
#define vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p(p0,p1,p2)
#define __arm_vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p_s32(p0,p1,p2)
+#define vsubq_m(p0,p1,p2,p3) __arm_vsubq_m(p0,p1,p2,p3)
+#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define vabavq_p(p0,p1,p2,p3) __arm_vabavq_p(p0,p1,p2,p3)
+#define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
#endif /* MVE Integer. */
#define vqabsq_m(p0,p1,p2) __arm_vqabsq_m(p0,p1,p2)
int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+#define vqshluq_m(p0,p1,p2,p3) __arm_vqshluq_m(p0,p1,p2,p3)
+#define __arm_vqshluq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshluq_m_n_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshluq_m_n_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshluq_m_n_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
+
+#define vshlq_m(p0,p1,p2,p3) __arm_vshlq_m(p0,p1,p2,p3)
+#define __arm_vshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define vsriq_m(p0,p1,p2,p3) __arm_vsriq_m(p0,p1,p2,p3)
+#define __arm_vsriq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
#ifdef __cplusplus
}
#endif
VAR1 (TERNOP_NONE_NONE_NONE_NONE, vrmlsldavhaxq_s, v4si)
VAR1 (TERNOP_NONE_NONE_NONE_NONE, vrmlsldavhaq_s, v4si)
VAR1 (TERNOP_NONE_NONE_NONE_NONE, vrmlaldavhaxq_s, v4si)
+VAR3 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vsriq_m_n_s, v16qi, v8hi, v4si)
+VAR3 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vsriq_m_n_u, v16qi, v8hi, v4si)
+VAR3 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vsubq_m_s, v16qi, v8hi, v4si)
+VAR3 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vsubq_m_u, v16qi, v8hi, v4si)
+VAR2 (QUADOP_NONE_NONE_UNONE_IMM_UNONE, vcvtq_m_n_to_f_u, v8hf, v4sf)
+VAR2 (QUADOP_NONE_NONE_NONE_IMM_UNONE, vcvtq_m_n_to_f_s, v8hf, v4sf)
+VAR3 (QUADOP_UNONE_UNONE_NONE_IMM_UNONE, vqshluq_m_n_s, v16qi, v8hi, v4si)
+VAR3 (QUADOP_UNONE_UNONE_NONE_NONE_UNONE, vabavq_p_s, v16qi, v8hi, v4si)
+VAR3 (QUADOP_UNONE_UNONE_UNONE_UNONE_UNONE, vabavq_p_u, v16qi, v8hi, v4si)
+VAR3 (QUADOP_UNONE_UNONE_UNONE_NONE_UNONE, vshlq_m_u, v16qi, v8hi, v4si)
+VAR3 (QUADOP_NONE_NONE_NONE_NONE_UNONE, vshlq_m_s, v16qi, v8hi, v4si)
VCVTPQ_M_S VCVTPQ_M_U VCVTQ_M_N_FROM_F_S VCVTNQ_M_U
VREV16Q_M_S VREV16Q_M_U VREV32Q_M VCVTQ_M_FROM_F_U
VCVTQ_M_FROM_F_S VRMLALDAVHQ_P_U VADDLVAQ_P_U
- VCVTQ_M_N_FROM_F_U])
+ VCVTQ_M_N_FROM_F_U VQSHLUQ_M_N_S VABAVQ_P_S
+ VABAVQ_P_U VSHLQ_M_S VSHLQ_M_U VSRIQ_M_N_S
+ VSRIQ_M_N_U VSUBQ_M_U VSUBQ_M_S VCVTQ_M_N_TO_F_U
+ VCVTQ_M_N_TO_F_S])
(define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF")
(V8HF "V8HI") (V4SF "V4SI")])
(VCVTQ_M_N_FROM_F_U "u") (VCVTQ_M_FROM_F_S "s")
(VCVTQ_M_FROM_F_U "u") (VRMLALDAVHQ_P_U "u")
(VRMLALDAVHQ_P_S "s") (VADDLVAQ_P_U "u")
- (VCVTQ_M_N_FROM_F_S "s")])
+ (VCVTQ_M_N_FROM_F_S "s") (VABAVQ_P_U "u")
+ (VABAVQ_P_S "s") (VSHLQ_M_S "s") (VSHLQ_M_U "u")
+ (VSRIQ_M_N_S "s") (VSRIQ_M_N_U "u") (VSUBQ_M_S "s")
+ (VSUBQ_M_U "u") (VCVTQ_M_N_TO_F_S "s")
+ (VCVTQ_M_N_TO_F_U "u")])
(define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32")
(VCTP64Q "64") (VCTP8Q_M "8") (VCTP16Q_M "16")
(define_int_iterator VCVTQ_M_FROM_F [VCVTQ_M_FROM_F_U VCVTQ_M_FROM_F_S])
(define_int_iterator VRMLALDAVHQ_P [VRMLALDAVHQ_P_S VRMLALDAVHQ_P_U])
(define_int_iterator VADDLVAQ_P [VADDLVAQ_P_U VADDLVAQ_P_S])
+(define_int_iterator VABAVQ_P [VABAVQ_P_S VABAVQ_P_U])
+(define_int_iterator VSHLQ_M [VSHLQ_M_S VSHLQ_M_U])
+(define_int_iterator VSRIQ_M_N [VSRIQ_M_N_S VSRIQ_M_N_U])
+(define_int_iterator VSUBQ_M [VSUBQ_M_U VSUBQ_M_S])
+(define_int_iterator VCVTQ_M_N_TO_F [VCVTQ_M_N_TO_F_U VCVTQ_M_N_TO_F_S])
(define_insn "*mve_mov<mode>"
[(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w,w,r,w,Us")
VSHRNTQ_N))
]
"TARGET_HAVE_MVE"
- "vshrnt.i%#<V_sz_elem> %q0, %q2, %3"
+ "vshrnt.i%#<V_sz_elem>\t%q0, %q2, %3"
[(set_attr "type" "mve_move")
])
VCVTMQ_M))
]
"TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vpst\;vcvtmt.<supf>%#<V_sz_elem>.f%#<V_sz_elem> %q0, %q2"
+ "vpst\;vcvtmt.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q2"
[(set_attr "type" "mve_move")
(set_attr "length""8")])
VCVTPQ_M))
]
"TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vpst\;vcvtpt.<supf>%#<V_sz_elem>.f%#<V_sz_elem> %q0, %q2"
+ "vpst\;vcvtpt.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q2"
[(set_attr "type" "mve_move")
(set_attr "length""8")])
VCVTNQ_M))
]
"TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vpst\;vcvtnt.<supf>%#<V_sz_elem>.f%#<V_sz_elem> %q0, %q2"
+ "vpst\;vcvtnt.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q2"
[(set_attr "type" "mve_move")
(set_attr "length""8")])
VCVTQ_M_N_FROM_F))
]
"TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vpst\;vcvtt.<supf>%#<V_sz_elem>.f%#<V_sz_elem> %q0, %q2, %3"
+ "vpst\;vcvtt.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q2, %3"
[(set_attr "type" "mve_move")
(set_attr "length""8")])
VCVTQ_M_FROM_F))
]
"TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vpst\;vcvtt.<supf>%#<V_sz_elem>.f%#<V_sz_elem> %q0, %q2"
+ "vpst\;vcvtt.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q2"
[(set_attr "type" "mve_move")
(set_attr "length""8")])
"vrmlsldavha.s32 %Q0, %R0, %q2, %q3"
[(set_attr "type" "mve_move")
])
+
+;;
+;; [vabavq_p_s, vabavq_p_u])
+;;
+(define_insn "mve_vabavq_p_<supf><mode>"
+ [
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")
+ (match_operand:MVE_2 3 "s_register_operand" "w")
+ (match_operand:HI 4 "vpr_register_operand" "Up")]
+ VABAVQ_P))
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\;vabavt.<supf>%#<V_sz_elem>\t%0, %q2, %q3"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqshluq_m_n_s])
+;;
+(define_insn "mve_vqshluq_m_n_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")
+ (match_operand:SI 3 "mve_imm_7" "Ra")
+ (match_operand:HI 4 "vpr_register_operand" "Up")]
+ VQSHLUQ_M_N_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\n\tvqshlut.s%#<V_sz_elem>\t%q0, %q2, %3"
+ [(set_attr "type" "mve_move")])
+
+;;
+;; [vshlq_m_s, vshlq_m_u])
+;;
+(define_insn "mve_vshlq_m_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")
+ (match_operand:MVE_2 3 "s_register_operand" "w")
+ (match_operand:HI 4 "vpr_register_operand" "Up")]
+ VSHLQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\;vshlt.<supf>%#<V_sz_elem>\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")])
+
+;;
+;; [vsriq_m_n_s, vsriq_m_n_u])
+;;
+(define_insn "mve_vsriq_m_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")
+ (match_operand:SI 3 "mve_imm_selective_upto_8" "Rg")
+ (match_operand:HI 4 "vpr_register_operand" "Up")]
+ VSRIQ_M_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\;vsrit.%#<V_sz_elem>\t%q0, %q2, %3"
+ [(set_attr "type" "mve_move")])
+
+;;
+;; [vsubq_m_u, vsubq_m_s])
+;;
+(define_insn "mve_vsubq_m_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")
+ (match_operand:MVE_2 3 "s_register_operand" "w")
+ (match_operand:HI 4 "vpr_register_operand" "Up")]
+ VSUBQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\;vsubt.i%#<V_sz_elem>\t%q0, %q2, %q3"
+ [(set_attr "type" "mve_move")])
+
+;;
+;; [vcvtq_m_n_to_f_u, vcvtq_m_n_to_f_s])
+;;
+(define_insn "mve_vcvtq_m_n_to_f_<supf><mode>"
+ [
+ (set (match_operand:MVE_0 0 "s_register_operand" "=w")
+ (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
+ (match_operand:<MVE_CNVT> 2 "s_register_operand" "w")
+ (match_operand:SI 3 "mve_imm_16" "Rd")
+ (match_operand:HI 4 "vpr_register_operand" "Up")]
+ VCVTQ_M_N_TO_F))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+ "vpst\;vcvtt.f%#<V_sz_elem>.<supf>%#<V_sz_elem>\t%q0, %q2, %3"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Mihail Ionescu <mihail.ionescu@arm.com>
+ Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+
+ * gcc.target/arm/mve/intrinsics/vabavq_p_s16.c: New test.
+ * gcc.target/arm/mve/intrinsics/vabavq_p_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabavq_p_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabavq_p_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabavq_p_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabavq_p_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_f16_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_f16_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_f32_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_m_n_f32_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshluq_m_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshluq_m_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshluq_m_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_m_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_m_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_m_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_m_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_m_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_m_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsriq_m_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsriq_m_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsriq_m_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsriq_m_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsriq_m_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsriq_m_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_m_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_m_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_m_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_m_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_m_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_m_u8.c: Likewise.
+
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+{
+ return vabavq_p_s16 (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.s16" } } */
+
+uint32_t
+foo1 (uint32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p)
+{
+ return vabavq_p (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.s16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+{
+ return vabavq_p_s32 (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.s32" } } */
+
+uint32_t
+foo1 (uint32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p)
+{
+ return vabavq_p (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.s32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
+{
+ return vabavq_p_s8 (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.s8" } } */
+
+uint32_t
+foo1 (uint32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p)
+{
+ return vabavq_p (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.s8" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
+{
+ return vabavq_p_u16 (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.u16" } } */
+
+uint32_t
+foo1 (uint32_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p)
+{
+ return vabavq_p (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.u16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
+{
+ return vabavq_p_u32 (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.u32" } } */
+
+uint32_t
+foo1 (uint32_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p)
+{
+ return vabavq_p (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.u32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, uint8x16_t b, uint8x16_t c, mve_pred16_t p)
+{
+ return vabavq_p_u8 (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.u8" } } */
+
+uint32_t
+foo1 (uint32_t a, uint8x16_t b, uint8x16_t c, mve_pred16_t p)
+{
+ return vabavq_p (a, b, c, p);
+}
+
+/* { dg-final { scan-assembler "vabavt.u8" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t inactive, int16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_m_n_f16_s16 (inactive, a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
+
+float16x8_t
+foo1 (float16x8_t inactive, int16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_m_n (inactive, a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_m_n_f16_u16 (inactive, a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
+
+float16x8_t
+foo1 (float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_m_n (inactive, a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t inactive, int32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_m_n_f32_s32 (inactive, a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
+
+float32x4_t
+foo1 (float32x4_t inactive, int32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_m_n (inactive, a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_m_n_f32_u32 (inactive, a, 16, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
+
+float32x4_t
+foo1 (float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_m_n (inactive, a, 16, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t inactive, int16x8_t a, mve_pred16_t p)
+{
+ return vqshluq_m_n_s16 (inactive, a, 7, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vqshlut.s16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t inactive, int16x8_t a, mve_pred16_t p)
+{
+ return vqshluq_m (inactive, a, 7, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t inactive, int32x4_t a, mve_pred16_t p)
+{
+ return vqshluq_m_n_s32 (inactive, a, 7, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vqshlut.s32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t inactive, int32x4_t a, mve_pred16_t p)
+{
+ return vqshluq_m (inactive, a, 7, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t inactive, int8x16_t a, mve_pred16_t p)
+{
+ return vqshluq_m_n_s8 (inactive, a, 7, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vqshlut.s8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t inactive, int8x16_t a, mve_pred16_t p)
+{
+ return vqshluq_m (inactive, a, 7, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vshlq_m_s16 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vshlq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vshlq_m_s32 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vshlq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vshlq_m_s8 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vshlq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t inactive, uint16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vshlq_m_u16 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t inactive, uint16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vshlq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t inactive, uint32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vshlq_m_u32 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t inactive, uint32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vshlq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t inactive, uint8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vshlq_m_u8 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t inactive, uint8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vshlq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vsriq_m_n_s16 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsrit.16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vsriq_m (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vsriq_m_n_s32 (a, b, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsrit.32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vsriq_m (a, b, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vsriq_m_n_s8 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsrit.8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vsriq_m (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vsriq_m_n_u16 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsrit.16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vsriq_m (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vsriq_m_n_u32 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsrit.32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vsriq_m (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vsriq_m_n_u8 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsrit.8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vsriq_m (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vsubq_m_s16 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vsubq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vsubq_m_s32 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vsubq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vsubq_m_s8 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vsubq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vsubq_m_u16 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vsubq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vsubq_m_u32 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vsubq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vsubq_m_u8 (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vsubq_m (inactive, a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */