This patch adds support for the following MVE ACLE load intrinsics with the zeroing (``_z``) suffix.
* ``_z`` (zero): false-predicated lanes are filled with zeroes. This suffix is only used for load instructions.
vldrbq_gather_offset_z_s16, vldrbq_gather_offset_z_u8, vldrbq_gather_offset_z_s32, vldrbq_gather_offset_z_u16, vldrbq_gather_offset_z_u32, vldrbq_gather_offset_z_s8, vldrbq_z_s16, vldrbq_z_u8, vldrbq_z_s8, vldrbq_z_s32, vldrbq_z_u16, vldrbq_z_u32, vldrwq_gather_base_z_u32, vldrwq_gather_base_z_s32.
Please refer to the M-profile Vector Extension (MVE) intrinsics documentation [1] for more details; a short usage sketch follows the reference below.
[1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics
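As a rough illustration of the zeroing behaviour (not part of the patch; the function names load_low_half and gather_bytes and the predicate value 0x00ff are made up for this sketch):

#include "arm_mve.h"

/* Contiguous predicated load: with the _z forms, lanes whose predicate
   bits are clear are filled with zero rather than left unchanged.
   0x00ff sets the predicate bits for bytes 0-7, i.e. the low four
   16-bit lanes of the widened result.  */
int16x8_t
load_low_half (int8_t const *base)
{
  mve_pred16_t p = 0x00ff;
  return vldrbq_z_s16 (base, p);
}

/* Gather form: each active lane loads a byte from base + offset[lane],
   widened to 32 bits; inactive lanes are likewise zeroed.  */
uint32x4_t
gather_bytes (uint8_t const *base, uint32x4_t offsets, mve_pred16_t p)
{
  return vldrbq_gather_offset_z_u32 (base, offsets, p);
}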
gcc/ChangeLog:

2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
* config/arm/arm-builtins.c (LDRGBS_Z_QUALIFIERS): Define builtin
qualifier.
(LDRGBU_Z_QUALIFIERS): Likewise.
(LDRGS_Z_QUALIFIERS): Likewise.
(LDRGU_Z_QUALIFIERS): Likewise.
(LDRS_Z_QUALIFIERS): Likewise.
(LDRU_Z_QUALIFIERS): Likewise.
* config/arm/arm_mve.h (vldrbq_gather_offset_z_s16): Define macro.
(vldrbq_gather_offset_z_u8): Likewise.
(vldrbq_gather_offset_z_s32): Likewise.
(vldrbq_gather_offset_z_u16): Likewise.
(vldrbq_gather_offset_z_u32): Likewise.
(vldrbq_gather_offset_z_s8): Likewise.
(vldrbq_z_s16): Likewise.
(vldrbq_z_u8): Likewise.
(vldrbq_z_s8): Likewise.
(vldrbq_z_s32): Likewise.
(vldrbq_z_u16): Likewise.
(vldrbq_z_u32): Likewise.
(vldrwq_gather_base_z_u32): Likewise.
(vldrwq_gather_base_z_s32): Likewise.
(__arm_vldrbq_gather_offset_z_s8): Define intrinsic.
(__arm_vldrbq_gather_offset_z_s32): Likewise.
(__arm_vldrbq_gather_offset_z_s16): Likewise.
(__arm_vldrbq_gather_offset_z_u8): Likewise.
(__arm_vldrbq_gather_offset_z_u32): Likewise.
(__arm_vldrbq_gather_offset_z_u16): Likewise.
(__arm_vldrbq_z_s8): Likewise.
(__arm_vldrbq_z_s32): Likewise.
(__arm_vldrbq_z_s16): Likewise.
(__arm_vldrbq_z_u8): Likewise.
(__arm_vldrbq_z_u32): Likewise.
(__arm_vldrbq_z_u16): Likewise.
(__arm_vldrwq_gather_base_z_s32): Likewise.
(__arm_vldrwq_gather_base_z_u32): Likewise.
(vldrbq_gather_offset_z): Define polymorphic variant.
* config/arm/arm_mve_builtins.def (LDRGBS_Z_QUALIFIERS): Use builtin
qualifier.
(LDRGBU_Z_QUALIFIERS): Likewise.
(LDRGS_Z_QUALIFIERS): Likewise.
(LDRGU_Z_QUALIFIERS): Likewise.
(LDRS_Z_QUALIFIERS): Likewise.
(LDRU_Z_QUALIFIERS): Likewise.
* config/arm/mve.md (mve_vldrbq_gather_offset_z_<supf><mode>): Define
RTL pattern.
(mve_vldrbq_z_<supf><mode>): Likewise.
(mve_vldrwq_gather_base_z_<supf>v4si): Likewise.
gcc/testsuite/ChangeLog:
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
* gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s16.c: New test.
* gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrbq_z_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrbq_z_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrbq_z_s8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrbq_z_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrbq_z_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrbq_z_u8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_u32.c: Likewise.
+2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Mihail Ionescu <mihail.ionescu@arm.com>
+ Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+
+ * config/arm/arm-builtins.c (LDRGBS_Z_QUALIFIERS): Define builtin
+ qualifier.
+ (LDRGBU_Z_QUALIFIERS): Likewise.
+ (LDRGS_Z_QUALIFIERS): Likewise.
+ (LDRGU_Z_QUALIFIERS): Likewise.
+ (LDRS_Z_QUALIFIERS): Likewise.
+ (LDRU_Z_QUALIFIERS): Likewise.
+ * config/arm/arm_mve.h (vldrbq_gather_offset_z_s16): Define macro.
+ (vldrbq_gather_offset_z_u8): Likewise.
+ (vldrbq_gather_offset_z_s32): Likewise.
+ (vldrbq_gather_offset_z_u16): Likewise.
+ (vldrbq_gather_offset_z_u32): Likewise.
+ (vldrbq_gather_offset_z_s8): Likewise.
+ (vldrbq_z_s16): Likewise.
+ (vldrbq_z_u8): Likewise.
+ (vldrbq_z_s8): Likewise.
+ (vldrbq_z_s32): Likewise.
+ (vldrbq_z_u16): Likewise.
+ (vldrbq_z_u32): Likewise.
+ (vldrwq_gather_base_z_u32): Likewise.
+ (vldrwq_gather_base_z_s32): Likewise.
+ (__arm_vldrbq_gather_offset_z_s8): Define intrinsic.
+ (__arm_vldrbq_gather_offset_z_s32): Likewise.
+ (__arm_vldrbq_gather_offset_z_s16): Likewise.
+ (__arm_vldrbq_gather_offset_z_u8): Likewise.
+ (__arm_vldrbq_gather_offset_z_u32): Likewise.
+ (__arm_vldrbq_gather_offset_z_u16): Likewise.
+ (__arm_vldrbq_z_s8): Likewise.
+ (__arm_vldrbq_z_s32): Likewise.
+ (__arm_vldrbq_z_s16): Likewise.
+ (__arm_vldrbq_z_u8): Likewise.
+ (__arm_vldrbq_z_u32): Likewise.
+ (__arm_vldrbq_z_u16): Likewise.
+ (__arm_vldrwq_gather_base_z_s32): Likewise.
+ (__arm_vldrwq_gather_base_z_u32): Likewise.
+ (vldrbq_gather_offset_z): Define polymorphic variant.
+ * config/arm/arm_mve_builtins.def (LDRGBS_Z_QUALIFIERS): Use builtin
+ qualifier.
+ (LDRGBU_Z_QUALIFIERS): Likewise.
+ (LDRGS_Z_QUALIFIERS): Likewise.
+ (LDRGU_Z_QUALIFIERS): Likewise.
+ (LDRS_Z_QUALIFIERS): Likewise.
+ (LDRU_Z_QUALIFIERS): Likewise.
+ * config/arm/mve.md (mve_vldrbq_gather_offset_z_<supf><mode>): Define
+ RTL pattern.
+ (mve_vldrbq_z_<supf><mode>): Likewise.
+ (mve_vldrwq_gather_base_z_<supf>v4si): Likewise.
+
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
= { qualifier_unsigned, qualifier_unsigned, qualifier_immediate};
#define LDRGBU_QUALIFIERS (arm_ldrgbu_qualifiers)
+static enum arm_type_qualifiers
+arm_ldrgbs_z_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_unsigned, qualifier_immediate,
+ qualifier_unsigned};
+#define LDRGBS_Z_QUALIFIERS (arm_ldrgbs_z_qualifiers)
+
+static enum arm_type_qualifiers
+arm_ldrgbu_z_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_immediate,
+ qualifier_unsigned};
+#define LDRGBU_Z_QUALIFIERS (arm_ldrgbu_z_qualifiers)
+
+static enum arm_type_qualifiers
+arm_ldrgs_z_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_pointer, qualifier_unsigned,
+ qualifier_unsigned};
+#define LDRGS_Z_QUALIFIERS (arm_ldrgs_z_qualifiers)
+
+static enum arm_type_qualifiers
+arm_ldrgu_z_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_pointer, qualifier_unsigned,
+ qualifier_unsigned};
+#define LDRGU_Z_QUALIFIERS (arm_ldrgu_z_qualifiers)
+
+static enum arm_type_qualifiers
+arm_ldrs_z_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_pointer, qualifier_unsigned};
+#define LDRS_Z_QUALIFIERS (arm_ldrs_z_qualifiers)
+
+static enum arm_type_qualifiers
+arm_ldru_z_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_pointer, qualifier_unsigned};
+#define LDRU_Z_QUALIFIERS (arm_ldru_z_qualifiers)
+
/* End of Qualifier for MVE builtins. */
/* void ([T element type] *, T, immediate). */
#define vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p)
#define vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p)
+#define vldrbq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s16(__base, __offset, __p)
+#define vldrbq_gather_offset_z_u8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u8(__base, __offset, __p)
+#define vldrbq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s32(__base, __offset, __p)
+#define vldrbq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u16(__base, __offset, __p)
+#define vldrbq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u32(__base, __offset, __p)
+#define vldrbq_gather_offset_z_s8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s8(__base, __offset, __p)
+#define vldrbq_z_s16(__base, __p) __arm_vldrbq_z_s16(__base, __p)
+#define vldrbq_z_u8(__base, __p) __arm_vldrbq_z_u8(__base, __p)
+#define vldrbq_z_s8(__base, __p) __arm_vldrbq_z_s8(__base, __p)
+#define vldrbq_z_s32(__base, __p) __arm_vldrbq_z_s32(__base, __p)
+#define vldrbq_z_u16(__base, __p) __arm_vldrbq_z_u16(__base, __p)
+#define vldrbq_z_u32(__base, __p) __arm_vldrbq_z_u32(__base, __p)
+#define vldrwq_gather_base_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_u32(__addr, __offset, __p)
+#define vldrwq_gather_base_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_s32(__addr, __offset, __p)
#endif
__extension__ extern __inline void
{
__builtin_mve_vstrwq_scatter_base_p_uv4si (__addr, __offset, __value, __p);
}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_s8 (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_sv16qi ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_s32 (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_sv4si ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_s16 (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_sv8hi ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_u8 (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_uv16qi ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_u32 (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_uv4si ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_u16 (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_uv8hi ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_s8 (int8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_sv16qi ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_s32 (int8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_sv4si ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_s16 (int8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_sv8hi ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_u8 (uint8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_uv16qi ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_u32 (uint8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_uv4si ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_u16 (uint8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_uv8hi ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_z_s32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_base_z_sv4si (__addr, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_z_u32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_base_z_uv4si (__addr, __offset, __p);
+}
+
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
__extension__ extern __inline void
{
return __builtin_mve_vsubq_m_n_fv8hf (__inactive, __a, __b, __p);
}
+
#endif
enum {
int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+#endif /* MVE Integer. */
+
#define vldrbq_gather_offset_z(p0,p1,p2) __arm_vldrbq_gather_offset_z(p0,p1,p2)
#define __arm_vldrbq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_u16 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
int (*)[__ARM_mve_type_uint8_t_const_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
-#endif /* MVE Integer. */
-
#define vqrdmlahq_m(p0,p1,p2,p3) __arm_vqrdmlahq_m(p0,p1,p2,p3)
#define __arm_vqrdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
VAR3 (STRSU_P, vstrbq_scatter_offset_p_u, v16qi, v8hi, v4si)
VAR1 (STRSBS_P, vstrwq_scatter_base_p_s, v4si)
VAR1 (STRSBU_P, vstrwq_scatter_base_p_u, v4si)
+VAR1 (LDRGBS_Z, vldrwq_gather_base_z_s, v4si)
+VAR1 (LDRGBU_Z, vldrwq_gather_base_z_u, v4si)
+VAR3 (LDRGS_Z, vldrbq_gather_offset_z_s, v16qi, v8hi, v4si)
+VAR3 (LDRGU_Z, vldrbq_gather_offset_z_u, v16qi, v8hi, v4si)
+VAR3 (LDRS_Z, vldrbq_z_s, v16qi, v8hi, v4si)
+VAR3 (LDRU_Z, vldrbq_z_u, v16qi, v8hi, v4si)
return "";
}
[(set_attr "length" "8")])
+
+;;
+;; [vldrbq_gather_offset_z_s vldrbq_gather_offset_z_u]
+;;
+(define_insn "mve_vldrbq_gather_offset_z_<supf><mode>"
+ [(set (match_operand:MVE_2 0 "s_register_operand" "=&w")
+ (unspec:MVE_2 [(match_operand:<MVE_B_ELEM> 1 "memory_operand" "Us")
+ (match_operand:MVE_2 2 "s_register_operand" "w")
+ (match_operand:HI 3 "vpr_register_operand" "Up")]
+ VLDRBGOQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[4];
+ ops[0] = operands[0];
+ ops[1] = operands[1];
+ ops[2] = operands[2];
+ ops[3] = operands[3];
+ if (!strcmp ("<supf>","s") && <V_sz_elem> == 8)
+ output_asm_insn ("vpst\n\tvldrbt.u8\t%q0, [%m1, %q2]",ops);
+ else
+ output_asm_insn ("vpst\n\tvldrbt.<supf><V_sz_elem>\t%q0, [%m1, %q2]",ops);
+ return "";
+}
+ [(set_attr "length" "8")])
+
+;;
+;; [vldrbq_z_s vldrbq_z_u]
+;;
+(define_insn "mve_vldrbq_z_<supf><mode>"
+ [(set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:<MVE_B_ELEM> 1 "memory_operand" "Us")
+ (match_operand:HI 2 "vpr_register_operand" "Up")]
+ VLDRBQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[2];
+ int regno = REGNO (operands[0]);
+ ops[0] = gen_rtx_REG (TImode, regno);
+ ops[1] = operands[1];
+ output_asm_insn ("vpst\n\tvldrbt.<supf><V_sz_elem>\t%q0, %E1",ops);
+ return "";
+}
+ [(set_attr "length" "8")])
+
+;;
+;; [vldrwq_gather_base_z_s vldrwq_gather_base_z_u]
+;;
+(define_insn "mve_vldrwq_gather_base_z_<supf>v4si"
+ [(set (match_operand:V4SI 0 "s_register_operand" "=&w")
+ (unspec:V4SI [(match_operand:V4SI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:HI 3 "vpr_register_operand" "Up")]
+ VLDRWGBQ))
+ ]
+ "TARGET_HAVE_MVE"
+{
+ rtx ops[3];
+ ops[0] = operands[0];
+ ops[1] = operands[1];
+ ops[2] = operands[2];
+ output_asm_insn ("vpst\n\tvldrwt.u32\t%q0, [%q1, %2]",ops);
+ return "";
+}
+ [(set_attr "length" "8")])
+2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Mihail Ionescu <mihail.ionescu@arm.com>
+ Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+
+ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s16.c: New test.
+ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrbq_gather_offset_z_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrbq_z_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrbq_z_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrbq_z_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrbq_z_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrbq_z_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrbq_z_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vldrwq_gather_base_z_u32.c: Likewise.
+
2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int8_t const * base, uint16x8_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z_s16 (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.s16" } } */
+
+int16x8_t
+foo1 (int8_t const * base, uint16x8_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.s16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int8_t const * base, uint32x4_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z_s32 (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.s32" } } */
+
+int32x4_t
+foo1 (int8_t const * base, uint32x4_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.s32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8_t const * base, uint8x16_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z_s8 (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u8" } } */
+
+int8x16_t
+foo1 (int8_t const * base, uint8x16_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u8" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8_t const * base, uint16x8_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z_u16 (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u16" } } */
+
+uint16x8_t
+foo1 (uint8_t const * base, uint16x8_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint8_t const * base, uint32x4_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z_u32 (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u32" } } */
+
+uint32x4_t
+foo1 (uint8_t const * base, uint32x4_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8_t const * base, uint8x16_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z_u8 (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u8" } } */
+
+uint8x16_t
+foo1 (uint8_t const * base, uint8x16_t offset, mve_pred16_t p)
+{
+ return vldrbq_gather_offset_z (base, offset, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u8" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int8_t const * base, mve_pred16_t p)
+{
+ return vldrbq_z_s16 (base, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.s16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int8_t const * base, mve_pred16_t p)
+{
+ return vldrbq_z_s32 (base, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.s32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8_t const * base, mve_pred16_t p)
+{
+ return vldrbq_z_s8 (base, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.s8" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8_t const * base, mve_pred16_t p)
+{
+ return vldrbq_z_u16 (base, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u16" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint8_t const * base, mve_pred16_t p)
+{
+ return vldrbq_z_u32 (base, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8_t const * base, mve_pred16_t p)
+{
+ return vldrbq_z_u8 (base, p);
+}
+
+/* { dg-final { scan-assembler "vldrbt.u8" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (uint32x4_t addr, mve_pred16_t p)
+{
+ return vldrwq_gather_base_z_s32 (addr, 4, p);
+}
+
+/* { dg-final { scan-assembler "vldrwt.u32" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t addr, mve_pred16_t p)
+{
+ return vldrwq_gather_base_z_u32 (addr, 4, p);
+}
+
+/* { dg-final { scan-assembler "vldrwt.u32" } } */