This patch supports the following MVE ACLE whole-vector left shift with carry intrinsics:
vshlcq_m_s8, vshlcq_m_s16, vshlcq_m_s32, vshlcq_m_u8, vshlcq_m_u16, vshlcq_m_u32.
Please refer to the M-profile Vector Extension (MVE) intrinsics documentation [1] for more details.
[1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics
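For reference, a minimal usage sketch of the new predicated intrinsic follows. It is not
part of the patch; the function name "shift_with_carry" and the shift count 8 are chosen
only for illustration.

#include "arm_mve.h"

/* Shift each element of 'a' left by 8 bits under predicate 'p', shifting bits
   in from (and out to) the 32-bit carry word pointed to by 'carry'.  With MVE
   enabled this should produce a "vpst; vshlct" sequence, as checked by the new
   tests below.  */
uint32x4_t
shift_with_carry (uint32x4_t a, uint32_t *carry, mve_pred16_t p)
{
  /* The polymorphic vshlcq_m macro resolves to __arm_vshlcq_m_u32 here.  */
  return vshlcq_m (a, carry, 8, p);
}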
gcc/ChangeLog:

2020-03-23 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
* config/arm/arm_mve.h (vshlcq_m_s8): Define macro.
(vshlcq_m_u8): Likewise.
(vshlcq_m_s16): Likewise.
(vshlcq_m_u16): Likewise.
(vshlcq_m_s32): Likewise.
(vshlcq_m_u32): Likewise.
(__arm_vshlcq_m_s8): Define intrinsic.
(__arm_vshlcq_m_u8): Likewise.
(__arm_vshlcq_m_s16): Likewise.
(__arm_vshlcq_m_u16): Likewise.
(__arm_vshlcq_m_s32): Likewise.
(__arm_vshlcq_m_u32): Likewise.
(vshlcq_m): Define polymorphic variant.
* config/arm/arm_mve_builtins.def (QUADOP_NONE_NONE_UNONE_IMM_UNONE):
Use builtin qualifier.
(QUADOP_UNONE_UNONE_UNONE_IMM_UNONE): Likewise.
* config/arm/mve.md (mve_vshlcq_m_vec_<supf><mode>): Define RTL pattern.
(mve_vshlcq_m_carry_<supf><mode>): Likewise.
(mve_vshlcq_m_<supf><mode>): Likewise.
gcc/testsuite/ChangeLog:
2020-03-23 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
* gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c: New test.
* gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c: Likewise.
+2020-03-23 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+ Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Mihail Ionescu <mihail.ionescu@arm.com>
+
+ * config/arm/arm_mve.h (vshlcq_m_s8): Define macro.
+ (vshlcq_m_u8): Likewise.
+ (vshlcq_m_s16): Likewise.
+ (vshlcq_m_u16): Likewise.
+ (vshlcq_m_s32): Likewise.
+ (vshlcq_m_u32): Likewise.
+ (__arm_vshlcq_m_s8): Define intrinsic.
+ (__arm_vshlcq_m_u8): Likewise.
+ (__arm_vshlcq_m_s16): Likewise.
+ (__arm_vshlcq_m_u16): Likewise.
+ (__arm_vshlcq_m_s32): Likewise.
+ (__arm_vshlcq_m_u32): Likewise.
+ (vshlcq_m): Define polymorphic variant.
+ * config/arm/arm_mve_builtins.def (QUADOP_NONE_NONE_UNONE_IMM_UNONE):
+ Use builtin qualifier.
+ (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE): Likewise.
+ * config/arm/mve.md (mve_vshlcq_m_vec_<supf><mode>): Define RTL pattern.
+ (mve_vshlcq_m_carry_<supf><mode>): Likewise.
+ (mve_vshlcq_m_<supf><mode>): Likewise.
+
2020-03-23 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
* config/arm/arm-builtins.c (LSLL_QUALIFIERS): Define builtin qualifier.
#define urshrl(__p0, __p1) __arm_urshrl(__p0, __p1)
#define lsll(__p0, __p1) __arm_lsll(__p0, __p1)
#define asrl(__p0, __p1) __arm_asrl(__p0, __p1)
+#define vshlcq_m_s8(__a, __b, __imm, __p) __arm_vshlcq_m_s8(__a, __b, __imm, __p)
+#define vshlcq_m_u8(__a, __b, __imm, __p) __arm_vshlcq_m_u8(__a, __b, __imm, __p)
+#define vshlcq_m_s16(__a, __b, __imm, __p) __arm_vshlcq_m_s16(__a, __b, __imm, __p)
+#define vshlcq_m_u16(__a, __b, __imm, __p) __arm_vshlcq_m_u16(__a, __b, __imm, __p)
+#define vshlcq_m_s32(__a, __b, __imm, __p) __arm_vshlcq_m_s32(__a, __b, __imm, __p)
+#define vshlcq_m_u32(__a, __b, __imm, __p) __arm_vshlcq_m_u32(__a, __b, __imm, __p)
#endif
/* For big-endian, GCC's vector indices are reversed within each 64 bits
return __builtin_mve_srshr_si (value, shift);
}
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_s8 (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ int8x16_t __res = __builtin_mve_vshlcq_m_vec_sv16qi (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_sv16qi (__a, *__b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_u8 (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ uint8x16_t __res = __builtin_mve_vshlcq_m_vec_uv16qi (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_uv16qi (__a, *__b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_s16 (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ int16x8_t __res = __builtin_mve_vshlcq_m_vec_sv8hi (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_sv8hi (__a, *__b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_u16 (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ uint16x8_t __res = __builtin_mve_vshlcq_m_vec_uv8hi (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_uv8hi (__a, *__b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_s32 (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ int32x4_t __res = __builtin_mve_vshlcq_m_vec_sv4si (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_sv4si (__a, *__b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_u32 (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ uint32x4_t __res = __builtin_mve_vshlcq_m_vec_uv4si (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_uv4si (__a, *__b, __imm, __p);
+ return __res;
+}
+
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
__extension__ extern __inline void
int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
+#define vshlcq_m(p0,p1,p2,p3) __arm_vshlcq_m(p0,p1,p2,p3)
+#define __arm_vshlcq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2, p3));})
+
#ifdef __cplusplus
}
#endif
VAR1 (UQSHL, urshrl_, di)
VAR1 (UQSHL, uqshl_, si)
VAR1 (UQSHL, uqshll_, di)
+VAR3 (QUADOP_NONE_NONE_UNONE_IMM_UNONE, vshlcq_m_vec_s, v16qi, v8hi, v4si)
+VAR3 (QUADOP_NONE_NONE_UNONE_IMM_UNONE, vshlcq_m_carry_s, v16qi, v8hi, v4si)
+VAR3 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vshlcq_m_vec_u, v16qi, v8hi, v4si)
+VAR3 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vshlcq_m_carry_u, v16qi, v8hi, v4si)
VADCQ_M_S VSBCIQ_U VSBCIQ_S VSBCIQ_M_U VSBCIQ_M_S
VSBCQ_U VSBCQ_S VSBCQ_M_U VSBCQ_M_S VADCIQ_U VADCIQ_M_U
VADCIQ_S VADCIQ_M_S VLD2Q VLD4Q VST2Q SRSHRL SRSHR
- URSHR URSHRL SQRSHR UQRSHL UQRSHLL_64
- UQRSHLL_48 SQRSHRL_64 SQRSHRL_48])
+ URSHR URSHRL SQRSHR UQRSHL UQRSHLL_64 VSHLCQ_M_U
+ UQRSHLL_48 SQRSHRL_64 SQRSHRL_48 VSHLCQ_M_S])
(define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF") (V8HF "V8HI")
(V4SF "V4SI")])
(VADCQ_U "u") (VADCQ_M_U "u") (VADCQ_S "s")
(VADCIQ_U "u") (VADCIQ_M_U "u") (VADCIQ_S "s")
(VADCIQ_M_S "s") (SQRSHRL_64 "64") (SQRSHRL_48 "48")
- (UQRSHLL_64 "64") (UQRSHLL_48 "48")])
+ (UQRSHLL_64 "64") (UQRSHLL_48 "48") (VSHLCQ_M_S "s")
+ (VSHLCQ_M_U "u")])
(define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32")
(VCTP64Q "64") (VCTP8Q_M "8") (VCTP16Q_M "16")
(define_int_iterator VADCQ_M [VADCQ_M_U VADCQ_M_S])
(define_int_iterator UQRSHLLQ [UQRSHLL_64 UQRSHLL_48])
(define_int_iterator SQRSHRLQ [SQRSHRL_64 SQRSHRL_48])
+(define_int_iterator VSHLCQ_M [VSHLCQ_M_S VSHLCQ_M_U])
(define_insn "*mve_mov<mode>"
[(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w,w,r,w,Us")
"TARGET_HAVE_MVE"
"sqshll%?\\t%Q1, %R1, %2"
[(set_attr "predicable" "yes")])
+
+;;
+;; [vshlcq_m_u vshlcq_m_s]
+;;
+(define_expand "mve_vshlcq_m_vec_<supf><mode>"
+ [(match_operand:MVE_2 0 "s_register_operand")
+ (match_operand:MVE_2 1 "s_register_operand")
+ (match_operand:SI 2 "s_register_operand")
+ (match_operand:SI 3 "mve_imm_32")
+ (match_operand:HI 4 "vpr_register_operand")
+ (unspec:MVE_2 [(const_int 0)] VSHLCQ_M)]
+ "TARGET_HAVE_MVE"
+{
+ rtx ignore_wb = gen_reg_rtx (SImode);
+ emit_insn (gen_mve_vshlcq_m_<supf><mode> (operands[0], ignore_wb, operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
+
+(define_expand "mve_vshlcq_m_carry_<supf><mode>"
+ [(match_operand:SI 0 "s_register_operand")
+ (match_operand:MVE_2 1 "s_register_operand")
+ (match_operand:SI 2 "s_register_operand")
+ (match_operand:SI 3 "mve_imm_32")
+ (match_operand:HI 4 "vpr_register_operand")
+ (unspec:MVE_2 [(const_int 0)] VSHLCQ_M)]
+ "TARGET_HAVE_MVE"
+{
+ rtx ignore_vec = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mve_vshlcq_m_<supf><mode> (ignore_vec, operands[0],
+ operands[1], operands[2],
+ operands[3], operands[4]));
+ DONE;
+})
+
+(define_insn "mve_vshlcq_m_<supf><mode>"
+ [(set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 2 "s_register_operand" "0")
+ (match_operand:SI 3 "s_register_operand" "1")
+ (match_operand:SI 4 "mve_imm_32" "Rf")
+ (match_operand:HI 5 "vpr_register_operand" "Up")]
+ VSHLCQ_M))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (unspec:SI [(match_dup 2)
+ (match_dup 3)
+ (match_dup 4)
+ (match_dup 5)]
+ VSHLCQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\;vshlct\t%q0, %1, %4"
+ [(set_attr "type" "mve_move")
+ (set_attr "length" "8")])
+2020-03-23 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+ Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Mihail Ionescu <mihail.ionescu@arm.com>
+
+ * gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c: New test.
+ * gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c: Likewise.
+
2020-03-23 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
* gcc.target/arm/mve/intrinsics/asrl.c: New test.
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m_s16 (a, b, 32, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
+
+int16x8_t
+foo1 (int16x8_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m (a, b, 32, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m_s32 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
+
+int32x4_t
+foo1 (int32x4_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m_s8 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
+
+int8x16_t
+foo1 (int8x16_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m_u16 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m_u32 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m_u8 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint32_t * b, mve_pred16_t p)
+{
+ return vshlcq_m (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct" } } */