BUILTIN_VQW (TERNOP, sabal2, 0, NONE)
BUILTIN_VQW (TERNOPU, uabal2, 0, NONE)
+ /* Implemented by aarch64_<sur>abdl<mode>. */
+ BUILTIN_VD_BHSI (BINOP, sabdl, 0, NONE)
+ BUILTIN_VD_BHSI (BINOPU, uabdl, 0, NONE)
+
/* Implemented by aarch64_<sur>abdl2<mode>. */
BUILTIN_VQW (BINOP, sabdl2, 0, NONE)
BUILTIN_VQW (BINOPU, uabdl2, 0, NONE)
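
For reference, the BUILTIN_VD_BHSI entries above generate one builtin per 64-bit integer mode (V8QI, V4HI, V2SI). A sketch of the resulting prototypes, inferred from the arm_neon.h hunk further down (signed variants take the plain name, unsigned variants carry the _uuu qualifier suffix):

/* Prototypes as used by the arm_neon.h changes below.  */
int16x8_t  __builtin_aarch64_sabdlv8qi (int8x8_t, int8x8_t);
int32x4_t  __builtin_aarch64_sabdlv4hi (int16x4_t, int16x4_t);
int64x2_t  __builtin_aarch64_sabdlv2si (int32x2_t, int32x2_t);
uint16x8_t __builtin_aarch64_uabdlv8qi_uuu (uint8x8_t, uint8x8_t);
uint32x4_t __builtin_aarch64_uabdlv4hi_uuu (uint16x4_t, uint16x4_t);
uint64x2_t __builtin_aarch64_uabdlv2si_uuu (uint32x2_t, uint32x2_t);
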
[(set_attr "type" "neon_abd<q>")]
)
+
+(define_insn "aarch64_<sur>abdl<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (unspec:<VWIDE> [(match_operand:VD_BHSI 1 "register_operand" "w")
+ (match_operand:VD_BHSI 2 "register_operand" "w")]
+ ABDL))]
+ "TARGET_SIMD"
+ "<sur>abdl\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_abd<q>")]
+)
+
(define_insn "aarch64_<sur>abdl2<mode>"
[(set (match_operand:<VDBLW> 0 "register_operand" "=w")
(unspec:<VDBLW> [(match_operand:VQW 1 "register_operand" "w")
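
A minimal sketch of the intended code generation through the new pattern; the instruction form follows the "<sur>abdl\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>" template (the exact register allocation may differ):

#include <arm_neon.h>

/* With this patch, vabdl_s8 expands via __builtin_aarch64_sabdlv8qi and
   the aarch64_<sur>abdl<mode> pattern rather than inline asm, so the
   compiler can schedule and optimise around the instruction.  */
int16x8_t
widen_abs_diff (int8x8_t a, int8x8_t b)
{
  return vabdl_s8 (a, b);   /* Expected: sabdl v0.8h, v0.8b, v1.8b  */
}
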
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s8 (int8x8_t __a, int8x8_t __b)
{
- int16x8_t __result;
- __asm__ ("sabdl %0.8h, %1.8b, %2.8b"
- : "=w"(__result)
- : "w"(__a), "w"(__b)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_sabdlv8qi (__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s16 (int16x4_t __a, int16x4_t __b)
{
- int32x4_t __result;
- __asm__ ("sabdl %0.4s, %1.4h, %2.4h"
- : "=w"(__result)
- : "w"(__a), "w"(__b)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_sabdlv4hi (__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s32 (int32x2_t __a, int32x2_t __b)
{
- int64x2_t __result;
- __asm__ ("sabdl %0.2d, %1.2s, %2.2s"
- : "=w"(__result)
- : "w"(__a), "w"(__b)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_sabdlv2si (__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u8 (uint8x8_t __a, uint8x8_t __b)
{
- uint16x8_t __result;
- __asm__ ("uabdl %0.8h, %1.8b, %2.8b"
- : "=w"(__result)
- : "w"(__a), "w"(__b)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_uabdlv8qi_uuu (__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u16 (uint16x4_t __a, uint16x4_t __b)
{
- uint32x4_t __result;
- __asm__ ("uabdl %0.4s, %1.4h, %2.4h"
- : "=w"(__result)
- : "w"(__a), "w"(__b)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_uabdlv4hi_uuu (__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u32 (uint32x2_t __a, uint32x2_t __b)
{
- uint64x2_t __result;
- __asm__ ("uabdl %0.2d, %1.2s, %2.2s"
- : "=w"(__result)
- : "w"(__a), "w"(__b)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_uabdlv2si_uuu (__a, __b);
}
__extension__ extern __inline int8x16_t
;; The unspec codes for the SABAL, UABAL AdvancedSIMD instructions.
(define_int_iterator ABAL [UNSPEC_SABAL UNSPEC_UABAL])
+;; The unspec codes for the SABDL, UABDL AdvancedSIMD instructions.
+(define_int_iterator ABDL [UNSPEC_SABDL UNSPEC_UABDL])
+
;; The unspec codes for the SABAL2, UABAL2 AdvancedSIMD instructions.
(define_int_iterator ABAL2 [UNSPEC_SABAL2 UNSPEC_UABAL2])
(UNSPEC_ADDHN "") (UNSPEC_RADDHN "r")
(UNSPEC_SABAL "s") (UNSPEC_UABAL "u")
(UNSPEC_SABAL2 "s") (UNSPEC_UABAL2 "u")
+ (UNSPEC_SABDL "s") (UNSPEC_UABDL "u")
(UNSPEC_SABDL2 "s") (UNSPEC_UABDL2 "u")
(UNSPEC_SADALP "s") (UNSPEC_UADALP "u")
(UNSPEC_SUBHN "") (UNSPEC_RSUBHN "r")