;; sq<r>dmulh_lane
(define_insn "aarch64_sq<r>dmulh_lane<mode>"
-  [(set (match_operand:VSDQ_HSI 0 "register_operand" "=w")
-        (unspec:VSDQ_HSI
-          [(match_operand:VSDQ_HSI 1 "register_operand" "w")
+  [(set (match_operand:VDQHS 0 "register_operand" "=w")
+        (unspec:VDQHS
+          [(match_operand:VDQHS 1 "register_operand" "w")
+           (vec_select:<VEL>
+             (match_operand:<VCOND> 2 "register_operand" "<vwx>")
+             (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
+          VQDMULH))]
+  "TARGET_SIMD"
+  "*
+   aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCOND>mode));
+   return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_sq<r>dmulh_laneq<mode>"
+  [(set (match_operand:VDQHS 0 "register_operand" "=w")
+        (unspec:VDQHS
+          [(match_operand:VDQHS 1 "register_operand" "w")
+           (vec_select:<VEL>
+             (match_operand:<VCONQ> 2 "register_operand" "<vwx>")
+             (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
+          VQDMULH))]
+  "TARGET_SIMD"
+  "*
+   aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
+   return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";"
+  [(set_attr "simd_type" "simd_sat_mul")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_sq<r>dmulh_lane<mode>"
+  [(set (match_operand:SD_HSI 0 "register_operand" "=w")
+        (unspec:SD_HSI
+          [(match_operand:SD_HSI 1 "register_operand" "w")
           (vec_select:<VEL>
-             (match_operand:<VCON> 2 "register_operand" "<vwx>")
+             (match_operand:<VCONQ> 2 "register_operand" "<vwx>")
             (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
          VQDMULH))]
  "TARGET_SIMD"
  "*
-   aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode));
-   return \"sq<r>dmulh\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]\";"
+   aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
+   return \"sq<r>dmulh\\t%<v>0, %<v>1, %2.<v>[%3]\";"
  [(set_attr "simd_type" "simd_sat_mul")
   (set_attr "simd_mode" "<MODE>")]
)
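
The net effect of the pattern split, for reference: the vector _lane form now
selects from a 64-bit register (<VCOND>, so the lane bound is the narrow
type's lane count), while the _laneq and scalar forms select from a 128-bit
register (<VCONQ>). A minimal sketch at the intrinsics level; the function
names are illustrative and the commented assembly is the expected, not
guaranteed, output:

#include <arm_neon.h>

/* Sketch: after the patch, the lane index is bounded by the width of
   the second operand.  */
int16x4_t
mul_lane (int16x4_t x, int16x4_t coeff)
{
  /* 64-bit coefficient vector: valid lanes are 0..3.  */
  return vqdmulh_lane_s16 (x, coeff, 3);   /* expected: sqdmulh v0.4h, v0.4h, v1.h[3] */
}

int16x4_t
mul_laneq (int16x4_t x, int16x8_t coeff)
{
  /* 128-bit coefficient vector: valid lanes are 0..7.  */
  return vqdmulh_laneq_s16 (x, coeff, 7);  /* expected: sqdmulh v0.4h, v0.4h, v1.h[7] */
}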
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
{
- return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c);
+ return __builtin_aarch64_sqdmulh_laneqv4hi (__a, __b, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
{
- return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c);
+ return __builtin_aarch64_sqdmulh_laneqv2si (__a, __b, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
- return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c);
+ return __builtin_aarch64_sqdmulh_laneqv8hi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
- return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
+ return __builtin_aarch64_sqdmulh_laneqv4si (__a, __b, __c);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqrdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
{
- return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c);
+ return __builtin_aarch64_sqrdmulh_laneqv4hi (__a, __b, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqrdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
{
- return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c);
+ return __builtin_aarch64_sqrdmulh_laneqv2si (__a, __b, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqrdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
- return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c);
+ return __builtin_aarch64_sqrdmulh_laneqv8hi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
- return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
+ return __builtin_aarch64_sqrdmulh_laneqv4si (__a, __b, __c);
}
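
With the laneq intrinsics now routed to the matching _laneq builtins, a
scalar reference model is handy for checking results. This follows the
architectural definition of SQDMULH/SQRDMULH (saturating doubling multiply
returning the high half, with the R form rounding to nearest); it is an
illustrative sketch, not part of the patch:

#include <stdint.h>

static int16_t
ref_sqdmulh_s16 (int16_t a, int16_t b)
{
  int64_t p = 2 * (int64_t) a * (int64_t) b;        /* doubling multiply */
  int64_t h = p >> 16;                              /* keep the high half */
  /* Only INT16_MIN * INT16_MIN can overflow, and only upward.  */
  return h > INT16_MAX ? INT16_MAX : (int16_t) h;
}

static int16_t
ref_sqrdmulh_s16 (int16_t a, int16_t b)
{
  int64_t p = 2 * (int64_t) a * (int64_t) b + (1 << 15);  /* round to nearest */
  int64_t h = p >> 16;
  return h > INT16_MAX ? INT16_MAX : (int16_t) h;
}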
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
- int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
- return __builtin_aarch64_sqdmulh_lanev4hi (__a, __tmp, __c);
+ return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
- int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
- return __builtin_aarch64_sqdmulh_lanev2si (__a, __tmp, __c);
+ return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
{
- int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
- return __builtin_aarch64_sqdmulh_lanev8hi (__a, __tmp, __c);
+ return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
{
- int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
- return __builtin_aarch64_sqdmulh_lanev4si (__a, __tmp, __c);
+ return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
}
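
These _lane forms previously widened __b to 128 bits with a zero-filled
vcombine purely because the old pattern only accepted the 128-bit mode.
Besides the redundant move, the lane bound was then taken from the 128-bit
mode, so an out-of-range index quietly selected the zero half instead of
being rejected. A hedged sketch of the difference, assuming the new bounds
check fires as the pattern change suggests:

#include <arm_neon.h>

int16x4_t
lane_bounds_demo (int16x4_t a, int16x4_t b)
{
  /* In range: an int16x4_t has lanes 0..3.  */
  return vqdmulh_lane_s16 (a, b, 3);
  /* Out of range for the 64-bit type; expected to be diagnosed now that
     aarch64_simd_lane_bounds uses GET_MODE_NUNITS (<VCOND>mode) == 4:
       vqdmulh_lane_s16 (a, b, 7);  */
}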
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
- int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
- return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __tmp, __c);
+ return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
- int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
- return __builtin_aarch64_sqrdmulh_lanev2si (__a, __tmp, __c);
+ return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
{
- int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
- return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __tmp, __c);
+ return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
{
- int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
- return __builtin_aarch64_sqrdmulh_lanev4si (__a, __tmp, __c);
+ return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
}
__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))