__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_f16 (float16x8_t __a, float16x8_t __b)
{
- return __builtin_mve_vcmulq_rot90_fv8hf (__a, __b);
+ return __builtin_mve_vcmulq_rot90v8hf (__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_f16 (float16x8_t __a, float16x8_t __b)
{
- return __builtin_mve_vcmulq_rot270_fv8hf (__a, __b);
+ return __builtin_mve_vcmulq_rot270v8hf (__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_f16 (float16x8_t __a, float16x8_t __b)
{
- return __builtin_mve_vcmulq_rot180_fv8hf (__a, __b);
+ return __builtin_mve_vcmulq_rot180v8hf (__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_f16 (float16x8_t __a, float16x8_t __b)
{
- return __builtin_mve_vcmulq_fv8hf (__a, __b);
+ return __builtin_mve_vcmulqv8hf (__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_f32 (float32x4_t __a, float32x4_t __b)
{
- return __builtin_mve_vcmulq_rot90_fv4sf (__a, __b);
+ return __builtin_mve_vcmulq_rot90v4sf (__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_f32 (float32x4_t __a, float32x4_t __b)
{
- return __builtin_mve_vcmulq_rot270_fv4sf (__a, __b);
+ return __builtin_mve_vcmulq_rot270v4sf (__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_f32 (float32x4_t __a, float32x4_t __b)
{
- return __builtin_mve_vcmulq_rot180_fv4sf (__a, __b);
+ return __builtin_mve_vcmulq_rot180v4sf (__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_f32 (float32x4_t __a, float32x4_t __b)
{
- return __builtin_mve_vcmulq_fv4sf (__a, __b);
+ return __builtin_mve_vcmulqv4sf (__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
- return __builtin_mve_vcmlaq_fv8hf (__a, __b, __c);
+ return __builtin_mve_vcmlaqv8hf (__a, __b, __c);
}
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
- return __builtin_mve_vcmlaq_rot180_fv8hf (__a, __b, __c);
+ return __builtin_mve_vcmlaq_rot180v8hf (__a, __b, __c);
}
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
- return __builtin_mve_vcmlaq_rot270_fv8hf (__a, __b, __c);
+ return __builtin_mve_vcmlaq_rot270v8hf (__a, __b, __c);
}
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
- return __builtin_mve_vcmlaq_rot90_fv8hf (__a, __b, __c);
+ return __builtin_mve_vcmlaq_rot90v8hf (__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
- return __builtin_mve_vcmlaq_fv4sf (__a, __b, __c);
+ return __builtin_mve_vcmlaqv4sf (__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
- return __builtin_mve_vcmlaq_rot180_fv4sf (__a, __b, __c);
+ return __builtin_mve_vcmlaq_rot180v4sf (__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
- return __builtin_mve_vcmlaq_rot270_fv4sf (__a, __b, __c);
+ return __builtin_mve_vcmlaq_rot270v4sf (__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
- return __builtin_mve_vcmlaq_rot90_fv4sf (__a, __b, __c);
+ return __builtin_mve_vcmlaq_rot90v4sf (__a, __b, __c);
}
__extension__ extern __inline float32x4_t
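For reference, the user-facing intrinsic signatures are unchanged by the renaming above; only the internal __builtin_mve_* names lose their "_f" suffix. A minimal usage sketch (illustrative only, not part of the patch; assumes an MVE floating-point target such as -march=armv8.1-m.main+mve.fp -mfloat-abi=hard, and the helper name is made up) of the standard full complex multiply idiom built from these intrinsics:

#include <arm_mve.h>

/* c = a * b on interleaved {re, im} element pairs: vcmul #0 forms the
   a.re * {b.re, b.im} partial products, then vcmla #90 accumulates the
   a.im * {-b.im, b.re} partial products.  */
float32x4_t
complex_mul_f32 (float32x4_t a, float32x4_t b)
{
  float32x4_t t = __arm_vcmulq_f32 (a, b);
  return __arm_vcmlaq_rot90_f32 (t, a, b);
}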
VAR2 (BINOP_NONE_NONE_NONE, vmaxnmavq_f, v8hf, v4sf)
VAR2 (BINOP_NONE_NONE_NONE, vmaxnmaq_f, v8hf, v4sf)
VAR2 (BINOP_NONE_NONE_NONE, veorq_f, v8hf, v4sf)
-VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot90_f, v8hf, v4sf)
-VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot270_f, v8hf, v4sf)
-VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot180_f, v8hf, v4sf)
-VAR2 (BINOP_NONE_NONE_NONE, vcmulq_f, v8hf, v4sf)
VAR2 (BINOP_NONE_NONE_NONE, vbicq_f, v8hf, v4sf)
VAR2 (BINOP_NONE_NONE_NONE, vandq_f, v8hf, v4sf)
VAR2 (BINOP_NONE_NONE_NONE, vaddq_n_f, v8hf, v4sf)
VAR2 (TERNOP_NONE_NONE_NONE_NONE, vfmasq_n_f, v8hf, v4sf)
VAR2 (TERNOP_NONE_NONE_NONE_NONE, vfmaq_n_f, v8hf, v4sf)
VAR2 (TERNOP_NONE_NONE_NONE_NONE, vfmaq_f, v8hf, v4sf)
-VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot90_f, v8hf, v4sf)
-VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot270_f, v8hf, v4sf)
-VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot180_f, v8hf, v4sf)
-VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_f, v8hf, v4sf)
VAR2 (TERNOP_NONE_NONE_NONE_IMM, vshrntq_n_s, v8hi, v4si)
VAR2 (TERNOP_NONE_NONE_NONE_IMM, vshrnbq_n_s, v8hi, v4si)
VAR2 (TERNOP_NONE_NONE_NONE_IMM, vrshrntq_n_s, v8hi, v4si)
/* optabs without any suffixes. */
VAR5 (BINOP_NONE_NONE_NONE, vcaddq_rot90, v16qi, v8hi, v4si, v8hf, v4sf)
VAR5 (BINOP_NONE_NONE_NONE, vcaddq_rot270, v16qi, v8hi, v4si, v8hf, v4sf)
+VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot90, v8hf, v4sf)
+VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot270, v8hf, v4sf)
+VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot180, v8hf, v4sf)
+VAR2 (BINOP_NONE_NONE_NONE, vcmulq, v8hf, v4sf)
+VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot90, v8hf, v4sf)
+VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot270, v8hf, v4sf)
+VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot180, v8hf, v4sf)
+VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq, v8hf, v4sf)
(UNSPEC_VCMLA270 "270")])
(define_int_attr mve_rot [(UNSPEC_VCADD90 "_rot90")
- (UNSPEC_VCADD270 "_rot270")])
+ (UNSPEC_VCADD270 "_rot270")
+ (UNSPEC_VCMLA "")
+ (UNSPEC_VCMLA90 "_rot90")
+ (UNSPEC_VCMLA180 "_rot180")
+ (UNSPEC_VCMLA270 "_rot270")
+ (UNSPEC_VCMUL "")
+ (UNSPEC_VCMUL90 "_rot90")
+ (UNSPEC_VCMUL180 "_rot180")
+ (UNSPEC_VCMUL270 "_rot270")])
+
+(define_int_iterator VCMUL [UNSPEC_VCMUL UNSPEC_VCMUL90
+ UNSPEC_VCMUL180 UNSPEC_VCMUL270])
(define_int_attr simd32_op [(UNSPEC_QADD8 "qadd8") (UNSPEC_QSUB8 "qsub8")
(UNSPEC_SHADD8 "shadd8") (UNSPEC_SHSUB8 "shsub8")
])
;;
-;; [vcmulq_f])
+;; [vcmulq, vcmulq_rot90, vcmulq_rot180, vcmulq_rot270])
;;
-(define_insn "mve_vcmulq_f<mode>"
+(define_insn "mve_vcmulq<mve_rot><mode>"
[
(set (match_operand:MVE_0 0 "s_register_operand" "<earlyclobber_32>")
(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
(match_operand:MVE_0 2 "s_register_operand" "w")]
- VCMULQ_F))
+ VCMUL))
]
"TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vcmul.f%#<V_sz_elem> %q0, %q1, %q2, #0"
- [(set_attr "type" "mve_move")
-])
-
-;;
-;; [vcmulq_rot180_f])
-;;
-(define_insn "mve_vcmulq_rot180_f<mode>"
- [
- (set (match_operand:MVE_0 0 "s_register_operand" "<earlyclobber_32>")
- (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
- (match_operand:MVE_0 2 "s_register_operand" "w")]
- VCMULQ_ROT180_F))
- ]
- "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vcmul.f%#<V_sz_elem> %q0, %q1, %q2, #180"
- [(set_attr "type" "mve_move")
-])
-
-;;
-;; [vcmulq_rot270_f])
-;;
-(define_insn "mve_vcmulq_rot270_f<mode>"
- [
- (set (match_operand:MVE_0 0 "s_register_operand" "<earlyclobber_32>")
- (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
- (match_operand:MVE_0 2 "s_register_operand" "w")]
- VCMULQ_ROT270_F))
- ]
- "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vcmul.f%#<V_sz_elem> %q0, %q1, %q2, #270"
- [(set_attr "type" "mve_move")
-])
-
-;;
-;; [vcmulq_rot90_f])
-;;
-(define_insn "mve_vcmulq_rot90_f<mode>"
- [
- (set (match_operand:MVE_0 0 "s_register_operand" "<earlyclobber_32>")
- (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
- (match_operand:MVE_0 2 "s_register_operand" "w")]
- VCMULQ_ROT90_F))
- ]
- "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vcmul.f%#<V_sz_elem> %q0, %q1, %q2, #90"
+ "vcmul.f%#<V_sz_elem> %q0, %q1, %q2, #<rot>"
[(set_attr "type" "mve_move")
])
[(set_attr "type" "mve_move")
(set_attr "length""8")])
;;
-;; [vcmlaq_f])
-;;
-(define_insn "mve_vcmlaq_f<mode>"
- [
- (set (match_operand:MVE_0 0 "s_register_operand" "=w")
- (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
- (match_operand:MVE_0 2 "s_register_operand" "w")
- (match_operand:MVE_0 3 "s_register_operand" "w")]
- VCMLAQ_F))
- ]
- "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vcmla.f%#<V_sz_elem> %q0, %q2, %q3, #0"
- [(set_attr "type" "mve_move")
-])
-
-;;
-;; [vcmlaq_rot180_f])
+;; [vcmlaq, vcmlaq_rot90, vcmlaq_rot180, vcmlaq_rot270])
;;
-(define_insn "mve_vcmlaq_rot180_f<mode>"
+(define_insn "mve_vcmlaq<mve_rot><mode>"
[
- (set (match_operand:MVE_0 0 "s_register_operand" "=w")
- (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
- (match_operand:MVE_0 2 "s_register_operand" "w")
- (match_operand:MVE_0 3 "s_register_operand" "w")]
- VCMLAQ_ROT180_F))
+ (set (match_operand:MVE_0 0 "s_register_operand" "=w,w")
+ (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0,Dz")
+ (match_operand:MVE_0 2 "s_register_operand" "w,w")
+ (match_operand:MVE_0 3 "s_register_operand" "w,w")]
+ VCMLA))
]
"TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vcmla.f%#<V_sz_elem> %q0, %q2, %q3, #180"
- [(set_attr "type" "mve_move")
-])
-
-;;
-;; [vcmlaq_rot270_f])
-;;
-(define_insn "mve_vcmlaq_rot270_f<mode>"
- [
- (set (match_operand:MVE_0 0 "s_register_operand" "=w")
- (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
- (match_operand:MVE_0 2 "s_register_operand" "w")
- (match_operand:MVE_0 3 "s_register_operand" "w")]
- VCMLAQ_ROT270_F))
- ]
- "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vcmla.f%#<V_sz_elem> %q0, %q2, %q3, #270"
- [(set_attr "type" "mve_move")
-])
-
-;;
-;; [vcmlaq_rot90_f])
-;;
-(define_insn "mve_vcmlaq_rot90_f<mode>"
- [
- (set (match_operand:MVE_0 0 "s_register_operand" "=w")
- (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
- (match_operand:MVE_0 2 "s_register_operand" "w")
- (match_operand:MVE_0 3 "s_register_operand" "w")]
- VCMLAQ_ROT90_F))
- ]
- "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
- "vcmla.f%#<V_sz_elem> %q0, %q2, %q3, #90"
+ "@
+ vcmla.f%#<V_sz_elem> %q0, %q2, %q3, #<rot>
+ vcmul.f%#<V_sz_elem> %q0, %q2, %q3, #<rot>"
[(set_attr "type" "mve_move")
])
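The two constraint alternatives of the new mve_vcmlaq<mve_rot><mode> pattern encode the identity that a complex multiply-accumulate into a zero accumulator (the "Dz" alternative) is just a plain complex multiply, which is why that alternative emits vcmul rather than vcmla. A hypothetical sketch of the same identity in terms of the public intrinsics (illustrative only, not part of the patch; the helper name is made up):

#include <arm_mve.h>

/* With a zero accumulator, vcmla #90 produces the same value as
   vcmul #90 on the same operands.  */
float32x4_t
cmla_rot90_into_zero (float32x4_t a, float32x4_t b)
{
  float32x4_t zero = __arm_vdupq_n_f32 (0.0f);
  return __arm_vcmlaq_rot90_f32 (zero, a, b);  /* == __arm_vcmulq_rot90_f32 (a, b) */
}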
UNSPEC_VCMLA90
UNSPEC_VCMLA180
UNSPEC_VCMLA270
+ UNSPEC_VCMUL
+ UNSPEC_VCMUL90
+ UNSPEC_VCMUL180
+ UNSPEC_VCMUL270
UNSPEC_MATMUL_S
UNSPEC_MATMUL_U
UNSPEC_MATMUL_US
VCMPLTQ_N_F
VCMPNEQ_F
VCMPNEQ_N_F
- VCMULQ_F
- VCMULQ_ROT180_F
- VCMULQ_ROT270_F
- VCMULQ_ROT90_F
VMAXNMAQ_F
VMAXNMAVQ_F
VMAXNMQ_F
VMLSLDAVAQ_S
VQSHRUNBQ_N_S
VQRSHRUNTQ_N_S
- VCMLAQ_F
VMINNMAQ_M_F
VFMASQ_N_F
VDUPQ_M_N_F
VADDLVAQ_P_S
VQMOVUNBQ_M_S
VCMPLEQ_M_F
- VCMLAQ_ROT180_F
VMLSLDAVAXQ_S
VRNDXQ_M_F
VFMSQ_F
VMINNMVQ_P_F
VMAXNMVQ_P_F
VPSELQ_F
- VCMLAQ_ROT90_F
VQMOVUNTQ_M_S
VREV64Q_M_F
VNEGQ_M_F
VRMLALDAVHQ_P_S
VRMLALDAVHXQ_P_S
VCMPEQQ_M_N_F
- VCMLAQ_ROT270_F
VMAXNMAQ_M_F
VRNDQ_M_F
VMLALDAVQ_P_U