From 3cc4e183f12e962678c70f2c3d476c748a82c29e Mon Sep 17 00:00:00 2001
From: Tamar Christina
Date: Sun, 13 Dec 2020 16:49:55 +0000
Subject: [PATCH] Revert "Arm: Add NEON and MVE RTL patterns for Complex
 Addition, Multiply and FMA."

This reverts commit 3b8a82f97dd48e153ce93b317c44254839e11461.

Has a dependency on the AArch64 patch which hasn't been approved yet.

---
 gcc/config/arm/arm_mve.h            |  70 ++++++-----
 gcc/config/arm/arm_mve_builtins.def |  26 +++--
 gcc/config/arm/constraints.md       |   2 +-
 gcc/config/arm/iterators.md         |  44 +------
 gcc/config/arm/mve.md               | 172 +++++++++++++++++++++++-----
 gcc/config/arm/neon.md              |  20 ----
 gcc/config/arm/unspecs.md           |  20 +++-
 gcc/config/arm/vec-common.md        |  70 -----------
 8 files changed, 208 insertions(+), 216 deletions(-)

diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index 45014621f25..6c0d1e2e634 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -3981,16 +3981,14 @@ __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_u8 (uint8x16_t __a, uint8x16_t __b)
 {
-  return (uint8x16_t)
-    __builtin_mve_vcaddq_rot90v16qi ((int8x16_t)__a, (int8x16_t)__b);
+  return __builtin_mve_vcaddq_rot90_uv16qi (__a, __b);
 }
 
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot270_u8 (uint8x16_t __a, uint8x16_t __b)
 {
-  return (uint8x16_t)
-    __builtin_mve_vcaddq_rot270v16qi ((int8x16_t)__a, (int8x16_t)__b);
+  return __builtin_mve_vcaddq_rot270_uv16qi (__a, __b);
 }
 
 __extension__ extern __inline uint8x16_t
@@ -4522,14 +4520,14 @@ __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
 {
-  return __builtin_mve_vcaddq_rot90v16qi (__a, __b);
+  return __builtin_mve_vcaddq_rot90_sv16qi (__a, __b);
 }
 
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
 {
-  return __builtin_mve_vcaddq_rot270v16qi (__a, __b);
+  return __builtin_mve_vcaddq_rot270_sv16qi (__a, __b);
 }
 
 __extension__ extern __inline int8x16_t
@@ -4823,16 +4821,14 @@ __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_u16 (uint16x8_t __a, uint16x8_t __b)
 {
-  return (uint16x8_t)
-    __builtin_mve_vcaddq_rot90v8hi ((int16x8_t)__a, (int16x8_t)__b);
+  return __builtin_mve_vcaddq_rot90_uv8hi (__a, __b);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot270_u16 (uint16x8_t __a, uint16x8_t __b)
 {
-  return (uint16x8_t)
-    __builtin_mve_vcaddq_rot270v8hi ((int16x8_t)__a, (int16x8_t)__b);
+  return __builtin_mve_vcaddq_rot270_uv8hi (__a, __b);
 }
 
 __extension__ extern __inline uint16x8_t
@@ -5364,14 +5360,14 @@ __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
 {
-  return __builtin_mve_vcaddq_rot90v8hi (__a, __b);
+  return __builtin_mve_vcaddq_rot90_sv8hi (__a, __b);
 }
 
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
 {
-  return __builtin_mve_vcaddq_rot270v8hi (__a, __b);
+  return __builtin_mve_vcaddq_rot270_sv8hi (__a, __b);
 }
 
 __extension__ extern __inline int16x8_t
@@ -5665,16 +5661,14 @@ __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_u32 (uint32x4_t __a, uint32x4_t __b)
 {
-  return (uint32x4_t)
-    __builtin_mve_vcaddq_rot90v4si ((int32x4_t)__a, (int32x4_t)__b);
+  return __builtin_mve_vcaddq_rot90_uv4si (__a, __b);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot270_u32 (uint32x4_t __a, uint32x4_t __b)
 {
-  return (uint32x4_t)
-    __builtin_mve_vcaddq_rot270v4si ((int32x4_t)__a, (int32x4_t)__b);
+  return __builtin_mve_vcaddq_rot270_uv4si (__a, __b);
 }
 
 __extension__ extern __inline uint32x4_t
@@ -6206,14 +6200,14 @@ __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
 {
-  return __builtin_mve_vcaddq_rot90v4si (__a, __b);
+  return __builtin_mve_vcaddq_rot90_sv4si (__a, __b);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
 {
-  return __builtin_mve_vcaddq_rot270v4si (__a, __b);
+  return __builtin_mve_vcaddq_rot270_sv4si (__a, __b);
 }
 
 __extension__ extern __inline int32x4_t
@@ -17348,42 +17342,42 @@ __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_rot90_f16 (float16x8_t __a, float16x8_t __b)
 {
-  return __builtin_mve_vcmulq_rot90v8hf (__a, __b);
+  return __builtin_mve_vcmulq_rot90_fv8hf (__a, __b);
 }
 
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_rot270_f16 (float16x8_t __a, float16x8_t __b)
 {
-  return __builtin_mve_vcmulq_rot270v8hf (__a, __b);
+  return __builtin_mve_vcmulq_rot270_fv8hf (__a, __b);
 }
 
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_rot180_f16 (float16x8_t __a, float16x8_t __b)
 {
-  return __builtin_mve_vcmulq_rot180v8hf (__a, __b);
+  return __builtin_mve_vcmulq_rot180_fv8hf (__a, __b);
 }
 
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_f16 (float16x8_t __a, float16x8_t __b)
 {
-  return __builtin_mve_vcmulqv8hf (__a, __b);
+  return __builtin_mve_vcmulq_fv8hf (__a, __b);
 }
 
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_f16 (float16x8_t __a, float16x8_t __b)
 {
-  return __builtin_mve_vcaddq_rot90v8hf (__a, __b);
+  return __builtin_mve_vcaddq_rot90_fv8hf (__a, __b);
 }
 
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot270_f16 (float16x8_t __a, float16x8_t __b)
 {
-  return __builtin_mve_vcaddq_rot270v8hf (__a, __b);
+  return __builtin_mve_vcaddq_rot270_fv8hf (__a, __b);
 }
 
 __extension__ extern __inline float16x8_t
@@ -17600,42 +17594,42 @@ __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_rot90_f32 (float32x4_t __a, float32x4_t __b)
 {
-  return __builtin_mve_vcmulq_rot90v4sf (__a, __b);
+  return __builtin_mve_vcmulq_rot90_fv4sf (__a, __b);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_rot270_f32 (float32x4_t __a, float32x4_t __b)
 {
-  return __builtin_mve_vcmulq_rot270v4sf (__a, __b);
+  return __builtin_mve_vcmulq_rot270_fv4sf (__a, __b);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_rot180_f32 (float32x4_t __a, float32x4_t __b)
 {
-  return __builtin_mve_vcmulq_rot180v4sf (__a, __b);
+  return __builtin_mve_vcmulq_rot180_fv4sf (__a, __b);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmulq_f32 (float32x4_t __a, float32x4_t __b)
 {
-  return __builtin_mve_vcmulqv4sf (__a, __b);
+  return __builtin_mve_vcmulq_fv4sf (__a, __b);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot90_f32 (float32x4_t __a, float32x4_t __b)
 {
-  return __builtin_mve_vcaddq_rot90v4sf (__a, __b);
+  return __builtin_mve_vcaddq_rot90_fv4sf (__a, __b);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcaddq_rot270_f32 (float32x4_t __a, float32x4_t __b)
 {
-  return __builtin_mve_vcaddq_rot270v4sf (__a, __b);
+  return __builtin_mve_vcaddq_rot270_fv4sf (__a, __b);
 }
 
 __extension__ extern __inline float32x4_t
@@ -17790,28 +17784,28 @@ __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmlaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
 {
-  return __builtin_mve_vcmlaqv8hf (__a, __b, __c);
+  return __builtin_mve_vcmlaq_fv8hf (__a, __b, __c);
 }
 
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmlaq_rot180_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
 {
-  return __builtin_mve_vcmlaq_rot180v8hf (__a, __b, __c);
+  return __builtin_mve_vcmlaq_rot180_fv8hf (__a, __b, __c);
 }
 
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmlaq_rot270_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
 {
-  return __builtin_mve_vcmlaq_rot270v8hf (__a, __b, __c);
+  return __builtin_mve_vcmlaq_rot270_fv8hf (__a, __b, __c);
 }
 
 __extension__ extern __inline float16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmlaq_rot90_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
 {
-  return __builtin_mve_vcmlaq_rot90v8hf (__a, __b, __c);
+  return __builtin_mve_vcmlaq_rot90_fv8hf (__a, __b, __c);
 }
 
 __extension__ extern __inline float16x8_t
@@ -18098,28 +18092,28 @@ __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
 {
-  return __builtin_mve_vcmlaqv4sf (__a, __b, __c);
+  return __builtin_mve_vcmlaq_fv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmlaq_rot180_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
 {
-  return __builtin_mve_vcmlaq_rot180v4sf (__a, __b, __c);
+  return __builtin_mve_vcmlaq_rot180_fv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmlaq_rot270_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
 {
-  return __builtin_mve_vcmlaq_rot270v4sf (__a, __b, __c);
+  return __builtin_mve_vcmlaq_rot270_fv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vcmlaq_rot90_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
 {
-  return __builtin_mve_vcmlaq_rot90v4sf (__a, __b, __c);
+  return __builtin_mve_vcmlaq_rot90_fv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline float32x4_t
diff --git a/gcc/config/arm/arm_mve_builtins.def b/gcc/config/arm/arm_mve_builtins.def
index 56b652fff0a..f38926ffd8e 100644
--- a/gcc/config/arm/arm_mve_builtins.def
+++ b/gcc/config/arm/arm_mve_builtins.def
@@ -125,6 +125,8 @@ VAR3 (BINOP_UNONE_UNONE_UNONE, vcmpeqq_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vcmpeqq_n_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vcmpcsq_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vcmpcsq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcaddq_rot90_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcaddq_rot270_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vbicq_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vandq_u, v16qi, v8hi, v4si)
 VAR3 (BINOP_UNONE_UNONE_UNONE, vaddvq_p_u, v16qi, v8hi, v4si)
@@ -200,6 +202,8 @@ VAR3 (BINOP_NONE_NONE_NONE, vhcaddq_rot270_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vhaddq_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vhaddq_n_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, veorq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vcaddq_rot90_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vcaddq_rot270_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vbrsrq_n_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vbicq_s, v16qi, v8hi, v4si)
 VAR3 (BINOP_NONE_NONE_NONE, vandq_s, v16qi, v8hi, v4si)
@@ -260,6 +264,12 @@ VAR2 (BINOP_NONE_NONE_NONE, vmaxnmq_f, v8hf, v4sf)
 VAR2 (BINOP_NONE_NONE_NONE, vmaxnmavq_f, v8hf, v4sf)
 VAR2 (BINOP_NONE_NONE_NONE, vmaxnmaq_f, v8hf, v4sf)
 VAR2 (BINOP_NONE_NONE_NONE, veorq_f, v8hf, v4sf)
+VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot90_f, v8hf, v4sf)
+VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot270_f, v8hf, v4sf)
+VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot180_f, v8hf, v4sf)
+VAR2 (BINOP_NONE_NONE_NONE, vcmulq_f, v8hf, v4sf)
+VAR2 (BINOP_NONE_NONE_NONE, vcaddq_rot90_f, v8hf, v4sf)
+VAR2 (BINOP_NONE_NONE_NONE, vcaddq_rot270_f, v8hf, v4sf)
 VAR2 (BINOP_NONE_NONE_NONE, vbicq_f, v8hf, v4sf)
 VAR2 (BINOP_NONE_NONE_NONE, vandq_f, v8hf, v4sf)
 VAR2 (BINOP_NONE_NONE_NONE, vaddq_n_f, v8hf, v4sf)
@@ -460,6 +470,10 @@ VAR2 (TERNOP_NONE_NONE_NONE_NONE, vfmsq_f, v8hf, v4sf)
 VAR2 (TERNOP_NONE_NONE_NONE_NONE, vfmasq_n_f, v8hf, v4sf)
 VAR2 (TERNOP_NONE_NONE_NONE_NONE, vfmaq_n_f, v8hf, v4sf)
 VAR2 (TERNOP_NONE_NONE_NONE_NONE, vfmaq_f, v8hf, v4sf)
+VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot90_f, v8hf, v4sf)
+VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot270_f, v8hf, v4sf)
+VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot180_f, v8hf, v4sf)
+VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_f, v8hf, v4sf)
 VAR2 (TERNOP_NONE_NONE_NONE_IMM, vshrntq_n_s, v8hi, v4si)
 VAR2 (TERNOP_NONE_NONE_NONE_IMM, vshrnbq_n_s, v8hi, v4si)
 VAR2 (TERNOP_NONE_NONE_NONE_IMM, vrshrntq_n_s, v8hi, v4si)
@@ -878,15 +892,3 @@ VAR3 (QUADOP_NONE_NONE_UNONE_IMM_UNONE, vshlcq_m_vec_s, v16qi, v8hi, v4si)
 VAR3 (QUADOP_NONE_NONE_UNONE_IMM_UNONE, vshlcq_m_carry_s, v16qi, v8hi, v4si)
 VAR3 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vshlcq_m_vec_u, v16qi, v8hi, v4si)
 VAR3 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vshlcq_m_carry_u, v16qi, v8hi, v4si)
-
-/* optabs without any suffixes.  */
-VAR5 (BINOP_NONE_NONE_NONE, vcaddq_rot90, v16qi, v8hi, v4si, v8hf, v4sf)
-VAR5 (BINOP_NONE_NONE_NONE, vcaddq_rot270, v16qi, v8hi, v4si, v8hf, v4sf)
-VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot90, v8hf, v4sf)
-VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot270, v8hf, v4sf)
-VAR2 (BINOP_NONE_NONE_NONE, vcmulq_rot180, v8hf, v4sf)
-VAR2 (BINOP_NONE_NONE_NONE, vcmulq, v8hf, v4sf)
-VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot90, v8hf, v4sf)
-VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot270, v8hf, v4sf)
-VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq_rot180, v8hf, v4sf)
-VAR2 (TERNOP_NONE_NONE_NONE_NONE, vcmlaq, v8hf, v4sf)
diff --git a/gcc/config/arm/constraints.md b/gcc/config/arm/constraints.md
index 6ebddb95b4f..789e3332abb 100644
--- a/gcc/config/arm/constraints.md
+++ b/gcc/config/arm/constraints.md
@@ -310,7 +310,7 @@
  "@internal
   In ARM/Thumb-2 state a vector of constant zeros."
  (and (match_code "const_vector")
-      (match_test "(TARGET_NEON || TARGET_HAVE_MVE) && op == CONST0_RTX (mode)")))
+      (match_test "TARGET_NEON && op == CONST0_RTX (mode)")))
 
 (define_constraint "Da"
  "@internal
diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index 5fc75cb8d07..5fcb7afe565 100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -1177,40 +1177,11 @@
 
 (define_int_attr rot [(UNSPEC_VCADD90 "90")
                       (UNSPEC_VCADD270 "270")
-                      (UNSPEC_VCMLS "0")
                       (UNSPEC_VCMLA "0")
                       (UNSPEC_VCMLA90 "90")
                       (UNSPEC_VCMLA180 "180")
                       (UNSPEC_VCMLA270 "270")])
 
-(define_int_attr mve_rotsplit1 [(UNSPEC_VCMLA "")
-                                (UNSPEC_VCMLA180 "")
-                                (UNSPEC_VCMUL "")
-                                (UNSPEC_VCMUL180 "")
-                                (UNSPEC_VCMLS "_rot270")
-                                (UNSPEC_VCMLS180 "_rot90")])
-
-(define_int_attr mve_rotsplit2 [(UNSPEC_VCMLA "_rot90")
-                                (UNSPEC_VCMLA180 "_rot270")
-                                (UNSPEC_VCMUL "_rot90")
-                                (UNSPEC_VCMUL180 "_rot270")
-                                (UNSPEC_VCMLS "_rot180")
-                                (UNSPEC_VCMLS180 "_rot180")])
-
-(define_int_attr mve_rot [(UNSPEC_VCADD90 "_rot90")
-                          (UNSPEC_VCADD270 "_rot270")
-                          (UNSPEC_VCMLA "")
-                          (UNSPEC_VCMLA90 "_rot90")
-                          (UNSPEC_VCMLA180 "_rot180")
-                          (UNSPEC_VCMLA270 "_rot270")
-                          (UNSPEC_VCMUL "")
-                          (UNSPEC_VCMUL90 "_rot90")
-                          (UNSPEC_VCMUL180 "_rot180")
-                          (UNSPEC_VCMUL270 "_rot270")])
-
-(define_int_iterator VCMUL [UNSPEC_VCMUL UNSPEC_VCMUL90
-                            UNSPEC_VCMUL180 UNSPEC_VCMUL270])
-
 (define_int_attr simd32_op [(UNSPEC_QADD8 "qadd8") (UNSPEC_QSUB8 "qsub8")
                             (UNSPEC_SHADD8 "shadd8") (UNSPEC_SHSUB8 "shsub8")
                             (UNSPEC_UHADD8 "uhadd8") (UNSPEC_UHSUB8 "uhsub8")
@@ -1262,8 +1233,9 @@
                        (VABDQ_M_S "s") (VABDQ_M_U "u") (VABDQ_S "s")
                        (VABDQ_U "u") (VADDQ_N_S "s") (VADDQ_N_U "u")
                        (VADDVQ_P_S "s") (VADDVQ_P_U "u") (VBICQ_S "s") (VBICQ_U "u")
-                       (VBRSRQ_N_S "s") (VBRSRQ_N_U "u")
-                       (VCMPEQQ_S "s") (VCMPEQQ_U "u")
+                       (VBRSRQ_N_S "s") (VBRSRQ_N_U "u") (VCADDQ_ROT270_S "s")
+                       (VCADDQ_ROT270_U "u") (VCADDQ_ROT90_S "s")
+                       (VCMPEQQ_S "s") (VCMPEQQ_U "u") (VCADDQ_ROT90_U "u")
                        (VCMPEQQ_N_S "s") (VCMPEQQ_N_U "u") (VCMPNEQ_N_S "s")
                        (VCMPNEQ_N_U "u") (VEORQ_S "s") (VEORQ_U "u")
                        (VHADDQ_N_S "s") (VHADDQ_N_U "u") (VHADDQ_S "s")
@@ -1530,6 +1502,8 @@
 (define_int_iterator VADDVQ_P [VADDVQ_P_U VADDVQ_P_S])
 (define_int_iterator VBICQ [VBICQ_S VBICQ_U])
 (define_int_iterator VBRSRQ_N [VBRSRQ_N_U VBRSRQ_N_S])
+(define_int_iterator VCADDQ_ROT270 [VCADDQ_ROT270_S VCADDQ_ROT270_U])
+(define_int_iterator VCADDQ_ROT90 [VCADDQ_ROT90_U VCADDQ_ROT90_S])
 (define_int_iterator VCMPEQQ [VCMPEQQ_U VCMPEQQ_S])
 (define_int_iterator VCMPEQQ_N [VCMPEQQ_N_S VCMPEQQ_N_U])
 (define_int_iterator VCMPNEQ_N [VCMPNEQ_N_U VCMPNEQ_N_S])
@@ -1738,11 +1712,3 @@
 (define_int_iterator UQRSHLLQ [UQRSHLL_64 UQRSHLL_48])
 (define_int_iterator SQRSHRLQ [SQRSHRL_64 SQRSHRL_48])
 (define_int_iterator VSHLCQ_M [VSHLCQ_M_S VSHLCQ_M_U])
-;; Define iterators for VCMLA operations
-(define_int_iterator VCMLA_OP [UNSPEC_VCMLA
-                               UNSPEC_VCMLA180
-                               UNSPEC_VCMLS])
-
-;; Define iterators for VCMLA operations as MUL
-(define_int_iterator VCMUL_OP [UNSPEC_VCMUL
-                               UNSPEC_VCMUL180])
diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index d29f387b5bf..4b2e46afc19 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -950,28 +950,34 @@
 ])
 
 ;;
-;; [vcaddq, vcaddq_rot90, vcadd_rot180, vcadd_rot270])
+;; [vcaddq_rot270_s, vcaddq_rot270_u])
 ;;
-(define_insn "mve_vcaddq"
+(define_insn "mve_vcaddq_rot270_"
   [
    (set (match_operand:MVE_2 0 "s_register_operand" "")
        (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
                       (match_operand:MVE_2 2 "s_register_operand" "w")]
-        VCADD))
+        VCADDQ_ROT270))
   ]
   "TARGET_HAVE_MVE"
-  "vcadd.i%# %q0, %q1, %q2, #"
+  "vcadd.i%# %q0, %q1, %q2, #270"
   [(set_attr "type" "mve_move")
 ])
 
-;; Auto vectorizer pattern for int vcadd
-(define_expand "cadd3"
-  [(set (match_operand:MVE_2 0 "register_operand")
-       (unspec:MVE_2 [(match_operand:MVE_2 1 "register_operand")
-                      (match_operand:MVE_2 2 "register_operand")]
-        VCADD))]
-  "TARGET_HAVE_MVE && !BYTES_BIG_ENDIAN"
-)
+;;
+;; [vcaddq_rot90_u, vcaddq_rot90_s])
+;;
+(define_insn "mve_vcaddq_rot90_"
+  [
+   (set (match_operand:MVE_2 0 "s_register_operand" "")
+       (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+                      (match_operand:MVE_2 2 "s_register_operand" "w")]
+        VCADDQ_ROT90))
+  ]
+  "TARGET_HAVE_MVE"
+  "vcadd.i%# %q0, %q1, %q2, #90"
+  [(set_attr "type" "mve_move")
+])
 
 ;;
 ;; [vcmpcsq_n_u])
@@ -2078,17 +2084,32 @@
 ])
 
 ;;
-;; [vcaddq, vcaddq_rot90, vcadd_rot180, vcadd_rot270])
+;; [vcaddq_rot270_f])
+;;
+(define_insn "mve_vcaddq_rot270_f"
+  [
+   (set (match_operand:MVE_0 0 "s_register_operand" "")
+       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
+                      (match_operand:MVE_0 2 "s_register_operand" "w")]
+        VCADDQ_ROT270_F))
+  ]
+  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+  "vcadd.f%# %q0, %q1, %q2, #270"
+  [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcaddq_rot90_f])
 ;;
-(define_insn "mve_vcaddq"
+(define_insn "mve_vcaddq_rot90_f"
   [
    (set (match_operand:MVE_0 0 "s_register_operand" "")
       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
                      (match_operand:MVE_0 2 "s_register_operand" "w")]
-       VCADD))
+       VCADDQ_ROT90_F))
   ]
   "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vcadd.f%# %q0, %q1, %q2, #"
+  "vcadd.f%# %q0, %q1, %q2, #90"
   [(set_attr "type" "mve_move")
 ])
 
@@ -2273,17 +2294,62 @@
 ])
 
 ;;
-;; [vcmulq, vcmulq_rot90, vcmulq_rot180, vcmulq_rot270])
+;; [vcmulq_f])
+;;
+(define_insn "mve_vcmulq_f"
+  [
+   (set (match_operand:MVE_0 0 "s_register_operand" "")
+       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
+                      (match_operand:MVE_0 2 "s_register_operand" "w")]
+        VCMULQ_F))
+  ]
+  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+  "vcmul.f%# %q0, %q1, %q2, #0"
+  [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmulq_rot180_f])
+;;
+(define_insn "mve_vcmulq_rot180_f"
+  [
+   (set (match_operand:MVE_0 0 "s_register_operand" "")
+       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
+                      (match_operand:MVE_0 2 "s_register_operand" "w")]
+        VCMULQ_ROT180_F))
+  ]
+  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+  "vcmul.f%# %q0, %q1, %q2, #180"
+  [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmulq_rot270_f])
+;;
+(define_insn "mve_vcmulq_rot270_f"
+  [
+   (set (match_operand:MVE_0 0 "s_register_operand" "")
+       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
+                      (match_operand:MVE_0 2 "s_register_operand" "w")]
+        VCMULQ_ROT270_F))
+  ]
+  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+  "vcmul.f%# %q0, %q1, %q2, #270"
+  [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmulq_rot90_f])
 ;;
-(define_insn "mve_vcmulq"
+(define_insn "mve_vcmulq_rot90_f"
   [
    (set (match_operand:MVE_0 0 "s_register_operand" "")
       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
                     (match_operand:MVE_0 2 "s_register_operand" "w")]
-       VCMUL))
+       VCMULQ_ROT90_F))
   ]
   "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vcmul.f%# %q0, %q1, %q2, #"
+  "vcmul.f%# %q0, %q1, %q2, #90"
   [(set_attr "type" "mve_move")
 ])
 
@@ -4056,20 +4122,66 @@
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
 ;;
-;; [vcmlaq, vcmlaq_rot90, vcmlaq_rot180, vcmlaq_rot270])
+;; [vcmlaq_f])
 ;;
-(define_insn "mve_vcmlaq"
+(define_insn "mve_vcmlaq_f"
   [
-  (set (match_operand:MVE_0 0 "s_register_operand" "=w,w")
-       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0,Dz")
-                      (match_operand:MVE_0 2 "s_register_operand" "w,w")
-                      (match_operand:MVE_0 3 "s_register_operand" "w,w")]
-        VCMLA))
+   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
+       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
+                      (match_operand:MVE_0 2 "s_register_operand" "w")
+                      (match_operand:MVE_0 3 "s_register_operand" "w")]
+        VCMLAQ_F))
   ]
   "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "@
-   vcmla.f%# %q0, %q2, %q3, #
-   vcmul.f%# %q0, %q2, %q3, #"
+  "vcmla.f%# %q0, %q2, %q3, #0"
+  [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmlaq_rot180_f])
+;;
+(define_insn "mve_vcmlaq_rot180_f"
+  [
+   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
+       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
+                      (match_operand:MVE_0 2 "s_register_operand" "w")
+                      (match_operand:MVE_0 3 "s_register_operand" "w")]
+        VCMLAQ_ROT180_F))
+  ]
+  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+  "vcmla.f%# %q0, %q2, %q3, #180"
+  [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmlaq_rot270_f])
+;;
+(define_insn "mve_vcmlaq_rot270_f"
+  [
+   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
+       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
+                      (match_operand:MVE_0 2 "s_register_operand" "w")
+                      (match_operand:MVE_0 3 "s_register_operand" "w")]
+        VCMLAQ_ROT270_F))
+  ]
+  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+  "vcmla.f%# %q0, %q2, %q3, #270"
+  [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmlaq_rot90_f])
+;;
+(define_insn "mve_vcmlaq_rot90_f"
+  [
+   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
+       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
+                      (match_operand:MVE_0 2 "s_register_operand" "w")
+                      (match_operand:MVE_0 3 "s_register_operand" "w")]
+        VCMLAQ_ROT90_F))
+  ]
+  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+  "vcmla.f%# %q0, %q2, %q3, #90"
  [(set_attr "type" "mve_move")
 ])
 
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index 487c0a168b2..669c34da4e0 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -3030,26 +3030,6 @@
   [(set_attr "type" "neon_fcmla")]
 )
 
-;; The complex mul operations always need to expand to two instructions.
-;; The first operation does half the computation and the second does the
-;; remainder.  Because of this, expand early.
-(define_expand "cmul3"
-  [(set (match_operand:VDF 0 "register_operand")
-       (unspec:VDF [(match_operand:VDF 1 "register_operand")
-                    (match_operand:VDF 2 "register_operand")]
-        VCMUL_OP))]
-  "TARGET_COMPLEX && !BYTES_BIG_ENDIAN"
-{
-  rtx tmp = gen_reg_rtx (mode);
-  rtx res1 = gen_reg_rtx (mode);
-  emit_move_insn (tmp, CONST0_RTX (mode));
-  emit_insn (gen_neon_vcmla (res1, tmp,
-                             operands[1], operands[2]));
-  emit_insn (gen_neon_vcmla (operands[0], res1,
-                             operands[1], operands[2]));
-  DONE;
-})
-
 ;; These instructions map to the __builtins for the Dot Product operations.
 (define_insn "neon_dot"
diff --git a/gcc/config/arm/unspecs.md b/gcc/config/arm/unspecs.md
index 8bb00602103..c2076c9ce6f 100644
--- a/gcc/config/arm/unspecs.md
+++ b/gcc/config/arm/unspecs.md
@@ -510,12 +510,6 @@
   UNSPEC_VCMLA90
   UNSPEC_VCMLA180
   UNSPEC_VCMLA270
-  UNSPEC_VCMUL
-  UNSPEC_VCMUL90
-  UNSPEC_VCMUL180
-  UNSPEC_VCMUL270
-  UNSPEC_VCMLS
-  UNSPEC_VCMLS180
   UNSPEC_MATMUL_S
   UNSPEC_MATMUL_U
   UNSPEC_MATMUL_US
@@ -609,6 +603,8 @@
   VADDVQ_P_S
   VBICQ_S
   VBRSRQ_N_S
+  VCADDQ_ROT270_S
+  VCADDQ_ROT90_S
   VCMPEQQ_S
   VCMPEQQ_N_S
   VCMPNEQ_N_S
@@ -652,6 +648,8 @@
   VADDVQ_P_U
   VBICQ_U
   VBRSRQ_N_U
+  VCADDQ_ROT270_U
+  VCADDQ_ROT90_U
   VCMPEQQ_U
   VCMPEQQ_N_U
   VCMPNEQ_N_U
@@ -720,6 +718,8 @@
   VABDQ_F
   VADDQ_N_F
   VBICQ_F
+  VCADDQ_ROT270_F
+  VCADDQ_ROT90_F
   VCMPEQQ_F
   VCMPEQQ_N_F
   VCMPGEQ_F
@@ -732,6 +732,10 @@
   VCMPLTQ_N_F
   VCMPNEQ_F
   VCMPNEQ_N_F
+  VCMULQ_F
+  VCMULQ_ROT180_F
+  VCMULQ_ROT270_F
+  VCMULQ_ROT90_F
   VEORQ_F
   VMAXNMAQ_F
   VMAXNMAVQ_F
@@ -904,6 +908,7 @@
   VMLSLDAVAQ_S
   VQSHRUNBQ_N_S
   VQRSHRUNTQ_N_S
+  VCMLAQ_F
   VMINNMAQ_M_F
   VFMASQ_N_F
   VDUPQ_M_N_F
@@ -925,12 +930,14 @@
   VADDLVAQ_P_S
   VQMOVUNBQ_M_S
   VCMPLEQ_M_F
+  VCMLAQ_ROT180_F
   VMLSLDAVAXQ_S
   VRNDXQ_M_F
   VFMSQ_F
   VMINNMVQ_P_F
   VMAXNMVQ_P_F
   VPSELQ_F
+  VCMLAQ_ROT90_F
   VQMOVUNTQ_M_S
   VREV64Q_M_F
   VNEGQ_M_F
@@ -943,6 +950,7 @@
   VRMLALDAVHQ_P_S
   VRMLALDAVHXQ_P_S
   VCMPEQQ_M_N_F
+  VCMLAQ_ROT270_F
   VMAXNMAQ_M_F
   VRNDQ_M_F
   VMLALDAVQ_P_U
diff --git a/gcc/config/arm/vec-common.md b/gcc/config/arm/vec-common.md
index 784305955ee..8d9c89c5b2b 100644
--- a/gcc/config/arm/vec-common.md
+++ b/gcc/config/arm/vec-common.md
@@ -186,73 +186,3 @@
            (match_operand:VDQ 2 "neon_logic_op2" "")))]
   "ARM_HAVE__ARITH"
 )
-
-(define_expand "cadd3"
-  [(set (match_operand:VF 0 "register_operand")
-       (unspec:VF [(match_operand:VF 1 "register_operand")
-                   (match_operand:VF 2 "register_operand")]
-        VCADD))]
-  "(TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT
-                       && ARM_HAVE__ARITH)) && !BYTES_BIG_ENDIAN"
-)
-
-;; The complex mul operations always need to expand to two instructions.
-;; The first operation does half the computation and the second does the
-;; remainder.  Because of this, expand early.
-(define_expand "cmul3"
-  [(set (match_operand:VQ_HSF 0 "register_operand")
-       (unspec:VQ_HSF [(match_operand:VQ_HSF 1 "register_operand")
-                       (match_operand:VQ_HSF 2 "register_operand")]
-        VCMUL_OP))]
-  "(TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT))
-   && !BYTES_BIG_ENDIAN"
-{
-  rtx res1 = gen_reg_rtx (mode);
-  if (TARGET_COMPLEX)
-    {
-      rtx tmp = gen_reg_rtx (mode);
-      emit_move_insn (tmp, CONST0_RTX (mode));
-      emit_insn (gen_neon_vcmla (res1, tmp,
-                                 operands[1], operands[2]));
-      emit_insn (gen_neon_vcmla (operands[0], res1,
-                                 operands[1], operands[2]));
-    }
-  else
-    {
-      emit_insn (gen_mve_vcmulq (operands[0], operands[1],
-                                 operands[2]));
-      emit_insn (gen_mve_vcmulq (operands[0], operands[1],
-                                 operands[2]));
-    }
-  DONE;
-})
-
-(define_expand "arm_vcmla"
-  [(set (match_operand:VF 0 "register_operand")
-       (plus:VF (match_operand:VF 1 "register_operand")
-                (unspec:VF [(match_operand:VF 2 "register_operand")
-                            (match_operand:VF 3 "register_operand")]
-                 VCMLA)))]
-  "(TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT
-                       && ARM_HAVE__ARITH)) && !BYTES_BIG_ENDIAN"
-)
-
-;; The complex mla/mls operations always need to expand to two instructions.
-;; The first operation does half the computation and the second does the
-;; remainder.  Because of this, expand early.
-(define_expand "cml4"
-  [(set (match_operand:VF 0 "register_operand")
-       (plus:VF (match_operand:VF 1 "register_operand")
-                (unspec:VF [(match_operand:VF 2 "register_operand")
-                            (match_operand:VF 3 "register_operand")]
-                 VCMLA_OP)))]
-  "(TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT
-                       && ARM_HAVE__ARITH)) && !BYTES_BIG_ENDIAN"
-{
-  rtx tmp = gen_reg_rtx (mode);
-  emit_insn (gen_arm_vcmla (tmp, operands[1],
-                            operands[2], operands[3]));
-  emit_insn (gen_arm_vcmla (operands[0], tmp,
-                            operands[2], operands[3]));
-  DONE;
-})
-- 
2.30.2