From 0b8393221177617f19e7c5c5c692b8c59f85fffb Mon Sep 17 00:00:00 2001
From: Wilco Dijkstra
Date: Fri, 6 Mar 2020 18:29:02 +0000
Subject: [PATCH] [AArch64] Use intrinsics for widening multiplies (PR91598)

Inline assembler instructions don't have latency info and the scheduler
does not attempt to schedule them at all - it does not even honor
latencies of asm source operands.  As a result, SIMD intrinsics which are
implemented using inline assembler perform very poorly, particularly on
in-order cores.  Add new patterns and intrinsics for widening multiplies,
which results in a 63% speedup for the example in the PR, thus fixing the
reported regression.

gcc/
	PR target/91598
	* config/aarch64/aarch64-builtins.c (TYPES_TERNOPU_LANE): Add define.
	* config/aarch64/aarch64-simd.md
	(aarch64_vec_<su>mult_lane<Qlane>): Add new insn for widening lane mul.
	(aarch64_vec_<su>mlal_lane<Qlane>): Likewise.
	* config/aarch64/aarch64-simd-builtins.def: Add intrinsics.
	* config/aarch64/arm_neon.h:
	(vmlal_lane_s16): Expand using intrinsics rather than inline asm.
	(vmlal_lane_u16): Likewise.
	(vmlal_lane_s32): Likewise.
	(vmlal_lane_u32): Likewise.
	(vmlal_laneq_s16): Likewise.
	(vmlal_laneq_u16): Likewise.
	(vmlal_laneq_s32): Likewise.
	(vmlal_laneq_u32): Likewise.
	(vmull_lane_s16): Likewise.
	(vmull_lane_u16): Likewise.
	(vmull_lane_s32): Likewise.
	(vmull_lane_u32): Likewise.
	(vmull_laneq_s16): Likewise.
	(vmull_laneq_u16): Likewise.
	(vmull_laneq_s32): Likewise.
	(vmull_laneq_u32): Likewise.
	* config/aarch64/iterators.md (Vcondtype): New iterator for lane mul.
	(Qlane): Likewise.
---
 gcc/ChangeLog                                |  28 ++
 gcc/config/aarch64/aarch64-builtins.c        |   5 +
 gcc/config/aarch64/aarch64-simd-builtins.def |   9 +
 gcc/config/aarch64/aarch64-simd.md           |  40 +++
 gcc/config/aarch64/arm_neon.h                | 296 ++++++-------------
 gcc/config/aarch64/iterators.md              |   7 +
 6 files changed, 185 insertions(+), 200 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 1cb66942d40..2bc6f39b014 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,31 @@
+2020-03-06  Wilco Dijkstra
+
+	PR target/91598
+	* config/aarch64/aarch64-builtins.c (TYPES_TERNOPU_LANE): Add define.
+	* config/aarch64/aarch64-simd.md
+	(aarch64_vec_<su>mult_lane<Qlane>): Add new insn for widening lane mul.
+	(aarch64_vec_<su>mlal_lane<Qlane>): Likewise.
+	* config/aarch64/aarch64-simd-builtins.def: Add intrinsics.
+	* config/aarch64/arm_neon.h:
+	(vmlal_lane_s16): Expand using intrinsics rather than inline asm.
+	(vmlal_lane_u16): Likewise.
+	(vmlal_lane_s32): Likewise.
+	(vmlal_lane_u32): Likewise.
+	(vmlal_laneq_s16): Likewise.
+	(vmlal_laneq_u16): Likewise.
+	(vmlal_laneq_s32): Likewise.
+	(vmlal_laneq_u32): Likewise.
+	(vmull_lane_s16): Likewise.
+	(vmull_lane_u16): Likewise.
+	(vmull_lane_s32): Likewise.
+	(vmull_lane_u32): Likewise.
+	(vmull_laneq_s16): Likewise.
+	(vmull_laneq_u16): Likewise.
+	(vmull_laneq_s32): Likewise.
+	(vmull_laneq_u32): Likewise.
+	* config/aarch64/iterators.md (Vcondtype): New iterator for lane mul.
+	(Qlane): Likewise.
+
 2020-03-06  Wilco Dijkstra
 
 	* aarch64/aarch64-simd.md (aarch64_mla_elt): Correct lane syntax.
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 9c9c6d86ae2..5744e68ea08 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -175,6 +175,11 @@ aarch64_types_ternopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
       qualifier_unsigned, qualifier_unsigned };
 #define TYPES_TERNOPU (aarch64_types_ternopu_qualifiers)
 static enum aarch64_type_qualifiers
+aarch64_types_ternopu_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_unsigned, qualifier_unsigned,
+      qualifier_unsigned, qualifier_lane_index };
+#define TYPES_TERNOPU_LANE (aarch64_types_ternopu_lane_qualifiers)
+static enum aarch64_type_qualifiers
 aarch64_types_ternopu_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_unsigned, qualifier_unsigned,
       qualifier_unsigned, qualifier_immediate };
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index cc0bd0e6b59..332a0b6b1ea 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -191,6 +191,15 @@
   BUILTIN_VQW (BINOP, vec_widen_smult_hi_, 10)
   BUILTIN_VQW (BINOPU, vec_widen_umult_hi_, 10)
 
+  BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_lane_, 0)
+  BUILTIN_VD_HSI (QUADOP_LANE, vec_smlal_lane_, 0)
+  BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_laneq_, 0)
+  BUILTIN_VD_HSI (QUADOP_LANE, vec_smlal_laneq_, 0)
+  BUILTIN_VD_HSI (TERNOPU_LANE, vec_umult_lane_, 0)
+  BUILTIN_VD_HSI (QUADOPU_LANE, vec_umlal_lane_, 0)
+  BUILTIN_VD_HSI (TERNOPU_LANE, vec_umult_laneq_, 0)
+  BUILTIN_VD_HSI (QUADOPU_LANE, vec_umlal_laneq_, 0)
+
   BUILTIN_VSD_HSI (BINOP, sqdmull, 0)
   BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_lane, 0)
   BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_laneq, 0)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index e5cf4e4549c..24a11fb5040 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1892,6 +1892,46 @@
 }
 )
 
+;; vmull_lane_s16 intrinsics
+(define_insn "aarch64_vec_<su>mult_lane<Qlane>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+	(mult:<VWIDE>
+	  (ANY_EXTEND:<VWIDE>
+	    (match_operand:<VCOND> 1 "register_operand" "w"))
+	  (ANY_EXTEND:<VWIDE>
+	    (vec_duplicate:<VCOND>
+	      (vec_select:<VEL>
+		(match_operand:VDQHS 2 "register_operand" "<vwx>")
+		(parallel [(match_operand:SI 3 "immediate_operand" "i")]))))))]
+  "TARGET_SIMD"
+  {
+    operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
+    return "<su>mull\\t%0.<Vwtype>, %1.<Vcondtype>, %2.<Vetype>[%3]";
+  }
+  [(set_attr "type" "neon_mul_<Vetype>_scalar_long")]
+)
+
+;; vmlal_lane_s16 intrinsics
+(define_insn "aarch64_vec_<su>mlal_lane<Qlane>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+	(plus:<VWIDE>
+	  (mult:<VWIDE>
+	    (ANY_EXTEND:<VWIDE>
+	      (match_operand:<VCOND> 2 "register_operand" "w"))
+	    (ANY_EXTEND:<VWIDE>
+	      (vec_duplicate:<VCOND>
+		(vec_select:<VEL>
+		  (match_operand:VDQHS 3 "register_operand" "<vwx>")
+		  (parallel [(match_operand:SI 4 "immediate_operand" "i")])))))
+	  (match_operand:<VWIDE> 1 "register_operand" "0")))]
+  "TARGET_SIMD"
+  {
+    operands[4] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[4]));
+    return "<su>mlal\\t%0.<Vwtype>, %2.<Vcondtype>, %3.<Vetype>[%4]";
+  }
+  [(set_attr "type" "neon_mla_<Vetype>_scalar_long")]
+)
+
 ;; FP vector operations.
;; AArch64 AdvSIMD supports single-precision (32-bit) and ;; double-precision (64-bit) floating-point data types and arithmetic as diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h index cc4ce76d16e..50f8b23bc17 100644 --- a/gcc/config/aarch64/arm_neon.h +++ b/gcc/config/aarch64/arm_neon.h @@ -7700,117 +7700,61 @@ vmlal_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c) return __result; } -#define vmlal_lane_s16(a, b, c, d) \ - __extension__ \ - ({ \ - int16x4_t c_ = (c); \ - int16x4_t b_ = (b); \ - int32x4_t a_ = (a); \ - int32x4_t result; \ - __asm__ ("smlal %0.4s,%2.4h,%3.h[%4]" \ - : "=w"(result) \ - : "0"(a_), "w"(b_), "x"(c_), "i"(d) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_lane_s16 (int32x4_t __acc, int16x4_t __a, int16x4_t __b, const int __c) +{ + return __builtin_aarch64_vec_smlal_lane_v4hi (__acc, __a, __b, __c); +} -#define vmlal_lane_s32(a, b, c, d) \ - __extension__ \ - ({ \ - int32x2_t c_ = (c); \ - int32x2_t b_ = (b); \ - int64x2_t a_ = (a); \ - int64x2_t result; \ - __asm__ ("smlal %0.2d,%2.2s,%3.s[%4]" \ - : "=w"(result) \ - : "0"(a_), "w"(b_), "w"(c_), "i"(d) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_lane_s32 (int64x2_t __acc, int32x2_t __a, int32x2_t __b, const int __c) +{ + return __builtin_aarch64_vec_smlal_lane_v2si (__acc, __a, __b, __c); +} -#define vmlal_lane_u16(a, b, c, d) \ - __extension__ \ - ({ \ - uint16x4_t c_ = (c); \ - uint16x4_t b_ = (b); \ - uint32x4_t a_ = (a); \ - uint32x4_t result; \ - __asm__ ("umlal %0.4s,%2.4h,%3.h[%4]" \ - : "=w"(result) \ - : "0"(a_), "w"(b_), "x"(c_), "i"(d) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_lane_u16 (uint32x4_t __acc, uint16x4_t __a, uint16x4_t __b, const int __c) +{ + return __builtin_aarch64_vec_umlal_lane_v4hi_uuuus (__acc, __a, __b, __c); +} -#define vmlal_lane_u32(a, b, c, d) \ - __extension__ \ - ({ \ - uint32x2_t c_ = (c); \ - uint32x2_t b_ = (b); \ - uint64x2_t a_ = (a); \ - uint64x2_t result; \ - __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \ - : "=w"(result) \ - : "0"(a_), "w"(b_), "w"(c_), "i"(d) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_lane_u32 (uint64x2_t __acc, uint32x2_t __a, uint32x2_t __b, const int __c) +{ + return __builtin_aarch64_vec_umlal_lane_v2si_uuuus (__acc, __a, __b, __c); +} -#define vmlal_laneq_s16(a, b, c, d) \ - __extension__ \ - ({ \ - int16x8_t c_ = (c); \ - int16x4_t b_ = (b); \ - int32x4_t a_ = (a); \ - int32x4_t result; \ - __asm__ ("smlal %0.4s, %2.4h, %3.h[%4]" \ - : "=w"(result) \ - : "0"(a_), "w"(b_), "x"(c_), "i"(d) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_laneq_s16 (int32x4_t __acc, int16x4_t __a, int16x8_t __b, const int __c) +{ + return __builtin_aarch64_vec_smlal_laneq_v4hi (__acc, __a, __b, __c); +} -#define vmlal_laneq_s32(a, b, c, d) \ - __extension__ \ - ({ \ - int32x4_t c_ = (c); \ - int32x2_t b_ = (b); \ - int64x2_t a_ = (a); \ - int64x2_t result; \ - __asm__ ("smlal %0.2d, %2.2s, %3.s[%4]" \ - : "=w"(result) \ - : "0"(a_), "w"(b_), "w"(c_), "i"(d) \ 
- : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_laneq_s32 (int64x2_t __acc, int32x2_t __a, int32x4_t __b, const int __c) +{ + return __builtin_aarch64_vec_smlal_laneq_v2si (__acc, __a, __b, __c); +} -#define vmlal_laneq_u16(a, b, c, d) \ - __extension__ \ - ({ \ - uint16x8_t c_ = (c); \ - uint16x4_t b_ = (b); \ - uint32x4_t a_ = (a); \ - uint32x4_t result; \ - __asm__ ("umlal %0.4s, %2.4h, %3.h[%4]" \ - : "=w"(result) \ - : "0"(a_), "w"(b_), "x"(c_), "i"(d) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_laneq_u16 (uint32x4_t __acc, uint16x4_t __a, uint16x8_t __b, const int __c) +{ + return __builtin_aarch64_vec_umlal_laneq_v4hi_uuuus (__acc, __a, __b, __c); +} -#define vmlal_laneq_u32(a, b, c, d) \ - __extension__ \ - ({ \ - uint32x4_t c_ = (c); \ - uint32x2_t b_ = (b); \ - uint64x2_t a_ = (a); \ - uint64x2_t result; \ - __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \ - : "=w"(result) \ - : "0"(a_), "w"(b_), "w"(c_), "i"(d) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_laneq_u32 (uint64x2_t __acc, uint32x2_t __a, uint32x4_t __b, const int __c) +{ + return __builtin_aarch64_vec_umlal_laneq_v2si_uuuus (__acc, __a, __b, __c); +} __extension__ extern __inline int32x4_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) @@ -9289,109 +9233,61 @@ vmull_high_u32 (uint32x4_t __a, uint32x4_t __b) return __builtin_aarch64_vec_widen_umult_hi_v4si_uuu (__a, __b); } -#define vmull_lane_s16(a, b, c) \ - __extension__ \ - ({ \ - int16x4_t b_ = (b); \ - int16x4_t a_ = (a); \ - int32x4_t result; \ - __asm__ ("smull %0.4s,%1.4h,%2.h[%3]" \ - : "=w"(result) \ - : "w"(a_), "x"(b_), "i"(c) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c) +{ + return __builtin_aarch64_vec_smult_lane_v4hi (__a, __b, __c); +} -#define vmull_lane_s32(a, b, c) \ - __extension__ \ - ({ \ - int32x2_t b_ = (b); \ - int32x2_t a_ = (a); \ - int64x2_t result; \ - __asm__ ("smull %0.2d,%1.2s,%2.s[%3]" \ - : "=w"(result) \ - : "w"(a_), "w"(b_), "i"(c) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c) +{ + return __builtin_aarch64_vec_smult_lane_v2si (__a, __b, __c); +} -#define vmull_lane_u16(a, b, c) \ - __extension__ \ - ({ \ - uint16x4_t b_ = (b); \ - uint16x4_t a_ = (a); \ - uint32x4_t result; \ - __asm__ ("umull %0.4s,%1.4h,%2.h[%3]" \ - : "=w"(result) \ - : "w"(a_), "x"(b_), "i"(c) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) +{ + return __builtin_aarch64_vec_umult_lane_v4hi_uuus (__a, __b, __c); +} -#define vmull_lane_u32(a, b, c) \ - __extension__ \ - ({ \ - uint32x2_t b_ = (b); \ - uint32x2_t a_ = (a); \ - uint64x2_t result; \ - __asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \ - : "=w"(result) \ - : "w"(a_), "w"(b_), "i"(c) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern 
__inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) +{ + return __builtin_aarch64_vec_umult_lane_v2si_uuus (__a, __b, __c); +} -#define vmull_laneq_s16(a, b, c) \ - __extension__ \ - ({ \ - int16x8_t b_ = (b); \ - int16x4_t a_ = (a); \ - int32x4_t result; \ - __asm__ ("smull %0.4s, %1.4h, %2.h[%3]" \ - : "=w"(result) \ - : "w"(a_), "x"(b_), "i"(c) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c) +{ + return __builtin_aarch64_vec_smult_laneq_v4hi (__a, __b, __c); +} -#define vmull_laneq_s32(a, b, c) \ - __extension__ \ - ({ \ - int32x4_t b_ = (b); \ - int32x2_t a_ = (a); \ - int64x2_t result; \ - __asm__ ("smull %0.2d, %1.2s, %2.s[%3]" \ - : "=w"(result) \ - : "w"(a_), "w"(b_), "i"(c) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c) +{ + return __builtin_aarch64_vec_smult_laneq_v2si (__a, __b, __c); +} -#define vmull_laneq_u16(a, b, c) \ - __extension__ \ - ({ \ - uint16x8_t b_ = (b); \ - uint16x4_t a_ = (a); \ - uint32x4_t result; \ - __asm__ ("umull %0.4s, %1.4h, %2.h[%3]" \ - : "=w"(result) \ - : "w"(a_), "x"(b_), "i"(c) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __c) +{ + return __builtin_aarch64_vec_umult_laneq_v4hi_uuus (__a, __b, __c); +} -#define vmull_laneq_u32(a, b, c) \ - __extension__ \ - ({ \ - uint32x4_t b_ = (b); \ - uint32x2_t a_ = (a); \ - uint64x2_t result; \ - __asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \ - : "=w"(result) \ - : "w"(a_), "w"(b_), "i"(c) \ - : /* No clobbers */); \ - result; \ - }) +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __c) +{ + return __builtin_aarch64_vec_umult_laneq_v2si_uuus (__a, __b, __c); +} __extension__ extern __inline int32x4_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md index b56a050ac09..95fa3e4baa1 100644 --- a/gcc/config/aarch64/iterators.md +++ b/gcc/config/aarch64/iterators.md @@ -986,6 +986,13 @@ (V4SF "4s") (V2DF "2d") (V4HF "4h") (V8HF "8h")]) +;; Map mode to type used in widening multiplies. +(define_mode_attr Vcondtype [(V4HI "4h") (V8HI "4h") (V2SI "2s") (V4SI "2s")]) + +;; Map lane mode to name +(define_mode_attr Qlane [(V4HI "_v4hi") (V8HI "q_v4hi") + (V2SI "_v2si") (V4SI "q_v2si")]) + (define_mode_attr Vrevsuff [(V4HI "16") (V8HI "16") (V2SI "32") (V4SI "32") (V2DI "64")]) -- 2.30.2
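
For context, a small hypothetical example (not the PR91598 testcase; the
function and variable names below are purely illustrative) of the kind of
code this patch affects.  With vmlal_lane_s16 now expanding via the
__builtin_aarch64_vec_smlal_lane_v4hi builtin instead of an opaque asm
statement, the scheduler sees a real smlal pattern with latency info and
can schedule the loop, which is where the speedup on in-order cores comes
from.

/* Hypothetical widening multiply-accumulate against lane 0 of a
   coefficient vector.  Assumes n is a multiple of 4.  */
#include <arm_neon.h>

int32x4_t
dot_lane (int32x4_t acc, const int16_t *a, const int16_t *coeffs, int n)
{
  int16x4_t c = vld1_s16 (coeffs);

  for (int i = 0; i < n; i += 4)
    {
      int16x4_t v = vld1_s16 (a + i);
      /* Previously an inline "smlal" asm; now matches the new
	 aarch64_vec_smlal_lane_v4hi insn, i.e. smlal v.4s, v.4h, v.h[0].  */
      acc = vmlal_lane_s16 (acc, v, c, 0);
    }
  return acc;
}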