From: Kyrylo Tkachov
Date: Mon, 25 Jan 2021 09:50:54 +0000 (+0000)
Subject: aarch64: Reimplement vshrn_high_n* intrinsics using builtins
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=d61ca09ec9342ec5683a67a50b9bdd3dbdcd3624;p=gcc.git

aarch64: Reimplement vshrn_high_n* intrinsics using builtins

This patch reimplements the vshrn_high_n* intrinsics that generate the
SHRN2 instruction.  The operation is a vec_concat of the narrowing shift
result with the bottom part of the destination register, so we need a
little-endian and a big-endian version of the pattern, plus an expander
to pick between them.

gcc/ChangeLog:

	* config/aarch64/aarch64-simd-builtins.def (shrn2): Define builtin.
	* config/aarch64/aarch64-simd.md (aarch64_shrn2<mode>_insn_le):
	Define.
	(aarch64_shrn2<mode>_insn_be): Likewise.
	(aarch64_shrn2<mode>): Likewise.
	* config/aarch64/arm_neon.h (vshrn_high_n_s16): Reimplement using
	builtins.
	(vshrn_high_n_s32): Likewise.
	(vshrn_high_n_s64): Likewise.
	(vshrn_high_n_u16): Likewise.
	(vshrn_high_n_u32): Likewise.
	(vshrn_high_n_u64): Likewise.
---
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 13bc6928d4d..66f1b231d21 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -191,6 +191,9 @@
   /* Implemented by aarch64_shrn<mode>.  */
   BUILTIN_VQN (SHIFTIMM, shrn, 0, NONE)
 
+  /* Implemented by aarch64_shrn2<mode>.  */
+  BUILTIN_VQN (SHIFTACC, shrn2, 0, NONE)
+
   /* Implemented by aarch64_<su>mlsl<mode>.  */
   BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
   BUILTIN_VD_BHSI (TERNOPU, umlsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 872aa83fc92..86d2667601b 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1728,6 +1728,49 @@
 }
 )
 
+(define_insn "aarch64_shrn2<mode>_insn_le"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+	(vec_concat:<VNARROWQ2>
+	  (match_operand:<VNARROWQ> 1 "register_operand" "0")
+	  (truncate:<VNARROWQ>
+	    (lshiftrt:VQN (match_operand:VQN 2 "register_operand" "w")
+	      (match_operand:VQN 3 "aarch64_simd_rshift_imm")))))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "shrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_shrn2<mode>_insn_be"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+	(vec_concat:<VNARROWQ2>
+	  (truncate:<VNARROWQ>
+	    (lshiftrt:VQN (match_operand:VQN 2 "register_operand" "w")
+	      (match_operand:VQN 3 "aarch64_simd_rshift_imm")))
+	  (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "shrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_shrn2<mode>"
+  [(match_operand:<VNARROWQ2> 0 "register_operand")
+   (match_operand:<VNARROWQ> 1 "register_operand")
+   (match_operand:VQN 2 "register_operand")
+   (match_operand:SI 3 "aarch64_simd_shift_imm_offset_<vn_mode>")]
+  "TARGET_SIMD"
+  {
+    operands[3] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+						     INTVAL (operands[3]));
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_shrn2<mode>_insn_be (operands[0], operands[1],
+						  operands[2], operands[3]));
+    else
+      emit_insn (gen_aarch64_shrn2<mode>_insn_le (operands[0], operands[1],
+						  operands[2], operands[3]));
+    DONE;
+  }
+)
+
 ;; For quads.
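To make the vec_concat semantics concrete, here is a reference model of
vshrn_high_n_s16 (a, b, 2) on a little-endian target.  This loop is not
part of the patch; the _ref name and the store/load round-trip are
purely illustrative:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Reference model: the bottom 8 lanes copy A (the vec_concat's
       first operand in the little-endian pattern above); the top 8
       lanes are the lanes of B shifted right by 2 and truncated to
       8 bits, i.e. the narrowing shift.  */
    int8x16_t
    vshrn_high_n_s16_ref (int8x8_t a, int16x8_t b)
    {
      int8_t lo[8], out[16];
      int16_t wide[8];
      vst1_s8 (lo, a);
      vst1q_s16 (wide, b);
      for (int i = 0; i < 8; i++)
        {
          out[i] = lo[i];                        /* destination bottom kept */
          out[i + 8] = (int8_t) (wide[i] >> 2);  /* narrowing right shift */
        }
      return vld1q_s8 (out);
    }

On big-endian targets GCC's RTL lane numbering for vec_concat is
reversed, which is why the expander selects the _be pattern with the
vec_concat operands swapped.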
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 80d75555a71..ac469ce3f58 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -9809,95 +9809,50 @@ vrsqrteq_u32 (uint32x4_t __a)
   return __result;
 }
 
-#define vshrn_high_n_s16(a, b, c) \
-  __extension__ \
-    ({ \
-       int16x8_t b_ = (b); \
-       int8x8_t a_ = (a); \
-       int8x16_t result = vcombine_s8 \
-                            (a_, vcreate_s8 \
-                                   (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("shrn2 %0.16b,%1.8h,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_shrn2v8hi (__a, __b, __c);
+}
 
-#define vshrn_high_n_s32(a, b, c) \
-  __extension__ \
-    ({ \
-       int32x4_t b_ = (b); \
-       int16x4_t a_ = (a); \
-       int16x8_t result = vcombine_s16 \
-                            (a_, vcreate_s16 \
-                                   (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("shrn2 %0.8h,%1.4s,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_shrn2v4si (__a, __b, __c);
+}
 
-#define vshrn_high_n_s64(a, b, c) \
-  __extension__ \
-    ({ \
-       int64x2_t b_ = (b); \
-       int32x2_t a_ = (a); \
-       int32x4_t result = vcombine_s32 \
-                            (a_, vcreate_s32 \
-                                   (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("shrn2 %0.4s,%1.2d,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
+{
+  return __builtin_aarch64_shrn2v2di (__a, __b, __c);
+}
 
-#define vshrn_high_n_u16(a, b, c) \
-  __extension__ \
-    ({ \
-       uint16x8_t b_ = (b); \
-       uint8x8_t a_ = (a); \
-       uint8x16_t result = vcombine_u8 \
-                             (a_, vcreate_u8 \
-                                    (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("shrn2 %0.16b,%1.8h,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
+{
+  return (uint8x16_t)
+    __builtin_aarch64_shrn2v8hi ((int8x8_t) __a, (int16x8_t) __b, __c);
+}
 
-#define vshrn_high_n_u32(a, b, c) \
-  __extension__ \
-    ({ \
-       uint32x4_t b_ = (b); \
-       uint16x4_t a_ = (a); \
-       uint16x8_t result = vcombine_u16 \
-                             (a_, vcreate_u16 \
-                                    (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("shrn2 %0.8h,%1.4s,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
+{
+  return (uint16x8_t)
+    __builtin_aarch64_shrn2v4si ((int16x4_t) __a, (int32x4_t) __b, __c);
+}
 
-#define vshrn_high_n_u64(a, b, c) \
-  __extension__ \
-    ({ \
-       uint64x2_t b_ = (b); \
-       uint32x2_t a_ = (a); \
-       uint32x4_t result = vcombine_u32 \
-                             (a_, vcreate_u32 \
-                                    (__AARCH64_UINT64_C (0x0))); \
-       __asm__ ("shrn2 %0.4s,%1.2d,#%2" \
-                : "+w"(result) \
-                : "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
+{
+  return (uint32x4_t)
+    __builtin_aarch64_shrn2v2di ((int32x2_t) __a, (int64x2_t) __b, __c);
+}
 
 #define vsli_n_p8(a, b, c) \
   __extension__ \
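As a usage sketch (the function below is invented for illustration and
is not part of the patch): with the intrinsics routed through the
builtin instead of inline asm, a call such as

    #include <arm_neon.h>

    uint16x8_t
    narrow_high (uint16x4_t lo, uint32x4_t wide)
    {
      /* Bottom half keeps LO; top half is (wide >> 9) narrowed
	 to 16 bits.  */
      return vshrn_high_n_u32 (lo, wide, 9);
    }

should still compile to a single instruction, something like
"shrn2 v0.8h, v1.4s, #9", but the compiler can now see the operation's
semantics (a vec_concat of a truncated shift) and optimize around it
instead of treating it as an opaque asm block.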