aarch64: reimplement vqmovn_high* intrinsics using builtins
author     Kyrylo Tkachov <kyrylo.tkachov@arm.com>
           Wed, 13 Jan 2021 12:14:30 +0000 (12:14 +0000)
committer  Kyrylo Tkachov <kyrylo.tkachov@arm.com>
           Thu, 14 Jan 2021 08:36:19 +0000 (08:36 +0000)
This patch reimplements the saturating truncate-and-insert-into-high-half
intrinsics (vqmovn_high_*) using the appropriate RTL codes and builtins,
replacing the inline assembly previously used in arm_neon.h.
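
As a usage sketch (not part of the patch; the function name is illustrative),
the example below exercises one of the affected intrinsics.  After this change
the call goes through __builtin_aarch64_sqxtn2v4si and the aarch64_<su>qxtn2<mode>
expander instead of inline asm, and should still produce a single sqxtn2
instruction, as the updated narrow_high-intrinsics.c test checks:

    #include <arm_neon.h>

    int16x8_t
    narrow_high_example (int16x4_t lo, int32x4_t wide)
    {
      /* Narrow 'wide' with signed saturation into the high half of the
         result, keeping 'lo' in the low half.
         Expected codegen: sqxtn2 v0.8h, v1.4s  */
      return vqmovn_high_s32 (lo, wide);
    }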

gcc/
* config/aarch64/aarch64-simd.md (aarch64_<su>qxtn2<mode>_le):
Define.
(aarch64_<su>qxtn2<mode>_be): Likewise.
(aarch64_<su>qxtn2<mode>): Likewise.
* config/aarch64/aarch64-simd-builtins.def (sqxtn2, uqxtn2):
Define builtins.
* config/aarch64/iterators.md (SAT_TRUNC): Define code_iterator.
(su): Handle ss_truncate and us_truncate.
* config/aarch64/arm_neon.h (vqmovn_high_s16): Reimplement using
builtin.
(vqmovn_high_s32): Likewise.
(vqmovn_high_s64): Likewise.
(vqmovn_high_u16): Likewise.
(vqmovn_high_u32): Likewise.
(vqmovn_high_u64): Likewise.

gcc/testsuite/
* gcc.target/aarch64/narrow_high-intrinsics.c: Update uqxtn2 and
sqxtn2 scan-assembler-times.

gcc/config/aarch64/aarch64-simd-builtins.def
gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/arm_neon.h
gcc/config/aarch64/iterators.md
gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c

index 6efc7706a41e02d947753a4cda984159b68bd39f..27e9026d9e8b7ff980c5b8d9ff1b00490e3a18cb 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
   /* Implemented by aarch64_<sur>qmovn<mode>.  */
   BUILTIN_VSQN_HSDI (UNOP, sqmovn, 0, ALL)
   BUILTIN_VSQN_HSDI (UNOP, uqmovn, 0, ALL)
+
+  /* Implemented by aarch64_<su>qxtn2<mode>.  */
+  BUILTIN_VQN (BINOP, sqxtn2, 0, ALL)
+  BUILTIN_VQN (BINOPU, uqxtn2, 0, ALL)
+
   /* Implemented by aarch64_s<optab><mode>.  */
   BUILTIN_VSDQ_I (UNOP, sqabs, 0, ALL)
   BUILTIN_VSDQ_I (UNOP, sqneg, 0, ALL)
index 65209686b7e17afca72aa2477aa26ea2472aef1f..0827f0eb579cd706c9e18550652c0916fc18230c 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
    [(set_attr "type" "neon_sat_shift_imm_narrow_q")]
 )
 
+(define_insn "aarch64_<su>qxtn2<mode>_le"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+       (vec_concat:<VNARROWQ2>
+         (match_operand:<VNARROWQ> 1 "register_operand" "0")
+         (SAT_TRUNC:<VNARROWQ>
+           (match_operand:VQN 2 "register_operand" "w"))))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "<su>qxtn2\\t%0.<V2ntype>, %2.<Vtype>"
+   [(set_attr "type" "neon_sat_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_<su>qxtn2<mode>_be"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+       (vec_concat:<VNARROWQ2>
+         (SAT_TRUNC:<VNARROWQ>
+           (match_operand:VQN 2 "register_operand" "w"))
+         (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "<su>qxtn2\\t%0.<V2ntype>, %2.<Vtype>"
+   [(set_attr "type" "neon_sat_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_<su>qxtn2<mode>"
+  [(match_operand:<VNARROWQ2> 0 "register_operand")
+   (match_operand:<VNARROWQ> 1 "register_operand")
+   (SAT_TRUNC:<VNARROWQ>
+     (match_operand:VQN 2 "register_operand"))]
+  "TARGET_SIMD"
+  {
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_<su>qxtn2<mode>_be (operands[0], operands[1],
+                                                operands[2]));
+    else
+      emit_insn (gen_aarch64_<su>qxtn2<mode>_le (operands[0], operands[1],
+                                                operands[2]));
+    DONE;
+  }
+)
+
 ;; <su>q<absneg>
 
 (define_insn "aarch64_s<optab><mode>"
index b2a6b58f8c92b896ade449f8f7978fe79c5a114f..6095c0dcbfa51ddbbdb4c1e659d9cad6ce7e4c7a 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -9584,72 +9584,42 @@ __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqmovn_high_s16 (int8x8_t __a, int16x8_t __b)
 {
-  int8x16_t __result = vcombine_s8 (__a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("sqxtn2 %0.16b, %1.8h"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_sqxtn2v8hi (__a, __b);
 }
 
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqmovn_high_s32 (int16x4_t __a, int32x4_t __b)
 {
-  int16x8_t __result = vcombine_s16 (__a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("sqxtn2 %0.8h, %1.4s"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_sqxtn2v4si (__a, __b);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqmovn_high_s64 (int32x2_t __a, int64x2_t __b)
 {
-  int32x4_t __result = vcombine_s32 (__a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("sqxtn2 %0.4s, %1.2d"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_sqxtn2v2di (__a, __b);
 }
 
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqmovn_high_u16 (uint8x8_t __a, uint16x8_t __b)
 {
-  uint8x16_t __result = vcombine_u8 (__a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("uqxtn2 %0.16b, %1.8h"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uqxtn2v8hi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqmovn_high_u32 (uint16x4_t __a, uint32x4_t __b)
 {
-  uint16x8_t __result = vcombine_u16 (__a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("uqxtn2 %0.8h, %1.4s"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uqxtn2v4si_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqmovn_high_u64 (uint32x2_t __a, uint64x2_t __b)
 {
-  uint32x4_t __result = vcombine_u32 (__a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("uqxtn2 %0.4s, %1.2d"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uqxtn2v2di_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint8x16_t
index 54a99d441b831b983f1c15f8387eba314f675d83..d42a70653edb266f2b76924b75a814db25f08f23 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
 ;; Unsigned comparison operators.
 (define_code_iterator FAC_COMPARISONS [lt le ge gt])
 
+;; Signed and unsigned saturating truncations.
+(define_code_iterator SAT_TRUNC [ss_truncate us_truncate])
+
 ;; SVE integer unary operations.
 (define_code_iterator SVE_INT_UNARY [abs neg not clrsb clz popcount
                                     (ss_abs "TARGET_SVE2")
                      (fix "s") (unsigned_fix "u")
                      (div "s") (udiv "u")
                      (smax "s") (umax "u")
-                     (smin "s") (umin "u")])
+                     (smin "s") (umin "u")
+                     (ss_truncate "s") (us_truncate "u")])
 
 ;; "s" for signed ops, empty for unsigned ones.
 (define_code_attr s [(sign_extend "s") (zero_extend "")])
index a2e0cb9b1008f620922d64f06acc3b66795514b0..b22aecaa9cf2efe35d0a7ae330dd214e198f4ad1 100644
--- a/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
+++ b/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
@@ -119,7 +119,7 @@ ONE (vmovn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
 /* { dg-final { scan-assembler-times "uqshrn2\\tv" 3} }  */
 /* { dg-final { scan-assembler-times "sqrshrn2\\tv" 3} }  */
 /* { dg-final { scan-assembler-times "uqrshrn2\\tv" 3} }  */
-/* { dg-final { scan-assembler-times "uqxtn2 v" 3} }  */
-/* { dg-final { scan-assembler-times "sqxtn2 v" 3} }  */
+/* { dg-final { scan-assembler-times "uqxtn2\\tv" 3} }  */
+/* { dg-final { scan-assembler-times "sqxtn2\\tv" 3} }  */
 /* { dg-final { scan-assembler-times "sqxtun2 v" 3} }  */
 /* { dg-final { scan-assembler-times "\\txtn2\\tv" 6} }  */