[(set_attr "type" "neon_shift_imm_narrow_q")]
)
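+
+;; XTN2 narrows each element of the source vector and writes the results
+;; to the high half of the destination, leaving the low half untouched.
+;; GCC's vec_concat operand order follows memory lane order, so separate
+;; _le and _be patterns are needed: on big endian the first vec_concat
+;; operand corresponds to the register's high half, and the operands
+;; must be swapped.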
+(define_insn "aarch64_xtn2<mode>_le"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (match_operand:<VNARROWQ> 1 "register_operand" "0")
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
+ "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+ "xtn2\t%0.<V2ntype>, %2.<Vtype>"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_xtn2<mode>_be"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))
+ (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
+ "TARGET_SIMD && BYTES_BIG_ENDIAN"
+ "xtn2\t%0.<V2ntype>, %2.<Vtype>"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_xtn2<mode>"
+ [(match_operand:<VNARROWQ2> 0 "register_operand")
+ (match_operand:<VNARROWQ> 1 "register_operand")
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand"))]
+ "TARGET_SIMD"
+ {
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_aarch64_xtn2<mode>_be (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_aarch64_xtn2<mode>_le (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+)
+
(define_insn "aarch64_bfdot<mode>"
[(set (match_operand:VDQSF 0 "register_operand" "=w")
(plus:VDQSF
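
The arm_neon.h changes below replace the hand-written inline asm in the
vmovn_high_* intrinsics with calls to the new builtins. For reference, the
semantics being preserved can be modelled entirely with existing ACLE
intrinsics; a minimal sketch (illustrative only, model_vmovn_high_s16 is a
made-up name and not part of the patch):

#include <arm_neon.h>

/* Model of vmovn_high_s16: truncate each 16-bit lane of B to 8 bits and
   place the results in the high half of the result, passing A through
   unchanged as the low half.  */
int8x16_t
model_vmovn_high_s16 (int8x8_t a, int16x8_t b)
{
  return vcombine_s8 (a, vmovn_s16 (b));
}
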
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s16 (int8x8_t __a, int16x8_t __b)
{
-  int8x16_t __result = vcombine_s8 (__a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.16b,%1.8h"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_xtn2v8hi (__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s32 (int16x4_t __a, int32x4_t __b)
{
-  int16x8_t __result = vcombine_s16 (__a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.8h,%1.4s"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_xtn2v4si (__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s64 (int32x2_t __a, int64x2_t __b)
{
-  int32x4_t __result = vcombine_s32 (__a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.4s,%1.2d"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_xtn2v2di (__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u16 (uint8x8_t __a, uint16x8_t __b)
{
-  uint8x16_t __result = vcombine_u8 (__a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.16b,%1.8h"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return (uint8x16_t)
+    __builtin_aarch64_xtn2v8hi ((int8x8_t) __a, (int16x8_t) __b);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u32 (uint16x4_t __a, uint32x4_t __b)
{
-  uint16x8_t __result = vcombine_u16 (__a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.8h,%1.4s"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return (uint16x8_t)
+    __builtin_aarch64_xtn2v4si ((int16x4_t) __a, (int32x4_t) __b);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u64 (uint32x2_t __a, uint64x2_t __b)
{
-  uint32x4_t __result = vcombine_u32 (__a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.4s,%1.2d"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return (uint32x4_t)
+    __builtin_aarch64_xtn2v2di ((int32x2_t) __a, (int64x2_t) __b);
}
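
The unsigned variants above reuse the signed builtins through casts, which
is value-preserving because XTN2 is a sign-agnostic bit-level truncation of
each lane. A hypothetical smoke test (not part of the patch) that the
narrowing and half-placement still behave as the old inline asm did:

#include <arm_neon.h>
#include <assert.h>

int
main (void)
{
  uint16x4_t lo = vdup_n_u16 (1);
  uint32x4_t wide = vdupq_n_u32 (0x10002);   /* truncates to 2 per lane  */
  uint16x8_t r = vmovn_high_u32 (lo, wide);
  assert (vgetq_lane_u16 (r, 0) == 1);       /* low half passed through  */
  assert (vgetq_lane_u16 (r, 7) == 2);       /* high half narrowed  */
  return 0;
}
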
__extension__ extern __inline int8x8_t