}
)
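+;; RSHRN narrows each element of the wide input with a rounding right shift
+;; and leaves the other 64-bit half of the destination zeroed, so the insn
+;; is modelled as a vec_concat of the narrowed result with zero; the LE and
+;; BE patterns below differ only in the order of the vec_concat halves.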
+(define_insn "aarch64_rshrn<mode>_insn_le"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
+ (match_operand:VQN 2 "aarch64_simd_rshift_imm")
+ ] UNSPEC_RSHRN)
+ (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")))]
+ "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+ "rshrn\\t%0.<Vntype>, %1.<Vtype>, %2"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_rshrn<mode>_insn_be"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")
+ (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
+ (match_operand:VQN 2 "aarch64_simd_rshift_imm")
+ ] UNSPEC_RSHRN)))]
+ "TARGET_SIMD && BYTES_BIG_ENDIAN"
+ "rshrn\\t%0.<Vntype>, %1.<Vtype>, %2"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_rshrn<mode>"
+ [(match_operand:<VNARROWQ> 0 "register_operand")
+ (match_operand:VQN 1 "register_operand")
+ (match_operand:SI 2 "aarch64_simd_shift_imm_offset_<vn_mode>")]
+ "TARGET_SIMD"
+ {
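+    /* The insn patterns take the shift amount as a vector of equal
+       immediates, while the intrinsic supplies a scalar, so duplicate the
+       scalar across all lanes of a <MODE>mode vector first.  */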
+    operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+                                                      INTVAL (operands[2]));
+    rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_rshrn<mode>_insn_be (tmp, operands[1],
+                  operands[2], CONST0_RTX (<VNARROWQ>mode)));
+    else
+      emit_insn (gen_aarch64_rshrn<mode>_insn_le (tmp, operands[1],
+                  operands[2], CONST0_RTX (<VNARROWQ>mode)));
+
+    /* The intrinsic expects a narrow result, so emit a subreg that will get
+       optimized away as appropriate.  */
+    emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
+                                                 <VNARROWQ2>mode));
+    DONE;
+  }
+)
+
(define_insn "aarch64_shrn2<mode>_insn_le"
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
(vec_concat:<VNARROWQ2>
}
)
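+;; RSHRN2 narrows each element of the wide input with a rounding right shift
+;; and inserts the result into the high half of the destination while the
+;; low half (operand 1) is preserved; again, the LE and BE patterns differ
+;; only in the order of the vec_concat halves.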
+(define_insn "aarch64_rshrn2<mode>_insn_le"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (match_operand:<VNARROWQ> 1 "register_operand" "0")
+ (unspec:<VNARROWQ> [(match_operand:VQN 2 "register_operand" "w")
+ (match_operand:VQN 3 "aarch64_simd_rshift_imm")] UNSPEC_RSHRN)))]
+ "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+ "rshrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_rshrn2<mode>_insn_be"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (unspec:<VNARROWQ> [(match_operand:VQN 2 "register_operand" "w")
+ (match_operand:VQN 3 "aarch64_simd_rshift_imm")] UNSPEC_RSHRN)
+ (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
+ "TARGET_SIMD && BYTES_BIG_ENDIAN"
+ "rshrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_rshrn2<mode>"
+ [(match_operand:<VNARROWQ2> 0 "register_operand")
+ (match_operand:<VNARROWQ> 1 "register_operand")
+ (match_operand:VQN 2 "register_operand")
+ (match_operand:SI 3 "aarch64_simd_shift_imm_offset_<vn_mode>")]
+ "TARGET_SIMD"
+ {
+ operands[3] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+ INTVAL (operands[3]));
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_aarch64_rshrn2<mode>_insn_be (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ emit_insn (gen_aarch64_rshrn2<mode>_insn_le (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+ }
+)
;; For quads.
return __builtin_aarch64_sqshrun2_nv2di_uuss (__a, __b, __c);
}
-#define vrshrn_high_n_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int8x8_t a_ = (a); \
- int8x16_t result = vcombine_s8 \
- (a_, vcreate_s8 \
- (__AARCH64_UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_rshrn2v8hi (__a, __b, __c);
+}
-#define vrshrn_high_n_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x8_t result = vcombine_s16 \
- (a_, vcreate_s16 \
- (__AARCH64_UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_rshrn2v4si (__a, __b, __c);
+}
-#define vrshrn_high_n_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x4_t result = vcombine_s32 \
- (a_, vcreate_s32 \
- (__AARCH64_UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
+{
+ return __builtin_aarch64_rshrn2v2di (__a, __b, __c);
+}
-#define vrshrn_high_n_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint8x8_t a_ = (a); \
- uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 \
- (__AARCH64_UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
+{
+  return (uint8x16_t) __builtin_aarch64_rshrn2v8hi ((int8x8_t) __a,
+                                                     (int16x8_t) __b, __c);
+}
-#define vrshrn_high_n_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 \
- (__AARCH64_UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
+{
+  return (uint16x8_t) __builtin_aarch64_rshrn2v4si ((int16x4_t) __a,
+                                                     (int32x4_t) __b, __c);
+}
-#define vrshrn_high_n_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 \
- (__AARCH64_UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
+{
+  return (uint32x4_t) __builtin_aarch64_rshrn2v2di ((int32x2_t) __a,
+                                                     (int64x2_t) __b, __c);
+}
-#define vrshrn_n_s16(a, b) \
- __extension__ \
- ({ \
- int16x8_t a_ = (a); \
- int8x8_t result; \
- __asm__ ("rshrn %0.8b,%1.8h,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return __builtin_aarch64_rshrnv8hi (__a, __b);
+}
-#define vrshrn_n_s32(a, b) \
- __extension__ \
- ({ \
- int32x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("rshrn %0.4h,%1.4s,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return __builtin_aarch64_rshrnv4si (__a, __b);
+}
-#define vrshrn_n_s64(a, b) \
- __extension__ \
- ({ \
- int64x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("rshrn %0.2s,%1.2d,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return __builtin_aarch64_rshrnv2di (__a, __b);
+}
-#define vrshrn_n_u16(a, b) \
- __extension__ \
- ({ \
- uint16x8_t a_ = (a); \
- uint8x8_t result; \
- __asm__ ("rshrn %0.8b,%1.8h,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_rshrnv8hi ((int16x8_t) __a, __b);
+}
-#define vrshrn_n_u32(a, b) \
- __extension__ \
- ({ \
- uint32x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("rshrn %0.4h,%1.4s,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_rshrnv4si ((int32x4_t) __a, __b);
+}
-#define vrshrn_n_u64(a, b) \
- __extension__ \
- ({ \
- uint64x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("rshrn %0.2s,%1.2d,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_rshrnv2di ((int64x2_t) __a, __b);
+}
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))