DONE;
})
+(define_insn "aarch64_shrn<mode>_insn_le"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (truncate:<VNARROWQ>
+ (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
+ (match_operand:VQN 2 "aarch64_simd_rshift_imm")))
+ (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")))]
+ "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+ "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_shrn<mode>_insn_be"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (vec_concat:<VNARROWQ2>
+ (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")
+ (truncate:<VNARROWQ>
+ (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
+ (match_operand:VQN 2 "aarch64_simd_rshift_imm")))))]
+ "TARGET_SIMD && BYTES_BIG_ENDIAN"
+ "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_shrn<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand")
+ (truncate:<VNARROWQ>
+ (lshiftrt:VQN (match_operand:VQN 1 "register_operand")
+ (match_operand:SI 2 "aarch64_simd_shift_imm_offset_<vn_mode>"))))]
+ "TARGET_SIMD"
+ {
+ operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+ INTVAL (operands[2]));
+ rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_aarch64_shrn<mode>_insn_be (tmp, operands[1],
+ operands[2], CONST0_RTX (<VNARROWQ>mode)));
+ else
+ emit_insn (gen_aarch64_shrn<mode>_insn_le (tmp, operands[1],
+ operands[2], CONST0_RTX (<VNARROWQ>mode)));
+
+ /* The intrinsic expects a narrow result, so emit a subreg that will get
+ optimized away as appropriate. */
+ emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
+ <VNARROWQ2>mode));
+ DONE;
+ }
+)
+
+
;; For quads.
(define_insn "vec_pack_trunc_<mode>"
return (uint32x2_t) __builtin_aarch64_xtnv2di ((int64x2_t) __a);
}
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s16 (int16x8_t __a, const int __b)
+{
+  return __builtin_aarch64_shrnv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s32 (int32x4_t __a, const int __b)
+{
+  return __builtin_aarch64_shrnv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s64 (int64x2_t __a, const int __b)
+{
+  return __builtin_aarch64_shrnv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint8x8_t) __builtin_aarch64_shrnv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint16x4_t) __builtin_aarch64_shrnv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint32x2_t) __builtin_aarch64_shrnv2di ((int64x2_t) __a, __b);
+}
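
Illustrative aside, not part of the patch: with the definitions above, a call such as the one below should now expand through the new aarch64_shrn<mode> expander instead of inline asm, assuming an AArch64 target and <arm_neon.h>. Each 16-bit lane is shifted right by the immediate and truncated to 8 bits, so for a shift of 5 a lane holding 0x1234 yields 0x91.

#include <arm_neon.h>

/* Hypothetical usage sketch: narrow each 16-bit lane by an immediate
   right shift of 5.  Per the define_insn patterns added above, this is
   expected to assemble to a single "shrn v0.8b, v0.8h, #5".  */
int8x8_t
narrow_by_5 (int16x8_t x)
{
  return vshrn_n_s16 (x, 5);
}
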
#define vmull_high_lane_s16(a, b, c) \
__extension__ \
({ \
result; \
})
-#define vshrn_n_s16(a, b) \
- __extension__ \
- ({ \
- int16x8_t a_ = (a); \
- int8x8_t result; \
- __asm__ ("shrn %0.8b,%1.8h,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_s32(a, b) \
- __extension__ \
- ({ \
- int32x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("shrn %0.4h,%1.4s,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_s64(a, b) \
- __extension__ \
- ({ \
- int64x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("shrn %0.2s,%1.2d,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_u16(a, b) \
- __extension__ \
- ({ \
- uint16x8_t a_ = (a); \
- uint8x8_t result; \
- __asm__ ("shrn %0.8b,%1.8h,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_u32(a, b) \
- __extension__ \
- ({ \
- uint32x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("shrn %0.4h,%1.4s,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_u64(a, b) \
- __extension__ \
- ({ \
- uint64x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("shrn %0.2s,%1.2d,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
#define vsli_n_p8(a, b, c) \
__extension__ \
({ \