#ifdef __FAST_MATH
return __a + __b;
#else
- return (float32x2_t) __builtin_neon_vaddv2sf (__a, __b, 3);
+ return (float32x2_t) __builtin_neon_vaddv2sf (__a, __b);
#endif
}
#ifdef __FAST_MATH
return __a + __b;
#else
- return (float32x4_t) __builtin_neon_vaddv4sf (__a, __b, 3);
+ return (float32x4_t) __builtin_neon_vaddv4sf (__a, __b);
#endif
}
/* vaddl_* — "long" (widening) vector add: wraps the signed (…s…) and
   unsigned (…u…) NEON builtins.  Unsigned variants cast arguments to the
   signed vector types the builtins are declared with and cast the result
   back; the bit pattern is unchanged.  */
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vaddl_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int16x8_t)__builtin_neon_vaddlsv8qi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vaddl_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int32x4_t)__builtin_neon_vaddlsv4hi (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vaddl_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int64x2_t)__builtin_neon_vaddlsv2si (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vaddluv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vaddluv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vaddluv2si ((int32x2_t) __a, (int32x2_t) __b);
}
/* vaddw_* — "wide" add (wide accumulator + narrow vector): wraps the
   signed/unsigned NEON builtins; unsigned variants cast through the
   signed vector types the builtins expect.  */
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vaddw_s8 (int16x8_t __a, int8x8_t __b)
{
  return (int16x8_t)__builtin_neon_vaddwsv8qi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vaddw_s16 (int32x4_t __a, int16x4_t __b)
{
  return (int32x4_t)__builtin_neon_vaddwsv4hi (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vaddw_s32 (int64x2_t __a, int32x2_t __b)
{
  return (int64x2_t)__builtin_neon_vaddwsv2si (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vaddwuv8qi ((int16x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vaddwuv4hi ((int32x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vaddwuv2si ((int64x2_t) __a, (int32x2_t) __b);
}
/* vhadd_* (64-bit vectors) — halving add: wraps the signed/unsigned
   NEON builtins; unsigned variants cast through the builtins' signed
   vector parameter types.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vhadd_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vhaddsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vhadd_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vhaddsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vhadd_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vhaddsv2si (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vhadduv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vhadduv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vhadduv2si ((int32x2_t) __a, (int32x2_t) __b);
}
/* vhaddq_* (128-bit vectors) — halving add, quadword forms of the
   vhadd_* wrappers above in the file; same cast pattern for unsigned.  */
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vhaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vhaddsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vhaddsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vhaddsv4si (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vhadduv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vhadduv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vhadduv4si ((int32x4_t) __a, (int32x4_t) __b);
}
/* vrhadd_* (64-bit vectors) — rounding halving add: wraps the dedicated
   vrhadds/vrhaddu builtins (rounding is encoded in the builtin name,
   not in a trailing constant argument).  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vrhadd_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vrhaddsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vrhadd_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vrhaddsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vrhadd_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vrhaddsv2si (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vrhadduv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vrhadduv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vrhadduv2si ((int32x2_t) __a, (int32x2_t) __b);
}
/* vrhaddq_* (128-bit vectors) — rounding halving add, quadword forms;
   same structure as the doubleword vrhadd_* wrappers.  */
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vrhaddsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vrhaddsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vrhaddsv4si (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vrhadduv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vrhadduv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vrhadduv4si ((int32x4_t) __a, (int32x4_t) __b);
}
/* vqadd_* (64-bit vectors) — saturating add, including the single-lane
   64-bit forms (…di).  Unsigned variants cast through the builtins'
   signed parameter types.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vqadd_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vqaddsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqadd_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vqaddsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqadd_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vqaddsv2si (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vqadd_s64 (int64x1_t __a, int64x1_t __b)
{
  return (int64x1_t)__builtin_neon_vqaddsdi (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vqadduv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vqadduv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vqadduv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return (uint64x1_t)__builtin_neon_vqaddudi ((int64x1_t) __a, (int64x1_t) __b);
}
/* vqaddq_* (128-bit vectors) — saturating add, quadword forms; same
   structure as the doubleword vqadd_* wrappers.  */
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vqaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vqaddsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vqaddsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vqaddsv4si (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqaddq_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int64x2_t)__builtin_neon_vqaddsv2di (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vqadduv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vqadduv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vqadduv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vqadduv2di ((int64x2_t) __a, (int64x2_t) __b);
}
/* vaddhn_* — add and take the high half of each lane (narrowing).  The
   operation is sign-agnostic, so signed and unsigned variants share one
   builtin per width; unsigned forms just cast through the signed types.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vaddhn_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int8x8_t)__builtin_neon_vaddhnv8hi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vaddhn_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int16x4_t)__builtin_neon_vaddhnv4si (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vaddhn_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int32x2_t)__builtin_neon_vaddhnv2di (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vaddhnv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vaddhnv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vaddhnv2di ((int64x2_t) __a, (int64x2_t) __b);
}
/* vraddhn_* — rounding variant of vaddhn_*: rounding is selected by the
   dedicated vraddhn builtins rather than a trailing constant argument.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vraddhn_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int8x8_t)__builtin_neon_vraddhnv8hi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vraddhn_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int16x4_t)__builtin_neon_vraddhnv4si (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vraddhn_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int32x2_t)__builtin_neon_vraddhnv2di (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vraddhnv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vraddhnv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vraddhnv2di ((int64x2_t) __a, (int64x2_t) __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
#ifdef __FAST_MATH
return __a * __b;
#else
- return (float32x2_t) __builtin_neon_vmulv2sf (__a, __b, 3);
+ return (float32x2_t) __builtin_neon_vmulfv2sf (__a, __b);
#endif
}
#ifdef __FAST_MATH
return __a * __b;
#else
- return (float32x4_t) __builtin_neon_vmulv4sf (__a, __b, 3);
+ return (float32x4_t) __builtin_neon_vmulfv4sf (__a, __b);
#endif
}
/* vmul_p8 / vmulq_p8 — polynomial multiply: wraps the dedicated vmulp
   builtins; polynomial arguments cast through the signed vector types
   the builtins are declared with.  */
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vmul_p8 (poly8x8_t __a, poly8x8_t __b)
{
  return (poly8x8_t)__builtin_neon_vmulpv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
{
  return (poly8x16_t)__builtin_neon_vmulpv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
/* vqdmulh_* / vqdmulhq_* — saturating doubling multiply returning the
   high half; signed-only, so the builtin takes no sign suffix.  */
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vqdmulhv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vqdmulhv2si (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vqdmulhv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vqdmulhv4si (__a, __b);
}
/* vqrdmulh_* / vqrdmulhq_* — rounding variant of vqdmulh_*: selects the
   dedicated vqrdmulh builtins instead of a trailing rounding flag.  */
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vqrdmulhv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vqrdmulhv2si (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vqrdmulhv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vqrdmulhv4si (__a, __b);
}
/* vmull_* — widening multiply: signed (…s…), unsigned (…u…) and
   polynomial (…p…) variants each map to a distinct builtin; non-signed
   arguments cast through the builtins' signed parameter types.  */
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmull_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int16x8_t)__builtin_neon_vmullsv8qi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmull_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int32x4_t)__builtin_neon_vmullsv4hi (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vmull_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int64x2_t)__builtin_neon_vmullsv2si (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmull_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vmulluv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmull_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vmulluv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vmull_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vmulluv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
vmull_p8 (poly8x8_t __a, poly8x8_t __b)
{
  return (poly16x8_t)__builtin_neon_vmullpv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
/* vqdmull_* — saturating doubling widening multiply; signed-only, so
   the builtin carries no sign suffix.  */
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmull_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int32x4_t)__builtin_neon_vqdmullv4hi (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmull_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int64x2_t)__builtin_neon_vqdmullv2si (__a, __b);
}
/* vmla_* (64-bit vectors) — multiply-accumulate (__a + __b * __c).  The
   operation is the same for signed, unsigned and float lane types, so a
   single builtin per vector mode serves all variants; unsigned forms
   cast through the builtins' signed parameter types.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vmla_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return (int8x8_t)__builtin_neon_vmlav8qi (__a, __b, __c);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmla_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return (int16x4_t)__builtin_neon_vmlav4hi (__a, __b, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmla_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return (int32x2_t)__builtin_neon_vmlav2si (__a, __b, __c);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
  return (float32x2_t)__builtin_neon_vmlav2sf (__a, __b, __c);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vmla_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return (uint8x8_t)__builtin_neon_vmlav8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmla_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return (uint16x4_t)__builtin_neon_vmlav4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmla_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return (uint32x2_t)__builtin_neon_vmlav2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
}
/* vmlaq_* (128-bit vectors) — multiply-accumulate, quadword forms of the
   vmla_* wrappers; one builtin per mode, shared across element types.  */
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vmlaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
  return (int8x16_t)__builtin_neon_vmlav16qi (__a, __b, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return (int16x8_t)__builtin_neon_vmlav8hi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return (int32x4_t)__builtin_neon_vmlav4si (__a, __b, __c);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return (float32x4_t)__builtin_neon_vmlav4sf (__a, __b, __c);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vmlaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return (uint8x16_t)__builtin_neon_vmlav16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return (uint16x8_t)__builtin_neon_vmlav8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return (uint32x4_t)__builtin_neon_vmlav4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
}
/* vmlal_* — widening multiply-accumulate: signedness is encoded in the
   builtin name (vmlals/vmlalu); unsigned variants cast through the
   builtins' signed parameter types.  */
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return (int16x8_t)__builtin_neon_vmlalsv8qi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return (int32x4_t)__builtin_neon_vmlalsv4hi (__a, __b, __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return (int64x2_t)__builtin_neon_vmlalsv2si (__a, __b, __c);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return (uint16x8_t)__builtin_neon_vmlaluv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return (uint32x4_t)__builtin_neon_vmlaluv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vmlal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return (uint64x2_t)__builtin_neon_vmlaluv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
}
/* vqdmlal_* — saturating doubling widening multiply-accumulate;
   signed-only, so the builtin carries no sign suffix.  */
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return (int32x4_t)__builtin_neon_vqdmlalv4hi (__a, __b, __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return (int64x2_t)__builtin_neon_vqdmlalv2si (__a, __b, __c);
}
/* vmls_* (64-bit vectors) — multiply-subtract (__a - __b * __c); one
   builtin per vector mode, shared across signed/unsigned/float element
   types; unsigned variants cast through the signed parameter types.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vmls_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return (int8x8_t)__builtin_neon_vmlsv8qi (__a, __b, __c);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmls_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return (int16x4_t)__builtin_neon_vmlsv4hi (__a, __b, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmls_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return (int32x2_t)__builtin_neon_vmlsv2si (__a, __b, __c);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
  return (float32x2_t)__builtin_neon_vmlsv2sf (__a, __b, __c);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vmls_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return (uint8x8_t)__builtin_neon_vmlsv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmls_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return (uint16x4_t)__builtin_neon_vmlsv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmls_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return (uint32x2_t)__builtin_neon_vmlsv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
}
/* vmlsq_* (128-bit vectors) — multiply-subtract, quadword forms of the
   vmls_* wrappers; same shared-builtin/cast pattern.  */
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vmlsq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
  return (int8x16_t)__builtin_neon_vmlsv16qi (__a, __b, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlsq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return (int16x8_t)__builtin_neon_vmlsv8hi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlsq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return (int32x4_t)__builtin_neon_vmlsv4si (__a, __b, __c);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return (float32x4_t)__builtin_neon_vmlsv4sf (__a, __b, __c);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vmlsq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return (uint8x16_t)__builtin_neon_vmlsv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlsq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return (uint16x8_t)__builtin_neon_vmlsv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlsq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return (uint32x4_t)__builtin_neon_vmlsv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
}
/* vmlsl_* — widening multiply-subtract: signedness is encoded in the
   builtin name (vmlsls/vmlslu); unsigned variants cast through the
   builtins' signed parameter types.  */
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlsl_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return (int16x8_t)__builtin_neon_vmlslsv8qi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return (int32x4_t)__builtin_neon_vmlslsv4hi (__a, __b, __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return (int64x2_t)__builtin_neon_vmlslsv2si (__a, __b, __c);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlsl_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return (uint16x8_t)__builtin_neon_vmlsluv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlsl_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return (uint32x4_t)__builtin_neon_vmlsluv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vmlsl_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return (uint64x2_t)__builtin_neon_vmlsluv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
}
/* vqdmlsl_* — saturating doubling widening multiply-subtract;
   signed-only, so the builtin carries no sign suffix.  */
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return (int32x4_t)__builtin_neon_vqdmlslv4hi (__a, __b, __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return (int64x2_t)__builtin_neon_vqdmlslv2si (__a, __b, __c);
}
#ifdef __ARM_FEATURE_FMA
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vfma_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
- return (float32x2_t)__builtin_neon_vfmav2sf (__a, __b, __c, 3);
+ return (float32x2_t)__builtin_neon_vfmav2sf (__a, __b, __c);
}
#endif
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
- return (float32x4_t)__builtin_neon_vfmav4sf (__a, __b, __c, 3);
+ return (float32x4_t)__builtin_neon_vfmav4sf (__a, __b, __c);
}
#endif
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vfms_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
- return (float32x2_t)__builtin_neon_vfmsv2sf (__a, __b, __c, 3);
+ return (float32x2_t)__builtin_neon_vfmsv2sf (__a, __b, __c);
}
#endif
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
- return (float32x4_t)__builtin_neon_vfmsv4sf (__a, __b, __c, 3);
+ return (float32x4_t)__builtin_neon_vfmsv4sf (__a, __b, __c);
}
#endif
#ifdef __FAST_MATH
return __a - __b;
#else
- return (float32x2_t) __builtin_neon_vsubv2sf (__a, __b, 3);
+ return (float32x2_t) __builtin_neon_vsubv2sf (__a, __b);
#endif
}
#ifdef __FAST_MATH
return __a - __b;
#else
- return (float32x4_t) __builtin_neon_vsubv4sf (__a, __b, 3);
+ return (float32x4_t) __builtin_neon_vsubv4sf (__a, __b);
#endif
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vsubl_s8 (int8x8_t __a, int8x8_t __b)
{
- return (int16x8_t)__builtin_neon_vsublv8qi (__a, __b, 1);
+ return (int16x8_t)__builtin_neon_vsublsv8qi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vsubl_s16 (int16x4_t __a, int16x4_t __b)
{
- return (int32x4_t)__builtin_neon_vsublv4hi (__a, __b, 1);
+ return (int32x4_t)__builtin_neon_vsublsv4hi (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vsubl_s32 (int32x2_t __a, int32x2_t __b)
{
- return (int64x2_t)__builtin_neon_vsublv2si (__a, __b, 1);
+ return (int64x2_t)__builtin_neon_vsublsv2si (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vsublv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vsubluv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vsublv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vsubluv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint64x2_t)__builtin_neon_vsublv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint64x2_t)__builtin_neon_vsubluv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vsubw_s8 (int16x8_t __a, int8x8_t __b)
{
- return (int16x8_t)__builtin_neon_vsubwv8qi (__a, __b, 1);
+ return (int16x8_t)__builtin_neon_vsubwsv8qi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vsubw_s16 (int32x4_t __a, int16x4_t __b)
{
- return (int32x4_t)__builtin_neon_vsubwv4hi (__a, __b, 1);
+ return (int32x4_t)__builtin_neon_vsubwsv4hi (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vsubw_s32 (int64x2_t __a, int32x2_t __b)
{
- return (int64x2_t)__builtin_neon_vsubwv2si (__a, __b, 1);
+ return (int64x2_t)__builtin_neon_vsubwsv2si (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vsubwv8qi ((int16x8_t) __a, (int8x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vsubwuv8qi ((int16x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vsubwv4hi ((int32x4_t) __a, (int16x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vsubwuv4hi ((int32x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
{
- return (uint64x2_t)__builtin_neon_vsubwv2si ((int64x2_t) __a, (int32x2_t) __b, 0);
+ return (uint64x2_t)__builtin_neon_vsubwuv2si ((int64x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vhsub_s8 (int8x8_t __a, int8x8_t __b)
{
- return (int8x8_t)__builtin_neon_vhsubv8qi (__a, __b, 1);
+ return (int8x8_t)__builtin_neon_vhsubsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vhsub_s16 (int16x4_t __a, int16x4_t __b)
{
- return (int16x4_t)__builtin_neon_vhsubv4hi (__a, __b, 1);
+ return (int16x4_t)__builtin_neon_vhsubsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vhsub_s32 (int32x2_t __a, int32x2_t __b)
{
- return (int32x2_t)__builtin_neon_vhsubv2si (__a, __b, 1);
+ return (int32x2_t)__builtin_neon_vhsubsv2si (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vhsub_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vhsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vhsubuv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vhsub_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vhsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vhsubuv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vhsub_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vhsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vhsubuv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vhsubq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (int8x16_t)__builtin_neon_vhsubv16qi (__a, __b, 1);
+ return (int8x16_t)__builtin_neon_vhsubsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vhsubq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (int16x8_t)__builtin_neon_vhsubv8hi (__a, __b, 1);
+ return (int16x8_t)__builtin_neon_vhsubsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vhsubq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (int32x4_t)__builtin_neon_vhsubv4si (__a, __b, 1);
+ return (int32x4_t)__builtin_neon_vhsubsv4si (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vhsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+ return (uint8x16_t)__builtin_neon_vhsubuv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vhsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vhsubuv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vhsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vhsubuv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vqsub_s8 (int8x8_t __a, int8x8_t __b)
{
- return (int8x8_t)__builtin_neon_vqsubv8qi (__a, __b, 1);
+ return (int8x8_t)__builtin_neon_vqsubsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqsub_s16 (int16x4_t __a, int16x4_t __b)
{
- return (int16x4_t)__builtin_neon_vqsubv4hi (__a, __b, 1);
+ return (int16x4_t)__builtin_neon_vqsubsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqsub_s32 (int32x2_t __a, int32x2_t __b)
{
- return (int32x2_t)__builtin_neon_vqsubv2si (__a, __b, 1);
+ return (int32x2_t)__builtin_neon_vqsubsv2si (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vqsub_s64 (int64x1_t __a, int64x1_t __b)
{
- return (int64x1_t)__builtin_neon_vqsubdi (__a, __b, 1);
+ return (int64x1_t)__builtin_neon_vqsubsdi (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vqsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vqsubuv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vqsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vqsubuv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vqsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vqsubuv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
{
- return (uint64x1_t)__builtin_neon_vqsubdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+ return (uint64x1_t)__builtin_neon_vqsubudi ((int64x1_t) __a, (int64x1_t) __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vqsubq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (int8x16_t)__builtin_neon_vqsubv16qi (__a, __b, 1);
+ return (int8x16_t)__builtin_neon_vqsubsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqsubq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (int16x8_t)__builtin_neon_vqsubv8hi (__a, __b, 1);
+ return (int16x8_t)__builtin_neon_vqsubsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqsubq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (int32x4_t)__builtin_neon_vqsubv4si (__a, __b, 1);
+ return (int32x4_t)__builtin_neon_vqsubsv4si (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqsubq_s64 (int64x2_t __a, int64x2_t __b)
{
- return (int64x2_t)__builtin_neon_vqsubv2di (__a, __b, 1);
+ return (int64x2_t)__builtin_neon_vqsubsv2di (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vqsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+ return (uint8x16_t)__builtin_neon_vqsubuv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vqsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vqsubuv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vqsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vqsubuv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
{
- return (uint64x2_t)__builtin_neon_vqsubv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+ return (uint64x2_t)__builtin_neon_vqsubuv2di ((int64x2_t) __a, (int64x2_t) __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vsubhn_s16 (int16x8_t __a, int16x8_t __b)
{
- return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b, 1);
+ return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vsubhn_s32 (int32x4_t __a, int32x4_t __b)
{
- return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b, 1);
+ return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vsubhn_s64 (int64x2_t __a, int64x2_t __b)
{
- return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b, 1);
+ return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vrsubhn_s16 (int16x8_t __a, int16x8_t __b)
{
- return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b, 5);
+ return (int8x8_t)__builtin_neon_vrsubhnv8hi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vrsubhn_s32 (int32x4_t __a, int32x4_t __b)
{
- return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b, 5);
+ return (int16x4_t)__builtin_neon_vrsubhnv4si (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vrsubhn_s64 (int64x2_t __a, int64x2_t __b)
{
- return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b, 5);
+ return (int32x2_t)__builtin_neon_vrsubhnv2di (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 4);
+ return (uint8x8_t)__builtin_neon_vrsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b, 4);
+ return (uint16x4_t)__builtin_neon_vrsubhnv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b, 4);
+ return (uint32x2_t)__builtin_neon_vrsubhnv2di ((int64x2_t) __a, (int64x2_t) __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vceq_s8 (int8x8_t __a, int8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vceqv8qi (__a, __b, 1);
+ return (uint8x8_t)__builtin_neon_vceqv8qi (__a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vceq_s16 (int16x4_t __a, int16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vceqv4hi (__a, __b, 1);
+ return (uint16x4_t)__builtin_neon_vceqv4hi (__a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vceq_s32 (int32x2_t __a, int32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vceqv2si (__a, __b, 1);
+ return (uint32x2_t)__builtin_neon_vceqv2si (__a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vceq_f32 (float32x2_t __a, float32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vceqv2sf (__a, __b, 3);
+ return (uint32x2_t)__builtin_neon_vceqv2sf (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vceq_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vceq_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vceqv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vceqv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vceq_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vceqv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vceqv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vceq_p8 (poly8x8_t __a, poly8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+ return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vceqq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vceqv16qi (__a, __b, 1);
+ return (uint8x16_t)__builtin_neon_vceqv16qi (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vceqq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vceqv8hi (__a, __b, 1);
+ return (uint16x8_t)__builtin_neon_vceqv8hi (__a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vceqq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vceqv4si (__a, __b, 1);
+ return (uint32x4_t)__builtin_neon_vceqv4si (__a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vceqq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vceqv4sf (__a, __b, 3);
+ return (uint32x4_t)__builtin_neon_vceqv4sf (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+ return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vceqv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vceqv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vceqv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vceqv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b, 2);
+ return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vcge_s8 (int8x8_t __a, int8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vcgev8qi (__a, __b, 1);
+ return (uint8x8_t)__builtin_neon_vcgev8qi (__a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vcge_s16 (int16x4_t __a, int16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vcgev4hi (__a, __b, 1);
+ return (uint16x4_t)__builtin_neon_vcgev4hi (__a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcge_s32 (int32x2_t __a, int32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgev2si (__a, __b, 1);
+ return (uint32x2_t)__builtin_neon_vcgev2si (__a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcge_f32 (float32x2_t __a, float32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgev2sf (__a, __b, 3);
+ return (uint32x2_t)__builtin_neon_vcgev2sf (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vcge_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vcgeuv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vcgeuv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vcge_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vcgeuv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vcgeuv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcge_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgeuv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vcgeuv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vcgeq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vcgev16qi (__a, __b, 1);
+ return (uint8x16_t)__builtin_neon_vcgev16qi (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vcgeq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vcgev8hi (__a, __b, 1);
+ return (uint16x8_t)__builtin_neon_vcgev8hi (__a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcgeq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgev4si (__a, __b, 1);
+ return (uint32x4_t)__builtin_neon_vcgev4si (__a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcgeq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgev4sf (__a, __b, 3);
+ return (uint32x4_t)__builtin_neon_vcgev4sf (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vcgeuv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+ return (uint8x16_t)__builtin_neon_vcgeuv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vcgeuv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vcgeuv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgeuv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vcgeuv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vcle_s8 (int8x8_t __a, int8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vcgev8qi (__b, __a, 1);
+ return (uint8x8_t)__builtin_neon_vcgev8qi (__b, __a);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vcle_s16 (int16x4_t __a, int16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vcgev4hi (__b, __a, 1);
+ return (uint16x4_t)__builtin_neon_vcgev4hi (__b, __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcle_s32 (int32x2_t __a, int32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgev2si (__b, __a, 1);
+ return (uint32x2_t)__builtin_neon_vcgev2si (__b, __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcle_f32 (float32x2_t __a, float32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgev2sf (__b, __a, 3);
+ return (uint32x2_t)__builtin_neon_vcgev2sf (__b, __a);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vcle_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vcgeuv8qi ((int8x8_t) __b, (int8x8_t) __a, 0);
+ return (uint8x8_t)__builtin_neon_vcgeuv8qi ((int8x8_t) __b, (int8x8_t) __a);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vcle_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vcgeuv4hi ((int16x4_t) __b, (int16x4_t) __a, 0);
+ return (uint16x4_t)__builtin_neon_vcgeuv4hi ((int16x4_t) __b, (int16x4_t) __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcle_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgeuv2si ((int32x2_t) __b, (int32x2_t) __a, 0);
+ return (uint32x2_t)__builtin_neon_vcgeuv2si ((int32x2_t) __b, (int32x2_t) __a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vcleq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vcgev16qi (__b, __a, 1);
+ return (uint8x16_t)__builtin_neon_vcgev16qi (__b, __a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vcleq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vcgev8hi (__b, __a, 1);
+ return (uint16x8_t)__builtin_neon_vcgev8hi (__b, __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcleq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgev4si (__b, __a, 1);
+ return (uint32x4_t)__builtin_neon_vcgev4si (__b, __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcleq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgev4sf (__b, __a, 3);
+ return (uint32x4_t)__builtin_neon_vcgev4sf (__b, __a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vcgeuv16qi ((int8x16_t) __b, (int8x16_t) __a, 0);
+ return (uint8x16_t)__builtin_neon_vcgeuv16qi ((int8x16_t) __b, (int8x16_t) __a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vcgeuv8hi ((int16x8_t) __b, (int16x8_t) __a, 0);
+ return (uint16x8_t)__builtin_neon_vcgeuv8hi ((int16x8_t) __b, (int16x8_t) __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgeuv4si ((int32x4_t) __b, (int32x4_t) __a, 0);
+ return (uint32x4_t)__builtin_neon_vcgeuv4si ((int32x4_t) __b, (int32x4_t) __a);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vcgt_s8 (int8x8_t __a, int8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vcgtv8qi (__a, __b, 1);
+ return (uint8x8_t)__builtin_neon_vcgtv8qi (__a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vcgt_s16 (int16x4_t __a, int16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vcgtv4hi (__a, __b, 1);
+ return (uint16x4_t)__builtin_neon_vcgtv4hi (__a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcgt_s32 (int32x2_t __a, int32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgtv2si (__a, __b, 1);
+ return (uint32x2_t)__builtin_neon_vcgtv2si (__a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcgt_f32 (float32x2_t __a, float32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgtv2sf (__a, __b, 3);
+ return (uint32x2_t)__builtin_neon_vcgtv2sf (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vcgtuv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vcgtuv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vcgtuv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vcgtuv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgtuv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vcgtuv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vcgtq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vcgtv16qi (__a, __b, 1);
+ return (uint8x16_t)__builtin_neon_vcgtv16qi (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vcgtq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vcgtv8hi (__a, __b, 1);
+ return (uint16x8_t)__builtin_neon_vcgtv8hi (__a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcgtq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgtv4si (__a, __b, 1);
+ return (uint32x4_t)__builtin_neon_vcgtv4si (__a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcgtq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgtv4sf (__a, __b, 3);
+ return (uint32x4_t)__builtin_neon_vcgtv4sf (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vcgtuv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+ return (uint8x16_t)__builtin_neon_vcgtuv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vcgtuv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vcgtuv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgtuv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vcgtuv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vclt_s8 (int8x8_t __a, int8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vcgtv8qi (__b, __a, 1);
+ return (uint8x8_t)__builtin_neon_vcgtv8qi (__b, __a);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vclt_s16 (int16x4_t __a, int16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vcgtv4hi (__b, __a, 1);
+ return (uint16x4_t)__builtin_neon_vcgtv4hi (__b, __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vclt_s32 (int32x2_t __a, int32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgtv2si (__b, __a, 1);
+ return (uint32x2_t)__builtin_neon_vcgtv2si (__b, __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vclt_f32 (float32x2_t __a, float32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgtv2sf (__b, __a, 3);
+ return (uint32x2_t)__builtin_neon_vcgtv2sf (__b, __a);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vclt_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vcgtuv8qi ((int8x8_t) __b, (int8x8_t) __a, 0);
+ return (uint8x8_t)__builtin_neon_vcgtuv8qi ((int8x8_t) __b, (int8x8_t) __a);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vclt_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vcgtuv4hi ((int16x4_t) __b, (int16x4_t) __a, 0);
+ return (uint16x4_t)__builtin_neon_vcgtuv4hi ((int16x4_t) __b, (int16x4_t) __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vclt_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcgtuv2si ((int32x2_t) __b, (int32x2_t) __a, 0);
+ return (uint32x2_t)__builtin_neon_vcgtuv2si ((int32x2_t) __b, (int32x2_t) __a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vcltq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vcgtv16qi (__b, __a, 1);
+ return (uint8x16_t)__builtin_neon_vcgtv16qi (__b, __a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vcltq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vcgtv8hi (__b, __a, 1);
+ return (uint16x8_t)__builtin_neon_vcgtv8hi (__b, __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcltq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgtv4si (__b, __a, 1);
+ return (uint32x4_t)__builtin_neon_vcgtv4si (__b, __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcltq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgtv4sf (__b, __a, 3);
+ return (uint32x4_t)__builtin_neon_vcgtv4sf (__b, __a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vcgtuv16qi ((int8x16_t) __b, (int8x16_t) __a, 0);
+ return (uint8x16_t)__builtin_neon_vcgtuv16qi ((int8x16_t) __b, (int8x16_t) __a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vcgtuv8hi ((int16x8_t) __b, (int16x8_t) __a, 0);
+ return (uint16x8_t)__builtin_neon_vcgtuv8hi ((int16x8_t) __b, (int16x8_t) __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcgtuv4si ((int32x4_t) __b, (int32x4_t) __a, 0);
+ return (uint32x4_t)__builtin_neon_vcgtuv4si ((int32x4_t) __b, (int32x4_t) __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcage_f32 (float32x2_t __a, float32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcagev2sf (__a, __b, 3);
+ return (uint32x2_t)__builtin_neon_vcagev2sf (__a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcageq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcagev4sf (__a, __b, 3);
+ return (uint32x4_t)__builtin_neon_vcagev4sf (__a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcale_f32 (float32x2_t __a, float32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcagev2sf (__b, __a, 3);
+ return (uint32x2_t)__builtin_neon_vcagev2sf (__b, __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcaleq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcagev4sf (__b, __a, 3);
+ return (uint32x4_t)__builtin_neon_vcagev4sf (__b, __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcagt_f32 (float32x2_t __a, float32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcagtv2sf (__a, __b, 3);
+ return (uint32x2_t)__builtin_neon_vcagtv2sf (__a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcagtq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcagtv4sf (__a, __b, 3);
+ return (uint32x4_t)__builtin_neon_vcagtv4sf (__a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcalt_f32 (float32x2_t __a, float32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vcagtv2sf (__b, __a, 3);
+ return (uint32x2_t)__builtin_neon_vcagtv2sf (__b, __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcaltq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vcagtv4sf (__b, __a, 3);
+ return (uint32x4_t)__builtin_neon_vcagtv4sf (__b, __a);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vtst_s8 (int8x8_t __a, int8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vtstv8qi (__a, __b, 1);
+ return (uint8x8_t)__builtin_neon_vtstv8qi (__a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vtst_s16 (int16x4_t __a, int16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vtstv4hi (__a, __b, 1);
+ return (uint16x4_t)__builtin_neon_vtstv4hi (__a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vtst_s32 (int32x2_t __a, int32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vtstv2si (__a, __b, 1);
+ return (uint32x2_t)__builtin_neon_vtstv2si (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vtst_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vtst_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vtstv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vtstv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vtst_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vtstv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vtstv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vtst_p8 (poly8x8_t __a, poly8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+ return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vtstq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vtstv16qi (__a, __b, 1);
+ return (uint8x16_t)__builtin_neon_vtstv16qi (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vtstq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vtstv8hi (__a, __b, 1);
+ return (uint16x8_t)__builtin_neon_vtstv8hi (__a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vtstq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vtstv4si (__a, __b, 1);
+ return (uint32x4_t)__builtin_neon_vtstv4si (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+ return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vtstv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vtstv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vtstv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vtstv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vtstq_p8 (poly8x16_t __a, poly8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b, 2);
+ return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vabd_s8 (int8x8_t __a, int8x8_t __b)
{
- return (int8x8_t)__builtin_neon_vabdv8qi (__a, __b, 1);
+ return (int8x8_t)__builtin_neon_vabdsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vabd_s16 (int16x4_t __a, int16x4_t __b)
{
- return (int16x4_t)__builtin_neon_vabdv4hi (__a, __b, 1);
+ return (int16x4_t)__builtin_neon_vabdsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vabd_s32 (int32x2_t __a, int32x2_t __b)
{
- return (int32x2_t)__builtin_neon_vabdv2si (__a, __b, 1);
+ return (int32x2_t)__builtin_neon_vabdsv2si (__a, __b);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vabd_f32 (float32x2_t __a, float32x2_t __b)
{
- return (float32x2_t)__builtin_neon_vabdv2sf (__a, __b, 3);
+ return (float32x2_t)__builtin_neon_vabdfv2sf (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vabd_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vabdv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vabduv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vabd_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vabdv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vabduv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vabd_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vabdv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vabduv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vabdq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (int8x16_t)__builtin_neon_vabdv16qi (__a, __b, 1);
+ return (int8x16_t)__builtin_neon_vabdsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vabdq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (int16x8_t)__builtin_neon_vabdv8hi (__a, __b, 1);
+ return (int16x8_t)__builtin_neon_vabdsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vabdq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (int32x4_t)__builtin_neon_vabdv4si (__a, __b, 1);
+ return (int32x4_t)__builtin_neon_vabdsv4si (__a, __b);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vabdq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (float32x4_t)__builtin_neon_vabdv4sf (__a, __b, 3);
+ return (float32x4_t)__builtin_neon_vabdfv4sf (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vabdv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+ return (uint8x16_t)__builtin_neon_vabduv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vabdv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vabduv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vabdv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vabduv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vabdl_s8 (int8x8_t __a, int8x8_t __b)
{
- return (int16x8_t)__builtin_neon_vabdlv8qi (__a, __b, 1);
+ return (int16x8_t)__builtin_neon_vabdlsv8qi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vabdl_s16 (int16x4_t __a, int16x4_t __b)
{
- return (int32x4_t)__builtin_neon_vabdlv4hi (__a, __b, 1);
+ return (int32x4_t)__builtin_neon_vabdlsv4hi (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vabdl_s32 (int32x2_t __a, int32x2_t __b)
{
- return (int64x2_t)__builtin_neon_vabdlv2si (__a, __b, 1);
+ return (int64x2_t)__builtin_neon_vabdlsv2si (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vabdl_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vabdlv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vabdluv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vabdl_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vabdlv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vabdluv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vabdl_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint64x2_t)__builtin_neon_vabdlv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint64x2_t)__builtin_neon_vabdluv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vaba_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
- return (int8x8_t)__builtin_neon_vabav8qi (__a, __b, __c, 1);
+ return (int8x8_t)__builtin_neon_vabasv8qi (__a, __b, __c);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vaba_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
- return (int16x4_t)__builtin_neon_vabav4hi (__a, __b, __c, 1);
+ return (int16x4_t)__builtin_neon_vabasv4hi (__a, __b, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vaba_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
- return (int32x2_t)__builtin_neon_vabav2si (__a, __b, __c, 1);
+ return (int32x2_t)__builtin_neon_vabasv2si (__a, __b, __c);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vaba_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
- return (uint8x8_t)__builtin_neon_vabav8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+ return (uint8x8_t)__builtin_neon_vabauv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vaba_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
- return (uint16x4_t)__builtin_neon_vabav4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+ return (uint16x4_t)__builtin_neon_vabauv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vaba_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
- return (uint32x2_t)__builtin_neon_vabav2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+ return (uint32x2_t)__builtin_neon_vabauv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vabaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
- return (int8x16_t)__builtin_neon_vabav16qi (__a, __b, __c, 1);
+ return (int8x16_t)__builtin_neon_vabasv16qi (__a, __b, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vabaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
- return (int16x8_t)__builtin_neon_vabav8hi (__a, __b, __c, 1);
+ return (int16x8_t)__builtin_neon_vabasv8hi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vabaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
- return (int32x4_t)__builtin_neon_vabav4si (__a, __b, __c, 1);
+ return (int32x4_t)__builtin_neon_vabasv4si (__a, __b, __c);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vabaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
- return (uint8x16_t)__builtin_neon_vabav16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c, 0);
+ return (uint8x16_t)__builtin_neon_vabauv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vabaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
- return (uint16x8_t)__builtin_neon_vabav8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c, 0);
+ return (uint16x8_t)__builtin_neon_vabauv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vabaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
- return (uint32x4_t)__builtin_neon_vabav4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c, 0);
+ return (uint32x4_t)__builtin_neon_vabauv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vabal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
{
- return (int16x8_t)__builtin_neon_vabalv8qi (__a, __b, __c, 1);
+ return (int16x8_t)__builtin_neon_vabalsv8qi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vabal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
- return (int32x4_t)__builtin_neon_vabalv4hi (__a, __b, __c, 1);
+ return (int32x4_t)__builtin_neon_vabalsv4hi (__a, __b, __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vabal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
- return (int64x2_t)__builtin_neon_vabalv2si (__a, __b, __c, 1);
+ return (int64x2_t)__builtin_neon_vabalsv2si (__a, __b, __c);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vabal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
- return (uint16x8_t)__builtin_neon_vabalv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+ return (uint16x8_t)__builtin_neon_vabaluv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vabal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
- return (uint32x4_t)__builtin_neon_vabalv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+ return (uint32x4_t)__builtin_neon_vabaluv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vabal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
- return (uint64x2_t)__builtin_neon_vabalv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+ return (uint64x2_t)__builtin_neon_vabaluv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vmax_s8 (int8x8_t __a, int8x8_t __b)
{
- return (int8x8_t)__builtin_neon_vmaxv8qi (__a, __b, 1);
+ return (int8x8_t)__builtin_neon_vmaxsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmax_s16 (int16x4_t __a, int16x4_t __b)
{
- return (int16x4_t)__builtin_neon_vmaxv4hi (__a, __b, 1);
+ return (int16x4_t)__builtin_neon_vmaxsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmax_s32 (int32x2_t __a, int32x2_t __b)
{
- return (int32x2_t)__builtin_neon_vmaxv2si (__a, __b, 1);
+ return (int32x2_t)__builtin_neon_vmaxsv2si (__a, __b);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmax_f32 (float32x2_t __a, float32x2_t __b)
{
- return (float32x2_t)__builtin_neon_vmaxv2sf (__a, __b, 3);
+ return (float32x2_t)__builtin_neon_vmaxfv2sf (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vmax_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vmaxv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vmaxuv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmax_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vmaxv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vmaxuv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmax_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vmaxv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vmaxuv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vmaxq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (int8x16_t)__builtin_neon_vmaxv16qi (__a, __b, 1);
+ return (int8x16_t)__builtin_neon_vmaxsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmaxq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (int16x8_t)__builtin_neon_vmaxv8hi (__a, __b, 1);
+ return (int16x8_t)__builtin_neon_vmaxsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmaxq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (int32x4_t)__builtin_neon_vmaxv4si (__a, __b, 1);
+ return (int32x4_t)__builtin_neon_vmaxsv4si (__a, __b);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmaxq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (float32x4_t)__builtin_neon_vmaxv4sf (__a, __b, 3);
+ return (float32x4_t)__builtin_neon_vmaxfv4sf (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vmaxv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+ return (uint8x16_t)__builtin_neon_vmaxuv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vmaxv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vmaxuv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vmaxv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vmaxuv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vmin_s8 (int8x8_t __a, int8x8_t __b)
{
- return (int8x8_t)__builtin_neon_vminv8qi (__a, __b, 1);
+ return (int8x8_t)__builtin_neon_vminsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmin_s16 (int16x4_t __a, int16x4_t __b)
{
- return (int16x4_t)__builtin_neon_vminv4hi (__a, __b, 1);
+ return (int16x4_t)__builtin_neon_vminsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmin_s32 (int32x2_t __a, int32x2_t __b)
{
- return (int32x2_t)__builtin_neon_vminv2si (__a, __b, 1);
+ return (int32x2_t)__builtin_neon_vminsv2si (__a, __b);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmin_f32 (float32x2_t __a, float32x2_t __b)
{
- return (float32x2_t)__builtin_neon_vminv2sf (__a, __b, 3);
+ return (float32x2_t)__builtin_neon_vminfv2sf (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vmin_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vminv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vminuv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmin_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vminv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vminuv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmin_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vminv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vminuv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vminq_s8 (int8x16_t __a, int8x16_t __b)
{
- return (int8x16_t)__builtin_neon_vminv16qi (__a, __b, 1);
+ return (int8x16_t)__builtin_neon_vminsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vminq_s16 (int16x8_t __a, int16x8_t __b)
{
- return (int16x8_t)__builtin_neon_vminv8hi (__a, __b, 1);
+ return (int16x8_t)__builtin_neon_vminsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vminq_s32 (int32x4_t __a, int32x4_t __b)
{
- return (int32x4_t)__builtin_neon_vminv4si (__a, __b, 1);
+ return (int32x4_t)__builtin_neon_vminsv4si (__a, __b);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vminq_f32 (float32x4_t __a, float32x4_t __b)
{
- return (float32x4_t)__builtin_neon_vminv4sf (__a, __b, 3);
+ return (float32x4_t)__builtin_neon_vminfv4sf (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vminq_u8 (uint8x16_t __a, uint8x16_t __b)
{
- return (uint8x16_t)__builtin_neon_vminv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+ return (uint8x16_t)__builtin_neon_vminuv16qi ((int8x16_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vminq_u16 (uint16x8_t __a, uint16x8_t __b)
{
- return (uint16x8_t)__builtin_neon_vminv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+ return (uint16x8_t)__builtin_neon_vminuv8hi ((int16x8_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vminq_u32 (uint32x4_t __a, uint32x4_t __b)
{
- return (uint32x4_t)__builtin_neon_vminv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+ return (uint32x4_t)__builtin_neon_vminuv4si ((int32x4_t) __a, (int32x4_t) __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vpadd_s8 (int8x8_t __a, int8x8_t __b)
{
- return (int8x8_t)__builtin_neon_vpaddv8qi (__a, __b, 1);
+ return (int8x8_t)__builtin_neon_vpaddv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vpadd_s16 (int16x4_t __a, int16x4_t __b)
{
- return (int16x4_t)__builtin_neon_vpaddv4hi (__a, __b, 1);
+ return (int16x4_t)__builtin_neon_vpaddv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vpadd_s32 (int32x2_t __a, int32x2_t __b)
{
- return (int32x2_t)__builtin_neon_vpaddv2si (__a, __b, 1);
+ return (int32x2_t)__builtin_neon_vpaddv2si (__a, __b);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vpadd_f32 (float32x2_t __a, float32x2_t __b)
{
- return (float32x2_t)__builtin_neon_vpaddv2sf (__a, __b, 3);
+ return (float32x2_t)__builtin_neon_vpaddv2sf (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
- return (uint8x8_t)__builtin_neon_vpaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+ return (uint8x8_t)__builtin_neon_vpaddv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
- return (uint16x4_t)__builtin_neon_vpaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+ return (uint16x4_t)__builtin_neon_vpaddv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
- return (uint32x2_t)__builtin_neon_vpaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+ return (uint32x2_t)__builtin_neon_vpaddv2si ((int32x2_t) __a, (int32x2_t) __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vpaddl_s8 (int8x8_t __a)
{
- return (int16x4_t)__builtin_neon_vpaddlv8qi (__a, 1);
+ return (int16x4_t)__builtin_neon_vpaddlsv8qi (__a);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vpaddl_s16 (int16x4_t __a)
{
- return (int32x2_t)__builtin_neon_vpaddlv4hi (__a, 1);
+ return (int32x2_t)__builtin_neon_vpaddlsv4hi (__a);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vpaddl_s32 (int32x2_t __a)
{
- return (int64x1_t)__builtin_neon_vpaddlv2si (__a, 1);
+ return (int64x1_t)__builtin_neon_vpaddlsv2si (__a);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vpaddl_u8 (uint8x8_t __a)
{
- return (uint16x4_t)__builtin_neon_vpaddlv8qi ((int8x8_t) __a, 0);
+ return (uint16x4_t)__builtin_neon_vpaddluv8qi ((int8x8_t) __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vpaddl_u16 (uint16x4_t __a)
{
- return (uint32x2_t)__builtin_neon_vpaddlv4hi ((int16x4_t) __a, 0);
+ return (uint32x2_t)__builtin_neon_vpaddluv4hi ((int16x4_t) __a);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vpaddl_u32 (uint32x2_t __a)
{
- return (uint64x1_t)__builtin_neon_vpaddlv2si ((int32x2_t) __a, 0);
+ return (uint64x1_t)__builtin_neon_vpaddluv2si ((int32x2_t) __a);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vpaddlq_s8 (int8x16_t __a)
{
- return (int16x8_t)__builtin_neon_vpaddlv16qi (__a, 1);
+ return (int16x8_t)__builtin_neon_vpaddlsv16qi (__a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vpaddlq_s16 (int16x8_t __a)
{
- return (int32x4_t)__builtin_neon_vpaddlv8hi (__a, 1);
+ return (int32x4_t)__builtin_neon_vpaddlsv8hi (__a);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vpaddlq_s32 (int32x4_t __a)
{
- return (int64x2_t)__builtin_neon_vpaddlv4si (__a, 1);
+ return (int64x2_t)__builtin_neon_vpaddlsv4si (__a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vpaddlq_u8 (uint8x16_t __a)
{
- return (uint16x8_t)__builtin_neon_vpaddlv16qi ((int8x16_t) __a, 0);
+ return (uint16x8_t)__builtin_neon_vpaddluv16qi ((int8x16_t) __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vpaddlq_u16 (uint16x8_t __a)
{
- return (uint32x4_t)__builtin_neon_vpaddlv8hi ((int16x8_t) __a, 0);
+ return (uint32x4_t)__builtin_neon_vpaddluv8hi ((int16x8_t) __a);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vpaddlq_u32 (uint32x4_t __a)
{
- return (uint64x2_t)__builtin_neon_vpaddlv4si ((int32x4_t) __a, 0);
+ return (uint64x2_t)__builtin_neon_vpaddluv4si ((int32x4_t) __a);
}
/* VPADAL family: pairwise add and accumulate long.  Resolved from the
   unmerged patch residue: keep the '+' form, which calls the explicitly
   signed ('s') or unsigned ('u') suffixed builtin instead of passing a
   trailing type-qualifier magic number.  Unsigned variants cast their
   operands through the signed vector types the builtins are declared
   with; the result is cast back to the unsigned type.  */
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vpadal_s8 (int16x4_t __a, int8x8_t __b)
{
  return (int16x4_t)__builtin_neon_vpadalsv8qi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vpadal_s16 (int32x2_t __a, int16x4_t __b)
{
  return (int32x2_t)__builtin_neon_vpadalsv4hi (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vpadal_s32 (int64x1_t __a, int32x2_t __b)
{
  return (int64x1_t)__builtin_neon_vpadalsv2si (__a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vpadal_u8 (uint16x4_t __a, uint8x8_t __b)
{
  return (uint16x4_t)__builtin_neon_vpadaluv8qi ((int16x4_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vpadal_u16 (uint32x2_t __a, uint16x4_t __b)
{
  return (uint32x2_t)__builtin_neon_vpadaluv4hi ((int32x2_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vpadal_u32 (uint64x1_t __a, uint32x2_t __b)
{
  return (uint64x1_t)__builtin_neon_vpadaluv2si ((int64x1_t) __a, (int32x2_t) __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vpadalq_s8 (int16x8_t __a, int8x16_t __b)
{
  return (int16x8_t)__builtin_neon_vpadalsv16qi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vpadalq_s16 (int32x4_t __a, int16x8_t __b)
{
  return (int32x4_t)__builtin_neon_vpadalsv8hi (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vpadalq_s32 (int64x2_t __a, int32x4_t __b)
{
  return (int64x2_t)__builtin_neon_vpadalsv4si (__a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vpadalq_u8 (uint16x8_t __a, uint8x16_t __b)
{
  return (uint16x8_t)__builtin_neon_vpadaluv16qi ((int16x8_t) __a, (int8x16_t) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vpadalq_u16 (uint32x4_t __a, uint16x8_t __b)
{
  return (uint32x4_t)__builtin_neon_vpadaluv8hi ((int32x4_t) __a, (int16x8_t) __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vpadalq_u32 (uint64x2_t __a, uint32x4_t __b)
{
  return (uint64x2_t)__builtin_neon_vpadaluv4si ((int64x2_t) __a, (int32x4_t) __b);
}
/* VPMAX family: pairwise maximum (D-register forms only).  Resolved from
   the unmerged patch residue: keep the '+' form, which selects the
   signed ('s'), float ('f') or unsigned ('u') suffixed builtin instead
   of passing a trailing type-qualifier magic number.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vpmax_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vpmaxsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vpmax_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vpmaxsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vpmax_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vpmaxsv2si (__a, __b);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vpmax_f32 (float32x2_t __a, float32x2_t __b)
{
  return (float32x2_t)__builtin_neon_vpmaxfv2sf (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vpmax_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vpmaxuv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vpmax_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vpmaxuv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vpmax_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vpmaxuv2si ((int32x2_t) __a, (int32x2_t) __b);
}
/* VPMIN family: pairwise minimum (D-register forms only).  Resolved from
   the unmerged patch residue: keep the '+' form, which selects the
   signed ('s'), float ('f') or unsigned ('u') suffixed builtin instead
   of passing a trailing type-qualifier magic number.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vpmin_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vpminsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vpmin_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vpminsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vpmin_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vpminsv2si (__a, __b);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vpmin_f32 (float32x2_t __a, float32x2_t __b)
{
  return (float32x2_t)__builtin_neon_vpminfv2sf (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vpmin_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vpminuv8qi ((int8x8_t) __a, (int8x8_t) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vpmin_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vpminuv4hi ((int16x4_t) __a, (int16x4_t) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vpmin_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vpminuv2si ((int32x2_t) __a, (int32x2_t) __b);
}
/* VRECPS / VRSQRTS: Newton-Raphson step helpers for reciprocal and
   reciprocal-square-root estimation.  Resolved from the unmerged patch
   residue: keep the '+' form.  These builtins exist only for float, so
   the name keeps no signedness suffix; only the trailing magic-number
   argument is dropped.  */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vrecps_f32 (float32x2_t __a, float32x2_t __b)
{
  return (float32x2_t)__builtin_neon_vrecpsv2sf (__a, __b);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vrecpsq_f32 (float32x4_t __a, float32x4_t __b)
{
  return (float32x4_t)__builtin_neon_vrecpsv4sf (__a, __b);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vrsqrts_f32 (float32x2_t __a, float32x2_t __b)
{
  return (float32x2_t)__builtin_neon_vrsqrtsv2sf (__a, __b);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vrsqrtsq_f32 (float32x4_t __a, float32x4_t __b)
{
  return (float32x4_t)__builtin_neon_vrsqrtsv4sf (__a, __b);
}
/* VSHL family: shift by a signed per-element shift count in __b.
   Resolved from the unmerged patch residue: keep the '+' form, which
   selects the signed ('s') or unsigned ('u') suffixed builtin instead
   of passing a trailing type-qualifier magic number.  Note that the
   shift-count operand __b is a signed vector even for the unsigned
   data variants, so only __a is cast.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vshl_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vshlsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vshl_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vshlsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vshl_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vshlsv2si (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vshl_s64 (int64x1_t __a, int64x1_t __b)
{
  return (int64x1_t)__builtin_neon_vshlsdi (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vshl_u8 (uint8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vshluv8qi ((int8x8_t) __a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vshl_u16 (uint16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vshluv4hi ((int16x4_t) __a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vshl_u32 (uint32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vshluv2si ((int32x2_t) __a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vshl_u64 (uint64x1_t __a, int64x1_t __b)
{
  return (uint64x1_t)__builtin_neon_vshludi ((int64x1_t) __a, __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vshlsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vshlsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vshlsv4si (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vshlq_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int64x2_t)__builtin_neon_vshlsv2di (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vshluv16qi ((int8x16_t) __a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vshluv8hi ((int16x8_t) __a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vshluv4si ((int32x4_t) __a, __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vshlq_u64 (uint64x2_t __a, int64x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vshluv2di ((int64x2_t) __a, __b);
}
/* VRSHL family: rounding variant of VSHL.  Resolved from the unmerged
   patch residue: keep the '+' form.  The old code reused the vshl
   builtin with rounding encoded in the magic number (5 = signed
   rounding, 4 = unsigned rounding); the new builtins spell it out as
   vrshls/vrshlu.  The shift count __b stays signed for all variants.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vrshl_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vrshlsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vrshl_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vrshlsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vrshl_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vrshlsv2si (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vrshl_s64 (int64x1_t __a, int64x1_t __b)
{
  return (int64x1_t)__builtin_neon_vrshlsdi (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vrshl_u8 (uint8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vrshluv8qi ((int8x8_t) __a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vrshl_u16 (uint16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vrshluv4hi ((int16x4_t) __a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vrshl_u32 (uint32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vrshluv2si ((int32x2_t) __a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vrshl_u64 (uint64x1_t __a, int64x1_t __b)
{
  return (uint64x1_t)__builtin_neon_vrshludi ((int64x1_t) __a, __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vrshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vrshlsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vrshlsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vrshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vrshlsv4si (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vrshlq_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int64x2_t)__builtin_neon_vrshlsv2di (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vrshluv16qi ((int8x16_t) __a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vrshluv8hi ((int16x8_t) __a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vrshluv4si ((int32x4_t) __a, __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vrshluv2di ((int64x2_t) __a, __b);
}
/* VQSHL family: saturating variant of VSHL.  Resolved from the unmerged
   patch residue: keep the '+' form with explicit vqshls/vqshlu builtin
   names in place of the trailing type-qualifier magic number.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vqshl_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vqshlsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqshl_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vqshlsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqshl_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vqshlsv2si (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vqshl_s64 (int64x1_t __a, int64x1_t __b)
{
  return (int64x1_t)__builtin_neon_vqshlsdi (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqshl_u8 (uint8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vqshluv8qi ((int8x8_t) __a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqshl_u16 (uint16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vqshluv4hi ((int16x4_t) __a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqshl_u32 (uint32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vqshluv2si ((int32x2_t) __a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vqshl_u64 (uint64x1_t __a, int64x1_t __b)
{
  return (uint64x1_t)__builtin_neon_vqshludi ((int64x1_t) __a, __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vqshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vqshlsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vqshlsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vqshlsv4si (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqshlq_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int64x2_t)__builtin_neon_vqshlsv2di (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vqshluv16qi ((int8x16_t) __a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vqshluv8hi ((int16x8_t) __a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vqshluv4si ((int32x4_t) __a, __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vqshluv2di ((int64x2_t) __a, __b);
}
/* VQRSHL family: saturating rounding variant of VSHL.  Resolved from the
   unmerged patch residue: keep the '+' form.  The old code reused the
   vqshl builtin with rounding encoded in the magic number (5/4); the
   new builtins spell it out as vqrshls/vqrshlu.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vqrshl_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vqrshlsv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqrshl_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vqrshlsv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqrshl_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vqrshlsv2si (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vqrshl_s64 (int64x1_t __a, int64x1_t __b)
{
  return (int64x1_t)__builtin_neon_vqrshlsdi (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vqrshluv8qi ((int8x8_t) __a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vqrshluv4hi ((int16x4_t) __a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vqrshluv2si ((int32x2_t) __a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
{
  return (uint64x1_t)__builtin_neon_vqrshludi ((int64x1_t) __a, __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vqrshlsv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vqrshlsv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vqrshlsv4si (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int64x2_t)__builtin_neon_vqrshlsv2di (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vqrshluv16qi ((int8x16_t) __a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vqrshluv8hi ((int16x8_t) __a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vqrshluv4si ((int32x4_t) __a, __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vqrshluv2di ((int64x2_t) __a, __b);
}
/* VSHR_N family: shift right by an immediate __b.  Resolved from the
   unmerged patch residue: keep the '+' form, which uses the explicitly
   signed (vshrs_n) or unsigned (vshru_n) builtin instead of a trailing
   type-qualifier magic number.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vshr_n_s8 (int8x8_t __a, const int __b)
{
  return (int8x8_t)__builtin_neon_vshrs_nv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vshr_n_s16 (int16x4_t __a, const int __b)
{
  return (int16x4_t)__builtin_neon_vshrs_nv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vshr_n_s32 (int32x2_t __a, const int __b)
{
  return (int32x2_t)__builtin_neon_vshrs_nv2si (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vshr_n_s64 (int64x1_t __a, const int __b)
{
  return (int64x1_t)__builtin_neon_vshrs_ndi (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vshr_n_u8 (uint8x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vshru_nv8qi ((int8x8_t) __a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vshr_n_u16 (uint16x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vshru_nv4hi ((int16x4_t) __a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vshr_n_u32 (uint32x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vshru_nv2si ((int32x2_t) __a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vshr_n_u64 (uint64x1_t __a, const int __b)
{
  return (uint64x1_t)__builtin_neon_vshru_ndi ((int64x1_t) __a, __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vshrq_n_s8 (int8x16_t __a, const int __b)
{
  return (int8x16_t)__builtin_neon_vshrs_nv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vshrq_n_s16 (int16x8_t __a, const int __b)
{
  return (int16x8_t)__builtin_neon_vshrs_nv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vshrq_n_s32 (int32x4_t __a, const int __b)
{
  return (int32x4_t)__builtin_neon_vshrs_nv4si (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vshrq_n_s64 (int64x2_t __a, const int __b)
{
  return (int64x2_t)__builtin_neon_vshrs_nv2di (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vshrq_n_u8 (uint8x16_t __a, const int __b)
{
  return (uint8x16_t)__builtin_neon_vshru_nv16qi ((int8x16_t) __a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vshrq_n_u16 (uint16x8_t __a, const int __b)
{
  return (uint16x8_t)__builtin_neon_vshru_nv8hi ((int16x8_t) __a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vshrq_n_u32 (uint32x4_t __a, const int __b)
{
  return (uint32x4_t)__builtin_neon_vshru_nv4si ((int32x4_t) __a, __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vshrq_n_u64 (uint64x2_t __a, const int __b)
{
  return (uint64x2_t)__builtin_neon_vshru_nv2di ((int64x2_t) __a, __b);
}
/* VRSHR_N family: rounding shift right by an immediate.  Resolved from
   the unmerged patch residue: keep the '+' form.  The old code reused
   the vshr_n builtin with rounding encoded in the magic number (5/4);
   the new builtins spell it out as vrshrs_n/vrshru_n.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vrshr_n_s8 (int8x8_t __a, const int __b)
{
  return (int8x8_t)__builtin_neon_vrshrs_nv8qi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vrshr_n_s16 (int16x4_t __a, const int __b)
{
  return (int16x4_t)__builtin_neon_vrshrs_nv4hi (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vrshr_n_s32 (int32x2_t __a, const int __b)
{
  return (int32x2_t)__builtin_neon_vrshrs_nv2si (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vrshr_n_s64 (int64x1_t __a, const int __b)
{
  return (int64x1_t)__builtin_neon_vrshrs_ndi (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vrshr_n_u8 (uint8x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vrshru_nv8qi ((int8x8_t) __a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vrshr_n_u16 (uint16x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vrshru_nv4hi ((int16x4_t) __a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vrshr_n_u32 (uint32x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vrshru_nv2si ((int32x2_t) __a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vrshr_n_u64 (uint64x1_t __a, const int __b)
{
  return (uint64x1_t)__builtin_neon_vrshru_ndi ((int64x1_t) __a, __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vrshrq_n_s8 (int8x16_t __a, const int __b)
{
  return (int8x16_t)__builtin_neon_vrshrs_nv16qi (__a, __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vrshrq_n_s16 (int16x8_t __a, const int __b)
{
  return (int16x8_t)__builtin_neon_vrshrs_nv8hi (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vrshrq_n_s32 (int32x4_t __a, const int __b)
{
  return (int32x4_t)__builtin_neon_vrshrs_nv4si (__a, __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vrshrq_n_s64 (int64x2_t __a, const int __b)
{
  return (int64x2_t)__builtin_neon_vrshrs_nv2di (__a, __b);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vrshrq_n_u8 (uint8x16_t __a, const int __b)
{
  return (uint8x16_t)__builtin_neon_vrshru_nv16qi ((int8x16_t) __a, __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vrshrq_n_u16 (uint16x8_t __a, const int __b)
{
  return (uint16x8_t)__builtin_neon_vrshru_nv8hi ((int16x8_t) __a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vrshrq_n_u32 (uint32x4_t __a, const int __b)
{
  return (uint32x4_t)__builtin_neon_vrshru_nv4si ((int32x4_t) __a, __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vrshrq_n_u64 (uint64x2_t __a, const int __b)
{
  return (uint64x2_t)__builtin_neon_vrshru_nv2di ((int64x2_t) __a, __b);
}
/* VSHRN_N family: narrowing shift right by an immediate.  Resolved from
   the unmerged patch residue: keep the '+' form.  The builtin keeps no
   signedness suffix — the narrowing shift is the same bit operation for
   signed and unsigned data — so only the magic-number argument goes;
   unsigned variants cast through the signed vector types.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vshrn_n_s16 (int16x8_t __a, const int __b)
{
  return (int8x8_t)__builtin_neon_vshrn_nv8hi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vshrn_n_s32 (int32x4_t __a, const int __b)
{
  return (int16x4_t)__builtin_neon_vshrn_nv4si (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vshrn_n_s64 (int64x2_t __a, const int __b)
{
  return (int32x2_t)__builtin_neon_vshrn_nv2di (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vshrn_n_u16 (uint16x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vshrn_nv8hi ((int16x8_t) __a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vshrn_n_u32 (uint32x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vshrn_nv4si ((int32x4_t) __a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vshrn_n_u64 (uint64x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vshrn_nv2di ((int64x2_t) __a, __b);
}
/* VRSHRN_N family: rounding narrowing shift right by an immediate.
   Resolved from the unmerged patch residue: keep the '+' form.  The old
   code reused vshrn_n with rounding encoded in the magic number (5/4);
   the new builtin is vrshrn_n, still with no signedness suffix.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vrshrn_n_s16 (int16x8_t __a, const int __b)
{
  return (int8x8_t)__builtin_neon_vrshrn_nv8hi (__a, __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vrshrn_n_s32 (int32x4_t __a, const int __b)
{
  return (int16x4_t)__builtin_neon_vrshrn_nv4si (__a, __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vrshrn_n_s64 (int64x2_t __a, const int __b)
{
  return (int32x2_t)__builtin_neon_vrshrn_nv2di (__a, __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vrshrn_n_u16 (uint16x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vrshrn_nv8hi ((int16x8_t) __a, __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vrshrn_n_u32 (uint32x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vrshrn_nv4si ((int32x4_t) __a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vrshrn_n_u64 (uint64x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vrshrn_nv2di ((int64x2_t) __a, __b);
}
/* vqshrn_n: saturating narrowing right shift by immediate.  Signedness
   is now encoded in the builtin name (vqshrns_n / vqshrnu_n) rather
   than a trailing magic argument.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vqshrn_n_s16 (int16x8_t __a, const int __b)
{
  return (int8x8_t)__builtin_neon_vqshrns_nv8hi (__a, __b);
}

__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqshrn_n_s32 (int32x4_t __a, const int __b)
{
  return (int16x4_t)__builtin_neon_vqshrns_nv4si (__a, __b);
}

__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqshrn_n_s64 (int64x2_t __a, const int __b)
{
  return (int32x2_t)__builtin_neon_vqshrns_nv2di (__a, __b);
}

__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqshrn_n_u16 (uint16x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vqshrnu_nv8hi ((int16x8_t) __a, __b);
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqshrn_n_u32 (uint32x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vqshrnu_nv4si ((int32x4_t) __a, __b);
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqshrn_n_u64 (uint64x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vqshrnu_nv2di ((int64x2_t) __a, __b);
}
/* vqrshrn_n: saturating rounding narrowing right shift by immediate.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vqrshrn_n_s16 (int16x8_t __a, const int __b)
{
  return (int8x8_t)__builtin_neon_vqrshrns_nv8hi (__a, __b);
}

__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqrshrn_n_s32 (int32x4_t __a, const int __b)
{
  return (int16x4_t)__builtin_neon_vqrshrns_nv4si (__a, __b);
}

__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqrshrn_n_s64 (int64x2_t __a, const int __b)
{
  return (int32x2_t)__builtin_neon_vqrshrns_nv2di (__a, __b);
}

__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqrshrn_n_u16 (uint16x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vqrshrnu_nv8hi ((int16x8_t) __a, __b);
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqrshrn_n_u32 (uint32x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vqrshrnu_nv4si ((int32x4_t) __a, __b);
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqrshrn_n_u64 (uint64x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vqrshrnu_nv2di ((int64x2_t) __a, __b);
}
/* vqshrun_n: saturating narrowing right shift, signed input to
   unsigned result (VQSHRUN).  */
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqshrun_n_s16 (int16x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vqshrun_nv8hi (__a, __b);
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqshrun_n_s32 (int32x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vqshrun_nv4si (__a, __b);
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqshrun_n_s64 (int64x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vqshrun_nv2di (__a, __b);
}
/* vqrshrun_n: saturating rounding narrowing right shift, signed input
   to unsigned result (VQRSHRUN).  */
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqrshrun_n_s16 (int16x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vqrshrun_nv8hi (__a, __b);
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqrshrun_n_s32 (int32x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vqrshrun_nv4si (__a, __b);
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqrshrun_n_s64 (int64x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vqrshrun_nv2di (__a, __b);
}
/* vshl_n: left shift by immediate, 64-bit vectors.  One builtin per
   width serves both signednesses; unsigned operands are cast through
   the signed vector type.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vshl_n_s8 (int8x8_t __a, const int __b)
{
  return (int8x8_t)__builtin_neon_vshl_nv8qi (__a, __b);
}

__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vshl_n_s16 (int16x4_t __a, const int __b)
{
  return (int16x4_t)__builtin_neon_vshl_nv4hi (__a, __b);
}

__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vshl_n_s32 (int32x2_t __a, const int __b)
{
  return (int32x2_t)__builtin_neon_vshl_nv2si (__a, __b);
}

__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vshl_n_s64 (int64x1_t __a, const int __b)
{
  return (int64x1_t)__builtin_neon_vshl_ndi (__a, __b);
}

__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vshl_n_u8 (uint8x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vshl_nv8qi ((int8x8_t) __a, __b);
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vshl_n_u16 (uint16x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vshl_nv4hi ((int16x4_t) __a, __b);
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vshl_n_u32 (uint32x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vshl_nv2si ((int32x2_t) __a, __b);
}

__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vshl_n_u64 (uint64x1_t __a, const int __b)
{
  return (uint64x1_t)__builtin_neon_vshl_ndi ((int64x1_t) __a, __b);
}
/* vshlq_n: left shift by immediate, 128-bit vectors.  */
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vshlq_n_s8 (int8x16_t __a, const int __b)
{
  return (int8x16_t)__builtin_neon_vshl_nv16qi (__a, __b);
}

__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vshlq_n_s16 (int16x8_t __a, const int __b)
{
  return (int16x8_t)__builtin_neon_vshl_nv8hi (__a, __b);
}

__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vshlq_n_s32 (int32x4_t __a, const int __b)
{
  return (int32x4_t)__builtin_neon_vshl_nv4si (__a, __b);
}

__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vshlq_n_s64 (int64x2_t __a, const int __b)
{
  return (int64x2_t)__builtin_neon_vshl_nv2di (__a, __b);
}

__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vshlq_n_u8 (uint8x16_t __a, const int __b)
{
  return (uint8x16_t)__builtin_neon_vshl_nv16qi ((int8x16_t) __a, __b);
}

__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vshlq_n_u16 (uint16x8_t __a, const int __b)
{
  return (uint16x8_t)__builtin_neon_vshl_nv8hi ((int16x8_t) __a, __b);
}

__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vshlq_n_u32 (uint32x4_t __a, const int __b)
{
  return (uint32x4_t)__builtin_neon_vshl_nv4si ((int32x4_t) __a, __b);
}

__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vshlq_n_u64 (uint64x2_t __a, const int __b)
{
  return (uint64x2_t)__builtin_neon_vshl_nv2di ((int64x2_t) __a, __b);
}
/* vqshl_n: saturating left shift by immediate, 64-bit vectors.
   Signedness is encoded in the builtin name (vqshl_s_n / vqshl_u_n).  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vqshl_n_s8 (int8x8_t __a, const int __b)
{
  return (int8x8_t)__builtin_neon_vqshl_s_nv8qi (__a, __b);
}

__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqshl_n_s16 (int16x4_t __a, const int __b)
{
  return (int16x4_t)__builtin_neon_vqshl_s_nv4hi (__a, __b);
}

__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqshl_n_s32 (int32x2_t __a, const int __b)
{
  return (int32x2_t)__builtin_neon_vqshl_s_nv2si (__a, __b);
}

__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vqshl_n_s64 (int64x1_t __a, const int __b)
{
  return (int64x1_t)__builtin_neon_vqshl_s_ndi (__a, __b);
}

__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqshl_n_u8 (uint8x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vqshl_u_nv8qi ((int8x8_t) __a, __b);
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqshl_n_u16 (uint16x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vqshl_u_nv4hi ((int16x4_t) __a, __b);
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqshl_n_u32 (uint32x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vqshl_u_nv2si ((int32x2_t) __a, __b);
}

__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vqshl_n_u64 (uint64x1_t __a, const int __b)
{
  return (uint64x1_t)__builtin_neon_vqshl_u_ndi ((int64x1_t) __a, __b);
}
/* vqshlq_n: saturating left shift by immediate, 128-bit vectors.  */
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vqshlq_n_s8 (int8x16_t __a, const int __b)
{
  return (int8x16_t)__builtin_neon_vqshl_s_nv16qi (__a, __b);
}

__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqshlq_n_s16 (int16x8_t __a, const int __b)
{
  return (int16x8_t)__builtin_neon_vqshl_s_nv8hi (__a, __b);
}

__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqshlq_n_s32 (int32x4_t __a, const int __b)
{
  return (int32x4_t)__builtin_neon_vqshl_s_nv4si (__a, __b);
}

__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqshlq_n_s64 (int64x2_t __a, const int __b)
{
  return (int64x2_t)__builtin_neon_vqshl_s_nv2di (__a, __b);
}

__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vqshlq_n_u8 (uint8x16_t __a, const int __b)
{
  return (uint8x16_t)__builtin_neon_vqshl_u_nv16qi ((int8x16_t) __a, __b);
}

__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vqshlq_n_u16 (uint16x8_t __a, const int __b)
{
  return (uint16x8_t)__builtin_neon_vqshl_u_nv8hi ((int16x8_t) __a, __b);
}

__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vqshlq_n_u32 (uint32x4_t __a, const int __b)
{
  return (uint32x4_t)__builtin_neon_vqshl_u_nv4si ((int32x4_t) __a, __b);
}

__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vqshlq_n_u64 (uint64x2_t __a, const int __b)
{
  return (uint64x2_t)__builtin_neon_vqshl_u_nv2di ((int64x2_t) __a, __b);
}
/* vqshlu_n: saturating left shift by immediate, signed input to
   unsigned result (VQSHLU), 64-bit vectors.  */
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqshlu_n_s8 (int8x8_t __a, const int __b)
{
  return (uint8x8_t)__builtin_neon_vqshlu_nv8qi (__a, __b);
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqshlu_n_s16 (int16x4_t __a, const int __b)
{
  return (uint16x4_t)__builtin_neon_vqshlu_nv4hi (__a, __b);
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqshlu_n_s32 (int32x2_t __a, const int __b)
{
  return (uint32x2_t)__builtin_neon_vqshlu_nv2si (__a, __b);
}

__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vqshlu_n_s64 (int64x1_t __a, const int __b)
{
  return (uint64x1_t)__builtin_neon_vqshlu_ndi (__a, __b);
}
/* vqshluq_n: saturating left shift by immediate, signed input to
   unsigned result, 128-bit vectors.  */
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vqshluq_n_s8 (int8x16_t __a, const int __b)
{
  return (uint8x16_t)__builtin_neon_vqshlu_nv16qi (__a, __b);
}

__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vqshluq_n_s16 (int16x8_t __a, const int __b)
{
  return (uint16x8_t)__builtin_neon_vqshlu_nv8hi (__a, __b);
}

__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vqshluq_n_s32 (int32x4_t __a, const int __b)
{
  return (uint32x4_t)__builtin_neon_vqshlu_nv4si (__a, __b);
}

__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vqshluq_n_s64 (int64x2_t __a, const int __b)
{
  return (uint64x2_t)__builtin_neon_vqshlu_nv2di (__a, __b);
}
/* vshll_n: widening left shift by immediate (VSHLL); signedness is in
   the builtin name (vshlls_n / vshllu_n).  */
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vshll_n_s8 (int8x8_t __a, const int __b)
{
  return (int16x8_t)__builtin_neon_vshlls_nv8qi (__a, __b);
}

__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vshll_n_s16 (int16x4_t __a, const int __b)
{
  return (int32x4_t)__builtin_neon_vshlls_nv4hi (__a, __b);
}

__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vshll_n_s32 (int32x2_t __a, const int __b)
{
  return (int64x2_t)__builtin_neon_vshlls_nv2si (__a, __b);
}

__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vshll_n_u8 (uint8x8_t __a, const int __b)
{
  return (uint16x8_t)__builtin_neon_vshllu_nv8qi ((int8x8_t) __a, __b);
}

__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vshll_n_u16 (uint16x4_t __a, const int __b)
{
  return (uint32x4_t)__builtin_neon_vshllu_nv4hi ((int16x4_t) __a, __b);
}

__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vshll_n_u32 (uint32x2_t __a, const int __b)
{
  return (uint64x2_t)__builtin_neon_vshllu_nv2si ((int32x2_t) __a, __b);
}
/* vsra_n: shift right by immediate and accumulate (VSRA), 64-bit
   vectors; builtin name carries the signedness (vsras_n / vsrau_n).  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
  return (int8x8_t)__builtin_neon_vsras_nv8qi (__a, __b, __c);
}

__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
  return (int16x4_t)__builtin_neon_vsras_nv4hi (__a, __b, __c);
}

__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
  return (int32x2_t)__builtin_neon_vsras_nv2si (__a, __b, __c);
}

__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
{
  return (int64x1_t)__builtin_neon_vsras_ndi (__a, __b, __c);
}

__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
{
  return (uint8x8_t)__builtin_neon_vsrau_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
{
  return (uint16x4_t)__builtin_neon_vsrau_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
{
  return (uint32x2_t)__builtin_neon_vsrau_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
}

__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
{
  return (uint64x1_t)__builtin_neon_vsrau_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
}
/* vsraq_n: shift right by immediate and accumulate, 128-bit vectors.  */
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
  return (int8x16_t)__builtin_neon_vsras_nv16qi (__a, __b, __c);
}

__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
  return (int16x8_t)__builtin_neon_vsras_nv8hi (__a, __b, __c);
}

__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
  return (int32x4_t)__builtin_neon_vsras_nv4si (__a, __b, __c);
}

__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
{
  return (int64x2_t)__builtin_neon_vsras_nv2di (__a, __b, __c);
}

__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
{
  return (uint8x16_t)__builtin_neon_vsrau_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
}

__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
{
  return (uint16x8_t)__builtin_neon_vsrau_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
}

__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
{
  return (uint32x4_t)__builtin_neon_vsrau_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
}

__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
{
  return (uint64x2_t)__builtin_neon_vsrau_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
}
/* vrsra_n: rounding shift right by immediate and accumulate (VRSRA),
   64-bit vectors.  */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
  return (int8x8_t)__builtin_neon_vrsras_nv8qi (__a, __b, __c);
}

__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
  return (int16x4_t)__builtin_neon_vrsras_nv4hi (__a, __b, __c);
}

__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
  return (int32x2_t)__builtin_neon_vrsras_nv2si (__a, __b, __c);
}

__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
{
  return (int64x1_t)__builtin_neon_vrsras_ndi (__a, __b, __c);
}

__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
{
  return (uint8x8_t)__builtin_neon_vrsrau_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
{
  return (uint16x4_t)__builtin_neon_vrsrau_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
{
  return (uint32x2_t)__builtin_neon_vrsrau_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
}

__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
{
  return (uint64x1_t)__builtin_neon_vrsrau_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
}
/* vrsraq_n: rounding shift right by immediate and accumulate, 128-bit
   vectors.  */
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
  return (int8x16_t)__builtin_neon_vrsras_nv16qi (__a, __b, __c);
}

__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
  return (int16x8_t)__builtin_neon_vrsras_nv8hi (__a, __b, __c);
}

__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
  return (int32x4_t)__builtin_neon_vrsras_nv4si (__a, __b, __c);
}

__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
{
  return (int64x2_t)__builtin_neon_vrsras_nv2di (__a, __b, __c);
}

__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
{
  return (uint8x16_t)__builtin_neon_vrsrau_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
}

__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
{
  return (uint16x8_t)__builtin_neon_vrsrau_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
}

__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
{
  return (uint32x4_t)__builtin_neon_vrsrau_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
}

__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
{
  return (uint64x2_t)__builtin_neon_vrsrau_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
}
#ifdef __ARM_FEATURE_CRYPTO
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vabs_s8 (int8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vabsv8qi (__a, 1);
+ return (int8x8_t)__builtin_neon_vabsv8qi (__a);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vabs_s16 (int16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vabsv4hi (__a, 1);
+ return (int16x4_t)__builtin_neon_vabsv4hi (__a);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vabs_s32 (int32x2_t __a)
{
- return (int32x2_t)__builtin_neon_vabsv2si (__a, 1);
+ return (int32x2_t)__builtin_neon_vabsv2si (__a);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vabs_f32 (float32x2_t __a)
{
- return (float32x2_t)__builtin_neon_vabsv2sf (__a, 3);
+ return (float32x2_t)__builtin_neon_vabsv2sf (__a);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vabsq_s8 (int8x16_t __a)
{
- return (int8x16_t)__builtin_neon_vabsv16qi (__a, 1);
+ return (int8x16_t)__builtin_neon_vabsv16qi (__a);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vabsq_s16 (int16x8_t __a)
{
- return (int16x8_t)__builtin_neon_vabsv8hi (__a, 1);
+ return (int16x8_t)__builtin_neon_vabsv8hi (__a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vabsq_s32 (int32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vabsv4si (__a, 1);
+ return (int32x4_t)__builtin_neon_vabsv4si (__a);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vabsq_f32 (float32x4_t __a)
{
- return (float32x4_t)__builtin_neon_vabsv4sf (__a, 3);
+ return (float32x4_t)__builtin_neon_vabsv4sf (__a);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vqabs_s8 (int8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vqabsv8qi (__a, 1);
+ return (int8x8_t)__builtin_neon_vqabsv8qi (__a);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqabs_s16 (int16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vqabsv4hi (__a, 1);
+ return (int16x4_t)__builtin_neon_vqabsv4hi (__a);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqabs_s32 (int32x2_t __a)
{
- return (int32x2_t)__builtin_neon_vqabsv2si (__a, 1);
+ return (int32x2_t)__builtin_neon_vqabsv2si (__a);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vqabsq_s8 (int8x16_t __a)
{
- return (int8x16_t)__builtin_neon_vqabsv16qi (__a, 1);
+ return (int8x16_t)__builtin_neon_vqabsv16qi (__a);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqabsq_s16 (int16x8_t __a)
{
- return (int16x8_t)__builtin_neon_vqabsv8hi (__a, 1);
+ return (int16x8_t)__builtin_neon_vqabsv8hi (__a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqabsq_s32 (int32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vqabsv4si (__a, 1);
+ return (int32x4_t)__builtin_neon_vqabsv4si (__a);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vneg_s8 (int8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vnegv8qi (__a, 1);
+ return (int8x8_t)__builtin_neon_vnegv8qi (__a);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vneg_s16 (int16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vnegv4hi (__a, 1);
+ return (int16x4_t)__builtin_neon_vnegv4hi (__a);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vneg_s32 (int32x2_t __a)
{
- return (int32x2_t)__builtin_neon_vnegv2si (__a, 1);
+ return (int32x2_t)__builtin_neon_vnegv2si (__a);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vneg_f32 (float32x2_t __a)
{
- return (float32x2_t)__builtin_neon_vnegv2sf (__a, 3);
+ return (float32x2_t)__builtin_neon_vnegv2sf (__a);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vnegq_s8 (int8x16_t __a)
{
- return (int8x16_t)__builtin_neon_vnegv16qi (__a, 1);
+ return (int8x16_t)__builtin_neon_vnegv16qi (__a);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vnegq_s16 (int16x8_t __a)
{
- return (int16x8_t)__builtin_neon_vnegv8hi (__a, 1);
+ return (int16x8_t)__builtin_neon_vnegv8hi (__a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vnegq_s32 (int32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vnegv4si (__a, 1);
+ return (int32x4_t)__builtin_neon_vnegv4si (__a);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vnegq_f32 (float32x4_t __a)
{
- return (float32x4_t)__builtin_neon_vnegv4sf (__a, 3);
+ return (float32x4_t)__builtin_neon_vnegv4sf (__a);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vqneg_s8 (int8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vqnegv8qi (__a, 1);
+ return (int8x8_t)__builtin_neon_vqnegv8qi (__a);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqneg_s16 (int16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vqnegv4hi (__a, 1);
+ return (int16x4_t)__builtin_neon_vqnegv4hi (__a);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqneg_s32 (int32x2_t __a)
{
- return (int32x2_t)__builtin_neon_vqnegv2si (__a, 1);
+ return (int32x2_t)__builtin_neon_vqnegv2si (__a);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vqnegq_s8 (int8x16_t __a)
{
- return (int8x16_t)__builtin_neon_vqnegv16qi (__a, 1);
+ return (int8x16_t)__builtin_neon_vqnegv16qi (__a);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqnegq_s16 (int16x8_t __a)
{
- return (int16x8_t)__builtin_neon_vqnegv8hi (__a, 1);
+ return (int16x8_t)__builtin_neon_vqnegv8hi (__a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqnegq_s32 (int32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vqnegv4si (__a, 1);
+ return (int32x4_t)__builtin_neon_vqnegv4si (__a);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vmvn_s8 (int8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vmvnv8qi (__a, 1);
+ return (int8x8_t)__builtin_neon_vmvnv8qi (__a);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmvn_s16 (int16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vmvnv4hi (__a, 1);
+ return (int16x4_t)__builtin_neon_vmvnv4hi (__a);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmvn_s32 (int32x2_t __a)
{
- return (int32x2_t)__builtin_neon_vmvnv2si (__a, 1);
+ return (int32x2_t)__builtin_neon_vmvnv2si (__a);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vmvn_u8 (uint8x8_t __a)
{
- return (uint8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a, 0);
+ return (uint8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmvn_u16 (uint16x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vmvnv4hi ((int16x4_t) __a, 0);
+ return (uint16x4_t)__builtin_neon_vmvnv4hi ((int16x4_t) __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmvn_u32 (uint32x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vmvnv2si ((int32x2_t) __a, 0);
+ return (uint32x2_t)__builtin_neon_vmvnv2si ((int32x2_t) __a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vmvn_p8 (poly8x8_t __a)
{
- return (poly8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a, 2);
+ return (poly8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vmvnq_s8 (int8x16_t __a)
{
- return (int8x16_t)__builtin_neon_vmvnv16qi (__a, 1);
+ return (int8x16_t)__builtin_neon_vmvnv16qi (__a);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmvnq_s16 (int16x8_t __a)
{
- return (int16x8_t)__builtin_neon_vmvnv8hi (__a, 1);
+ return (int16x8_t)__builtin_neon_vmvnv8hi (__a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmvnq_s32 (int32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vmvnv4si (__a, 1);
+ return (int32x4_t)__builtin_neon_vmvnv4si (__a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vmvnq_u8 (uint8x16_t __a)
{
- return (uint8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a, 0);
+ return (uint8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmvnq_u16 (uint16x8_t __a)
{
- return (uint16x8_t)__builtin_neon_vmvnv8hi ((int16x8_t) __a, 0);
+ return (uint16x8_t)__builtin_neon_vmvnv8hi ((int16x8_t) __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmvnq_u32 (uint32x4_t __a)
{
- return (uint32x4_t)__builtin_neon_vmvnv4si ((int32x4_t) __a, 0);
+ return (uint32x4_t)__builtin_neon_vmvnv4si ((int32x4_t) __a);
}
__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vmvnq_p8 (poly8x16_t __a)
{
- return (poly8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a, 2);
+ return (poly8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vcls_s8 (int8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vclsv8qi (__a, 1);
+ return (int8x8_t)__builtin_neon_vclsv8qi (__a);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vcls_s16 (int16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vclsv4hi (__a, 1);
+ return (int16x4_t)__builtin_neon_vclsv4hi (__a);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vcls_s32 (int32x2_t __a)
{
- return (int32x2_t)__builtin_neon_vclsv2si (__a, 1);
+ return (int32x2_t)__builtin_neon_vclsv2si (__a);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vclsq_s8 (int8x16_t __a)
{
- return (int8x16_t)__builtin_neon_vclsv16qi (__a, 1);
+ return (int8x16_t)__builtin_neon_vclsv16qi (__a);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vclsq_s16 (int16x8_t __a)
{
- return (int16x8_t)__builtin_neon_vclsv8hi (__a, 1);
+ return (int16x8_t)__builtin_neon_vclsv8hi (__a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vclsq_s32 (int32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vclsv4si (__a, 1);
+ return (int32x4_t)__builtin_neon_vclsv4si (__a);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vclz_s8 (int8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vclzv8qi (__a, 1);
+ return (int8x8_t)__builtin_neon_vclzv8qi (__a);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vclz_s16 (int16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vclzv4hi (__a, 1);
+ return (int16x4_t)__builtin_neon_vclzv4hi (__a);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vclz_s32 (int32x2_t __a)
{
- return (int32x2_t)__builtin_neon_vclzv2si (__a, 1);
+ return (int32x2_t)__builtin_neon_vclzv2si (__a);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vclz_u8 (uint8x8_t __a)
{
- return (uint8x8_t)__builtin_neon_vclzv8qi ((int8x8_t) __a, 0);
+ return (uint8x8_t)__builtin_neon_vclzv8qi ((int8x8_t) __a);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vclz_u16 (uint16x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vclzv4hi ((int16x4_t) __a, 0);
+ return (uint16x4_t)__builtin_neon_vclzv4hi ((int16x4_t) __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vclz_u32 (uint32x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vclzv2si ((int32x2_t) __a, 0);
+ return (uint32x2_t)__builtin_neon_vclzv2si ((int32x2_t) __a);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vclzq_s8 (int8x16_t __a)
{
- return (int8x16_t)__builtin_neon_vclzv16qi (__a, 1);
+ return (int8x16_t)__builtin_neon_vclzv16qi (__a);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vclzq_s16 (int16x8_t __a)
{
- return (int16x8_t)__builtin_neon_vclzv8hi (__a, 1);
+ return (int16x8_t)__builtin_neon_vclzv8hi (__a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vclzq_s32 (int32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vclzv4si (__a, 1);
+ return (int32x4_t)__builtin_neon_vclzv4si (__a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vclzq_u8 (uint8x16_t __a)
{
- return (uint8x16_t)__builtin_neon_vclzv16qi ((int8x16_t) __a, 0);
+ return (uint8x16_t)__builtin_neon_vclzv16qi ((int8x16_t) __a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vclzq_u16 (uint16x8_t __a)
{
- return (uint16x8_t)__builtin_neon_vclzv8hi ((int16x8_t) __a, 0);
+ return (uint16x8_t)__builtin_neon_vclzv8hi ((int16x8_t) __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vclzq_u32 (uint32x4_t __a)
{
- return (uint32x4_t)__builtin_neon_vclzv4si ((int32x4_t) __a, 0);
+ return (uint32x4_t)__builtin_neon_vclzv4si ((int32x4_t) __a);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vcnt_s8 (int8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vcntv8qi (__a, 1);
+ return (int8x8_t)__builtin_neon_vcntv8qi (__a);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vcnt_u8 (uint8x8_t __a)
{
- return (uint8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a, 0);
+ return (uint8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vcnt_p8 (poly8x8_t __a)
{
- return (poly8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a, 2);
+ return (poly8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vcntq_s8 (int8x16_t __a)
{
- return (int8x16_t)__builtin_neon_vcntv16qi (__a, 1);
+ return (int8x16_t)__builtin_neon_vcntv16qi (__a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vcntq_u8 (uint8x16_t __a)
{
- return (uint8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a, 0);
+ return (uint8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a);
}
__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vcntq_p8 (poly8x16_t __a)
{
- return (poly8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a, 2);
+ return (poly8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vrecpe_f32 (float32x2_t __a)
{
- return (float32x2_t)__builtin_neon_vrecpev2sf (__a, 3);
+ return (float32x2_t)__builtin_neon_vrecpev2sf (__a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vrecpe_u32 (uint32x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vrecpev2si ((int32x2_t) __a, 0);
+ return (uint32x2_t)__builtin_neon_vrecpev2si ((int32x2_t) __a);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vrecpeq_f32 (float32x4_t __a)
{
- return (float32x4_t)__builtin_neon_vrecpev4sf (__a, 3);
+ return (float32x4_t)__builtin_neon_vrecpev4sf (__a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vrecpeq_u32 (uint32x4_t __a)
{
- return (uint32x4_t)__builtin_neon_vrecpev4si ((int32x4_t) __a, 0);
+ return (uint32x4_t)__builtin_neon_vrecpev4si ((int32x4_t) __a);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vrsqrte_f32 (float32x2_t __a)
{
- return (float32x2_t)__builtin_neon_vrsqrtev2sf (__a, 3);
+ return (float32x2_t)__builtin_neon_vrsqrtev2sf (__a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vrsqrte_u32 (uint32x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vrsqrtev2si ((int32x2_t) __a, 0);
+ return (uint32x2_t)__builtin_neon_vrsqrtev2si ((int32x2_t) __a);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vrsqrteq_f32 (float32x4_t __a)
{
- return (float32x4_t)__builtin_neon_vrsqrtev4sf (__a, 3);
+ return (float32x4_t)__builtin_neon_vrsqrtev4sf (__a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vrsqrteq_u32 (uint32x4_t __a)
{
- return (uint32x4_t)__builtin_neon_vrsqrtev4si ((int32x4_t) __a, 0);
+ return (uint32x4_t)__builtin_neon_vrsqrtev4si ((int32x4_t) __a);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vget_lane_s8 (int8x8_t __a, const int __b)
{
- return (int8_t)__builtin_neon_vget_lanev8qi (__a, __b, 1);
+ return (int8_t)__builtin_neon_vget_lanev8qi (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vget_lane_s16 (int16x4_t __a, const int __b)
{
- return (int16_t)__builtin_neon_vget_lanev4hi (__a, __b, 1);
+ return (int16_t)__builtin_neon_vget_lanev4hi (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vget_lane_s32 (int32x2_t __a, const int __b)
{
- return (int32_t)__builtin_neon_vget_lanev2si (__a, __b, 1);
+ return (int32_t)__builtin_neon_vget_lanev2si (__a, __b);
}
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vget_lane_f32 (float32x2_t __a, const int __b)
{
- return (float32_t)__builtin_neon_vget_lanev2sf (__a, __b, 3);
+ return (float32_t)__builtin_neon_vget_lanev2sf (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
vget_lane_u8 (uint8x8_t __a, const int __b)
{
- return (uint8_t)__builtin_neon_vget_lanev8qi ((int8x8_t) __a, __b, 0);
+ return (uint8_t)__builtin_neon_vget_laneuv8qi ((int8x8_t) __a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
vget_lane_u16 (uint16x4_t __a, const int __b)
{
- return (uint16_t)__builtin_neon_vget_lanev4hi ((int16x4_t) __a, __b, 0);
+ return (uint16_t)__builtin_neon_vget_laneuv4hi ((int16x4_t) __a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vget_lane_u32 (uint32x2_t __a, const int __b)
{
- return (uint32_t)__builtin_neon_vget_lanev2si ((int32x2_t) __a, __b, 0);
+ return (uint32_t)__builtin_neon_vget_laneuv2si ((int32x2_t) __a, __b);
}
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
vget_lane_p8 (poly8x8_t __a, const int __b)
{
- return (poly8_t)__builtin_neon_vget_lanev8qi ((int8x8_t) __a, __b, 2);
+ return (poly8_t)__builtin_neon_vget_laneuv8qi ((int8x8_t) __a, __b);
}
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
vget_lane_p16 (poly16x4_t __a, const int __b)
{
- return (poly16_t)__builtin_neon_vget_lanev4hi ((int16x4_t) __a, __b, 2);
+ return (poly16_t)__builtin_neon_vget_laneuv4hi ((int16x4_t) __a, __b);
}
__extension__ static __inline int64_t __attribute__ ((__always_inline__))
vget_lane_s64 (int64x1_t __a, const int __b)
{
- return (int64_t)__builtin_neon_vget_lanedi (__a, __b, 1);
+ return (int64_t)__builtin_neon_vget_lanedi (__a, __b);
}
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
vget_lane_u64 (uint64x1_t __a, const int __b)
{
- return (uint64_t)__builtin_neon_vget_lanedi ((int64x1_t) __a, __b, 0);
+ return (uint64_t)__builtin_neon_vget_lanedi ((int64x1_t) __a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vgetq_lane_s8 (int8x16_t __a, const int __b)
{
- return (int8_t)__builtin_neon_vget_lanev16qi (__a, __b, 1);
+ return (int8_t)__builtin_neon_vget_lanev16qi (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vgetq_lane_s16 (int16x8_t __a, const int __b)
{
- return (int16_t)__builtin_neon_vget_lanev8hi (__a, __b, 1);
+ return (int16_t)__builtin_neon_vget_lanev8hi (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vgetq_lane_s32 (int32x4_t __a, const int __b)
{
- return (int32_t)__builtin_neon_vget_lanev4si (__a, __b, 1);
+ return (int32_t)__builtin_neon_vget_lanev4si (__a, __b);
}
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vgetq_lane_f32 (float32x4_t __a, const int __b)
{
- return (float32_t)__builtin_neon_vget_lanev4sf (__a, __b, 3);
+ return (float32_t)__builtin_neon_vget_lanev4sf (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
vgetq_lane_u8 (uint8x16_t __a, const int __b)
{
- return (uint8_t)__builtin_neon_vget_lanev16qi ((int8x16_t) __a, __b, 0);
+ return (uint8_t)__builtin_neon_vget_laneuv16qi ((int8x16_t) __a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
vgetq_lane_u16 (uint16x8_t __a, const int __b)
{
- return (uint16_t)__builtin_neon_vget_lanev8hi ((int16x8_t) __a, __b, 0);
+ return (uint16_t)__builtin_neon_vget_laneuv8hi ((int16x8_t) __a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vgetq_lane_u32 (uint32x4_t __a, const int __b)
{
- return (uint32_t)__builtin_neon_vget_lanev4si ((int32x4_t) __a, __b, 0);
+ return (uint32_t)__builtin_neon_vget_laneuv4si ((int32x4_t) __a, __b);
}
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
vgetq_lane_p8 (poly8x16_t __a, const int __b)
{
- return (poly8_t)__builtin_neon_vget_lanev16qi ((int8x16_t) __a, __b, 2);
+ return (poly8_t)__builtin_neon_vget_laneuv16qi ((int8x16_t) __a, __b);
}
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
vgetq_lane_p16 (poly16x8_t __a, const int __b)
{
- return (poly16_t)__builtin_neon_vget_lanev8hi ((int16x8_t) __a, __b, 2);
+ return (poly16_t)__builtin_neon_vget_laneuv8hi ((int16x8_t) __a, __b);
}
__extension__ static __inline int64_t __attribute__ ((__always_inline__))
vgetq_lane_s64 (int64x2_t __a, const int __b)
{
- return (int64_t)__builtin_neon_vget_lanev2di (__a, __b, 1);
+ return (int64_t)__builtin_neon_vget_lanev2di (__a, __b);
}
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
vgetq_lane_u64 (uint64x2_t __a, const int __b)
{
- return (uint64_t)__builtin_neon_vget_lanev2di ((int64x2_t) __a, __b, 0);
+ return (uint64_t)__builtin_neon_vget_lanev2di ((int64x2_t) __a, __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vcvt_s32_f32 (float32x2_t __a)
{
- return (int32x2_t)__builtin_neon_vcvtv2sf (__a, 1);
+ return (int32x2_t)__builtin_neon_vcvtsv2sf (__a);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vcvt_f32_s32 (int32x2_t __a)
{
- return (float32x2_t)__builtin_neon_vcvtv2si (__a, 1);
+ return (float32x2_t)__builtin_neon_vcvtsv2si (__a);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vcvt_f32_u32 (uint32x2_t __a)
{
- return (float32x2_t)__builtin_neon_vcvtv2si ((int32x2_t) __a, 0);
+ return (float32x2_t)__builtin_neon_vcvtuv2si ((int32x2_t) __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcvt_u32_f32 (float32x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vcvtv2sf (__a, 0);
+ return (uint32x2_t)__builtin_neon_vcvtuv2sf (__a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vcvtq_s32_f32 (float32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vcvtv4sf (__a, 1);
+ return (int32x4_t)__builtin_neon_vcvtsv4sf (__a);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vcvtq_f32_s32 (int32x4_t __a)
{
- return (float32x4_t)__builtin_neon_vcvtv4si (__a, 1);
+ return (float32x4_t)__builtin_neon_vcvtsv4si (__a);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vcvtq_f32_u32 (uint32x4_t __a)
{
- return (float32x4_t)__builtin_neon_vcvtv4si ((int32x4_t) __a, 0);
+ return (float32x4_t)__builtin_neon_vcvtuv4si ((int32x4_t) __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcvtq_u32_f32 (float32x4_t __a)
{
- return (uint32x4_t)__builtin_neon_vcvtv4sf (__a, 0);
+ return (uint32x4_t)__builtin_neon_vcvtuv4sf (__a);
}
#if ((__ARM_FP & 0x2) != 0)
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vcvt_n_s32_f32 (float32x2_t __a, const int __b)
{
- return (int32x2_t)__builtin_neon_vcvt_nv2sf (__a, __b, 1);
+ return (int32x2_t)__builtin_neon_vcvts_nv2sf (__a, __b);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vcvt_n_f32_s32 (int32x2_t __a, const int __b)
{
- return (float32x2_t)__builtin_neon_vcvt_nv2si (__a, __b, 1);
+ return (float32x2_t)__builtin_neon_vcvts_nv2si (__a, __b);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vcvt_n_f32_u32 (uint32x2_t __a, const int __b)
{
- return (float32x2_t)__builtin_neon_vcvt_nv2si ((int32x2_t) __a, __b, 0);
+ return (float32x2_t)__builtin_neon_vcvtu_nv2si ((int32x2_t) __a, __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vcvt_n_u32_f32 (float32x2_t __a, const int __b)
{
- return (uint32x2_t)__builtin_neon_vcvt_nv2sf (__a, __b, 0);
+ return (uint32x2_t)__builtin_neon_vcvtu_nv2sf (__a, __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vcvtq_n_s32_f32 (float32x4_t __a, const int __b)
{
- return (int32x4_t)__builtin_neon_vcvt_nv4sf (__a, __b, 1);
+ return (int32x4_t)__builtin_neon_vcvts_nv4sf (__a, __b);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vcvtq_n_f32_s32 (int32x4_t __a, const int __b)
{
- return (float32x4_t)__builtin_neon_vcvt_nv4si (__a, __b, 1);
+ return (float32x4_t)__builtin_neon_vcvts_nv4si (__a, __b);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vcvtq_n_f32_u32 (uint32x4_t __a, const int __b)
{
- return (float32x4_t)__builtin_neon_vcvt_nv4si ((int32x4_t) __a, __b, 0);
+ return (float32x4_t)__builtin_neon_vcvtu_nv4si ((int32x4_t) __a, __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vcvtq_n_u32_f32 (float32x4_t __a, const int __b)
{
- return (uint32x4_t)__builtin_neon_vcvt_nv4sf (__a, __b, 0);
+ return (uint32x4_t)__builtin_neon_vcvtu_nv4sf (__a, __b);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vmovn_s16 (int16x8_t __a)
{
- return (int8x8_t)__builtin_neon_vmovnv8hi (__a, 1);
+ return (int8x8_t)__builtin_neon_vmovnv8hi (__a);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmovn_s32 (int32x4_t __a)
{
- return (int16x4_t)__builtin_neon_vmovnv4si (__a, 1);
+ return (int16x4_t)__builtin_neon_vmovnv4si (__a);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmovn_s64 (int64x2_t __a)
{
- return (int32x2_t)__builtin_neon_vmovnv2di (__a, 1);
+ return (int32x2_t)__builtin_neon_vmovnv2di (__a);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vmovn_u16 (uint16x8_t __a)
{
- return (uint8x8_t)__builtin_neon_vmovnv8hi ((int16x8_t) __a, 0);
+ return (uint8x8_t)__builtin_neon_vmovnv8hi ((int16x8_t) __a);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmovn_u32 (uint32x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vmovnv4si ((int32x4_t) __a, 0);
+ return (uint16x4_t)__builtin_neon_vmovnv4si ((int32x4_t) __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmovn_u64 (uint64x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vmovnv2di ((int64x2_t) __a, 0);
+ return (uint32x2_t)__builtin_neon_vmovnv2di ((int64x2_t) __a);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vqmovn_s16 (int16x8_t __a)
{
- return (int8x8_t)__builtin_neon_vqmovnv8hi (__a, 1);
+ return (int8x8_t)__builtin_neon_vqmovnsv8hi (__a);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqmovn_s32 (int32x4_t __a)
{
- return (int16x4_t)__builtin_neon_vqmovnv4si (__a, 1);
+ return (int16x4_t)__builtin_neon_vqmovnsv4si (__a);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqmovn_s64 (int64x2_t __a)
{
- return (int32x2_t)__builtin_neon_vqmovnv2di (__a, 1);
+ return (int32x2_t)__builtin_neon_vqmovnsv2di (__a);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqmovn_u16 (uint16x8_t __a)
{
- return (uint8x8_t)__builtin_neon_vqmovnv8hi ((int16x8_t) __a, 0);
+ return (uint8x8_t)__builtin_neon_vqmovnuv8hi ((int16x8_t) __a);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqmovn_u32 (uint32x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vqmovnv4si ((int32x4_t) __a, 0);
+ return (uint16x4_t)__builtin_neon_vqmovnuv4si ((int32x4_t) __a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqmovn_u64 (uint64x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vqmovnv2di ((int64x2_t) __a, 0);
+ return (uint32x2_t)__builtin_neon_vqmovnuv2di ((int64x2_t) __a);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vqmovun_s16 (int16x8_t __a)
{
- return (uint8x8_t)__builtin_neon_vqmovunv8hi (__a, 1);
+ return (uint8x8_t)__builtin_neon_vqmovunv8hi (__a);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vqmovun_s32 (int32x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vqmovunv4si (__a, 1);
+ return (uint16x4_t)__builtin_neon_vqmovunv4si (__a);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vqmovun_s64 (int64x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vqmovunv2di (__a, 1);
+ return (uint32x2_t)__builtin_neon_vqmovunv2di (__a);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmovl_s8 (int8x8_t __a)
{
- return (int16x8_t)__builtin_neon_vmovlv8qi (__a, 1);
+ return (int16x8_t)__builtin_neon_vmovlsv8qi (__a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmovl_s16 (int16x4_t __a)
{
- return (int32x4_t)__builtin_neon_vmovlv4hi (__a, 1);
+ return (int32x4_t)__builtin_neon_vmovlsv4hi (__a);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vmovl_s32 (int32x2_t __a)
{
- return (int64x2_t)__builtin_neon_vmovlv2si (__a, 1);
+ return (int64x2_t)__builtin_neon_vmovlsv2si (__a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmovl_u8 (uint8x8_t __a)
{
- return (uint16x8_t)__builtin_neon_vmovlv8qi ((int8x8_t) __a, 0);
+ return (uint16x8_t)__builtin_neon_vmovluv8qi ((int8x8_t) __a);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmovl_u16 (uint16x4_t __a)
{
- return (uint32x4_t)__builtin_neon_vmovlv4hi ((int16x4_t) __a, 0);
+ return (uint32x4_t)__builtin_neon_vmovluv4hi ((int16x4_t) __a);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vmovl_u32 (uint32x2_t __a)
{
- return (uint64x2_t)__builtin_neon_vmovlv2si ((int32x2_t) __a, 0);
+ return (uint64x2_t)__builtin_neon_vmovluv2si ((int32x2_t) __a);
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
- return (int16x4_t)__builtin_neon_vmul_lanev4hi (__a, __b, __c, 1);
+ return (int16x4_t)__builtin_neon_vmul_lanev4hi (__a, __b, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
- return (int32x2_t)__builtin_neon_vmul_lanev2si (__a, __b, __c, 1);
+ return (int32x2_t)__builtin_neon_vmul_lanev2si (__a, __b, __c);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __c)
{
- return (float32x2_t)__builtin_neon_vmul_lanev2sf (__a, __b, __c, 3);
+ return (float32x2_t)__builtin_neon_vmul_lanev2sf (__a, __b, __c);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
{
- return (uint16x4_t)__builtin_neon_vmul_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+ return (uint16x4_t)__builtin_neon_vmul_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
{
- return (uint32x2_t)__builtin_neon_vmul_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+ return (uint32x2_t)__builtin_neon_vmul_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
{
- return (int16x8_t)__builtin_neon_vmul_lanev8hi (__a, __b, __c, 1);
+ return (int16x8_t)__builtin_neon_vmul_lanev8hi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
{
- return (int32x4_t)__builtin_neon_vmul_lanev4si (__a, __b, __c, 1);
+ return (int32x4_t)__builtin_neon_vmul_lanev4si (__a, __b, __c);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __c)
{
- return (float32x4_t)__builtin_neon_vmul_lanev4sf (__a, __b, __c, 3);
+ return (float32x4_t)__builtin_neon_vmul_lanev4sf (__a, __b, __c);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __c)
{
- return (uint16x8_t)__builtin_neon_vmul_lanev8hi ((int16x8_t) __a, (int16x4_t) __b, __c, 0);
+ return (uint16x8_t)__builtin_neon_vmul_lanev8hi ((int16x8_t) __a, (int16x4_t) __b, __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __c)
{
- return (uint32x4_t)__builtin_neon_vmul_lanev4si ((int32x4_t) __a, (int32x2_t) __b, __c, 0);
+ return (uint32x4_t)__builtin_neon_vmul_lanev4si ((int32x4_t) __a, (int32x2_t) __b, __c);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmla_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
- return (int16x4_t)__builtin_neon_vmla_lanev4hi (__a, __b, __c, __d, 1);
+ return (int16x4_t)__builtin_neon_vmla_lanev4hi (__a, __b, __c, __d);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmla_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
- return (int32x2_t)__builtin_neon_vmla_lanev2si (__a, __b, __c, __d, 1);
+ return (int32x2_t)__builtin_neon_vmla_lanev2si (__a, __b, __c, __d);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmla_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d)
{
- return (float32x2_t)__builtin_neon_vmla_lanev2sf (__a, __b, __c, __d, 3);
+ return (float32x2_t)__builtin_neon_vmla_lanev2sf (__a, __b, __c, __d);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
{
- return (uint16x4_t)__builtin_neon_vmla_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+ return (uint16x4_t)__builtin_neon_vmla_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
{
- return (uint32x2_t)__builtin_neon_vmla_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+ return (uint32x2_t)__builtin_neon_vmla_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
{
- return (int16x8_t)__builtin_neon_vmla_lanev8hi (__a, __b, __c, __d, 1);
+ return (int16x8_t)__builtin_neon_vmla_lanev8hi (__a, __b, __c, __d);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
{
- return (int32x4_t)__builtin_neon_vmla_lanev4si (__a, __b, __c, __d, 1);
+ return (int32x4_t)__builtin_neon_vmla_lanev4si (__a, __b, __c, __d);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d)
{
- return (float32x4_t)__builtin_neon_vmla_lanev4sf (__a, __b, __c, __d, 3);
+ return (float32x4_t)__builtin_neon_vmla_lanev4sf (__a, __b, __c, __d);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d)
{
- return (uint16x8_t)__builtin_neon_vmla_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d, 0);
+ return (uint16x8_t)__builtin_neon_vmla_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d)
{
- return (uint32x4_t)__builtin_neon_vmla_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d, 0);
+ return (uint32x4_t)__builtin_neon_vmla_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
- return (int32x4_t)__builtin_neon_vmlal_lanev4hi (__a, __b, __c, __d, 1);
+ return (int32x4_t)__builtin_neon_vmlals_lanev4hi (__a, __b, __c, __d);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
- return (int64x2_t)__builtin_neon_vmlal_lanev2si (__a, __b, __c, __d, 1);
+ return (int64x2_t)__builtin_neon_vmlals_lanev2si (__a, __b, __c, __d);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlal_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
{
- return (uint32x4_t)__builtin_neon_vmlal_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+ return (uint32x4_t)__builtin_neon_vmlalu_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vmlal_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
{
- return (uint64x2_t)__builtin_neon_vmlal_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+ return (uint64x2_t)__builtin_neon_vmlalu_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
- return (int32x4_t)__builtin_neon_vqdmlal_lanev4hi (__a, __b, __c, __d, 1);
+ return (int32x4_t)__builtin_neon_vqdmlal_lanev4hi (__a, __b, __c, __d);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
- return (int64x2_t)__builtin_neon_vqdmlal_lanev2si (__a, __b, __c, __d, 1);
+ return (int64x2_t)__builtin_neon_vqdmlal_lanev2si (__a, __b, __c, __d);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmls_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
- return (int16x4_t)__builtin_neon_vmls_lanev4hi (__a, __b, __c, __d, 1);
+ return (int16x4_t)__builtin_neon_vmls_lanev4hi (__a, __b, __c, __d);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmls_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
- return (int32x2_t)__builtin_neon_vmls_lanev2si (__a, __b, __c, __d, 1);
+ return (int32x2_t)__builtin_neon_vmls_lanev2si (__a, __b, __c, __d);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmls_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d)
{
- return (float32x2_t)__builtin_neon_vmls_lanev2sf (__a, __b, __c, __d, 3);
+ return (float32x2_t)__builtin_neon_vmls_lanev2sf (__a, __b, __c, __d);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
{
- return (uint16x4_t)__builtin_neon_vmls_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+ return (uint16x4_t)__builtin_neon_vmls_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
{
- return (uint32x2_t)__builtin_neon_vmls_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+ return (uint32x2_t)__builtin_neon_vmls_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
{
- return (int16x8_t)__builtin_neon_vmls_lanev8hi (__a, __b, __c, __d, 1);
+ return (int16x8_t)__builtin_neon_vmls_lanev8hi (__a, __b, __c, __d);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
{
- return (int32x4_t)__builtin_neon_vmls_lanev4si (__a, __b, __c, __d, 1);
+ return (int32x4_t)__builtin_neon_vmls_lanev4si (__a, __b, __c, __d);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d)
{
- return (float32x4_t)__builtin_neon_vmls_lanev4sf (__a, __b, __c, __d, 3);
+ return (float32x4_t)__builtin_neon_vmls_lanev4sf (__a, __b, __c, __d);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d)
{
- return (uint16x8_t)__builtin_neon_vmls_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d, 0);
+ return (uint16x8_t)__builtin_neon_vmls_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d)
{
- return (uint32x4_t)__builtin_neon_vmls_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d, 0);
+ return (uint32x4_t)__builtin_neon_vmls_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
- return (int32x4_t)__builtin_neon_vmlsl_lanev4hi (__a, __b, __c, __d, 1);
+ return (int32x4_t)__builtin_neon_vmlsls_lanev4hi (__a, __b, __c, __d);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
- return (int64x2_t)__builtin_neon_vmlsl_lanev2si (__a, __b, __c, __d, 1);
+ return (int64x2_t)__builtin_neon_vmlsls_lanev2si (__a, __b, __c, __d);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlsl_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
{
- return (uint32x4_t)__builtin_neon_vmlsl_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+ return (uint32x4_t)__builtin_neon_vmlslu_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vmlsl_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
{
- return (uint64x2_t)__builtin_neon_vmlsl_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+ return (uint64x2_t)__builtin_neon_vmlslu_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
- return (int32x4_t)__builtin_neon_vqdmlsl_lanev4hi (__a, __b, __c, __d, 1);
+ return (int32x4_t)__builtin_neon_vqdmlsl_lanev4hi (__a, __b, __c, __d);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
- return (int64x2_t)__builtin_neon_vqdmlsl_lanev2si (__a, __b, __c, __d, 1);
+ return (int64x2_t)__builtin_neon_vqdmlsl_lanev2si (__a, __b, __c, __d);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
- return (int32x4_t)__builtin_neon_vmull_lanev4hi (__a, __b, __c, 1);
+ return (int32x4_t)__builtin_neon_vmulls_lanev4hi (__a, __b, __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
- return (int64x2_t)__builtin_neon_vmull_lanev2si (__a, __b, __c, 1);
+ return (int64x2_t)__builtin_neon_vmulls_lanev2si (__a, __b, __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
{
- return (uint32x4_t)__builtin_neon_vmull_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+ return (uint32x4_t)__builtin_neon_vmullu_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
{
- return (uint64x2_t)__builtin_neon_vmull_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+ return (uint64x2_t)__builtin_neon_vmullu_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
- return (int32x4_t)__builtin_neon_vqdmull_lanev4hi (__a, __b, __c, 1);
+ return (int32x4_t)__builtin_neon_vqdmull_lanev4hi (__a, __b, __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
- return (int64x2_t)__builtin_neon_vqdmull_lanev2si (__a, __b, __c, 1);
+ return (int64x2_t)__builtin_neon_vqdmull_lanev2si (__a, __b, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
{
- return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c, 1);
+ return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
{
- return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c, 1);
+ return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
- return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c, 1);
+ return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
- return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c, 1);
+ return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
{
- return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c, 5);
+ return (int16x8_t)__builtin_neon_vqrdmulh_lanev8hi (__a, __b, __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
{
- return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c, 5);
+ return (int32x4_t)__builtin_neon_vqrdmulh_lanev4si (__a, __b, __c);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
- return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c, 5);
+ return (int16x4_t)__builtin_neon_vqrdmulh_lanev4hi (__a, __b, __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
- return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c, 5);
+ return (int32x2_t)__builtin_neon_vqrdmulh_lanev2si (__a, __b, __c);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmul_n_s16 (int16x4_t __a, int16_t __b)
{
- return (int16x4_t)__builtin_neon_vmul_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+ return (int16x4_t)__builtin_neon_vmul_nv4hi (__a, (__builtin_neon_hi) __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmul_n_s32 (int32x2_t __a, int32_t __b)
{
- return (int32x2_t)__builtin_neon_vmul_nv2si (__a, (__builtin_neon_si) __b, 1);
+ return (int32x2_t)__builtin_neon_vmul_nv2si (__a, (__builtin_neon_si) __b);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmul_n_f32 (float32x2_t __a, float32_t __b)
{
- return (float32x2_t)__builtin_neon_vmul_nv2sf (__a, (__builtin_neon_sf) __b, 3);
+ return (float32x2_t)__builtin_neon_vmul_nv2sf (__a, (__builtin_neon_sf) __b);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmul_n_u16 (uint16x4_t __a, uint16_t __b)
{
- return (uint16x4_t)__builtin_neon_vmul_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b, 0);
+ return (uint16x4_t)__builtin_neon_vmul_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmul_n_u32 (uint32x2_t __a, uint32_t __b)
{
- return (uint32x2_t)__builtin_neon_vmul_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b, 0);
+ return (uint32x2_t)__builtin_neon_vmul_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmulq_n_s16 (int16x8_t __a, int16_t __b)
{
- return (int16x8_t)__builtin_neon_vmul_nv8hi (__a, (__builtin_neon_hi) __b, 1);
+ return (int16x8_t)__builtin_neon_vmul_nv8hi (__a, (__builtin_neon_hi) __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmulq_n_s32 (int32x4_t __a, int32_t __b)
{
- return (int32x4_t)__builtin_neon_vmul_nv4si (__a, (__builtin_neon_si) __b, 1);
+ return (int32x4_t)__builtin_neon_vmul_nv4si (__a, (__builtin_neon_si) __b);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmulq_n_f32 (float32x4_t __a, float32_t __b)
{
- return (float32x4_t)__builtin_neon_vmul_nv4sf (__a, (__builtin_neon_sf) __b, 3);
+ return (float32x4_t)__builtin_neon_vmul_nv4sf (__a, (__builtin_neon_sf) __b);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
{
- return (uint16x8_t)__builtin_neon_vmul_nv8hi ((int16x8_t) __a, (__builtin_neon_hi) __b, 0);
+ return (uint16x8_t)__builtin_neon_vmul_nv8hi ((int16x8_t) __a, (__builtin_neon_hi) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
{
- return (uint32x4_t)__builtin_neon_vmul_nv4si ((int32x4_t) __a, (__builtin_neon_si) __b, 0);
+ return (uint32x4_t)__builtin_neon_vmul_nv4si ((int32x4_t) __a, (__builtin_neon_si) __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmull_n_s16 (int16x4_t __a, int16_t __b)
{
- return (int32x4_t)__builtin_neon_vmull_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+ return (int32x4_t)__builtin_neon_vmulls_nv4hi (__a, (__builtin_neon_hi) __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vmull_n_s32 (int32x2_t __a, int32_t __b)
{
- return (int64x2_t)__builtin_neon_vmull_nv2si (__a, (__builtin_neon_si) __b, 1);
+ return (int64x2_t)__builtin_neon_vmulls_nv2si (__a, (__builtin_neon_si) __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmull_n_u16 (uint16x4_t __a, uint16_t __b)
{
- return (uint32x4_t)__builtin_neon_vmull_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b, 0);
+ return (uint32x4_t)__builtin_neon_vmullu_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vmull_n_u32 (uint32x2_t __a, uint32_t __b)
{
- return (uint64x2_t)__builtin_neon_vmull_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b, 0);
+ return (uint64x2_t)__builtin_neon_vmullu_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmull_n_s16 (int16x4_t __a, int16_t __b)
{
- return (int32x4_t)__builtin_neon_vqdmull_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+ return (int32x4_t)__builtin_neon_vqdmull_nv4hi (__a, (__builtin_neon_hi) __b);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmull_n_s32 (int32x2_t __a, int32_t __b)
{
- return (int64x2_t)__builtin_neon_vqdmull_nv2si (__a, (__builtin_neon_si) __b, 1);
+ return (int64x2_t)__builtin_neon_vqdmull_nv2si (__a, (__builtin_neon_si) __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
- return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b, 1);
+ return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
{
- return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b, 1);
+ return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqdmulh_n_s16 (int16x4_t __a, int16_t __b)
{
- return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+ return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqdmulh_n_s32 (int32x2_t __a, int32_t __b)
{
- return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b, 1);
+ return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
- return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b, 5);
+ return (int16x8_t)__builtin_neon_vqrdmulh_nv8hi (__a, (__builtin_neon_hi) __b);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
{
- return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b, 5);
+ return (int32x4_t)__builtin_neon_vqrdmulh_nv4si (__a, (__builtin_neon_si) __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqrdmulh_n_s16 (int16x4_t __a, int16_t __b)
{
- return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b, 5);
+ return (int16x4_t)__builtin_neon_vqrdmulh_nv4hi (__a, (__builtin_neon_hi) __b);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vqrdmulh_n_s32 (int32x2_t __a, int32_t __b)
{
- return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b, 5);
+ return (int32x2_t)__builtin_neon_vqrdmulh_nv2si (__a, (__builtin_neon_si) __b);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmla_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
{
- return (int16x4_t)__builtin_neon_vmla_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+ return (int16x4_t)__builtin_neon_vmla_nv4hi (__a, __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmla_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
{
- return (int32x2_t)__builtin_neon_vmla_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+ return (int32x2_t)__builtin_neon_vmla_nv2si (__a, __b, (__builtin_neon_si) __c);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
{
- return (float32x2_t)__builtin_neon_vmla_nv2sf (__a, __b, (__builtin_neon_sf) __c, 3);
+ return (float32x2_t)__builtin_neon_vmla_nv2sf (__a, __b, (__builtin_neon_sf) __c);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmla_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
{
- return (uint16x4_t)__builtin_neon_vmla_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+ return (uint16x4_t)__builtin_neon_vmla_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmla_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
{
- return (uint32x2_t)__builtin_neon_vmla_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+ return (uint32x2_t)__builtin_neon_vmla_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
- return (int16x8_t)__builtin_neon_vmla_nv8hi (__a, __b, (__builtin_neon_hi) __c, 1);
+ return (int16x8_t)__builtin_neon_vmla_nv8hi (__a, __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
{
- return (int32x4_t)__builtin_neon_vmla_nv4si (__a, __b, (__builtin_neon_si) __c, 1);
+ return (int32x4_t)__builtin_neon_vmla_nv4si (__a, __b, (__builtin_neon_si) __c);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
- return (float32x4_t)__builtin_neon_vmla_nv4sf (__a, __b, (__builtin_neon_sf) __c, 3);
+ return (float32x4_t)__builtin_neon_vmla_nv4sf (__a, __b, (__builtin_neon_sf) __c);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
- return (uint16x8_t)__builtin_neon_vmla_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c, 0);
+ return (uint16x8_t)__builtin_neon_vmla_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
- return (uint32x4_t)__builtin_neon_vmla_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c, 0);
+ return (uint32x4_t)__builtin_neon_vmla_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
{
- return (int32x4_t)__builtin_neon_vmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+ return (int32x4_t)__builtin_neon_vmlals_nv4hi (__a, __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
{
- return (int64x2_t)__builtin_neon_vmlal_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+ return (int64x2_t)__builtin_neon_vmlals_nv2si (__a, __b, (__builtin_neon_si) __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlal_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
{
- return (uint32x4_t)__builtin_neon_vmlal_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+ return (uint32x4_t)__builtin_neon_vmlalu_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vmlal_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
{
- return (uint64x2_t)__builtin_neon_vmlal_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+ return (uint64x2_t)__builtin_neon_vmlalu_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
{
- return (int32x4_t)__builtin_neon_vqdmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+ return (int32x4_t)__builtin_neon_vqdmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
{
- return (int64x2_t)__builtin_neon_vqdmlal_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+ return (int64x2_t)__builtin_neon_vqdmlal_nv2si (__a, __b, (__builtin_neon_si) __c);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmls_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
{
- return (int16x4_t)__builtin_neon_vmls_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+ return (int16x4_t)__builtin_neon_vmls_nv4hi (__a, __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmls_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
{
- return (int32x2_t)__builtin_neon_vmls_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+ return (int32x2_t)__builtin_neon_vmls_nv2si (__a, __b, (__builtin_neon_si) __c);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
{
- return (float32x2_t)__builtin_neon_vmls_nv2sf (__a, __b, (__builtin_neon_sf) __c, 3);
+ return (float32x2_t)__builtin_neon_vmls_nv2sf (__a, __b, (__builtin_neon_sf) __c);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmls_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
{
- return (uint16x4_t)__builtin_neon_vmls_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+ return (uint16x4_t)__builtin_neon_vmls_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmls_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
{
- return (uint32x2_t)__builtin_neon_vmls_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+ return (uint32x2_t)__builtin_neon_vmls_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlsq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
- return (int16x8_t)__builtin_neon_vmls_nv8hi (__a, __b, (__builtin_neon_hi) __c, 1);
+ return (int16x8_t)__builtin_neon_vmls_nv8hi (__a, __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlsq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
{
- return (int32x4_t)__builtin_neon_vmls_nv4si (__a, __b, (__builtin_neon_si) __c, 1);
+ return (int32x4_t)__builtin_neon_vmls_nv4si (__a, __b, (__builtin_neon_si) __c);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
- return (float32x4_t)__builtin_neon_vmls_nv4sf (__a, __b, (__builtin_neon_sf) __c, 3);
+ return (float32x4_t)__builtin_neon_vmls_nv4sf (__a, __b, (__builtin_neon_sf) __c);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlsq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
- return (uint16x8_t)__builtin_neon_vmls_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c, 0);
+ return (uint16x8_t)__builtin_neon_vmls_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlsq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
- return (uint32x4_t)__builtin_neon_vmls_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c, 0);
+ return (uint32x4_t)__builtin_neon_vmls_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
{
- return (int32x4_t)__builtin_neon_vmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+ return (int32x4_t)__builtin_neon_vmlsls_nv4hi (__a, __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
{
- return (int64x2_t)__builtin_neon_vmlsl_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+ return (int64x2_t)__builtin_neon_vmlsls_nv2si (__a, __b, (__builtin_neon_si) __c);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlsl_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
{
- return (uint32x4_t)__builtin_neon_vmlsl_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+ return (uint32x4_t)__builtin_neon_vmlslu_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vmlsl_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
{
- return (uint64x2_t)__builtin_neon_vmlsl_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+ return (uint64x2_t)__builtin_neon_vmlslu_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
{
- return (int32x4_t)__builtin_neon_vqdmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+ return (int32x4_t)__builtin_neon_vqdmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c);
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
{
- return (int64x2_t)__builtin_neon_vqdmlsl_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+ return (int64x2_t)__builtin_neon_vqdmlsl_nv2si (__a, __b, (__builtin_neon_si) __c);
}
#ifdef __ARM_FEATURE_CRYPTO
(match_operand:VDQW 2 "s_register_operand" "")))]
"TARGET_NEON && (!<Is_float_mode> || flag_unsafe_math_optimizations)"
{
- HOST_WIDE_INT magic_word = (<MODE>mode == V2SFmode || <MODE>mode == V4SFmode)
- ? 3 : 1;
- rtx magic_rtx = GEN_INT (magic_word);
int inverse = 0;
int use_zero_form = 0;
int swap_bsl_operands = 0;
rtx mask = gen_reg_rtx (<V_cmp_result>mode);
rtx tmp = gen_reg_rtx (<V_cmp_result>mode);
- rtx (*base_comparison) (rtx, rtx, rtx, rtx);
- rtx (*complimentary_comparison) (rtx, rtx, rtx, rtx);
+ rtx (*base_comparison) (rtx, rtx, rtx);
+ rtx (*complimentary_comparison) (rtx, rtx, rtx);
switch (GET_CODE (operands[3]))
{
}
if (!inverse)
- emit_insn (base_comparison (mask, operands[4], operands[5], magic_rtx));
+ emit_insn (base_comparison (mask, operands[4], operands[5]));
else
- emit_insn (complimentary_comparison (mask, operands[5], operands[4], magic_rtx));
+ emit_insn (complimentary_comparison (mask, operands[5], operands[4]));
break;
case UNLT:
case UNLE:
a NE b -> !(a EQ b) */
if (inverse)
- emit_insn (base_comparison (mask, operands[4], operands[5], magic_rtx));
+ emit_insn (base_comparison (mask, operands[4], operands[5]));
else
- emit_insn (complimentary_comparison (mask, operands[5], operands[4], magic_rtx));
+ emit_insn (complimentary_comparison (mask, operands[5], operands[4]));
swap_bsl_operands = 1;
break;
true iff !(a != b && a ORDERED b), swapping the operands to BSL
will then give us (a == b || a UNORDERED b) as intended. */
- emit_insn (gen_neon_vcgt<mode> (mask, operands[4], operands[5], magic_rtx));
- emit_insn (gen_neon_vcgt<mode> (tmp, operands[5], operands[4], magic_rtx));
+ emit_insn (gen_neon_vcgt<mode> (mask, operands[4], operands[5]));
+ emit_insn (gen_neon_vcgt<mode> (tmp, operands[5], operands[4]));
emit_insn (gen_ior<v_cmp_result>3 (mask, mask, tmp));
swap_bsl_operands = 1;
break;
swap_bsl_operands = 1;
/* Fall through. */
case ORDERED:
- emit_insn (gen_neon_vcgt<mode> (tmp, operands[4], operands[5], magic_rtx));
- emit_insn (gen_neon_vcge<mode> (mask, operands[5], operands[4], magic_rtx));
+ emit_insn (gen_neon_vcgt<mode> (tmp, operands[4], operands[5]));
+ emit_insn (gen_neon_vcge<mode> (mask, operands[5], operands[4]));
emit_insn (gen_ior<v_cmp_result>3 (mask, mask, tmp));
break;
default:
switch (GET_CODE (operands[3]))
{
case GEU:
- emit_insn (gen_neon_vcge<mode> (mask, operands[4], operands[5],
- const0_rtx));
+ emit_insn (gen_neon_vcgeu<mode> (mask, operands[4], operands[5]));
break;
case GTU:
- emit_insn (gen_neon_vcgt<mode> (mask, operands[4], operands[5],
- const0_rtx));
+ emit_insn (gen_neon_vcgtu<mode> (mask, operands[4], operands[5]));
break;
case EQ:
- emit_insn (gen_neon_vceq<mode> (mask, operands[4], operands[5],
- const0_rtx));
+ emit_insn (gen_neon_vceq<mode> (mask, operands[4], operands[5]));
break;
case LEU:
if (immediate_zero)
- emit_insn (gen_neon_vcle<mode> (mask, operands[4], operands[5],
- const0_rtx));
+ emit_insn (gen_neon_vcle<mode> (mask, operands[4], operands[5]));
else
- emit_insn (gen_neon_vcge<mode> (mask, operands[5], operands[4],
- const0_rtx));
+ emit_insn (gen_neon_vcgeu<mode> (mask, operands[5], operands[4]));
break;
case LTU:
if (immediate_zero)
- emit_insn (gen_neon_vclt<mode> (mask, operands[4], operands[5],
- const0_rtx));
+ emit_insn (gen_neon_vclt<mode> (mask, operands[4], operands[5]));
else
- emit_insn (gen_neon_vcgt<mode> (mask, operands[5], operands[4],
- const0_rtx));
+ emit_insn (gen_neon_vcgtu<mode> (mask, operands[5], operands[4]));
break;
case NE:
- emit_insn (gen_neon_vceq<mode> (mask, operands[4], operands[5],
- const0_rtx));
+ emit_insn (gen_neon_vceq<mode> (mask, operands[4], operands[5]));
inverse = 1;
break;
(define_expand "neon_vadd<mode>"
[(match_operand:VCVTF 0 "s_register_operand" "=w")
(match_operand:VCVTF 1 "s_register_operand" "w")
- (match_operand:VCVTF 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VCVTF 2 "s_register_operand" "w")]
"TARGET_NEON"
{
if (!<Is_float_mode> || flag_unsafe_math_optimizations)
(const_string "neon_add<q>")))]
)
-; operand 3 represents in bits:
-; bit 0: signed (vs unsigned).
-; bit 1: rounding (vs none).
-
-(define_insn "neon_vaddl<mode>"
+(define_insn "neon_vaddl<sup><mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:VDI 1 "s_register_operand" "w")
- (match_operand:VDI 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VADDL))]
+ (match_operand:VDI 2 "s_register_operand" "w")]
+ VADDL))]
"TARGET_NEON"
- "vaddl.%T3%#<V_sz_elem>\t%q0, %P1, %P2"
+ "vaddl.<sup>%#<V_sz_elem>\t%q0, %P1, %P2"
[(set_attr "type" "neon_add_long")]
)
-(define_insn "neon_vaddw<mode>"
+(define_insn "neon_vaddw<sup><mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "w")
- (match_operand:VDI 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VADDW))]
+ (match_operand:VDI 2 "s_register_operand" "w")]
+ VADDW))]
"TARGET_NEON"
- "vaddw.%T3%#<V_sz_elem>\t%q0, %q1, %P2"
+ "vaddw.<sup>%#<V_sz_elem>\t%q0, %q1, %P2"
[(set_attr "type" "neon_add_widen")]
)
; vhadd and vrhadd.
-(define_insn "neon_vhadd<mode>"
+(define_insn "neon_v<r>hadd<sup><mode>"
[(set (match_operand:VDQIW 0 "s_register_operand" "=w")
(unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
- (match_operand:VDQIW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VHADD))]
+ (match_operand:VDQIW 2 "s_register_operand" "w")]
+ VHADD))]
"TARGET_NEON"
- "v%O3hadd.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ "v<r>hadd.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_add_halve_q")]
)
-(define_insn "neon_vqadd<mode>"
+(define_insn "neon_vqadd<sup><mode>"
[(set (match_operand:VDQIX 0 "s_register_operand" "=w")
(unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
- (match_operand:VDQIX 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VQADD))]
+ (match_operand:VDQIX 2 "s_register_operand" "w")]
+ VQADD))]
"TARGET_NEON"
- "vqadd.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ "vqadd.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_qadd<q>")]
)
-(define_insn "neon_vaddhn<mode>"
+(define_insn "neon_v<r>addhn<mode>"
[(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
(unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
- (match_operand:VN 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VADDHN))]
+ (match_operand:VN 2 "s_register_operand" "w")]
+ VADDHN))]
"TARGET_NEON"
- "v%O3addhn.<V_if_elem>\t%P0, %q1, %q2"
+ "v<r>addhn.<V_if_elem>\t%P0, %q1, %q2"
[(set_attr "type" "neon_add_halve_narrow_q")]
)
-;; We cannot replace this unspec with mul<mode>3 because of the odd
-;; polynomial multiplication case that can specified by operand 3.
-(define_insn "neon_vmul<mode>"
- [(set (match_operand:VDQW 0 "s_register_operand" "=w")
- (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "w")
- (match_operand:VDQW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+;; Polynomial and Float multiplication.
+(define_insn "neon_vmul<pf><mode>"
+ [(set (match_operand:VPF 0 "s_register_operand" "=w")
+ (unspec:VPF [(match_operand:VPF 1 "s_register_operand" "w")
+ (match_operand:VPF 2 "s_register_operand" "w")]
UNSPEC_VMUL))]
"TARGET_NEON"
- "vmul.%F3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ "vmul.<pf>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set (attr "type")
(if_then_else (match_test "<Is_float_mode>")
(const_string "neon_fp_mul_s<q>")
[(match_operand:VDQW 0 "s_register_operand" "=w")
(match_operand:VDQW 1 "s_register_operand" "0")
(match_operand:VDQW 2 "s_register_operand" "w")
- (match_operand:VDQW 3 "s_register_operand" "w")
- (match_operand:SI 4 "immediate_operand" "i")]
+ (match_operand:VDQW 3 "s_register_operand" "w")]
"TARGET_NEON"
{
if (!<Is_float_mode> || flag_unsafe_math_optimizations)
[(match_operand:VCVTF 0 "s_register_operand")
(match_operand:VCVTF 1 "s_register_operand")
(match_operand:VCVTF 2 "s_register_operand")
- (match_operand:VCVTF 3 "s_register_operand")
- (match_operand:SI 4 "immediate_operand")]
+ (match_operand:VCVTF 3 "s_register_operand")]
"TARGET_NEON && TARGET_FMA"
{
emit_insn (gen_fma<mode>4_intrinsic (operands[0], operands[2], operands[3],
[(match_operand:VCVTF 0 "s_register_operand")
(match_operand:VCVTF 1 "s_register_operand")
(match_operand:VCVTF 2 "s_register_operand")
- (match_operand:VCVTF 3 "s_register_operand")
- (match_operand:SI 4 "immediate_operand")]
+ (match_operand:VCVTF 3 "s_register_operand")]
"TARGET_NEON && TARGET_FMA"
{
emit_insn (gen_fmsub<mode>4_intrinsic (operands[0], operands[2], operands[3],
(const_string "neon_mla_<V_elem_ch><q>")))]
)
-(define_insn "neon_vmlal<mode>"
+(define_insn "neon_vmlal<sup><mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
(match_operand:VW 2 "s_register_operand" "w")
- (match_operand:VW 3 "s_register_operand" "w")
- (match_operand:SI 4 "immediate_operand" "i")]
- UNSPEC_VMLAL))]
+ (match_operand:VW 3 "s_register_operand" "w")]
+ VMLAL))]
"TARGET_NEON"
- "vmlal.%T4%#<V_sz_elem>\t%q0, %P2, %P3"
+ "vmlal.<sup>%#<V_sz_elem>\t%q0, %P2, %P3"
[(set_attr "type" "neon_mla_<V_elem_ch>_long")]
)
[(match_operand:VDQW 0 "s_register_operand" "=w")
(match_operand:VDQW 1 "s_register_operand" "0")
(match_operand:VDQW 2 "s_register_operand" "w")
- (match_operand:VDQW 3 "s_register_operand" "w")
- (match_operand:SI 4 "immediate_operand" "i")]
+ (match_operand:VDQW 3 "s_register_operand" "w")]
"TARGET_NEON"
{
if (!<Is_float_mode> || flag_unsafe_math_optimizations)
(const_string "neon_mla_<V_elem_ch><q>")))]
)
-(define_insn "neon_vmlsl<mode>"
+(define_insn "neon_vmlsl<sup><mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
(match_operand:VW 2 "s_register_operand" "w")
- (match_operand:VW 3 "s_register_operand" "w")
- (match_operand:SI 4 "immediate_operand" "i")]
- UNSPEC_VMLSL))]
+ (match_operand:VW 3 "s_register_operand" "w")]
+ VMLSL))]
"TARGET_NEON"
- "vmlsl.%T4%#<V_sz_elem>\t%q0, %P2, %P3"
+ "vmlsl.<sup>%#<V_sz_elem>\t%q0, %P2, %P3"
[(set_attr "type" "neon_mla_<V_elem_ch>_long")]
)
-(define_insn "neon_vqdmulh<mode>"
+;; vqdmulh, vqrdmulh
+(define_insn "neon_vq<r>dmulh<mode>"
[(set (match_operand:VMDQI 0 "s_register_operand" "=w")
(unspec:VMDQI [(match_operand:VMDQI 1 "s_register_operand" "w")
- (match_operand:VMDQI 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VQDMULH))]
+ (match_operand:VMDQI 2 "s_register_operand" "w")]
+ VQDMULH))]
"TARGET_NEON"
- "vq%O3dmulh.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ "vq<r>dmulh.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_sat_mul_<V_elem_ch><q>")]
)
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
(match_operand:VMDI 2 "s_register_operand" "w")
- (match_operand:VMDI 3 "s_register_operand" "w")
- (match_operand:SI 4 "immediate_operand" "i")]
+ (match_operand:VMDI 3 "s_register_operand" "w")]
UNSPEC_VQDMLAL))]
"TARGET_NEON"
"vqdmlal.<V_s_elem>\t%q0, %P2, %P3"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
(match_operand:VMDI 2 "s_register_operand" "w")
- (match_operand:VMDI 3 "s_register_operand" "w")
- (match_operand:SI 4 "immediate_operand" "i")]
+ (match_operand:VMDI 3 "s_register_operand" "w")]
UNSPEC_VQDMLSL))]
"TARGET_NEON"
"vqdmlsl.<V_s_elem>\t%q0, %P2, %P3"
[(set_attr "type" "neon_sat_mla_<V_elem_ch>_long")]
)
-(define_insn "neon_vmull<mode>"
+(define_insn "neon_vmull<sup><mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:VW 1 "s_register_operand" "w")
- (match_operand:VW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VMULL))]
+ (match_operand:VW 2 "s_register_operand" "w")]
+ VMULL))]
"TARGET_NEON"
- "vmull.%T3%#<V_sz_elem>\t%q0, %P1, %P2"
+ "vmull.<sup>%#<V_sz_elem>\t%q0, %P1, %P2"
[(set_attr "type" "neon_mul_<V_elem_ch>_long")]
)
(define_insn "neon_vqdmull<mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:VMDI 1 "s_register_operand" "w")
- (match_operand:VMDI 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VMDI 2 "s_register_operand" "w")]
UNSPEC_VQDMULL))]
"TARGET_NEON"
"vqdmull.<V_s_elem>\t%q0, %P1, %P2"
(define_expand "neon_vsub<mode>"
[(match_operand:VCVTF 0 "s_register_operand" "=w")
(match_operand:VCVTF 1 "s_register_operand" "w")
- (match_operand:VCVTF 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VCVTF 2 "s_register_operand" "w")]
"TARGET_NEON"
{
if (!<Is_float_mode> || flag_unsafe_math_optimizations)
(const_string "neon_sub<q>")))]
)
-(define_insn "neon_vsubl<mode>"
+(define_insn "neon_vsubl<sup><mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:VDI 1 "s_register_operand" "w")
- (match_operand:VDI 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VSUBL))]
+ (match_operand:VDI 2 "s_register_operand" "w")]
+ VSUBL))]
"TARGET_NEON"
- "vsubl.%T3%#<V_sz_elem>\t%q0, %P1, %P2"
+ "vsubl.<sup>%#<V_sz_elem>\t%q0, %P1, %P2"
[(set_attr "type" "neon_sub_long")]
)
-(define_insn "neon_vsubw<mode>"
+(define_insn "neon_vsubw<sup><mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "w")
- (match_operand:VDI 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VSUBW))]
+ (match_operand:VDI 2 "s_register_operand" "w")]
+ VSUBW))]
"TARGET_NEON"
- "vsubw.%T3%#<V_sz_elem>\t%q0, %q1, %P2"
+ "vsubw.<sup>%#<V_sz_elem>\t%q0, %q1, %P2"
[(set_attr "type" "neon_sub_widen")]
)
-(define_insn "neon_vqsub<mode>"
+(define_insn "neon_vqsub<sup><mode>"
[(set (match_operand:VDQIX 0 "s_register_operand" "=w")
(unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
- (match_operand:VDQIX 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VQSUB))]
+ (match_operand:VDQIX 2 "s_register_operand" "w")]
+ VQSUB))]
"TARGET_NEON"
- "vqsub.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ "vqsub.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_qsub<q>")]
)
-(define_insn "neon_vhsub<mode>"
+(define_insn "neon_vhsub<sup><mode>"
[(set (match_operand:VDQIW 0 "s_register_operand" "=w")
(unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
- (match_operand:VDQIW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VHSUB))]
+ (match_operand:VDQIW 2 "s_register_operand" "w")]
+ VHSUB))]
"TARGET_NEON"
- "vhsub.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ "vhsub.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_sub_halve<q>")]
)
-(define_insn "neon_vsubhn<mode>"
+(define_insn "neon_v<r>subhn<mode>"
[(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
(unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
- (match_operand:VN 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VSUBHN))]
+ (match_operand:VN 2 "s_register_operand" "w")]
+ VSUBHN))]
"TARGET_NEON"
- "v%O3subhn.<V_if_elem>\t%P0, %q1, %q2"
+ "v<r>subhn.<V_if_elem>\t%P0, %q1, %q2"
[(set_attr "type" "neon_sub_halve_narrow_q")]
)
[(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w,w")
(unspec:<V_cmp_result>
[(match_operand:VDQW 1 "s_register_operand" "w,w")
- (match_operand:VDQW 2 "reg_or_zero_operand" "w,Dz")
- (match_operand:SI 3 "immediate_operand" "i,i")]
+ (match_operand:VDQW 2 "reg_or_zero_operand" "w,Dz")]
UNSPEC_VCEQ))]
"TARGET_NEON"
"@
[(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w,w")
(unspec:<V_cmp_result>
[(match_operand:VDQW 1 "s_register_operand" "w,w")
- (match_operand:VDQW 2 "reg_or_zero_operand" "w,Dz")
- (match_operand:SI 3 "immediate_operand" "i,i")]
+ (match_operand:VDQW 2 "reg_or_zero_operand" "w,Dz")]
UNSPEC_VCGE))]
"TARGET_NEON"
"@
- vcge.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2
- vcge.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, #0"
+ vcge.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2
+ vcge.<V_s_elem>\t%<V_reg>0, %<V_reg>1, #0"
[(set (attr "type")
(if_then_else (match_test "<Is_float_mode>")
(const_string "neon_fp_compare_s<q>")
[(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
(unspec:<V_cmp_result>
[(match_operand:VDQIW 1 "s_register_operand" "w")
- (match_operand:VDQIW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VDQIW 2 "s_register_operand" "w")]
UNSPEC_VCGEU))]
"TARGET_NEON"
- "vcge.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ "vcge.u%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_compare<q>")]
)
[(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w,w")
(unspec:<V_cmp_result>
[(match_operand:VDQW 1 "s_register_operand" "w,w")
- (match_operand:VDQW 2 "reg_or_zero_operand" "w,Dz")
- (match_operand:SI 3 "immediate_operand" "i,i")]
+ (match_operand:VDQW 2 "reg_or_zero_operand" "w,Dz")]
UNSPEC_VCGT))]
"TARGET_NEON"
"@
- vcgt.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2
- vcgt.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, #0"
+ vcgt.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2
+ vcgt.<V_s_elem>\t%<V_reg>0, %<V_reg>1, #0"
[(set (attr "type")
(if_then_else (match_test "<Is_float_mode>")
(const_string "neon_fp_compare_s<q>")
[(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
(unspec:<V_cmp_result>
[(match_operand:VDQIW 1 "s_register_operand" "w")
- (match_operand:VDQIW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VDQIW 2 "s_register_operand" "w")]
UNSPEC_VCGTU))]
"TARGET_NEON"
- "vcgt.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ "vcgt.u%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_compare<q>")]
)
[(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
(unspec:<V_cmp_result>
[(match_operand:VDQW 1 "s_register_operand" "w")
- (match_operand:VDQW 2 "zero_operand" "Dz")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VDQW 2 "zero_operand" "Dz")]
UNSPEC_VCLE))]
"TARGET_NEON"
- "vcle.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, #0"
+ "vcle.<V_s_elem>\t%<V_reg>0, %<V_reg>1, #0"
[(set (attr "type")
(if_then_else (match_test "<Is_float_mode>")
(const_string "neon_fp_compare_s<q>")
[(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
(unspec:<V_cmp_result>
[(match_operand:VDQW 1 "s_register_operand" "w")
- (match_operand:VDQW 2 "zero_operand" "Dz")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VDQW 2 "zero_operand" "Dz")]
UNSPEC_VCLT))]
"TARGET_NEON"
- "vclt.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, #0"
+ "vclt.<V_s_elem>\t%<V_reg>0, %<V_reg>1, #0"
[(set (attr "type")
(if_then_else (match_test "<Is_float_mode>")
(const_string "neon_fp_compare_s<q>")
(define_insn "neon_vcage<mode>"
[(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
(unspec:<V_cmp_result> [(match_operand:VCVTF 1 "s_register_operand" "w")
- (match_operand:VCVTF 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VCVTF 2 "s_register_operand" "w")]
UNSPEC_VCAGE))]
"TARGET_NEON"
"vacge.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
(define_insn "neon_vcagt<mode>"
[(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
(unspec:<V_cmp_result> [(match_operand:VCVTF 1 "s_register_operand" "w")
- (match_operand:VCVTF 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VCVTF 2 "s_register_operand" "w")]
UNSPEC_VCAGT))]
"TARGET_NEON"
"vacgt.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
(define_insn "neon_vtst<mode>"
[(set (match_operand:VDQIW 0 "s_register_operand" "=w")
(unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
- (match_operand:VDQIW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VDQIW 2 "s_register_operand" "w")]
UNSPEC_VTST))]
"TARGET_NEON"
"vtst.<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_tst<q>")]
)
-(define_insn "neon_vabd<mode>"
- [(set (match_operand:VDQW 0 "s_register_operand" "=w")
- (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "w")
- (match_operand:VDQW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VABD))]
+(define_insn "neon_vabd<sup><mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:VDQIW 2 "s_register_operand" "w")]
+ VABD))]
"TARGET_NEON"
- "vabd.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
- [(set (attr "type")
- (if_then_else (match_test "<Is_float_mode>")
- (const_string "neon_fp_abd_s<q>")
- (const_string "neon_abd<q>")))]
+ "vabd.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "type" "neon_abd<q>")]
)
-(define_insn "neon_vabdl<mode>"
+(define_insn "neon_vabdf<mode>"
+ [(set (match_operand:VCVTF 0 "s_register_operand" "=w")
+ (unspec:VCVTF [(match_operand:VCVTF 1 "s_register_operand" "w")
+ (match_operand:VCVTF 2 "s_register_operand" "w")]
+ UNSPEC_VABD_F))]
+ "TARGET_NEON"
+ "vabd.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "type" "neon_fp_abd_s<q>")]
+)
+
+(define_insn "neon_vabdl<sup><mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:VW 1 "s_register_operand" "w")
- (match_operand:VW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VABDL))]
+ (match_operand:VW 2 "s_register_operand" "w")]
+ VABDL))]
"TARGET_NEON"
- "vabdl.%T3%#<V_sz_elem>\t%q0, %P1, %P2"
+ "vabdl.<sup>%#<V_sz_elem>\t%q0, %P1, %P2"
[(set_attr "type" "neon_abd_long")]
)
-(define_insn "neon_vaba<mode>"
+(define_insn "neon_vaba<sup><mode>"
[(set (match_operand:VDQIW 0 "s_register_operand" "=w")
(plus:VDQIW (unspec:VDQIW [(match_operand:VDQIW 2 "s_register_operand" "w")
- (match_operand:VDQIW 3 "s_register_operand" "w")
- (match_operand:SI 4 "immediate_operand" "i")]
- UNSPEC_VABD)
+ (match_operand:VDQIW 3 "s_register_operand" "w")]
+ VABD)
(match_operand:VDQIW 1 "s_register_operand" "0")))]
"TARGET_NEON"
- "vaba.%T4%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %<V_reg>3"
+ "vaba.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %<V_reg>3"
[(set_attr "type" "neon_arith_acc<q>")]
)
-(define_insn "neon_vabal<mode>"
+(define_insn "neon_vabal<sup><mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(plus:<V_widen> (unspec:<V_widen> [(match_operand:VW 2 "s_register_operand" "w")
- (match_operand:VW 3 "s_register_operand" "w")
- (match_operand:SI 4 "immediate_operand" "i")]
- UNSPEC_VABDL)
+ (match_operand:VW 3 "s_register_operand" "w")]
+ VABDL)
(match_operand:<V_widen> 1 "s_register_operand" "0")))]
"TARGET_NEON"
- "vabal.%T4%#<V_sz_elem>\t%q0, %P2, %P3"
+ "vabal.<sup>%#<V_sz_elem>\t%q0, %P2, %P3"
[(set_attr "type" "neon_arith_acc<q>")]
)
-(define_insn "neon_vmax<mode>"
- [(set (match_operand:VDQW 0 "s_register_operand" "=w")
- (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "w")
- (match_operand:VDQW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VMAX))]
+(define_insn "neon_v<maxmin><sup><mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:VDQIW 2 "s_register_operand" "w")]
+ VMAXMIN))]
"TARGET_NEON"
- "vmax.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
- [(set (attr "type")
- (if_then_else (match_test "<Is_float_mode>")
- (const_string "neon_fp_minmax_s<q>")
- (const_string "neon_minmax<q>")))]
+ "v<maxmin>.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "type" "neon_minmax<q>")]
)
-(define_insn "neon_vmin<mode>"
- [(set (match_operand:VDQW 0 "s_register_operand" "=w")
- (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "w")
- (match_operand:VDQW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VMIN))]
+(define_insn "neon_v<maxmin>f<mode>"
+ [(set (match_operand:VCVTF 0 "s_register_operand" "=w")
+ (unspec:VCVTF [(match_operand:VCVTF 1 "s_register_operand" "w")
+ (match_operand:VCVTF 2 "s_register_operand" "w")]
+ VMAXMINF))]
"TARGET_NEON"
- "vmin.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
- [(set (attr "type")
- (if_then_else (match_test "<Is_float_mode>")
- (const_string "neon_fp_minmax_s<q>")
- (const_string "neon_minmax<q>")))]
+ "v<maxmin>.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "type" "neon_fp_minmax_s<q>")]
)
(define_expand "neon_vpadd<mode>"
[(match_operand:VD 0 "s_register_operand" "=w")
(match_operand:VD 1 "s_register_operand" "w")
- (match_operand:VD 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VD 2 "s_register_operand" "w")]
"TARGET_NEON"
{
emit_insn (gen_neon_vpadd_internal<mode> (operands[0], operands[1],
DONE;
})
-(define_insn "neon_vpaddl<mode>"
+(define_insn "neon_vpaddl<sup><mode>"
[(set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
- (unspec:<V_double_width> [(match_operand:VDQIW 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- UNSPEC_VPADDL))]
+ (unspec:<V_double_width> [(match_operand:VDQIW 1 "s_register_operand" "w")]
+ VPADDL))]
"TARGET_NEON"
- "vpaddl.%T2%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1"
+ "vpaddl.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1"
[(set_attr "type" "neon_reduc_add_long")]
)
-(define_insn "neon_vpadal<mode>"
+(define_insn "neon_vpadal<sup><mode>"
[(set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
(unspec:<V_double_width> [(match_operand:<V_double_width> 1 "s_register_operand" "0")
- (match_operand:VDQIW 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VPADAL))]
+ (match_operand:VDQIW 2 "s_register_operand" "w")]
+ VPADAL))]
"TARGET_NEON"
- "vpadal.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2"
+ "vpadal.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2"
[(set_attr "type" "neon_reduc_add_acc")]
)
-(define_insn "neon_vpmax<mode>"
- [(set (match_operand:VD 0 "s_register_operand" "=w")
- (unspec:VD [(match_operand:VD 1 "s_register_operand" "w")
- (match_operand:VD 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VPMAX))]
+(define_insn "neon_vp<maxmin><sup><mode>"
+ [(set (match_operand:VDI 0 "s_register_operand" "=w")
+ (unspec:VDI [(match_operand:VDI 1 "s_register_operand" "w")
+ (match_operand:VDI 2 "s_register_operand" "w")]
+ VPMAXMIN))]
"TARGET_NEON"
- "vpmax.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
- [(set (attr "type")
- (if_then_else (match_test "<Is_float_mode>")
- (const_string "neon_fp_reduc_minmax_s<q>")
- (const_string "neon_reduc_minmax<q>")))]
+ "vp<maxmin>.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "type" "neon_reduc_minmax<q>")]
)
-(define_insn "neon_vpmin<mode>"
- [(set (match_operand:VD 0 "s_register_operand" "=w")
- (unspec:VD [(match_operand:VD 1 "s_register_operand" "w")
- (match_operand:VD 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VPMIN))]
+(define_insn "neon_vp<maxmin>f<mode>"
+ [(set (match_operand:VCVTF 0 "s_register_operand" "=w")
+ (unspec:VCVTF [(match_operand:VCVTF 1 "s_register_operand" "w")
+ (match_operand:VCVTF 2 "s_register_operand" "w")]
+ VPMAXMINF))]
"TARGET_NEON"
- "vpmin.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
- [(set (attr "type")
- (if_then_else (match_test "<Is_float_mode>")
- (const_string "neon_fp_reduc_minmax_s<q>")
- (const_string "neon_reduc_minmax<q>")))]
+ "vp<maxmin>.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "type" "neon_fp_reduc_minmax_s<q>")]
)
(define_insn "neon_vrecps<mode>"
[(set (match_operand:VCVTF 0 "s_register_operand" "=w")
(unspec:VCVTF [(match_operand:VCVTF 1 "s_register_operand" "w")
- (match_operand:VCVTF 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VCVTF 2 "s_register_operand" "w")]
UNSPEC_VRECPS))]
"TARGET_NEON"
"vrecps.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
(define_insn "neon_vrsqrts<mode>"
[(set (match_operand:VCVTF 0 "s_register_operand" "=w")
(unspec:VCVTF [(match_operand:VCVTF 1 "s_register_operand" "w")
- (match_operand:VCVTF 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:VCVTF 2 "s_register_operand" "w")]
UNSPEC_VRSQRTS))]
"TARGET_NEON"
"vrsqrts.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
(define_expand "neon_vabs<mode>"
[(match_operand:VDQW 0 "s_register_operand" "")
- (match_operand:VDQW 1 "s_register_operand" "")
- (match_operand:SI 2 "immediate_operand" "")]
+ (match_operand:VDQW 1 "s_register_operand" "")]
"TARGET_NEON"
{
emit_insn (gen_abs<mode>2 (operands[0], operands[1]));
(define_insn "neon_vqabs<mode>"
[(set (match_operand:VDQIW 0 "s_register_operand" "=w")
- (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")]
UNSPEC_VQABS))]
"TARGET_NEON"
"vqabs.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
(define_expand "neon_vneg<mode>"
[(match_operand:VDQW 0 "s_register_operand" "")
- (match_operand:VDQW 1 "s_register_operand" "")
- (match_operand:SI 2 "immediate_operand" "")]
+ (match_operand:VDQW 1 "s_register_operand" "")]
"TARGET_NEON"
{
emit_insn (gen_neg<mode>2 (operands[0], operands[1]));
(define_insn "neon_vqneg<mode>"
[(set (match_operand:VDQIW 0 "s_register_operand" "=w")
- (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")]
UNSPEC_VQNEG))]
"TARGET_NEON"
"vqneg.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
(define_insn "neon_vcls<mode>"
[(set (match_operand:VDQIW 0 "s_register_operand" "=w")
- (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")]
UNSPEC_VCLS))]
"TARGET_NEON"
"vcls.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
(define_expand "neon_vclz<mode>"
[(match_operand:VDQIW 0 "s_register_operand" "")
- (match_operand:VDQIW 1 "s_register_operand" "")
- (match_operand:SI 2 "immediate_operand" "")]
+ (match_operand:VDQIW 1 "s_register_operand" "")]
"TARGET_NEON"
{
emit_insn (gen_clz<mode>2 (operands[0], operands[1]));
(define_expand "neon_vcnt<mode>"
[(match_operand:VE 0 "s_register_operand" "=w")
- (match_operand:VE 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (match_operand:VE 1 "s_register_operand" "w")]
"TARGET_NEON"
{
emit_insn (gen_popcount<mode>2 (operands[0], operands[1]));
(define_insn "neon_vrecpe<mode>"
[(set (match_operand:V32 0 "s_register_operand" "=w")
- (unspec:V32 [(match_operand:V32 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (unspec:V32 [(match_operand:V32 1 "s_register_operand" "w")]
UNSPEC_VRECPE))]
"TARGET_NEON"
"vrecpe.<V_u_elem>\t%<V_reg>0, %<V_reg>1"
(define_insn "neon_vrsqrte<mode>"
[(set (match_operand:V32 0 "s_register_operand" "=w")
- (unspec:V32 [(match_operand:V32 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (unspec:V32 [(match_operand:V32 1 "s_register_operand" "w")]
UNSPEC_VRSQRTE))]
"TARGET_NEON"
"vrsqrte.<V_u_elem>\t%<V_reg>0, %<V_reg>1"
(define_expand "neon_vmvn<mode>"
[(match_operand:VDQIW 0 "s_register_operand" "")
- (match_operand:VDQIW 1 "s_register_operand" "")
- (match_operand:SI 2 "immediate_operand" "")]
+ (match_operand:VDQIW 1 "s_register_operand" "")]
"TARGET_NEON"
{
emit_insn (gen_one_cmpl<mode>2 (operands[0], operands[1]));
(define_expand "neon_vget_lane<mode>"
[(match_operand:<V_ext> 0 "s_register_operand" "")
(match_operand:VDQW 1 "s_register_operand" "")
- (match_operand:SI 2 "immediate_operand" "")
- (match_operand:SI 3 "immediate_operand" "")]
+ (match_operand:SI 2 "immediate_operand" "")]
"TARGET_NEON"
{
- HOST_WIDE_INT magic = INTVAL (operands[3]);
- rtx insn;
-
neon_lane_bounds (operands[2], 0, GET_MODE_NUNITS (<MODE>mode));
if (BYTES_BIG_ENDIAN)
operands[2] = GEN_INT (elt);
}
- if ((magic & 3) == 3 || GET_MODE_BITSIZE (GET_MODE_INNER (<MODE>mode)) == 32)
- insn = gen_vec_extract<mode> (operands[0], operands[1], operands[2]);
+ if (GET_MODE_BITSIZE (GET_MODE_INNER (<MODE>mode)) == 32)
+ emit_insn (gen_vec_extract<mode> (operands[0], operands[1], operands[2]));
else
+ emit_insn (gen_neon_vget_lane<mode>_sext_internal (operands[0],
+ operands[1],
+ operands[2]));
+ DONE;
+})
+
+(define_expand "neon_vget_laneu<mode>"
+ [(match_operand:<V_ext> 0 "s_register_operand" "")
+ (match_operand:VDQIW 1 "s_register_operand" "")
+ (match_operand:SI 2 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ neon_lane_bounds (operands[2], 0, GET_MODE_NUNITS (<MODE>mode));
+
+ if (BYTES_BIG_ENDIAN)
{
- if ((magic & 1) != 0)
- insn = gen_neon_vget_lane<mode>_sext_internal (operands[0], operands[1],
- operands[2]);
- else
- insn = gen_neon_vget_lane<mode>_zext_internal (operands[0], operands[1],
- operands[2]);
+ /* The intrinsics are defined in terms of a model where the
+ element ordering in memory is vldm order, whereas the generic
+ RTL is defined in terms of a model where the element ordering
+ in memory is array order. Convert the lane number to conform
+ to this model. */
+ unsigned int elt = INTVAL (operands[2]);
+ unsigned int reg_nelts
+ = 64 / GET_MODE_BITSIZE (GET_MODE_INNER (<MODE>mode));
+ elt ^= reg_nelts - 1;
+ operands[2] = GEN_INT (elt);
}
- emit_insn (insn);
+
+ if (GET_MODE_BITSIZE (GET_MODE_INNER (<MODE>mode)) == 32)
+ emit_insn (gen_vec_extract<mode> (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_neon_vget_lane<mode>_zext_internal (operands[0],
+ operands[1],
+ operands[2]));
DONE;
})
-; Operand 3 (info word) is ignored because it does nothing useful with 64-bit
-; elements.
-
(define_expand "neon_vget_lanedi"
[(match_operand:DI 0 "s_register_operand" "=r")
(match_operand:DI 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:SI 2 "immediate_operand" "")]
"TARGET_NEON"
{
neon_lane_bounds (operands[2], 0, 1);
(define_expand "neon_vget_lanev2di"
[(match_operand:DI 0 "s_register_operand" "")
(match_operand:V2DI 1 "s_register_operand" "")
- (match_operand:SI 2 "immediate_operand" "")
- (match_operand:SI 3 "immediate_operand" "")]
+ (match_operand:SI 2 "immediate_operand" "")]
"TARGET_NEON"
{
switch (INTVAL (operands[2]))
[(set_attr "type" "neon_fp_to_int_<V_elem_ch><q>")]
)
;; NOTE(review): hunk renames neon_vcvt<mode> to neon_vcvt<sup><mode> and drops
;; the trailing "magic word" SI operand; the signedness character (%T2) now
;; comes from the <sup> attribute of the VCVT_US int iterator instead.
-(define_insn "neon_vcvt<mode>"
+(define_insn "neon_vcvt<sup><mode>"
[(set (match_operand:<V_CVTTO> 0 "s_register_operand" "=w")
- (unspec:<V_CVTTO> [(match_operand:VCVTF 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- UNSPEC_VCVT))]
+ (unspec:<V_CVTTO> [(match_operand:VCVTF 1 "s_register_operand" "w")]
+ VCVT_US))]
"TARGET_NEON"
- "vcvt.%T2%#32.f32\t%<V_reg>0, %<V_reg>1"
+ "vcvt.<sup>%#32.f32\t%<V_reg>0, %<V_reg>1"
[(set_attr "type" "neon_fp_to_int_<V_elem_ch><q>")]
)
;; NOTE(review): same transformation for the int-to-float direction (VCVTI).
-(define_insn "neon_vcvt<mode>"
+(define_insn "neon_vcvt<sup><mode>"
[(set (match_operand:<V_CVTTO> 0 "s_register_operand" "=w")
- (unspec:<V_CVTTO> [(match_operand:VCVTI 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- UNSPEC_VCVT))]
+ (unspec:<V_CVTTO> [(match_operand:VCVTI 1 "s_register_operand" "w")]
+ VCVT_US))]
"TARGET_NEON"
- "vcvt.f32.%T2%#32\t%<V_reg>0, %<V_reg>1"
+ "vcvt.f32.<sup>%#32\t%<V_reg>0, %<V_reg>1"
[(set_attr "type" "neon_int_to_fp_<V_elem_ch><q>")]
)
[(set_attr "type" "neon_fp_cvt_narrow_s_q")]
)
;; NOTE(review): fixed-point conversion with fraction-bits immediate. The old
;; operand 3 "magic word" is removed; operand 2 (the fraction count, bounded
;; 1..32 by neon_const_bounds) is kept, and signedness comes from VCVT_US_N.
-(define_insn "neon_vcvt_n<mode>"
+(define_insn "neon_vcvt<sup>_n<mode>"
[(set (match_operand:<V_CVTTO> 0 "s_register_operand" "=w")
(unspec:<V_CVTTO> [(match_operand:VCVTF 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VCVT_N))]
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VCVT_US_N))]
"TARGET_NEON"
{
neon_const_bounds (operands[2], 1, 33);
- return "vcvt.%T3%#32.f32\t%<V_reg>0, %<V_reg>1, %2";
+ return "vcvt.<sup>%#32.f32\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_fp_to_int_<V_elem_ch><q>")]
)
;; NOTE(review): same change for the int-to-float fixed-point direction.
-(define_insn "neon_vcvt_n<mode>"
+(define_insn "neon_vcvt<sup>_n<mode>"
[(set (match_operand:<V_CVTTO> 0 "s_register_operand" "=w")
(unspec:<V_CVTTO> [(match_operand:VCVTI 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VCVT_N))]
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VCVT_US_N))]
"TARGET_NEON"
{
neon_const_bounds (operands[2], 1, 33);
- return "vcvt.f32.%T3%#32\t%<V_reg>0, %<V_reg>1, %2";
+ return "vcvt.f32.<sup>%#32\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_int_to_fp_<V_elem_ch><q>")]
)
;; NOTE(review): narrowing/widening moves. vmovn and vqmovun merely drop the
;; unused magic-word operand (their mnemonics never depended on it); vqmovn
;; and vmovl additionally split into signed/unsigned variants via the VQMOVN
;; and VMOVL int iterators, replacing %T2 with <sup> in the mnemonic.
(define_insn "neon_vmovn<mode>"
[(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
- (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")]
UNSPEC_VMOVN))]
"TARGET_NEON"
"vmovn.<V_if_elem>\t%P0, %q1"
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
-(define_insn "neon_vqmovn<mode>"
+(define_insn "neon_vqmovn<sup><mode>"
[(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
- (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- UNSPEC_VQMOVN))]
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")]
+ VQMOVN))]
"TARGET_NEON"
- "vqmovn.%T2%#<V_sz_elem>\t%P0, %q1"
+ "vqmovn.<sup>%#<V_sz_elem>\t%P0, %q1"
[(set_attr "type" "neon_sat_shift_imm_narrow_q")]
)
(define_insn "neon_vqmovun<mode>"
[(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
- (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")]
UNSPEC_VQMOVUN))]
"TARGET_NEON"
"vqmovun.<V_s_elem>\t%P0, %q1"
[(set_attr "type" "neon_sat_shift_imm_narrow_q")]
)
-(define_insn "neon_vmovl<mode>"
+(define_insn "neon_vmovl<sup><mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
- (unspec:<V_widen> [(match_operand:VW 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- UNSPEC_VMOVL))]
+ (unspec:<V_widen> [(match_operand:VW 1 "s_register_operand" "w")]
+ VMOVL))]
"TARGET_NEON"
- "vmovl.%T2%#<V_sz_elem>\t%q0, %P1"
+ "vmovl.<sup>%#<V_sz_elem>\t%q0, %P1"
[(set_attr "type" "neon_shift_imm_long")]
)
(unspec:VMD [(match_operand:VMD 1 "s_register_operand" "w")
(match_operand:VMD 2 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 3 "immediate_operand" "i")
- (match_operand:SI 4 "immediate_operand" "i")]
+ (match_operand:SI 3 "immediate_operand" "i")]
UNSPEC_VMUL_LANE))]
"TARGET_NEON"
{
(unspec:VMQ [(match_operand:VMQ 1 "s_register_operand" "w")
(match_operand:<V_HALF> 2 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 3 "immediate_operand" "i")
- (match_operand:SI 4 "immediate_operand" "i")]
+ (match_operand:SI 3 "immediate_operand" "i")]
UNSPEC_VMUL_LANE))]
"TARGET_NEON"
{
(const_string "neon_mul_<V_elem_ch>_scalar<q>")))]
)
;; NOTE(review): widening multiply by lane. Old operand 4 carried the
;; signedness magic word (%T4); it is replaced by the VMULL_LANE iterator and
;; its <sup> attribute. Operand 3 (the lane index) is unchanged.
-(define_insn "neon_vmull_lane<mode>"
+(define_insn "neon_vmull<sup>_lane<mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:VMDI 1 "s_register_operand" "w")
(match_operand:VMDI 2 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 3 "immediate_operand" "i")
- (match_operand:SI 4 "immediate_operand" "i")]
- UNSPEC_VMULL_LANE))]
+ (match_operand:SI 3 "immediate_operand" "i")]
+ VMULL_LANE))]
"TARGET_NEON"
{
neon_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
- return "vmull.%T4%#<V_sz_elem>\t%q0, %P1, %P2[%c3]";
+ return "vmull.<sup>%#<V_sz_elem>\t%q0, %P1, %P2[%c3]";
}
[(set_attr "type" "neon_mul_<V_elem_ch>_scalar_long")]
)
(unspec:<V_widen> [(match_operand:VMDI 1 "s_register_operand" "w")
(match_operand:VMDI 2 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 3 "immediate_operand" "i")
- (match_operand:SI 4 "immediate_operand" "i")]
+ (match_operand:SI 3 "immediate_operand" "i")]
UNSPEC_VQDMULL_LANE))]
"TARGET_NEON"
{
[(set_attr "type" "neon_sat_mul_<V_elem_ch>_scalar_long")]
)
;; NOTE(review): saturating doubling multiply-high by lane (Q-reg and D-reg
;; variants). The old vq%O4dmulh.%T4 mnemonic decoded operand 4's magic word
;; for both rounding (%O) and signedness (%T); the new form uses the <r>
;; attribute of VQDMULH_LANE for rounding and <V_s_elem> (always signed,
;; matching these intrinsics) for the element type.
-(define_insn "neon_vqdmulh_lane<mode>"
+(define_insn "neon_vq<r>dmulh_lane<mode>"
[(set (match_operand:VMQI 0 "s_register_operand" "=w")
(unspec:VMQI [(match_operand:VMQI 1 "s_register_operand" "w")
(match_operand:<V_HALF> 2 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 3 "immediate_operand" "i")
- (match_operand:SI 4 "immediate_operand" "i")]
- UNSPEC_VQDMULH_LANE))]
+ (match_operand:SI 3 "immediate_operand" "i")]
+ VQDMULH_LANE))]
"TARGET_NEON"
{
neon_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
- return "vq%O4dmulh.%T4%#<V_sz_elem>\t%q0, %q1, %P2[%c3]";
+ return "vq<r>dmulh.<V_s_elem>\t%q0, %q1, %P2[%c3]";
}
[(set_attr "type" "neon_sat_mul_<V_elem_ch>_scalar_q")]
)
-(define_insn "neon_vqdmulh_lane<mode>"
+(define_insn "neon_vq<r>dmulh_lane<mode>"
[(set (match_operand:VMDI 0 "s_register_operand" "=w")
(unspec:VMDI [(match_operand:VMDI 1 "s_register_operand" "w")
(match_operand:VMDI 2 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 3 "immediate_operand" "i")
- (match_operand:SI 4 "immediate_operand" "i")]
- UNSPEC_VQDMULH_LANE))]
+ (match_operand:SI 3 "immediate_operand" "i")]
+ VQDMULH_LANE))]
"TARGET_NEON"
{
neon_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
- return "vq%O4dmulh.%T4%#<V_sz_elem>\t%P0, %P1, %P2[%c3]";
+ return "vq<r>dmulh.<V_s_elem>\t%P0, %P1, %P2[%c3]";
}
[(set_attr "type" "neon_sat_mul_<V_elem_ch>_scalar_q")]
)
(match_operand:VMD 2 "s_register_operand" "w")
(match_operand:VMD 3 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 4 "immediate_operand" "i")
- (match_operand:SI 5 "immediate_operand" "i")]
+ (match_operand:SI 4 "immediate_operand" "i")]
UNSPEC_VMLA_LANE))]
"TARGET_NEON"
{
(match_operand:VMQ 2 "s_register_operand" "w")
(match_operand:<V_HALF> 3 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 4 "immediate_operand" "i")
- (match_operand:SI 5 "immediate_operand" "i")]
+ (match_operand:SI 4 "immediate_operand" "i")]
UNSPEC_VMLA_LANE))]
"TARGET_NEON"
{
(const_string "neon_mla_<V_elem_ch>_scalar<q>")))]
)
;; NOTE(review): widening multiply-accumulate by lane; operand 5 magic word
;; replaced by the VMLAL_LANE iterator's <sup> attribute (%T5 -> <sup>).
-(define_insn "neon_vmlal_lane<mode>"
+(define_insn "neon_vmlal<sup>_lane<mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
(match_operand:VMDI 2 "s_register_operand" "w")
(match_operand:VMDI 3 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 4 "immediate_operand" "i")
- (match_operand:SI 5 "immediate_operand" "i")]
- UNSPEC_VMLAL_LANE))]
+ (match_operand:SI 4 "immediate_operand" "i")]
+ VMLAL_LANE))]
"TARGET_NEON"
{
neon_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
- return "vmlal.%T5%#<V_sz_elem>\t%q0, %P2, %P3[%c4]";
+ return "vmlal.<sup>%#<V_sz_elem>\t%q0, %P2, %P3[%c4]";
}
[(set_attr "type" "neon_mla_<V_elem_ch>_scalar_long")]
)
(match_operand:VMDI 2 "s_register_operand" "w")
(match_operand:VMDI 3 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 4 "immediate_operand" "i")
- (match_operand:SI 5 "immediate_operand" "i")]
+ (match_operand:SI 4 "immediate_operand" "i")]
UNSPEC_VQDMLAL_LANE))]
"TARGET_NEON"
{
(match_operand:VMD 2 "s_register_operand" "w")
(match_operand:VMD 3 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 4 "immediate_operand" "i")
- (match_operand:SI 5 "immediate_operand" "i")]
+ (match_operand:SI 4 "immediate_operand" "i")]
UNSPEC_VMLS_LANE))]
"TARGET_NEON"
{
(match_operand:VMQ 2 "s_register_operand" "w")
(match_operand:<V_HALF> 3 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 4 "immediate_operand" "i")
- (match_operand:SI 5 "immediate_operand" "i")]
+ (match_operand:SI 4 "immediate_operand" "i")]
UNSPEC_VMLS_LANE))]
"TARGET_NEON"
{
(const_string "neon_mla_<V_elem_ch>_scalar<q>")))]
)
;; NOTE(review): widening multiply-subtract by lane; mirrors the vmlal_lane
;; hunk — operand 5 magic word replaced by VMLSL_LANE's <sup> attribute.
-(define_insn "neon_vmlsl_lane<mode>"
+(define_insn "neon_vmlsl<sup>_lane<mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
(match_operand:VMDI 2 "s_register_operand" "w")
(match_operand:VMDI 3 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 4 "immediate_operand" "i")
- (match_operand:SI 5 "immediate_operand" "i")]
- UNSPEC_VMLSL_LANE))]
+ (match_operand:SI 4 "immediate_operand" "i")]
+ VMLSL_LANE))]
"TARGET_NEON"
{
neon_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
- return "vmlsl.%T5%#<V_sz_elem>\t%q0, %P2, %P3[%c4]";
+ return "vmlsl.<sup>%#<V_sz_elem>\t%q0, %P2, %P3[%c4]";
}
[(set_attr "type" "neon_mla_<V_elem_ch>_scalar_long")]
)
(match_operand:VMDI 2 "s_register_operand" "w")
(match_operand:VMDI 3 "s_register_operand"
"<scalar_mul_constraint>")
- (match_operand:SI 4 "immediate_operand" "i")
- (match_operand:SI 5 "immediate_operand" "i")]
+ (match_operand:SI 4 "immediate_operand" "i")]
UNSPEC_VQDMLSL_LANE))]
"TARGET_NEON"
{
(define_expand "neon_vmul_n<mode>"
[(match_operand:VMD 0 "s_register_operand" "")
(match_operand:VMD 1 "s_register_operand" "")
- (match_operand:<V_elem> 2 "s_register_operand" "")
- (match_operand:SI 3 "immediate_operand" "")]
+ (match_operand:<V_elem> 2 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_vset_lane<mode> (tmp, operands[2], tmp, const0_rtx));
emit_insn (gen_neon_vmul_lane<mode> (operands[0], operands[1], tmp,
- const0_rtx, const0_rtx));
+ const0_rtx));
DONE;
})
(define_expand "neon_vmul_n<mode>"
[(match_operand:VMQ 0 "s_register_operand" "")
(match_operand:VMQ 1 "s_register_operand" "")
- (match_operand:<V_elem> 2 "s_register_operand" "")
- (match_operand:SI 3 "immediate_operand" "")]
+ (match_operand:<V_elem> 2 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<V_HALF>mode);
emit_insn (gen_neon_vset_lane<V_half> (tmp, operands[2], tmp, const0_rtx));
emit_insn (gen_neon_vmul_lane<mode> (operands[0], operands[1], tmp,
- const0_rtx, const0_rtx));
+ const0_rtx));
DONE;
})
;; NOTE(review): since the signedness constant is gone, the single vmull_n
;; expander splits into explicit signed (vmulls_n) and unsigned (vmullu_n)
;; expanders, each forwarding to the matching vmull<sup>_lane pattern with a
;; fixed lane 0 and no trailing magic-word argument.
-(define_expand "neon_vmull_n<mode>"
+(define_expand "neon_vmulls_n<mode>"
[(match_operand:<V_widen> 0 "s_register_operand" "")
(match_operand:VMDI 1 "s_register_operand" "")
- (match_operand:<V_elem> 2 "s_register_operand" "")
- (match_operand:SI 3 "immediate_operand" "")]
+ (match_operand:<V_elem> 2 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_vset_lane<mode> (tmp, operands[2], tmp, const0_rtx));
- emit_insn (gen_neon_vmull_lane<mode> (operands[0], operands[1], tmp,
- const0_rtx, operands[3]));
+ emit_insn (gen_neon_vmulls_lane<mode> (operands[0], operands[1], tmp,
+ const0_rtx));
+ DONE;
+})
+
+(define_expand "neon_vmullu_n<mode>"
+ [(match_operand:<V_widen> 0 "s_register_operand" "")
+ (match_operand:VMDI 1 "s_register_operand" "")
+ (match_operand:<V_elem> 2 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[2], tmp, const0_rtx));
+ emit_insn (gen_neon_vmullu_lane<mode> (operands[0], operands[1], tmp,
+ const0_rtx));
DONE;
})
(define_expand "neon_vqdmull_n<mode>"
[(match_operand:<V_widen> 0 "s_register_operand" "")
(match_operand:VMDI 1 "s_register_operand" "")
- (match_operand:<V_elem> 2 "s_register_operand" "")
- (match_operand:SI 3 "immediate_operand" "")]
+ (match_operand:<V_elem> 2 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_vset_lane<mode> (tmp, operands[2], tmp, const0_rtx));
emit_insn (gen_neon_vqdmull_lane<mode> (operands[0], operands[1], tmp,
- const0_rtx, const0_rtx));
+ const0_rtx));
DONE;
})
;; NOTE(review): the rounding flag formerly passed as operands[3] is gone, so
;; each vqdmulh_n expander (D-reg VMDI and Q-reg VMQI variants) now handles
;; only the non-rounding case, and a new vqrdmulh_n expander is added per
;; variant that forwards to the vqrdmulh_lane pattern instead.
(define_expand "neon_vqdmulh_n<mode>"
[(match_operand:VMDI 0 "s_register_operand" "")
(match_operand:VMDI 1 "s_register_operand" "")
- (match_operand:<V_elem> 2 "s_register_operand" "")
- (match_operand:SI 3 "immediate_operand" "")]
+ (match_operand:<V_elem> 2 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_vset_lane<mode> (tmp, operands[2], tmp, const0_rtx));
emit_insn (gen_neon_vqdmulh_lane<mode> (operands[0], operands[1], tmp,
- const0_rtx, operands[3]));
+ const0_rtx));
+ DONE;
+})
+
+(define_expand "neon_vqrdmulh_n<mode>"
+ [(match_operand:VMDI 0 "s_register_operand" "")
+ (match_operand:VMDI 1 "s_register_operand" "")
+ (match_operand:<V_elem> 2 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[2], tmp, const0_rtx));
+ emit_insn (gen_neon_vqrdmulh_lane<mode> (operands[0], operands[1], tmp,
+ const0_rtx));
DONE;
})
(define_expand "neon_vqdmulh_n<mode>"
[(match_operand:VMQI 0 "s_register_operand" "")
(match_operand:VMQI 1 "s_register_operand" "")
- (match_operand:<V_elem> 2 "s_register_operand" "")
- (match_operand:SI 3 "immediate_operand" "")]
+ (match_operand:<V_elem> 2 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<V_HALF>mode);
emit_insn (gen_neon_vset_lane<V_half> (tmp, operands[2], tmp, const0_rtx));
emit_insn (gen_neon_vqdmulh_lane<mode> (operands[0], operands[1], tmp,
- const0_rtx, operands[3]));
+ const0_rtx));
+ DONE;
+})
+
+(define_expand "neon_vqrdmulh_n<mode>"
+ [(match_operand:VMQI 0 "s_register_operand" "")
+ (match_operand:VMQI 1 "s_register_operand" "")
+ (match_operand:<V_elem> 2 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<V_HALF>mode);
+ emit_insn (gen_neon_vset_lane<V_half> (tmp, operands[2], tmp, const0_rtx));
+ emit_insn (gen_neon_vqrdmulh_lane<mode> (operands[0], operands[1], tmp,
+ const0_rtx));
DONE;
})
[(match_operand:VMD 0 "s_register_operand" "")
(match_operand:VMD 1 "s_register_operand" "")
(match_operand:VMD 2 "s_register_operand" "")
- (match_operand:<V_elem> 3 "s_register_operand" "")
- (match_operand:SI 4 "immediate_operand" "")]
+ (match_operand:<V_elem> 3 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
emit_insn (gen_neon_vmla_lane<mode> (operands[0], operands[1], operands[2],
- tmp, const0_rtx, operands[4]));
+ tmp, const0_rtx));
DONE;
})
[(match_operand:VMQ 0 "s_register_operand" "")
(match_operand:VMQ 1 "s_register_operand" "")
(match_operand:VMQ 2 "s_register_operand" "")
- (match_operand:<V_elem> 3 "s_register_operand" "")
- (match_operand:SI 4 "immediate_operand" "")]
+ (match_operand:<V_elem> 3 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<V_HALF>mode);
emit_insn (gen_neon_vset_lane<V_half> (tmp, operands[3], tmp, const0_rtx));
emit_insn (gen_neon_vmla_lane<mode> (operands[0], operands[1], operands[2],
- tmp, const0_rtx, operands[4]));
+ tmp, const0_rtx));
DONE;
})
;; NOTE(review): vmlal_n splits into signed vmlals_n and unsigned vmlalu_n
;; expanders, each calling the corresponding vmlal<sup>_lane pattern with a
;; fixed lane 0; the old magic-word argument (operands[4]) is dropped.
-(define_expand "neon_vmlal_n<mode>"
+(define_expand "neon_vmlals_n<mode>"
[(match_operand:<V_widen> 0 "s_register_operand" "")
(match_operand:<V_widen> 1 "s_register_operand" "")
(match_operand:VMDI 2 "s_register_operand" "")
- (match_operand:<V_elem> 3 "s_register_operand" "")
- (match_operand:SI 4 "immediate_operand" "")]
+ (match_operand:<V_elem> 3 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
- emit_insn (gen_neon_vmlal_lane<mode> (operands[0], operands[1], operands[2],
- tmp, const0_rtx, operands[4]));
+ emit_insn (gen_neon_vmlals_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx));
+ DONE;
+})
+
+(define_expand "neon_vmlalu_n<mode>"
+ [(match_operand:<V_widen> 0 "s_register_operand" "")
+ (match_operand:<V_widen> 1 "s_register_operand" "")
+ (match_operand:VMDI 2 "s_register_operand" "")
+ (match_operand:<V_elem> 3 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
+ emit_insn (gen_neon_vmlalu_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx));
DONE;
})
[(match_operand:<V_widen> 0 "s_register_operand" "")
(match_operand:<V_widen> 1 "s_register_operand" "")
(match_operand:VMDI 2 "s_register_operand" "")
- (match_operand:<V_elem> 3 "s_register_operand" "")
- (match_operand:SI 4 "immediate_operand" "")]
+ (match_operand:<V_elem> 3 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
emit_insn (gen_neon_vqdmlal_lane<mode> (operands[0], operands[1], operands[2],
- tmp, const0_rtx, operands[4]));
+ tmp, const0_rtx));
DONE;
})
[(match_operand:VMD 0 "s_register_operand" "")
(match_operand:VMD 1 "s_register_operand" "")
(match_operand:VMD 2 "s_register_operand" "")
- (match_operand:<V_elem> 3 "s_register_operand" "")
- (match_operand:SI 4 "immediate_operand" "")]
+ (match_operand:<V_elem> 3 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
emit_insn (gen_neon_vmls_lane<mode> (operands[0], operands[1], operands[2],
- tmp, const0_rtx, operands[4]));
+ tmp, const0_rtx));
DONE;
})
[(match_operand:VMQ 0 "s_register_operand" "")
(match_operand:VMQ 1 "s_register_operand" "")
(match_operand:VMQ 2 "s_register_operand" "")
- (match_operand:<V_elem> 3 "s_register_operand" "")
- (match_operand:SI 4 "immediate_operand" "")]
+ (match_operand:<V_elem> 3 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<V_HALF>mode);
emit_insn (gen_neon_vset_lane<V_half> (tmp, operands[3], tmp, const0_rtx));
emit_insn (gen_neon_vmls_lane<mode> (operands[0], operands[1], operands[2],
- tmp, const0_rtx, operands[4]));
+ tmp, const0_rtx));
+ DONE;
+})
+
;; NOTE(review): mirror of the vmlal_n split — new signed vmlsls_n expander is
;; added, and the old vmlsl_n is renamed to unsigned vmlslu_n; both forward to
;; the matching vmlsl<sup>_lane pattern with lane 0 and no magic word.
+(define_expand "neon_vmlsls_n<mode>"
+ [(match_operand:<V_widen> 0 "s_register_operand" "")
+ (match_operand:<V_widen> 1 "s_register_operand" "")
+ (match_operand:VMDI 2 "s_register_operand" "")
+ (match_operand:<V_elem> 3 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
+ emit_insn (gen_neon_vmlsls_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx));
DONE;
})
-(define_expand "neon_vmlsl_n<mode>"
+(define_expand "neon_vmlslu_n<mode>"
[(match_operand:<V_widen> 0 "s_register_operand" "")
(match_operand:<V_widen> 1 "s_register_operand" "")
(match_operand:VMDI 2 "s_register_operand" "")
- (match_operand:<V_elem> 3 "s_register_operand" "")
- (match_operand:SI 4 "immediate_operand" "")]
+ (match_operand:<V_elem> 3 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
- emit_insn (gen_neon_vmlsl_lane<mode> (operands[0], operands[1], operands[2],
- tmp, const0_rtx, operands[4]));
+ emit_insn (gen_neon_vmlslu_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx));
DONE;
})
[(match_operand:<V_widen> 0 "s_register_operand" "")
(match_operand:<V_widen> 1 "s_register_operand" "")
(match_operand:VMDI 2 "s_register_operand" "")
- (match_operand:<V_elem> 3 "s_register_operand" "")
- (match_operand:SI 4 "immediate_operand" "")]
+ (match_operand:<V_elem> 3 "s_register_operand" "")]
"TARGET_NEON"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
emit_insn (gen_neon_vqdmlsl_lane<mode> (operands[0], operands[1], operands[2],
- tmp, const0_rtx, operands[4]));
+ tmp, const0_rtx));
DONE;
})
(define_insn "neon_vrev64<mode>"
[(set (match_operand:VDQ 0 "s_register_operand" "=w")
- (unspec:VDQ [(match_operand:VDQ 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (unspec:VDQ [(match_operand:VDQ 1 "s_register_operand" "w")]
UNSPEC_VREV64))]
"TARGET_NEON"
"vrev64.<V_sz_elem>\t%<V_reg>0, %<V_reg>1"
(define_insn "neon_vrev32<mode>"
[(set (match_operand:VX 0 "s_register_operand" "=w")
- (unspec:VX [(match_operand:VX 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (unspec:VX [(match_operand:VX 1 "s_register_operand" "w")]
UNSPEC_VREV32))]
"TARGET_NEON"
"vrev32.<V_sz_elem>\t%<V_reg>0, %<V_reg>1"
(define_insn "neon_vrev16<mode>"
[(set (match_operand:VE 0 "s_register_operand" "=w")
- (unspec:VE [(match_operand:VE 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (unspec:VE [(match_operand:VE 1 "s_register_operand" "w")]
UNSPEC_VREV16))]
"TARGET_NEON"
"vrev16.<V_sz_elem>\t%<V_reg>0, %<V_reg>1"
operands[1] = gen_lowpart (<MODE>mode, operands[1]);
})
;; NOTE(review): register-controlled shifts. The v%O3shl.%T3 template decoded
;; rounding and signedness from a magic word; the VSHL / VQSHL int iterators
;; now expand to four patterns each, with <shift_op> selecting shl vs rshl
;; (or qshl vs qrshl) and <sup> selecting s vs u.
-(define_insn "neon_vshl<mode>"
+;; vshl, vrshl
+(define_insn "neon_v<shift_op><sup><mode>"
[(set (match_operand:VDQIX 0 "s_register_operand" "=w")
(unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
- (match_operand:VDQIX 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VSHL))]
+ (match_operand:VDQIX 2 "s_register_operand" "w")]
+ VSHL))]
"TARGET_NEON"
- "v%O3shl.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ "v<shift_op>.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_shift_imm<q>")]
)
-(define_insn "neon_vqshl<mode>"
+;; vqshl, vqrshl
+(define_insn "neon_v<shift_op><sup><mode>"
[(set (match_operand:VDQIX 0 "s_register_operand" "=w")
(unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
- (match_operand:VDQIX 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VQSHL))]
+ (match_operand:VDQIX 2 "s_register_operand" "w")]
+ VQSHL))]
"TARGET_NEON"
- "vq%O3shl.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ "v<shift_op>.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
[(set_attr "type" "neon_sat_shift_imm<q>")]
)
;; NOTE(review): immediate shift-right family. Each pattern drops its trailing
;; magic-word operand; the new iterators (VSHR_N, VSHRN_N, VQSHRN_N,
;; VQSHRUN_N) enumerate the rounding/signedness combinations, with <shift_op>
;; carrying the full mnemonic stem and <sup> the s/u suffix where it varies.
;; vqshrun keeps <V_s_elem> because its source is always signed.
-(define_insn "neon_vshr_n<mode>"
+;; vshr_n, vrshr_n
+(define_insn "neon_v<shift_op><sup>_n<mode>"
[(set (match_operand:VDQIX 0 "s_register_operand" "=w")
(unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VSHR_N))]
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VSHR_N))]
"TARGET_NEON"
{
neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) + 1);
- return "v%O3shr.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2";
+ return "v<shift_op>.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_shift_imm<q>")]
)
-(define_insn "neon_vshrn_n<mode>"
+;; vshrn_n, vrshrn_n
+(define_insn "neon_v<shift_op>_n<mode>"
[(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
(unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VSHRN_N))]
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VSHRN_N))]
"TARGET_NEON"
{
neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
- return "v%O3shrn.<V_if_elem>\t%P0, %q1, %2";
+ return "v<shift_op>.<V_if_elem>\t%P0, %q1, %2";
}
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
-(define_insn "neon_vqshrn_n<mode>"
+;; vqshrn_n, vqrshrn_n
+(define_insn "neon_v<shift_op><sup>_n<mode>"
[(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
(unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VQSHRN_N))]
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VQSHRN_N))]
"TARGET_NEON"
{
neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
- return "vq%O3shrn.%T3%#<V_sz_elem>\t%P0, %q1, %2";
+ return "v<shift_op>.<sup>%#<V_sz_elem>\t%P0, %q1, %2";
}
[(set_attr "type" "neon_sat_shift_imm_narrow_q")]
)
-(define_insn "neon_vqshrun_n<mode>"
+;; vqshrun_n, vqrshrun_n
+(define_insn "neon_v<shift_op>_n<mode>"
[(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
(unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VQSHRUN_N))]
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VQSHRUN_N))]
"TARGET_NEON"
{
neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
- return "vq%O3shrun.%T3%#<V_sz_elem>\t%P0, %q1, %2";
+ return "v<shift_op>.<V_s_elem>\t%P0, %q1, %2";
}
[(set_attr "type" "neon_sat_shift_imm_narrow_q")]
)
(define_insn "neon_vshl_n<mode>"
[(set (match_operand:VDQIX 0 "s_register_operand" "=w")
(unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:SI 2 "immediate_operand" "i")]
UNSPEC_VSHL_N))]
"TARGET_NEON"
{
[(set_attr "type" "neon_shift_imm<q>")]
)
;; NOTE(review): immediate shift-left and shift-accumulate family. vqshl_n
;; splits by signedness via VQSHL_N/<sup>; vqshlu_n is inherently
;; signed-input so its %T3 becomes the fixed <V_s_elem>; vshll_n and vsra_n
;; follow the same iterator substitution (<sup>, and <shift_op> for
;; vsra/vrsra). Bounds checks on the shift amounts are unchanged.
-(define_insn "neon_vqshl_n<mode>"
+(define_insn "neon_vqshl_<sup>_n<mode>"
[(set (match_operand:VDQIX 0 "s_register_operand" "=w")
(unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VQSHL_N))]
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VQSHL_N))]
"TARGET_NEON"
{
neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
- return "vqshl.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2";
+ return "vqshl.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_sat_shift_imm<q>")]
)
(define_insn "neon_vqshlu_n<mode>"
[(set (match_operand:VDQIX 0 "s_register_operand" "=w")
(unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:SI 2 "immediate_operand" "i")]
UNSPEC_VQSHLU_N))]
"TARGET_NEON"
{
neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
- return "vqshlu.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2";
+ return "vqshlu.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_sat_shift_imm<q>")]
)
-(define_insn "neon_vshll_n<mode>"
+(define_insn "neon_vshll<sup>_n<mode>"
[(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
(unspec:<V_widen> [(match_operand:VW 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")
- (match_operand:SI 3 "immediate_operand" "i")]
- UNSPEC_VSHLL_N))]
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VSHLL_N))]
"TARGET_NEON"
{
/* The boundaries are: 0 < imm <= size. */
neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode) + 1);
- return "vshll.%T3%#<V_sz_elem>\t%q0, %P1, %2";
+ return "vshll.<sup>%#<V_sz_elem>\t%q0, %P1, %2";
}
[(set_attr "type" "neon_shift_imm_long")]
)
-(define_insn "neon_vsra_n<mode>"
+;; vsra_n, vrsra_n
+(define_insn "neon_v<shift_op><sup>_n<mode>"
[(set (match_operand:VDQIX 0 "s_register_operand" "=w")
(unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "0")
(match_operand:VDQIX 2 "s_register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")
- (match_operand:SI 4 "immediate_operand" "i")]
- UNSPEC_VSRA_N))]
+ (match_operand:SI 3 "immediate_operand" "i")]
+ VSRA_N))]
"TARGET_NEON"
{
neon_const_bounds (operands[3], 1, neon_element_bits (<MODE>mode) + 1);
- return "v%O4sra.%T4%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3";
+ return "v<shift_op>.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3";
}
[(set_attr "type" "neon_shift_acc<q>")]
)