poly16x8_t val[4];
} poly16x8x4_t;
-/* vget_lane internal macros. */
-
-#define __aarch64_vget_lane_any(__size, __cast_ret, __cast_a, __a, __b) \
- (__cast_ret \
- __builtin_aarch64_be_checked_get_lane##__size (__cast_a __a, __b))
-
-#define __aarch64_vget_lane_f32(__a, __b) \
- __aarch64_vget_lane_any (v2sf, , , __a, __b)
-#define __aarch64_vget_lane_f64(__a, __b) __extension__ \
- ({ \
- __AARCH64_LANE_CHECK (__a, __b); \
- __a[0]; \
- })
-
-#define __aarch64_vget_lane_p8(__a, __b) \
- __aarch64_vget_lane_any (v8qi, (poly8_t), (int8x8_t), __a, __b)
-#define __aarch64_vget_lane_p16(__a, __b) \
- __aarch64_vget_lane_any (v4hi, (poly16_t), (int16x4_t), __a, __b)
-
-#define __aarch64_vget_lane_s8(__a, __b) \
- __aarch64_vget_lane_any (v8qi, , ,__a, __b)
-#define __aarch64_vget_lane_s16(__a, __b) \
- __aarch64_vget_lane_any (v4hi, , ,__a, __b)
-#define __aarch64_vget_lane_s32(__a, __b) \
- __aarch64_vget_lane_any (v2si, , ,__a, __b)
-#define __aarch64_vget_lane_s64(__a, __b) __extension__ \
- ({ \
- __AARCH64_LANE_CHECK (__a, __b); \
- __a[0]; \
- })
-
-#define __aarch64_vget_lane_u8(__a, __b) \
- __aarch64_vget_lane_any (v8qi, (uint8_t), (int8x8_t), __a, __b)
-#define __aarch64_vget_lane_u16(__a, __b) \
- __aarch64_vget_lane_any (v4hi, (uint16_t), (int16x4_t), __a, __b)
-#define __aarch64_vget_lane_u32(__a, __b) \
- __aarch64_vget_lane_any (v2si, (uint32_t), (int32x2_t), __a, __b)
-#define __aarch64_vget_lane_u64(__a, __b) __extension__ \
- ({ \
- __AARCH64_LANE_CHECK (__a, __b); \
- __a[0]; \
- })
-
-#define __aarch64_vgetq_lane_f32(__a, __b) \
- __aarch64_vget_lane_any (v4sf, , , __a, __b)
-#define __aarch64_vgetq_lane_f64(__a, __b) \
- __aarch64_vget_lane_any (v2df, , , __a, __b)
-
-#define __aarch64_vgetq_lane_p8(__a, __b) \
- __aarch64_vget_lane_any (v16qi, (poly8_t), (int8x16_t), __a, __b)
-#define __aarch64_vgetq_lane_p16(__a, __b) \
- __aarch64_vget_lane_any (v8hi, (poly16_t), (int16x8_t), __a, __b)
-
-#define __aarch64_vgetq_lane_s8(__a, __b) \
- __aarch64_vget_lane_any (v16qi, , ,__a, __b)
-#define __aarch64_vgetq_lane_s16(__a, __b) \
- __aarch64_vget_lane_any (v8hi, , ,__a, __b)
-#define __aarch64_vgetq_lane_s32(__a, __b) \
- __aarch64_vget_lane_any (v4si, , ,__a, __b)
-#define __aarch64_vgetq_lane_s64(__a, __b) \
- __aarch64_vget_lane_any (v2di, , ,__a, __b)
-
-#define __aarch64_vgetq_lane_u8(__a, __b) \
- __aarch64_vget_lane_any (v16qi, (uint8_t), (int8x16_t), __a, __b)
-#define __aarch64_vgetq_lane_u16(__a, __b) \
- __aarch64_vget_lane_any (v8hi, (uint16_t), (int16x8_t), __a, __b)
-#define __aarch64_vgetq_lane_u32(__a, __b) \
- __aarch64_vget_lane_any (v4si, (uint32_t), (int32x4_t), __a, __b)
-#define __aarch64_vgetq_lane_u64(__a, __b) \
- __aarch64_vget_lane_any (v2di, (uint64_t), (int64x2_t), __a, __b)
-
/* __aarch64_vdup_lane internal macros. */
-#define __aarch64_vdup_lane_any(__size, __q1, __q2, __a, __b) \
- vdup##__q1##_n_##__size (__aarch64_vget##__q2##_lane_##__size (__a, __b))
+#define __aarch64_vdup_lane_any(__size, __q, __a, __b) \
+ vdup##__q##_n_##__size (__aarch64_vget_lane_any (__a, __b))
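/* Illustration (editor's sketch, not part of this patch): with the
   single generic accessor, a quad-form expansion such as
     __aarch64_vdupq_lane_s16 (__a, __b)
   becomes
     vdupq_n_s16 (__aarch64_vget_lane_any (__a, __b))
   so the pasted __q argument only selects the vdup variant; the lane
   read itself no longer needs a per-width macro.  */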
#define __aarch64_vdup_lane_f32(__a, __b) \
- __aarch64_vdup_lane_any (f32, , , __a, __b)
+ __aarch64_vdup_lane_any (f32, , __a, __b)
#define __aarch64_vdup_lane_f64(__a, __b) \
- __aarch64_vdup_lane_any (f64, , , __a, __b)
+ __aarch64_vdup_lane_any (f64, , __a, __b)
#define __aarch64_vdup_lane_p8(__a, __b) \
- __aarch64_vdup_lane_any (p8, , , __a, __b)
+ __aarch64_vdup_lane_any (p8, , __a, __b)
#define __aarch64_vdup_lane_p16(__a, __b) \
- __aarch64_vdup_lane_any (p16, , , __a, __b)
+ __aarch64_vdup_lane_any (p16, , __a, __b)
#define __aarch64_vdup_lane_s8(__a, __b) \
- __aarch64_vdup_lane_any (s8, , , __a, __b)
+ __aarch64_vdup_lane_any (s8, , __a, __b)
#define __aarch64_vdup_lane_s16(__a, __b) \
- __aarch64_vdup_lane_any (s16, , , __a, __b)
+ __aarch64_vdup_lane_any (s16, , __a, __b)
#define __aarch64_vdup_lane_s32(__a, __b) \
- __aarch64_vdup_lane_any (s32, , , __a, __b)
+ __aarch64_vdup_lane_any (s32, , __a, __b)
#define __aarch64_vdup_lane_s64(__a, __b) \
- __aarch64_vdup_lane_any (s64, , , __a, __b)
+ __aarch64_vdup_lane_any (s64, , __a, __b)
#define __aarch64_vdup_lane_u8(__a, __b) \
- __aarch64_vdup_lane_any (u8, , , __a, __b)
+ __aarch64_vdup_lane_any (u8, , __a, __b)
#define __aarch64_vdup_lane_u16(__a, __b) \
- __aarch64_vdup_lane_any (u16, , , __a, __b)
+ __aarch64_vdup_lane_any (u16, , __a, __b)
#define __aarch64_vdup_lane_u32(__a, __b) \
- __aarch64_vdup_lane_any (u32, , , __a, __b)
+ __aarch64_vdup_lane_any (u32, , __a, __b)
#define __aarch64_vdup_lane_u64(__a, __b) \
- __aarch64_vdup_lane_any (u64, , , __a, __b)
+ __aarch64_vdup_lane_any (u64, , __a, __b)
/* __aarch64_vdup_laneq internal macros. */
#define __aarch64_vdup_laneq_f32(__a, __b) \
- __aarch64_vdup_lane_any (f32, , q, __a, __b)
+ __aarch64_vdup_lane_any (f32, , __a, __b)
#define __aarch64_vdup_laneq_f64(__a, __b) \
- __aarch64_vdup_lane_any (f64, , q, __a, __b)
+ __aarch64_vdup_lane_any (f64, , __a, __b)
#define __aarch64_vdup_laneq_p8(__a, __b) \
- __aarch64_vdup_lane_any (p8, , q, __a, __b)
+ __aarch64_vdup_lane_any (p8, , __a, __b)
#define __aarch64_vdup_laneq_p16(__a, __b) \
- __aarch64_vdup_lane_any (p16, , q, __a, __b)
+ __aarch64_vdup_lane_any (p16, , __a, __b)
#define __aarch64_vdup_laneq_s8(__a, __b) \
- __aarch64_vdup_lane_any (s8, , q, __a, __b)
+ __aarch64_vdup_lane_any (s8, , __a, __b)
#define __aarch64_vdup_laneq_s16(__a, __b) \
- __aarch64_vdup_lane_any (s16, , q, __a, __b)
+ __aarch64_vdup_lane_any (s16, , __a, __b)
#define __aarch64_vdup_laneq_s32(__a, __b) \
- __aarch64_vdup_lane_any (s32, , q, __a, __b)
+ __aarch64_vdup_lane_any (s32, , __a, __b)
#define __aarch64_vdup_laneq_s64(__a, __b) \
- __aarch64_vdup_lane_any (s64, , q, __a, __b)
+ __aarch64_vdup_lane_any (s64, , __a, __b)
#define __aarch64_vdup_laneq_u8(__a, __b) \
- __aarch64_vdup_lane_any (u8, , q, __a, __b)
+ __aarch64_vdup_lane_any (u8, , __a, __b)
#define __aarch64_vdup_laneq_u16(__a, __b) \
- __aarch64_vdup_lane_any (u16, , q, __a, __b)
+ __aarch64_vdup_lane_any (u16, , __a, __b)
#define __aarch64_vdup_laneq_u32(__a, __b) \
- __aarch64_vdup_lane_any (u32, , q, __a, __b)
+ __aarch64_vdup_lane_any (u32, , __a, __b)
#define __aarch64_vdup_laneq_u64(__a, __b) \
- __aarch64_vdup_lane_any (u64, , q, __a, __b)
+ __aarch64_vdup_lane_any (u64, , __a, __b)
/* __aarch64_vdupq_lane internal macros. */
#define __aarch64_vdupq_lane_f32(__a, __b) \
- __aarch64_vdup_lane_any (f32, q, , __a, __b)
+ __aarch64_vdup_lane_any (f32, q, __a, __b)
#define __aarch64_vdupq_lane_f64(__a, __b) \
- __aarch64_vdup_lane_any (f64, q, , __a, __b)
+ __aarch64_vdup_lane_any (f64, q, __a, __b)
#define __aarch64_vdupq_lane_p8(__a, __b) \
- __aarch64_vdup_lane_any (p8, q, , __a, __b)
+ __aarch64_vdup_lane_any (p8, q, __a, __b)
#define __aarch64_vdupq_lane_p16(__a, __b) \
- __aarch64_vdup_lane_any (p16, q, , __a, __b)
+ __aarch64_vdup_lane_any (p16, q, __a, __b)
#define __aarch64_vdupq_lane_s8(__a, __b) \
- __aarch64_vdup_lane_any (s8, q, , __a, __b)
+ __aarch64_vdup_lane_any (s8, q, __a, __b)
#define __aarch64_vdupq_lane_s16(__a, __b) \
- __aarch64_vdup_lane_any (s16, q, , __a, __b)
+ __aarch64_vdup_lane_any (s16, q, __a, __b)
#define __aarch64_vdupq_lane_s32(__a, __b) \
- __aarch64_vdup_lane_any (s32, q, , __a, __b)
+ __aarch64_vdup_lane_any (s32, q, __a, __b)
#define __aarch64_vdupq_lane_s64(__a, __b) \
- __aarch64_vdup_lane_any (s64, q, , __a, __b)
+ __aarch64_vdup_lane_any (s64, q, __a, __b)
#define __aarch64_vdupq_lane_u8(__a, __b) \
- __aarch64_vdup_lane_any (u8, q, , __a, __b)
+ __aarch64_vdup_lane_any (u8, q, __a, __b)
#define __aarch64_vdupq_lane_u16(__a, __b) \
- __aarch64_vdup_lane_any (u16, q, , __a, __b)
+ __aarch64_vdup_lane_any (u16, q, __a, __b)
#define __aarch64_vdupq_lane_u32(__a, __b) \
- __aarch64_vdup_lane_any (u32, q, , __a, __b)
+ __aarch64_vdup_lane_any (u32, q, __a, __b)
#define __aarch64_vdupq_lane_u64(__a, __b) \
- __aarch64_vdup_lane_any (u64, q, , __a, __b)
+ __aarch64_vdup_lane_any (u64, q, __a, __b)
/* __aarch64_vdupq_laneq internal macros. */
#define __aarch64_vdupq_laneq_f32(__a, __b) \
- __aarch64_vdup_lane_any (f32, q, q, __a, __b)
+ __aarch64_vdup_lane_any (f32, q, __a, __b)
#define __aarch64_vdupq_laneq_f64(__a, __b) \
- __aarch64_vdup_lane_any (f64, q, q, __a, __b)
+ __aarch64_vdup_lane_any (f64, q, __a, __b)
#define __aarch64_vdupq_laneq_p8(__a, __b) \
- __aarch64_vdup_lane_any (p8, q, q, __a, __b)
+ __aarch64_vdup_lane_any (p8, q, __a, __b)
#define __aarch64_vdupq_laneq_p16(__a, __b) \
- __aarch64_vdup_lane_any (p16, q, q, __a, __b)
+ __aarch64_vdup_lane_any (p16, q, __a, __b)
#define __aarch64_vdupq_laneq_s8(__a, __b) \
- __aarch64_vdup_lane_any (s8, q, q, __a, __b)
+ __aarch64_vdup_lane_any (s8, q, __a, __b)
#define __aarch64_vdupq_laneq_s16(__a, __b) \
- __aarch64_vdup_lane_any (s16, q, q, __a, __b)
+ __aarch64_vdup_lane_any (s16, q, __a, __b)
#define __aarch64_vdupq_laneq_s32(__a, __b) \
- __aarch64_vdup_lane_any (s32, q, q, __a, __b)
+ __aarch64_vdup_lane_any (s32, q, __a, __b)
#define __aarch64_vdupq_laneq_s64(__a, __b) \
- __aarch64_vdup_lane_any (s64, q, q, __a, __b)
+ __aarch64_vdup_lane_any (s64, q, __a, __b)
#define __aarch64_vdupq_laneq_u8(__a, __b) \
- __aarch64_vdup_lane_any (u8, q, q, __a, __b)
+ __aarch64_vdup_lane_any (u8, q, __a, __b)
#define __aarch64_vdupq_laneq_u16(__a, __b) \
- __aarch64_vdup_lane_any (u16, q, q, __a, __b)
+ __aarch64_vdup_lane_any (u16, q, __a, __b)
#define __aarch64_vdupq_laneq_u32(__a, __b) \
- __aarch64_vdup_lane_any (u32, q, q, __a, __b)
+ __aarch64_vdup_lane_any (u32, q, __a, __b)
#define __aarch64_vdupq_laneq_u64(__a, __b) \
- __aarch64_vdup_lane_any (u64, q, q, __a, __b)
+ __aarch64_vdup_lane_any (u64, q, __a, __b)
/* Internal macro for lane indices. */
#define __aarch64_lane(__vec, __idx) __idx
#endif
-/* vset_lane and vld1_lane internal macro. */
+/* vget_lane internal macro. */
+#define __aarch64_vget_lane_any(__vec, __index) \
+ __extension__ \
+ ({ \
+ __AARCH64_LANE_CHECK (__vec, __index); \
+ __vec[__aarch64_lane (__vec, __index)]; \
+ })
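/* Illustration (editor's sketch, not part of this patch): given
     int32x4_t __v = vdupq_n_s32 (42);
     int32_t __x = __aarch64_vget_lane_any (__v, 1);
   the statement expression first runs __AARCH64_LANE_CHECK to reject
   out-of-range lane numbers at compile time, then indexes the vector
   directly; __aarch64_lane maps the architectural lane number to
   GCC's vector index (the identity mapping in the definition above).  */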
+/* vset_lane and vld1_lane internal macro. */
#define __aarch64_vset_lane_any(__elem, __vec, __index) \
__extension__ \
({ \
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vget_lane_f32 (float32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_f32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vget_lane_f64 (float64x1_t __a, const int __b)
{
- return __aarch64_vget_lane_f64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
vget_lane_p8 (poly8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_p8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
vget_lane_p16 (poly16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_p16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vget_lane_s8 (int8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_s8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vget_lane_s16 (int16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_s16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vget_lane_s32 (int32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_s32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int64_t __attribute__ ((__always_inline__))
vget_lane_s64 (int64x1_t __a, const int __b)
{
- return __aarch64_vget_lane_s64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
vget_lane_u8 (uint8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_u8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
vget_lane_u16 (uint16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_u16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vget_lane_u32 (uint32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_u32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
vget_lane_u64 (uint64x1_t __a, const int __b)
{
- return __aarch64_vget_lane_u64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vgetq_lane */
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vgetq_lane_f32 (float32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_f32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vgetq_lane_f64 (float64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_f64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
vgetq_lane_p8 (poly8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_p8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
vgetq_lane_p16 (poly16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_p16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vgetq_lane_s8 (int8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vgetq_lane_s16 (int16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vgetq_lane_s32 (int32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int64_t __attribute__ ((__always_inline__))
vgetq_lane_s64 (int64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
vgetq_lane_u8 (uint8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
vgetq_lane_u16 (uint16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vgetq_lane_u32 (uint32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
vgetq_lane_u64 (uint64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
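/* Illustration (editor's sketch, not part of this patch): user-facing
   behaviour is unchanged; for example
     uint64x2_t __v = vcombine_u64 (vcreate_u64 (1), vcreate_u64 (2));
     uint64_t __x = vgetq_lane_u64 (__v, 1);
   still yields 2, but every vget_lane_* and vgetq_lane_* intrinsic
   above now routes through the single __aarch64_vget_lane_any macro
   instead of a per-type helper.  */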
/* vreinterpret */
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
vdupb_lane_p8 (poly8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_p8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vdupb_lane_s8 (int8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_s8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
vdupb_lane_u8 (uint8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_u8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vduph_lane */
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
vduph_lane_p16 (poly16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_p16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vduph_lane_s16 (int16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_s16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
vduph_lane_u16 (uint16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_u16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vdups_lane */
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vdups_lane_f32 (float32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_f32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vdups_lane_s32 (int32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_s32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vdups_lane_u32 (uint32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_u32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vdupd_lane */
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
vdupb_laneq_p8 (poly8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_p8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vdupb_laneq_s8 (int8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
vdupb_laneq_u8 (uint8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vduph_laneq */
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
vduph_laneq_p16 (poly16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_p16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vduph_laneq_s16 (int16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
vduph_laneq_u16 (uint16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vdups_laneq */
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vdups_laneq_f32 (float32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_f32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vdups_laneq_s32 (int32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vdups_laneq_u32 (uint32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vdupd_laneq */
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vdupd_laneq_f64 (float64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_f64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int64_t __attribute__ ((__always_inline__))
vdupd_laneq_s64 (int64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
vdupd_laneq_u64 (uint64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
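/* Illustration (editor's sketch, not part of this patch): the scalar
   vdup*_lane* intrinsics are plain lane reads, so
     float64x2_t __v = vdupq_n_f64 (2.5);
     float64_t __d = vdupd_laneq_f64 (__v, 0);
   is equivalent to vgetq_lane_f64 (__v, 0), and both now expand to
   the same __aarch64_vget_lane_any form.  */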
/* vext */
vfmas_lane_f32 (float32_t __a, float32_t __b,
float32x2_t __c, const int __lane)
{
- return __builtin_fmaf (__b, __aarch64_vget_lane_f32 (__c, __lane), __a);
+ return __builtin_fmaf (__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
/* vfma_laneq */
vfma_laneq_f64 (float64x1_t __a, float64x1_t __b,
float64x2_t __c, const int __lane)
{
- float64_t __c0 = __aarch64_vgetq_lane_f64 (__c, __lane);
+ float64_t __c0 = __aarch64_vget_lane_any (__c, __lane);
return (float64x1_t) {__builtin_fma (__b[0], __c0, __a[0])};
}
vfmad_laneq_f64 (float64_t __a, float64_t __b,
float64x2_t __c, const int __lane)
{
- return __builtin_fma (__b, __aarch64_vgetq_lane_f64 (__c, __lane), __a);
+ return __builtin_fma (__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vfmas_laneq_f32 (float32_t __a, float32_t __b,
float32x4_t __c, const int __lane)
{
- return __builtin_fmaf (__b, __aarch64_vgetq_lane_f32 (__c, __lane), __a);
+ return __builtin_fmaf (__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
/* vfmaq_lane */
vfmss_lane_f32 (float32_t __a, float32_t __b,
float32x2_t __c, const int __lane)
{
- return __builtin_fmaf (-__b, __aarch64_vget_lane_f32 (__c, __lane), __a);
+ return __builtin_fmaf (-__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
/* vfms_laneq */
vfms_laneq_f64 (float64x1_t __a, float64x1_t __b,
float64x2_t __c, const int __lane)
{
- float64_t __c0 = __aarch64_vgetq_lane_f64 (__c, __lane);
+ float64_t __c0 = __aarch64_vget_lane_any (__c, __lane);
return (float64x1_t) {__builtin_fma (-__b[0], __c0, __a[0])};
}
vfmsd_laneq_f64 (float64_t __a, float64_t __b,
float64x2_t __c, const int __lane)
{
- return __builtin_fma (-__b, __aarch64_vgetq_lane_f64 (__c, __lane), __a);
+ return __builtin_fma (-__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vfmss_laneq_f32 (float32_t __a, float32_t __b,
float32x4_t __c, const int __lane)
{
- return __builtin_fmaf (-__b, __aarch64_vgetq_lane_f32 (__c, __lane), __a);
+ return __builtin_fmaf (-__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
/* vfmsq_lane */
vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmla_lane_s16 (int16x4_t __a, int16x4_t __b,
int16x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmla_lane_s32 (int32x2_t __a, int32x2_t __b,
int32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmla_laneq */
vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmla_laneq_s16 (int16x4_t __a, int16x4_t __b,
int16x8_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmla_laneq_s32 (int32x2_t __a, int32x2_t __b,
int32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmla_laneq_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x8_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmla_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmlaq_lane */
vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b,
int16x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b,
int32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmlaq_laneq */
vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlaq_laneq_s16 (int16x8_t __a, int16x8_t __b,
int16x8_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlaq_laneq_s32 (int32x4_t __a, int32x4_t __b,
int32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlaq_laneq_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x8_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlaq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmls */
vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmls_lane_s16 (int16x4_t __a, int16x4_t __b,
int16x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmls_lane_s32 (int32x2_t __a, int32x2_t __b,
int32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmls_laneq */
vmls_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmls_laneq_s16 (int16x4_t __a, int16x4_t __b,
int16x8_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmls_laneq_s32 (int32x2_t __a, int32x2_t __b,
int32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmls_laneq_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x8_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmls_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmlsq_lane */
vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b,
int16x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b,
int32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmlsq_laneq */
vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlsq_laneq_s16 (int16x8_t __a, int16x8_t __b,
int16x8_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlsq_laneq_s32 (int32x4_t __a, int32x4_t __b,
int32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlsq_laneq_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x8_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlsq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmov_n_ */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_s16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_s32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_u16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_u32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vmuld_lane */
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vmuld_lane_f64 (float64_t __a, float64x1_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_f64 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vmuld_laneq_f64 (float64_t __a, float64x2_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f64 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vmuls_lane */
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vmuls_lane_f32 (float32_t __a, float32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vmuls_laneq_f32 (float32_t __a, float32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vmul_laneq */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmul_laneq_f32 (float32x2_t __a, float32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
vmul_laneq_f64 (float64x1_t __a, float64x2_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f64 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmul_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_s16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmul_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_s32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmul_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_u16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmul_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_u32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vmul_n */
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_s16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_s32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_u16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_u32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vmulq_laneq */
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmulq_laneq_f32 (float32x4_t __a, float32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vmulq_laneq_f64 (float64x2_t __a, float64x2_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f64 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmulq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_s16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmulq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_s32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmulq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_u16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmulq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_u32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
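/* Illustration (editor's sketch, not part of this patch): the
   multiply-by-lane intrinsics scale a whole vector by one extracted
   element; for example
     int32x4_t __a = vdupq_n_s32 (3);
     int32x4_t __b = vdupq_n_s32 (4);
     int32x4_t __r = vmulq_laneq_s32 (__a, __b, 2);
   multiplies every element of __a by lane 2 of __b (here 4), with the
   lane read going through __aarch64_vget_lane_any.  */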
/* vneg */
/* End of optimal implementations in approved order. */
#undef __aarch64_vget_lane_any
-#undef __aarch64_vget_lane_f32
-#undef __aarch64_vget_lane_f64
-#undef __aarch64_vget_lane_p8
-#undef __aarch64_vget_lane_p16
-#undef __aarch64_vget_lane_s8
-#undef __aarch64_vget_lane_s16
-#undef __aarch64_vget_lane_s32
-#undef __aarch64_vget_lane_s64
-#undef __aarch64_vget_lane_u8
-#undef __aarch64_vget_lane_u16
-#undef __aarch64_vget_lane_u32
-#undef __aarch64_vget_lane_u64
-
-#undef __aarch64_vgetq_lane_f32
-#undef __aarch64_vgetq_lane_f64
-#undef __aarch64_vgetq_lane_p8
-#undef __aarch64_vgetq_lane_p16
-#undef __aarch64_vgetq_lane_s8
-#undef __aarch64_vgetq_lane_s16
-#undef __aarch64_vgetq_lane_s32
-#undef __aarch64_vgetq_lane_s64
-#undef __aarch64_vgetq_lane_u8
-#undef __aarch64_vgetq_lane_u16
-#undef __aarch64_vgetq_lane_u32
-#undef __aarch64_vgetq_lane_u64
#undef __aarch64_vdup_lane_any
#undef __aarch64_vdup_lane_f32