result; \
})
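+/* SRI on a single 64-bit lane: bits of B shifted right by C are
+   inserted into A, leaving the top C bits of A unchanged.  */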
+#define vsri_n_p64(a, b, c) \
+ __extension__ \
+ ({ \
+ poly64x1_t b_ = (b); \
+ poly64x1_t a_ = (a); \
+ poly64x1_t result; \
+ __asm__ ("sri %d0,%d2,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers. */); \
+ result; \
+ })
+
#define vsriq_n_p8(a, b, c) \
__extension__ \
({ \
result; \
})
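+/* SRI on both 64-bit lanes: each lane of B is shifted right by C and
+   inserted into the corresponding lane of A, preserving the top C
+   bits of each lane of A.  */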
+#define vsriq_n_p64(a, b, c) \
+ __extension__ \
+ ({ \
+ poly64x2_t b_ = (b); \
+ poly64x2_t a_ = (a); \
+ poly64x2_t result; \
+ __asm__ ("sri %0.2d,%2.2d,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers. */); \
+ result; \
+ })
+
__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
{
  return __builtin_aarch64_simd_bslv4hi_pupp (__a, __b, __c);
}
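+/* Bitwise select: each result bit is taken from B where the mask A
+   has a 1 bit and from C where it has a 0 bit.  */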
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_p64 (uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
+{
+ return (poly64x1_t)
+ {__builtin_aarch64_simd_bsldi_pupp (__a[0], __b[0], __c[0])};
+}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_aarch64_simd_bslv8hi_suss (__a, __b, __c);
}
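+/* Bitwise select on the full 128-bit vector: bits come from B where
+   the mask A is set and from C otherwise.  */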
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_p64 (uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2di_pupp (__a, __b, __c);
+}
+
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
return (uint8x8_t) (__a == __b);
}
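+/* Compare the single 64-bit lanes for equality: the result lane is
+   all ones when A equals B and all zeros otherwise.  */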
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ return (uint64x1_t) (__a == __b);
+}
+
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_s8 (int8x8_t __a, int8x8_t __b)
#endif
}
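+/* Extract from the pair (A, B) starting at lane C; with one lane per
+   vector the only valid C is 0.  */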
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_p64 (poly64x1_t __a, poly64x1_t __b, __const int __c)
+{
+ __AARCH64_LANE_CHECK (__a, __c);
+ /* The only possible index to the assembler instruction returns element 0. */
+ return __a;
+}
+
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c)
#endif
}
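+/* Extract two consecutive 64-bit lanes from the concatenation of A
+   and B, starting at lane C; the shuffle indices are mirrored for
+   big-endian.  */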
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_p64 (poly64x2_t __a, poly64x2_t __b, __const int __c)
+{
+ __AARCH64_LANE_CHECK (__a, __c);
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {__c, __c+1});
+#endif
+}
+
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_s8 (int8x16_t __a, int8x16_t __b, __const int __c)