From: Andre Simoes Dias Vieira
Date: Tue, 7 Apr 2020 14:40:15 +0000 (+0100)
Subject: arm: MVE: Add C++ polymorphism and fix some more issues
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=6a90680bfff0a3803b3dc950c847046abb2d7d54;p=gcc.git

arm: MVE: Add C++ polymorphism and fix some more issues

This patch adds C++ polymorphism for the MVE intrinsics, using native C++
function overloading when the header is compiled as C++.  It also moves the
macro definitions guarded by __ARM_MVE_PRESERVE_USER_NAMESPACE to the right
place, so that the variants without the '__arm_' prefix are not available
when that macro is defined.  This patch further fixes two testisms that were
brought to light by the C++ testing added here.

gcc/ChangeLog:

2020-04-07 Andre Vieira

	* config/arm/arm_mve.h: Add C++ polymorphism and fix preserve MACROs.

gcc/testsuite/ChangeLog:

2020-04-07 Andre Vieira

	* g++.target/arm/mve.exp: New.
	* gcc.target/arm/mve/intrinsics/vcmpneq_n_f16.c: Fix testism.
	* gcc.target/arm/mve/intrinsics/vcmpneq_n_f32.c: Likewise.
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 34d38779426..b5c9aed6701 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,7 @@
+2020-04-07 Andre Vieira
+
+	* config/arm/arm_mve.h: Add C++ polymorphism and fix preserve MACROs.
+
 2020-04-07 Andre Vieira
 
 	* config/arm/arm_mve.h: Cast some pointers to expected types.
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index 49c7fb95f17..d75c430cb83 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -33,10 +33,6 @@
 #include
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
 typedef __fp16 float16_t;
 typedef float float32_t;
@@ -75,6 +71,499 @@ typedef struct { uint8x16_t val[2]; } uint8x16x2_t;
 typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
 
 #ifndef __ARM_MVE_PRESERVE_USER_NAMESPACE
+#define vst4q(__addr, __value) __arm_vst4q(__addr, __value)
+#define vdupq_n(__a) __arm_vdupq_n(__a)
+#define vabsq(__a) __arm_vabsq(__a)
+#define vclsq(__a) __arm_vclsq(__a)
+#define vclzq(__a) __arm_vclzq(__a)
+#define vnegq(__a) __arm_vnegq(__a)
+#define vaddlvq(__a) __arm_vaddlvq(__a)
+#define vaddvq(__a) __arm_vaddvq(__a)
+#define vmovlbq(__a) __arm_vmovlbq(__a)
+#define vmovltq(__a) __arm_vmovltq(__a)
+#define vmvnq(__a) __arm_vmvnq(__a)
+#define vrev16q(__a) __arm_vrev16q(__a)
+#define vrev32q(__a) __arm_vrev32q(__a)
+#define vrev64q(__a) __arm_vrev64q(__a)
+#define vqabsq(__a) __arm_vqabsq(__a)
+#define vqnegq(__a) __arm_vqnegq(__a)
+#define vshrq(__a, __imm) __arm_vshrq(__a, __imm)
+#define vaddlvq_p(__a, __p) __arm_vaddlvq_p(__a, __p)
+#define vcmpneq(__a, __b) __arm_vcmpneq(__a, __b)
+#define vshlq(__a, __b) __arm_vshlq(__a, __b)
+#define vsubq(__a, __b) __arm_vsubq(__a, __b)
+#define vrmulhq(__a, __b) __arm_vrmulhq(__a, __b)
+#define vrhaddq(__a, __b) __arm_vrhaddq(__a, __b)
+#define vqsubq(__a, __b) __arm_vqsubq(__a, __b)
+#define vqaddq(__a, __b) __arm_vqaddq(__a, __b)
+#define vorrq(__a, __b) __arm_vorrq(__a, __b)
+#define vornq(__a, __b) __arm_vornq(__a, __b)
+#define vmulq(__a, __b) __arm_vmulq(__a, __b)
+#define vmulltq_int(__a, __b) __arm_vmulltq_int(__a, __b)
+#define vmullbq_int(__a, __b) __arm_vmullbq_int(__a, __b)
+#define vmulhq(__a, __b) __arm_vmulhq(__a, __b)
+#define vmladavq(__a, __b) __arm_vmladavq(__a, __b)
+#define vminvq(__a, __b) __arm_vminvq(__a, __b)
+#define vminq(__a, __b) __arm_vminq(__a, __b)
+#define vmaxvq(__a, __b) __arm_vmaxvq(__a, __b)
+#define vmaxq(__a, __b) __arm_vmaxq(__a, __b)
+#define vhsubq(__a, __b) __arm_vhsubq(__a,
__b) +#define vhaddq(__a, __b) __arm_vhaddq(__a, __b) +#define veorq(__a, __b) __arm_veorq(__a, __b) +#define vcmphiq(__a, __b) __arm_vcmphiq(__a, __b) +#define vcmpeqq(__a, __b) __arm_vcmpeqq(__a, __b) +#define vcmpcsq(__a, __b) __arm_vcmpcsq(__a, __b) +#define vcaddq_rot90(__a, __b) __arm_vcaddq_rot90(__a, __b) +#define vcaddq_rot270(__a, __b) __arm_vcaddq_rot270(__a, __b) +#define vbicq(__a, __b) __arm_vbicq(__a, __b) +#define vandq(__a, __b) __arm_vandq(__a, __b) +#define vaddvq_p(__a, __p) __arm_vaddvq_p(__a, __p) +#define vaddvaq(__a, __b) __arm_vaddvaq(__a, __b) +#define vaddq(__a, __b) __arm_vaddq(__a, __b) +#define vabdq(__a, __b) __arm_vabdq(__a, __b) +#define vshlq_r(__a, __b) __arm_vshlq_r(__a, __b) +#define vrshlq(__a, __b) __arm_vrshlq(__a, __b) +#define vqshlq(__a, __b) __arm_vqshlq(__a, __b) +#define vqshlq_r(__a, __b) __arm_vqshlq_r(__a, __b) +#define vqrshlq(__a, __b) __arm_vqrshlq(__a, __b) +#define vminavq(__a, __b) __arm_vminavq(__a, __b) +#define vminaq(__a, __b) __arm_vminaq(__a, __b) +#define vmaxavq(__a, __b) __arm_vmaxavq(__a, __b) +#define vmaxaq(__a, __b) __arm_vmaxaq(__a, __b) +#define vbrsrq(__a, __b) __arm_vbrsrq(__a, __b) +#define vshlq_n(__a, __imm) __arm_vshlq_n(__a, __imm) +#define vrshrq(__a, __imm) __arm_vrshrq(__a, __imm) +#define vqshlq_n(__a, __imm) __arm_vqshlq_n(__a, __imm) +#define vcmpltq(__a, __b) __arm_vcmpltq(__a, __b) +#define vcmpleq(__a, __b) __arm_vcmpleq(__a, __b) +#define vcmpgtq(__a, __b) __arm_vcmpgtq(__a, __b) +#define vcmpgeq(__a, __b) __arm_vcmpgeq(__a, __b) +#define vqshluq(__a, __imm) __arm_vqshluq(__a, __imm) +#define vqrdmulhq(__a, __b) __arm_vqrdmulhq(__a, __b) +#define vqdmulhq(__a, __b) __arm_vqdmulhq(__a, __b) +#define vmlsdavxq(__a, __b) __arm_vmlsdavxq(__a, __b) +#define vmlsdavq(__a, __b) __arm_vmlsdavq(__a, __b) +#define vmladavxq(__a, __b) __arm_vmladavxq(__a, __b) +#define vhcaddq_rot90(__a, __b) __arm_vhcaddq_rot90(__a, __b) +#define vhcaddq_rot270(__a, __b) __arm_vhcaddq_rot270(__a, __b) +#define vqmovntq(__a, __b) __arm_vqmovntq(__a, __b) +#define vqmovnbq(__a, __b) __arm_vqmovnbq(__a, __b) +#define vmulltq_poly(__a, __b) __arm_vmulltq_poly(__a, __b) +#define vmullbq_poly(__a, __b) __arm_vmullbq_poly(__a, __b) +#define vmovntq(__a, __b) __arm_vmovntq(__a, __b) +#define vmovnbq(__a, __b) __arm_vmovnbq(__a, __b) +#define vmlaldavq(__a, __b) __arm_vmlaldavq(__a, __b) +#define vqmovuntq(__a, __b) __arm_vqmovuntq(__a, __b) +#define vqmovunbq(__a, __b) __arm_vqmovunbq(__a, __b) +#define vshlltq(__a, __imm) __arm_vshlltq(__a, __imm) +#define vshllbq(__a, __imm) __arm_vshllbq(__a, __imm) +#define vqdmulltq(__a, __b) __arm_vqdmulltq(__a, __b) +#define vqdmullbq(__a, __b) __arm_vqdmullbq(__a, __b) +#define vmlsldavxq(__a, __b) __arm_vmlsldavxq(__a, __b) +#define vmlsldavq(__a, __b) __arm_vmlsldavq(__a, __b) +#define vmlaldavxq(__a, __b) __arm_vmlaldavxq(__a, __b) +#define vrmlaldavhq(__a, __b) __arm_vrmlaldavhq(__a, __b) +#define vaddlvaq(__a, __b) __arm_vaddlvaq(__a, __b) +#define vrmlsldavhxq(__a, __b) __arm_vrmlsldavhxq(__a, __b) +#define vrmlsldavhq(__a, __b) __arm_vrmlsldavhq(__a, __b) +#define vrmlaldavhxq(__a, __b) __arm_vrmlaldavhxq(__a, __b) +#define vabavq(__a, __b, __c) __arm_vabavq(__a, __b, __c) +#define vbicq_m_n(__a, __imm, __p) __arm_vbicq_m_n(__a, __imm, __p) +#define vqrshrnbq(__a, __b, __imm) __arm_vqrshrnbq(__a, __b, __imm) +#define vqrshrunbq(__a, __b, __imm) __arm_vqrshrunbq(__a, __b, __imm) +#define vrmlaldavhaq(__a, __b, __c) __arm_vrmlaldavhaq(__a, __b, __c) +#define vshlcq(__a, __b, __imm) 
__arm_vshlcq(__a, __b, __imm) +#define vpselq(__a, __b, __p) __arm_vpselq(__a, __b, __p) +#define vrev64q_m(__inactive, __a, __p) __arm_vrev64q_m(__inactive, __a, __p) +#define vqrdmlashq(__a, __b, __c) __arm_vqrdmlashq(__a, __b, __c) +#define vqrdmlahq(__a, __b, __c) __arm_vqrdmlahq(__a, __b, __c) +#define vqdmlahq(__a, __b, __c) __arm_vqdmlahq(__a, __b, __c) +#define vmvnq_m(__inactive, __a, __p) __arm_vmvnq_m(__inactive, __a, __p) +#define vmlasq(__a, __b, __c) __arm_vmlasq(__a, __b, __c) +#define vmlaq(__a, __b, __c) __arm_vmlaq(__a, __b, __c) +#define vmladavq_p(__a, __b, __p) __arm_vmladavq_p(__a, __b, __p) +#define vmladavaq(__a, __b, __c) __arm_vmladavaq(__a, __b, __c) +#define vminvq_p(__a, __b, __p) __arm_vminvq_p(__a, __b, __p) +#define vmaxvq_p(__a, __b, __p) __arm_vmaxvq_p(__a, __b, __p) +#define vdupq_m(__inactive, __a, __p) __arm_vdupq_m(__inactive, __a, __p) +#define vcmpneq_m(__a, __b, __p) __arm_vcmpneq_m(__a, __b, __p) +#define vcmphiq_m(__a, __b, __p) __arm_vcmphiq_m(__a, __b, __p) +#define vcmpeqq_m(__a, __b, __p) __arm_vcmpeqq_m(__a, __b, __p) +#define vcmpcsq_m(__a, __b, __p) __arm_vcmpcsq_m(__a, __b, __p) +#define vcmpcsq_m_n(__a, __b, __p) __arm_vcmpcsq_m_n(__a, __b, __p) +#define vclzq_m(__inactive, __a, __p) __arm_vclzq_m(__inactive, __a, __p) +#define vaddvaq_p(__a, __b, __p) __arm_vaddvaq_p(__a, __b, __p) +#define vsriq(__a, __b, __imm) __arm_vsriq(__a, __b, __imm) +#define vsliq(__a, __b, __imm) __arm_vsliq(__a, __b, __imm) +#define vshlq_m_r(__a, __b, __p) __arm_vshlq_m_r(__a, __b, __p) +#define vrshlq_m_n(__a, __b, __p) __arm_vrshlq_m_n(__a, __b, __p) +#define vqshlq_m_r(__a, __b, __p) __arm_vqshlq_m_r(__a, __b, __p) +#define vqrshlq_m_n(__a, __b, __p) __arm_vqrshlq_m_n(__a, __b, __p) +#define vminavq_p(__a, __b, __p) __arm_vminavq_p(__a, __b, __p) +#define vminaq_m(__a, __b, __p) __arm_vminaq_m(__a, __b, __p) +#define vmaxavq_p(__a, __b, __p) __arm_vmaxavq_p(__a, __b, __p) +#define vmaxaq_m(__a, __b, __p) __arm_vmaxaq_m(__a, __b, __p) +#define vcmpltq_m(__a, __b, __p) __arm_vcmpltq_m(__a, __b, __p) +#define vcmpleq_m(__a, __b, __p) __arm_vcmpleq_m(__a, __b, __p) +#define vcmpgtq_m(__a, __b, __p) __arm_vcmpgtq_m(__a, __b, __p) +#define vcmpgeq_m(__a, __b, __p) __arm_vcmpgeq_m(__a, __b, __p) +#define vqnegq_m(__inactive, __a, __p) __arm_vqnegq_m(__inactive, __a, __p) +#define vqabsq_m(__inactive, __a, __p) __arm_vqabsq_m(__inactive, __a, __p) +#define vnegq_m(__inactive, __a, __p) __arm_vnegq_m(__inactive, __a, __p) +#define vmlsdavxq_p(__a, __b, __p) __arm_vmlsdavxq_p(__a, __b, __p) +#define vmlsdavq_p(__a, __b, __p) __arm_vmlsdavq_p(__a, __b, __p) +#define vmladavxq_p(__a, __b, __p) __arm_vmladavxq_p(__a, __b, __p) +#define vclsq_m(__inactive, __a, __p) __arm_vclsq_m(__inactive, __a, __p) +#define vabsq_m(__inactive, __a, __p) __arm_vabsq_m(__inactive, __a, __p) +#define vqrdmlsdhxq(__inactive, __a, __b) __arm_vqrdmlsdhxq(__inactive, __a, __b) +#define vqrdmlsdhq(__inactive, __a, __b) __arm_vqrdmlsdhq(__inactive, __a, __b) +#define vqrdmladhxq(__inactive, __a, __b) __arm_vqrdmladhxq(__inactive, __a, __b) +#define vqrdmladhq(__inactive, __a, __b) __arm_vqrdmladhq(__inactive, __a, __b) +#define vqdmlsdhxq(__inactive, __a, __b) __arm_vqdmlsdhxq(__inactive, __a, __b) +#define vqdmlsdhq(__inactive, __a, __b) __arm_vqdmlsdhq(__inactive, __a, __b) +#define vqdmladhxq(__inactive, __a, __b) __arm_vqdmladhxq(__inactive, __a, __b) +#define vqdmladhq(__inactive, __a, __b) __arm_vqdmladhq(__inactive, __a, __b) +#define vmlsdavaxq(__a, __b, __c) __arm_vmlsdavaxq(__a, 
__b, __c) +#define vmlsdavaq(__a, __b, __c) __arm_vmlsdavaq(__a, __b, __c) +#define vmladavaxq(__a, __b, __c) __arm_vmladavaxq(__a, __b, __c) +#define vrmlaldavhaxq(__a, __b, __c) __arm_vrmlaldavhaxq(__a, __b, __c) +#define vrmlsldavhaq(__a, __b, __c) __arm_vrmlsldavhaq(__a, __b, __c) +#define vrmlsldavhaxq(__a, __b, __c) __arm_vrmlsldavhaxq(__a, __b, __c) +#define vaddlvaq_p(__a, __b, __p) __arm_vaddlvaq_p(__a, __b, __p) +#define vrev16q_m(__inactive, __a, __p) __arm_vrev16q_m(__inactive, __a, __p) +#define vrmlaldavhq_p(__a, __b, __p) __arm_vrmlaldavhq_p(__a, __b, __p) +#define vrmlaldavhxq_p(__a, __b, __p) __arm_vrmlaldavhxq_p(__a, __b, __p) +#define vrmlsldavhq_p(__a, __b, __p) __arm_vrmlsldavhq_p(__a, __b, __p) +#define vrmlsldavhxq_p(__a, __b, __p) __arm_vrmlsldavhxq_p(__a, __b, __p) +#define vorrq_m_n(__a, __imm, __p) __arm_vorrq_m_n(__a, __imm, __p) +#define vqrshrntq(__a, __b, __imm) __arm_vqrshrntq(__a, __b, __imm) +#define vqshrnbq(__a, __b, __imm) __arm_vqshrnbq(__a, __b, __imm) +#define vqshrntq(__a, __b, __imm) __arm_vqshrntq(__a, __b, __imm) +#define vrshrnbq(__a, __b, __imm) __arm_vrshrnbq(__a, __b, __imm) +#define vrshrntq(__a, __b, __imm) __arm_vrshrntq(__a, __b, __imm) +#define vshrnbq(__a, __b, __imm) __arm_vshrnbq(__a, __b, __imm) +#define vshrntq(__a, __b, __imm) __arm_vshrntq(__a, __b, __imm) +#define vmlaldavaq(__a, __b, __c) __arm_vmlaldavaq(__a, __b, __c) +#define vmlaldavaxq(__a, __b, __c) __arm_vmlaldavaxq(__a, __b, __c) +#define vmlsldavaq(__a, __b, __c) __arm_vmlsldavaq(__a, __b, __c) +#define vmlsldavaxq(__a, __b, __c) __arm_vmlsldavaxq(__a, __b, __c) +#define vmlaldavq_p(__a, __b, __p) __arm_vmlaldavq_p(__a, __b, __p) +#define vmlaldavxq_p(__a, __b, __p) __arm_vmlaldavxq_p(__a, __b, __p) +#define vmlsldavq_p(__a, __b, __p) __arm_vmlsldavq_p(__a, __b, __p) +#define vmlsldavxq_p(__a, __b, __p) __arm_vmlsldavxq_p(__a, __b, __p) +#define vmovlbq_m(__inactive, __a, __p) __arm_vmovlbq_m(__inactive, __a, __p) +#define vmovltq_m(__inactive, __a, __p) __arm_vmovltq_m(__inactive, __a, __p) +#define vmovnbq_m(__a, __b, __p) __arm_vmovnbq_m(__a, __b, __p) +#define vmovntq_m(__a, __b, __p) __arm_vmovntq_m(__a, __b, __p) +#define vqmovnbq_m(__a, __b, __p) __arm_vqmovnbq_m(__a, __b, __p) +#define vqmovntq_m(__a, __b, __p) __arm_vqmovntq_m(__a, __b, __p) +#define vrev32q_m(__inactive, __a, __p) __arm_vrev32q_m(__inactive, __a, __p) +#define vqrshruntq(__a, __b, __imm) __arm_vqrshruntq(__a, __b, __imm) +#define vqshrunbq(__a, __b, __imm) __arm_vqshrunbq(__a, __b, __imm) +#define vqshruntq(__a, __b, __imm) __arm_vqshruntq(__a, __b, __imm) +#define vqmovunbq_m(__a, __b, __p) __arm_vqmovunbq_m(__a, __b, __p) +#define vqmovuntq_m(__a, __b, __p) __arm_vqmovuntq_m(__a, __b, __p) +#define vsriq_m(__a, __b, __imm, __p) __arm_vsriq_m(__a, __b, __imm, __p) +#define vsubq_m(__inactive, __a, __b, __p) __arm_vsubq_m(__inactive, __a, __b, __p) +#define vqshluq_m(__inactive, __a, __imm, __p) __arm_vqshluq_m(__inactive, __a, __imm, __p) +#define vabavq_p(__a, __b, __c, __p) __arm_vabavq_p(__a, __b, __c, __p) +#define vshlq_m(__inactive, __a, __b, __p) __arm_vshlq_m(__inactive, __a, __b, __p) +#define vabdq_m(__inactive, __a, __b, __p) __arm_vabdq_m(__inactive, __a, __b, __p) +#define vaddq_m(__inactive, __a, __b, __p) __arm_vaddq_m(__inactive, __a, __b, __p) +#define vandq_m(__inactive, __a, __b, __p) __arm_vandq_m(__inactive, __a, __b, __p) +#define vbicq_m(__inactive, __a, __b, __p) __arm_vbicq_m(__inactive, __a, __b, __p) +#define vbrsrq_m(__inactive, __a, __b, __p) 
__arm_vbrsrq_m(__inactive, __a, __b, __p) +#define vcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m(__inactive, __a, __b, __p) +#define vcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m(__inactive, __a, __b, __p) +#define veorq_m(__inactive, __a, __b, __p) __arm_veorq_m(__inactive, __a, __b, __p) +#define vhaddq_m(__inactive, __a, __b, __p) __arm_vhaddq_m(__inactive, __a, __b, __p) +#define vhcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m(__inactive, __a, __b, __p) +#define vhcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m(__inactive, __a, __b, __p) +#define vhsubq_m(__inactive, __a, __b, __p) __arm_vhsubq_m(__inactive, __a, __b, __p) +#define vmaxq_m(__inactive, __a, __b, __p) __arm_vmaxq_m(__inactive, __a, __b, __p) +#define vminq_m(__inactive, __a, __b, __p) __arm_vminq_m(__inactive, __a, __b, __p) +#define vmladavaq_p(__a, __b, __c, __p) __arm_vmladavaq_p(__a, __b, __c, __p) +#define vmladavaxq_p(__a, __b, __c, __p) __arm_vmladavaxq_p(__a, __b, __c, __p) +#define vmlaq_m(__a, __b, __c, __p) __arm_vmlaq_m(__a, __b, __c, __p) +#define vmlasq_m(__a, __b, __c, __p) __arm_vmlasq_m(__a, __b, __c, __p) +#define vmlsdavaq_p(__a, __b, __c, __p) __arm_vmlsdavaq_p(__a, __b, __c, __p) +#define vmlsdavaxq_p(__a, __b, __c, __p) __arm_vmlsdavaxq_p(__a, __b, __c, __p) +#define vmulhq_m(__inactive, __a, __b, __p) __arm_vmulhq_m(__inactive, __a, __b, __p) +#define vmullbq_int_m(__inactive, __a, __b, __p) __arm_vmullbq_int_m(__inactive, __a, __b, __p) +#define vmulltq_int_m(__inactive, __a, __b, __p) __arm_vmulltq_int_m(__inactive, __a, __b, __p) +#define vmulq_m(__inactive, __a, __b, __p) __arm_vmulq_m(__inactive, __a, __b, __p) +#define vornq_m(__inactive, __a, __b, __p) __arm_vornq_m(__inactive, __a, __b, __p) +#define vorrq_m(__inactive, __a, __b, __p) __arm_vorrq_m(__inactive, __a, __b, __p) +#define vqaddq_m(__inactive, __a, __b, __p) __arm_vqaddq_m(__inactive, __a, __b, __p) +#define vqdmladhq_m(__inactive, __a, __b, __p) __arm_vqdmladhq_m(__inactive, __a, __b, __p) +#define vqdmladhxq_m(__inactive, __a, __b, __p) __arm_vqdmladhxq_m(__inactive, __a, __b, __p) +#define vqdmlahq_m(__a, __b, __c, __p) __arm_vqdmlahq_m(__a, __b, __c, __p) +#define vqdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m(__inactive, __a, __b, __p) +#define vqdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m(__inactive, __a, __b, __p) +#define vqdmulhq_m(__inactive, __a, __b, __p) __arm_vqdmulhq_m(__inactive, __a, __b, __p) +#define vqrdmladhq_m(__inactive, __a, __b, __p) __arm_vqrdmladhq_m(__inactive, __a, __b, __p) +#define vqrdmladhxq_m(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m(__inactive, __a, __b, __p) +#define vqrdmlahq_m(__a, __b, __c, __p) __arm_vqrdmlahq_m(__a, __b, __c, __p) +#define vqrdmlashq_m(__a, __b, __c, __p) __arm_vqrdmlashq_m(__a, __b, __c, __p) +#define vqrdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m(__inactive, __a, __b, __p) +#define vqrdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m(__inactive, __a, __b, __p) +#define vqrdmulhq_m(__inactive, __a, __b, __p) __arm_vqrdmulhq_m(__inactive, __a, __b, __p) +#define vqrshlq_m(__inactive, __a, __b, __p) __arm_vqrshlq_m(__inactive, __a, __b, __p) +#define vqshlq_m_n(__inactive, __a, __imm, __p) __arm_vqshlq_m_n(__inactive, __a, __imm, __p) +#define vqshlq_m(__inactive, __a, __b, __p) __arm_vqshlq_m(__inactive, __a, __b, __p) +#define vqsubq_m(__inactive, __a, __b, __p) __arm_vqsubq_m(__inactive, __a, __b, __p) +#define vrhaddq_m(__inactive, __a, __b, __p) 
__arm_vrhaddq_m(__inactive, __a, __b, __p) +#define vrmulhq_m(__inactive, __a, __b, __p) __arm_vrmulhq_m(__inactive, __a, __b, __p) +#define vrshlq_m(__inactive, __a, __b, __p) __arm_vrshlq_m(__inactive, __a, __b, __p) +#define vrshrq_m(__inactive, __a, __imm, __p) __arm_vrshrq_m(__inactive, __a, __imm, __p) +#define vshlq_m_n(__inactive, __a, __imm, __p) __arm_vshlq_m_n(__inactive, __a, __imm, __p) +#define vshrq_m(__inactive, __a, __imm, __p) __arm_vshrq_m(__inactive, __a, __imm, __p) +#define vsliq_m(__a, __b, __imm, __p) __arm_vsliq_m(__a, __b, __imm, __p) +#define vmlaldavaq_p(__a, __b, __c, __p) __arm_vmlaldavaq_p(__a, __b, __c, __p) +#define vmlaldavaxq_p(__a, __b, __c, __p) __arm_vmlaldavaxq_p(__a, __b, __c, __p) +#define vmlsldavaq_p(__a, __b, __c, __p) __arm_vmlsldavaq_p(__a, __b, __c, __p) +#define vmlsldavaxq_p(__a, __b, __c, __p) __arm_vmlsldavaxq_p(__a, __b, __c, __p) +#define vmullbq_poly_m(__inactive, __a, __b, __p) __arm_vmullbq_poly_m(__inactive, __a, __b, __p) +#define vmulltq_poly_m(__inactive, __a, __b, __p) __arm_vmulltq_poly_m(__inactive, __a, __b, __p) +#define vqdmullbq_m(__inactive, __a, __b, __p) __arm_vqdmullbq_m(__inactive, __a, __b, __p) +#define vqdmulltq_m(__inactive, __a, __b, __p) __arm_vqdmulltq_m(__inactive, __a, __b, __p) +#define vqrshrnbq_m(__a, __b, __imm, __p) __arm_vqrshrnbq_m(__a, __b, __imm, __p) +#define vqrshrntq_m(__a, __b, __imm, __p) __arm_vqrshrntq_m(__a, __b, __imm, __p) +#define vqrshrunbq_m(__a, __b, __imm, __p) __arm_vqrshrunbq_m(__a, __b, __imm, __p) +#define vqrshruntq_m(__a, __b, __imm, __p) __arm_vqrshruntq_m(__a, __b, __imm, __p) +#define vqshrnbq_m(__a, __b, __imm, __p) __arm_vqshrnbq_m(__a, __b, __imm, __p) +#define vqshrntq_m(__a, __b, __imm, __p) __arm_vqshrntq_m(__a, __b, __imm, __p) +#define vqshrunbq_m(__a, __b, __imm, __p) __arm_vqshrunbq_m(__a, __b, __imm, __p) +#define vqshruntq_m(__a, __b, __imm, __p) __arm_vqshruntq_m(__a, __b, __imm, __p) +#define vrmlaldavhaq_p(__a, __b, __c, __p) __arm_vrmlaldavhaq_p(__a, __b, __c, __p) +#define vrmlaldavhaxq_p(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p(__a, __b, __c, __p) +#define vrmlsldavhaq_p(__a, __b, __c, __p) __arm_vrmlsldavhaq_p(__a, __b, __c, __p) +#define vrmlsldavhaxq_p(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p(__a, __b, __c, __p) +#define vrshrnbq_m(__a, __b, __imm, __p) __arm_vrshrnbq_m(__a, __b, __imm, __p) +#define vrshrntq_m(__a, __b, __imm, __p) __arm_vrshrntq_m(__a, __b, __imm, __p) +#define vshllbq_m(__inactive, __a, __imm, __p) __arm_vshllbq_m(__inactive, __a, __imm, __p) +#define vshlltq_m(__inactive, __a, __imm, __p) __arm_vshlltq_m(__inactive, __a, __imm, __p) +#define vshrnbq_m(__a, __b, __imm, __p) __arm_vshrnbq_m(__a, __b, __imm, __p) +#define vshrntq_m(__a, __b, __imm, __p) __arm_vshrntq_m(__a, __b, __imm, __p) +#define vstrbq_scatter_offset(__base, __offset, __value) __arm_vstrbq_scatter_offset(__base, __offset, __value) +#define vstrbq(__addr, __value) __arm_vstrbq(__addr, __value) +#define vstrwq_scatter_base(__addr, __offset, __value) __arm_vstrwq_scatter_base(__addr, __offset, __value) +#define vldrbq_gather_offset(__base, __offset) __arm_vldrbq_gather_offset(__base, __offset) +#define vstrbq_p(__addr, __value, __p) __arm_vstrbq_p(__addr, __value, __p) +#define vstrbq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p(__base, __offset, __value, __p) +#define vstrwq_scatter_base_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p(__addr, __offset, __value, __p) +#define vldrbq_gather_offset_z(__base, __offset, __p) 
__arm_vldrbq_gather_offset_z(__base, __offset, __p) +#define vld1q(__base) __arm_vld1q(__base) +#define vldrhq_gather_offset(__base, __offset) __arm_vldrhq_gather_offset(__base, __offset) +#define vldrhq_gather_offset_z(__base, __offset, __p) __arm_vldrhq_gather_offset_z(__base, __offset, __p) +#define vldrhq_gather_shifted_offset(__base, __offset) __arm_vldrhq_gather_shifted_offset(__base, __offset) +#define vldrhq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z(__base, __offset, __p) +#define vldrdq_gather_offset(__base, __offset) __arm_vldrdq_gather_offset(__base, __offset) +#define vldrdq_gather_offset_z(__base, __offset, __p) __arm_vldrdq_gather_offset_z(__base, __offset, __p) +#define vldrdq_gather_shifted_offset(__base, __offset) __arm_vldrdq_gather_shifted_offset(__base, __offset) +#define vldrdq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z(__base, __offset, __p) +#define vldrwq_gather_offset(__base, __offset) __arm_vldrwq_gather_offset(__base, __offset) +#define vldrwq_gather_offset_z(__base, __offset, __p) __arm_vldrwq_gather_offset_z(__base, __offset, __p) +#define vldrwq_gather_shifted_offset(__base, __offset) __arm_vldrwq_gather_shifted_offset(__base, __offset) +#define vldrwq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z(__base, __offset, __p) +#define vst1q(__addr, __value) __arm_vst1q(__addr, __value) +#define vstrhq_scatter_offset(__base, __offset, __value) __arm_vstrhq_scatter_offset(__base, __offset, __value) +#define vstrhq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p(__base, __offset, __value, __p) +#define vstrhq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset(__base, __offset, __value) +#define vstrhq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p(__base, __offset, __value, __p) +#define vstrhq(__addr, __value) __arm_vstrhq(__addr, __value) +#define vstrhq_p(__addr, __value, __p) __arm_vstrhq_p(__addr, __value, __p) +#define vstrwq(__addr, __value) __arm_vstrwq(__addr, __value) +#define vstrwq_p(__addr, __value, __p) __arm_vstrwq_p(__addr, __value, __p) +#define vstrdq_scatter_base_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p(__addr, __offset, __value, __p) +#define vstrdq_scatter_base(__addr, __offset, __value) __arm_vstrdq_scatter_base(__addr, __offset, __value) +#define vstrdq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p(__base, __offset, __value, __p) +#define vstrdq_scatter_offset(__base, __offset, __value) __arm_vstrdq_scatter_offset(__base, __offset, __value) +#define vstrdq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p(__base, __offset, __value, __p) +#define vstrdq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset(__base, __offset, __value) +#define vstrwq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p(__base, __offset, __value, __p) +#define vstrwq_scatter_offset(__base, __offset, __value) __arm_vstrwq_scatter_offset(__base, __offset, __value) +#define vstrwq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p(__base, __offset, __value, __p) +#define vstrwq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset(__base, __offset, __value) +#define vuninitializedq(__v) 
__arm_vuninitializedq(__v) +#define vreinterpretq_s16(__a) __arm_vreinterpretq_s16(__a) +#define vreinterpretq_s32(__a) __arm_vreinterpretq_s32(__a) +#define vreinterpretq_s64(__a) __arm_vreinterpretq_s64(__a) +#define vreinterpretq_s8(__a) __arm_vreinterpretq_s8(__a) +#define vreinterpretq_u16(__a) __arm_vreinterpretq_u16(__a) +#define vreinterpretq_u32(__a) __arm_vreinterpretq_u32(__a) +#define vreinterpretq_u64(__a) __arm_vreinterpretq_u64(__a) +#define vreinterpretq_u8(__a) __arm_vreinterpretq_u8(__a) +#define vddupq_m(__inactive, __a, __imm, __p) __arm_vddupq_m(__inactive, __a, __imm, __p) +#define vddupq_u8(__a, __imm) __arm_vddupq_u8(__a, __imm) +#define vddupq_u32(__a, __imm) __arm_vddupq_u32(__a, __imm) +#define vddupq_u16(__a, __imm) __arm_vddupq_u16(__a, __imm) +#define vdwdupq_m(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m(__inactive, __a, __b, __imm, __p) +#define vdwdupq_u8(__a, __b, __imm) __arm_vdwdupq_u8(__a, __b, __imm) +#define vdwdupq_u32(__a, __b, __imm) __arm_vdwdupq_u32(__a, __b, __imm) +#define vdwdupq_u16(__a, __b, __imm) __arm_vdwdupq_u16(__a, __b, __imm) +#define vidupq_m(__inactive, __a, __imm, __p) __arm_vidupq_m(__inactive, __a, __imm, __p) +#define vidupq_u8(__a, __imm) __arm_vidupq_u8(__a, __imm) +#define vidupq_u32(__a, __imm) __arm_vidupq_u32(__a, __imm) +#define vidupq_u16(__a, __imm) __arm_vidupq_u16(__a, __imm) +#define viwdupq_m(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m(__inactive, __a, __b, __imm, __p) +#define viwdupq_u8(__a, __b, __imm) __arm_viwdupq_u8(__a, __b, __imm) +#define viwdupq_u32(__a, __b, __imm) __arm_viwdupq_u32(__a, __b, __imm) +#define viwdupq_u16(__a, __b, __imm) __arm_viwdupq_u16(__a, __b, __imm) +#define vstrdq_scatter_base_wb(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb(__addr, __offset, __value) +#define vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p) +#define vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p) +#define vstrwq_scatter_base_wb(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb(__addr, __offset, __value) +#define vddupq_x_u8(__a, __imm, __p) __arm_vddupq_x_u8(__a, __imm, __p) +#define vddupq_x_u16(__a, __imm, __p) __arm_vddupq_x_u16(__a, __imm, __p) +#define vddupq_x_u32(__a, __imm, __p) __arm_vddupq_x_u32(__a, __imm, __p) +#define vdwdupq_x_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_u8(__a, __b, __imm, __p) +#define vdwdupq_x_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_u16(__a, __b, __imm, __p) +#define vdwdupq_x_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_u32(__a, __b, __imm, __p) +#define vidupq_x_u8(__a, __imm, __p) __arm_vidupq_x_u8(__a, __imm, __p) +#define vidupq_x_u16(__a, __imm, __p) __arm_vidupq_x_u16(__a, __imm, __p) +#define vidupq_x_u32(__a, __imm, __p) __arm_vidupq_x_u32(__a, __imm, __p) +#define viwdupq_x_u8(__a, __b, __imm, __p) __arm_viwdupq_x_u8(__a, __b, __imm, __p) +#define viwdupq_x_u16(__a, __b, __imm, __p) __arm_viwdupq_x_u16(__a, __b, __imm, __p) +#define viwdupq_x_u32(__a, __b, __imm, __p) __arm_viwdupq_x_u32(__a, __b, __imm, __p) +#define vminq_x(__a, __b, __p) __arm_vminq_x(__a, __b, __p) +#define vmaxq_x(__a, __b, __p) __arm_vmaxq_x(__a, __b, __p) +#define vabdq_x(__a, __b, __p) __arm_vabdq_x(__a, __b, __p) +#define vabsq_x(__a, __p) __arm_vabsq_x(__a, __p) +#define vaddq_x(__a, __b, __p) __arm_vaddq_x(__a, __b, __p) +#define vclsq_x(__a, __p) __arm_vclsq_x(__a, __p) +#define vclzq_x(__a, __p) __arm_vclzq_x(__a, __p) +#define 
vnegq_x(__a, __p) __arm_vnegq_x(__a, __p) +#define vmulhq_x(__a, __b, __p) __arm_vmulhq_x(__a, __b, __p) +#define vmullbq_poly_x(__a, __b, __p) __arm_vmullbq_poly_x(__a, __b, __p) +#define vmullbq_int_x(__a, __b, __p) __arm_vmullbq_int_x(__a, __b, __p) +#define vmulltq_poly_x(__a, __b, __p) __arm_vmulltq_poly_x(__a, __b, __p) +#define vmulltq_int_x(__a, __b, __p) __arm_vmulltq_int_x(__a, __b, __p) +#define vmulq_x(__a, __b, __p) __arm_vmulq_x(__a, __b, __p) +#define vsubq_x(__a, __b, __p) __arm_vsubq_x(__a, __b, __p) +#define vcaddq_rot90_x(__a, __b, __p) __arm_vcaddq_rot90_x(__a, __b, __p) +#define vcaddq_rot270_x(__a, __b, __p) __arm_vcaddq_rot270_x(__a, __b, __p) +#define vhaddq_x(__a, __b, __p) __arm_vhaddq_x(__a, __b, __p) +#define vhcaddq_rot90_x(__a, __b, __p) __arm_vhcaddq_rot90_x(__a, __b, __p) +#define vhcaddq_rot270_x(__a, __b, __p) __arm_vhcaddq_rot270_x(__a, __b, __p) +#define vhsubq_x(__a, __b, __p) __arm_vhsubq_x(__a, __b, __p) +#define vrhaddq_x(__a, __b, __p) __arm_vrhaddq_x(__a, __b, __p) +#define vrmulhq_x(__a, __b, __p) __arm_vrmulhq_x(__a, __b, __p) +#define vandq_x(__a, __b, __p) __arm_vandq_x(__a, __b, __p) +#define vbicq_x(__a, __b, __p) __arm_vbicq_x(__a, __b, __p) +#define vbrsrq_x(__a, __b, __p) __arm_vbrsrq_x(__a, __b, __p) +#define veorq_x(__a, __b, __p) __arm_veorq_x(__a, __b, __p) +#define vmovlbq_x(__a, __p) __arm_vmovlbq_x(__a, __p) +#define vmovltq_x(__a, __p) __arm_vmovltq_x(__a, __p) +#define vmvnq_x(__a, __p) __arm_vmvnq_x(__a, __p) +#define vornq_x(__a, __b, __p) __arm_vornq_x(__a, __b, __p) +#define vorrq_x(__a, __b, __p) __arm_vorrq_x(__a, __b, __p) +#define vrev16q_x(__a, __p) __arm_vrev16q_x(__a, __p) +#define vrev32q_x(__a, __p) __arm_vrev32q_x(__a, __p) +#define vrev64q_x(__a, __p) __arm_vrev64q_x(__a, __p) +#define vrshlq_x(__a, __b, __p) __arm_vrshlq_x(__a, __b, __p) +#define vshllbq_x(__a, __imm, __p) __arm_vshllbq_x(__a, __imm, __p) +#define vshlltq_x(__a, __imm, __p) __arm_vshlltq_x(__a, __imm, __p) +#define vshlq_x(__a, __b, __p) __arm_vshlq_x(__a, __b, __p) +#define vshlq_x_n(__a, __imm, __p) __arm_vshlq_x_n(__a, __imm, __p) +#define vrshrq_x(__a, __imm, __p) __arm_vrshrq_x(__a, __imm, __p) +#define vshrq_x(__a, __imm, __p) __arm_vshrq_x(__a, __imm, __p) +#define vadciq(__a, __b, __carry_out) __arm_vadciq(__a, __b, __carry_out) +#define vadciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m(__inactive, __a, __b, __carry_out, __p) +#define vadcq(__a, __b, __carry) __arm_vadcq(__a, __b, __carry) +#define vadcq_m(__inactive, __a, __b, __carry, __p) __arm_vadcq_m(__inactive, __a, __b, __carry, __p) +#define vsbciq(__a, __b, __carry_out) __arm_vsbciq(__a, __b, __carry_out) +#define vsbciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m(__inactive, __a, __b, __carry_out, __p) +#define vsbcq(__a, __b, __carry) __arm_vsbcq(__a, __b, __carry) +#define vsbcq_m(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m(__inactive, __a, __b, __carry, __p) +#define vst1q_p(__addr, __value, __p) __arm_vst1q_p(__addr, __value, __p) +#define vst2q(__addr, __value) __arm_vst2q(__addr, __value) +#define vld1q_z(__base, __p) __arm_vld1q_z(__base, __p) +#define vld2q(__addr) __arm_vld2q(__addr) +#define vld4q(__addr) __arm_vld4q(__addr) +#define vsetq_lane(__a, __b, __idx) __arm_vsetq_lane(__a, __b, __idx) +#define vgetq_lane(__a, __idx) __arm_vgetq_lane(__a, __idx) +#define vshlcq_m(__a, __b, __imm, __p) __arm_vshlcq_m(__a, __b, __imm, __p) +#define vrndxq(__a) __arm_vrndxq(__a) +#define vrndq(__a) __arm_vrndq(__a) +#define vrndpq(__a) 
__arm_vrndpq(__a) +#define vrndnq(__a) __arm_vrndnq(__a) +#define vrndmq(__a) __arm_vrndmq(__a) +#define vrndaq(__a) __arm_vrndaq(__a) +#define vcvttq_f32(__a) __arm_vcvttq_f32(__a) +#define vcvtbq_f32(__a) __arm_vcvtbq_f32(__a) +#define vcvtq(__a) __arm_vcvtq(__a) +#define vcvtq_n(__a, __imm6) __arm_vcvtq_n(__a, __imm6) +#define vminnmvq(__a, __b) __arm_vminnmvq(__a, __b) +#define vminnmq(__a, __b) __arm_vminnmq(__a, __b) +#define vminnmavq(__a, __b) __arm_vminnmavq(__a, __b) +#define vminnmaq(__a, __b) __arm_vminnmaq(__a, __b) +#define vmaxnmvq(__a, __b) __arm_vmaxnmvq(__a, __b) +#define vmaxnmq(__a, __b) __arm_vmaxnmq(__a, __b) +#define vmaxnmavq(__a, __b) __arm_vmaxnmavq(__a, __b) +#define vmaxnmaq(__a, __b) __arm_vmaxnmaq(__a, __b) +#define vcmulq_rot90(__a, __b) __arm_vcmulq_rot90(__a, __b) +#define vcmulq_rot270(__a, __b) __arm_vcmulq_rot270(__a, __b) +#define vcmulq_rot180(__a, __b) __arm_vcmulq_rot180(__a, __b) +#define vcmulq(__a, __b) __arm_vcmulq(__a, __b) +#define vcvtaq_m(__inactive, __a, __p) __arm_vcvtaq_m(__inactive, __a, __p) +#define vcvtq_m(__inactive, __a, __p) __arm_vcvtq_m(__inactive, __a, __p) +#define vcvtbq_m(__a, __b, __p) __arm_vcvtbq_m(__a, __b, __p) +#define vcvttq_m(__a, __b, __p) __arm_vcvttq_m(__a, __b, __p) +#define vcmlaq(__a, __b, __c) __arm_vcmlaq(__a, __b, __c) +#define vcmlaq_rot180(__a, __b, __c) __arm_vcmlaq_rot180(__a, __b, __c) +#define vcmlaq_rot270(__a, __b, __c) __arm_vcmlaq_rot270(__a, __b, __c) +#define vcmlaq_rot90(__a, __b, __c) __arm_vcmlaq_rot90(__a, __b, __c) +#define vfmaq(__a, __b, __c) __arm_vfmaq(__a, __b, __c) +#define vfmasq(__a, __b, __c) __arm_vfmasq(__a, __b, __c) +#define vfmsq(__a, __b, __c) __arm_vfmsq(__a, __b, __c) +#define vcvtmq_m(__inactive, __a, __p) __arm_vcvtmq_m(__inactive, __a, __p) +#define vcvtnq_m(__inactive, __a, __p) __arm_vcvtnq_m(__inactive, __a, __p) +#define vcvtpq_m(__inactive, __a, __p) __arm_vcvtpq_m(__inactive, __a, __p) +#define vmaxnmaq_m(__a, __b, __p) __arm_vmaxnmaq_m(__a, __b, __p) +#define vmaxnmavq_p(__a, __b, __p) __arm_vmaxnmavq_p(__a, __b, __p) +#define vmaxnmvq_p(__a, __b, __p) __arm_vmaxnmvq_p(__a, __b, __p) +#define vminnmaq_m(__a, __b, __p) __arm_vminnmaq_m(__a, __b, __p) +#define vminnmavq_p(__a, __b, __p) __arm_vminnmavq_p(__a, __b, __p) +#define vminnmvq_p(__a, __b, __p) __arm_vminnmvq_p(__a, __b, __p) +#define vrndaq_m(__inactive, __a, __p) __arm_vrndaq_m(__inactive, __a, __p) +#define vrndmq_m(__inactive, __a, __p) __arm_vrndmq_m(__inactive, __a, __p) +#define vrndnq_m(__inactive, __a, __p) __arm_vrndnq_m(__inactive, __a, __p) +#define vrndpq_m(__inactive, __a, __p) __arm_vrndpq_m(__inactive, __a, __p) +#define vrndq_m(__inactive, __a, __p) __arm_vrndq_m(__inactive, __a, __p) +#define vrndxq_m(__inactive, __a, __p) __arm_vrndxq_m(__inactive, __a, __p) +#define vcvtq_m_n(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n(__inactive, __a, __imm6, __p) +#define vcmlaq_m(__a, __b, __c, __p) __arm_vcmlaq_m(__a, __b, __c, __p) +#define vcmlaq_rot180_m(__a, __b, __c, __p) __arm_vcmlaq_rot180_m(__a, __b, __c, __p) +#define vcmlaq_rot270_m(__a, __b, __c, __p) __arm_vcmlaq_rot270_m(__a, __b, __c, __p) +#define vcmlaq_rot90_m(__a, __b, __c, __p) __arm_vcmlaq_rot90_m(__a, __b, __c, __p) +#define vcmulq_m(__inactive, __a, __b, __p) __arm_vcmulq_m(__inactive, __a, __b, __p) +#define vcmulq_rot180_m(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m(__inactive, __a, __b, __p) +#define vcmulq_rot270_m(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m(__inactive, __a, __b, __p) +#define 
vcmulq_rot90_m(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m(__inactive, __a, __b, __p) +#define vfmaq_m(__a, __b, __c, __p) __arm_vfmaq_m(__a, __b, __c, __p) +#define vfmasq_m(__a, __b, __c, __p) __arm_vfmasq_m(__a, __b, __c, __p) +#define vfmsq_m(__a, __b, __c, __p) __arm_vfmsq_m(__a, __b, __c, __p) +#define vmaxnmq_m(__inactive, __a, __b, __p) __arm_vmaxnmq_m(__inactive, __a, __b, __p) +#define vminnmq_m(__inactive, __a, __b, __p) __arm_vminnmq_m(__inactive, __a, __b, __p) +#define vreinterpretq_f16(__a) __arm_vreinterpretq_f16(__a) +#define vreinterpretq_f32(__a) __arm_vreinterpretq_f32(__a) +#define vminnmq_x(__a, __b, __p) __arm_vminnmq_x(__a, __b, __p) +#define vmaxnmq_x(__a, __b, __p) __arm_vmaxnmq_x(__a, __b, __p) +#define vcmulq_x(__a, __b, __p) __arm_vcmulq_x(__a, __b, __p) +#define vcmulq_rot90_x(__a, __b, __p) __arm_vcmulq_rot90_x(__a, __b, __p) +#define vcmulq_rot180_x(__a, __b, __p) __arm_vcmulq_rot180_x(__a, __b, __p) +#define vcmulq_rot270_x(__a, __b, __p) __arm_vcmulq_rot270_x(__a, __b, __p) +#define vcvtq_x(__a, __p) __arm_vcvtq_x(__a, __p) +#define vcvtq_x_n(__a, __imm6, __p) __arm_vcvtq_x_n(__a, __imm6, __p) +#define vrndq_x(__a, __p) __arm_vrndq_x(__a, __p) +#define vrndnq_x(__a, __p) __arm_vrndnq_x(__a, __p) +#define vrndmq_x(__a, __p) __arm_vrndmq_x(__a, __p) +#define vrndpq_x(__a, __p) __arm_vrndpq_x(__a, __p) +#define vrndaq_x(__a, __p) __arm_vrndaq_x(__a, __p) +#define vrndxq_x(__a, __p) __arm_vrndxq_x(__a, __p) + + #define vst4q_s8( __addr, __value) __arm_vst4q_s8( __addr, __value) #define vst4q_s16( __addr, __value) __arm_vst4q_s16( __addr, __value) #define vst4q_s32( __addr, __value) __arm_vst4q_s32( __addr, __value) @@ -20234,236 +20723,16508 @@ __arm_vgetq_lane_f32 (float32x4_t __a, const int __idx) } #endif -enum { - __ARM_mve_type_fp_n = 1, - __ARM_mve_type_int_n, - __ARM_mve_type_float16_t_ptr, - __ARM_mve_type_float16x8_t, - __ARM_mve_type_float16x8x2_t, - __ARM_mve_type_float16x8x4_t, - __ARM_mve_type_float32_t_ptr, - __ARM_mve_type_float32x4_t, - __ARM_mve_type_float32x4x2_t, - __ARM_mve_type_float32x4x4_t, - __ARM_mve_type_int16_t_ptr, - __ARM_mve_type_int16x8_t, - __ARM_mve_type_int16x8x2_t, - __ARM_mve_type_int16x8x4_t, - __ARM_mve_type_int32_t_ptr, - __ARM_mve_type_int32x4_t, - __ARM_mve_type_int32x4x2_t, - __ARM_mve_type_int32x4x4_t, - __ARM_mve_type_int64_t_ptr, - __ARM_mve_type_int64x2_t, - __ARM_mve_type_int8_t_ptr, - __ARM_mve_type_int8x16_t, - __ARM_mve_type_int8x16x2_t, - __ARM_mve_type_int8x16x4_t, - __ARM_mve_type_uint16_t_ptr, - __ARM_mve_type_uint16x8_t, - __ARM_mve_type_uint16x8x2_t, - __ARM_mve_type_uint16x8x4_t, - __ARM_mve_type_uint32_t_ptr, - __ARM_mve_type_uint32x4_t, - __ARM_mve_type_uint32x4x2_t, - __ARM_mve_type_uint32x4x4_t, - __ARM_mve_type_uint64_t_ptr, - __ARM_mve_type_uint64x2_t, - __ARM_mve_type_uint8_t_ptr, - __ARM_mve_type_uint8x16_t, - __ARM_mve_type_uint8x16x2_t, - __ARM_mve_type_uint8x16x4_t, - __ARM_mve_unsupported_type -}; +#ifdef __cplusplus +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst4q (int8_t * __addr, int8x16x4_t __value) +{ + __arm_vst4q_s8 (__addr, __value); +} -#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. 
*/ -#define __ARM_mve_typeid(x) _Generic(x, \ - float16_t: __ARM_mve_type_fp_n, \ - float16_t *: __ARM_mve_type_float16_t_ptr, \ - float16_t const *: __ARM_mve_type_float16_t_ptr, \ - float16x8_t: __ARM_mve_type_float16x8_t, \ - float16x8x2_t: __ARM_mve_type_float16x8x2_t, \ - float16x8x4_t: __ARM_mve_type_float16x8x4_t, \ - float32_t: __ARM_mve_type_fp_n, \ - float32_t *: __ARM_mve_type_float32_t_ptr, \ - float32_t const *: __ARM_mve_type_float32_t_ptr, \ - float32x4_t: __ARM_mve_type_float32x4_t, \ - float32x4x2_t: __ARM_mve_type_float32x4x2_t, \ - float32x4x4_t: __ARM_mve_type_float32x4x4_t, \ - int16_t: __ARM_mve_type_int_n, \ - int16_t *: __ARM_mve_type_int16_t_ptr, \ - int16_t const *: __ARM_mve_type_int16_t_ptr, \ - int16x8_t: __ARM_mve_type_int16x8_t, \ - int16x8x2_t: __ARM_mve_type_int16x8x2_t, \ - int16x8x4_t: __ARM_mve_type_int16x8x4_t, \ - int32_t: __ARM_mve_type_int_n, \ - int32_t *: __ARM_mve_type_int32_t_ptr, \ - int32_t const *: __ARM_mve_type_int32_t_ptr, \ - int32x4_t: __ARM_mve_type_int32x4_t, \ - int32x4x2_t: __ARM_mve_type_int32x4x2_t, \ - int32x4x4_t: __ARM_mve_type_int32x4x4_t, \ - int64_t: __ARM_mve_type_int_n, \ - int64_t *: __ARM_mve_type_int64_t_ptr, \ - int64_t const *: __ARM_mve_type_int64_t_ptr, \ - int64x2_t: __ARM_mve_type_int64x2_t, \ - int8_t: __ARM_mve_type_int_n, \ - int8_t *: __ARM_mve_type_int8_t_ptr, \ - int8_t const *: __ARM_mve_type_int8_t_ptr, \ - int8x16_t: __ARM_mve_type_int8x16_t, \ - int8x16x2_t: __ARM_mve_type_int8x16x2_t, \ - int8x16x4_t: __ARM_mve_type_int8x16x4_t, \ - uint16_t: __ARM_mve_type_int_n, \ - uint16_t *: __ARM_mve_type_uint16_t_ptr, \ - uint16_t const *: __ARM_mve_type_uint16_t_ptr, \ - uint16x8_t: __ARM_mve_type_uint16x8_t, \ - uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \ - uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \ - uint32_t: __ARM_mve_type_int_n, \ - uint32_t *: __ARM_mve_type_uint32_t_ptr, \ - uint32_t const *: __ARM_mve_type_uint32_t_ptr, \ - uint32x4_t: __ARM_mve_type_uint32x4_t, \ - uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \ - uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \ - uint64_t: __ARM_mve_type_int_n, \ - uint64_t *: __ARM_mve_type_uint64_t_ptr, \ - uint64_t const *: __ARM_mve_type_uint64_t_ptr, \ - uint64x2_t: __ARM_mve_type_uint64x2_t, \ - uint8_t: __ARM_mve_type_int_n, \ - uint8_t *: __ARM_mve_type_uint8_t_ptr, \ - uint8_t const *: __ARM_mve_type_uint8_t_ptr, \ - uint8x16_t: __ARM_mve_type_uint8x16_t, \ - uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \ - uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \ - default: _Generic(x, \ - signed char: __ARM_mve_type_int_n, \ - short: __ARM_mve_type_int_n, \ - int: __ARM_mve_type_int_n, \ - long: __ARM_mve_type_int_n, \ - long long: __ARM_mve_type_int_n, \ - unsigned char: __ARM_mve_type_int_n, \ - unsigned short: __ARM_mve_type_int_n, \ - unsigned int: __ARM_mve_type_int_n, \ - unsigned long: __ARM_mve_type_int_n, \ - unsigned long long: __ARM_mve_type_int_n, \ - default: __ARM_mve_unsupported_type)) -#else -#define __ARM_mve_typeid(x) _Generic(x, \ - int16_t: __ARM_mve_type_int_n, \ - int16_t *: __ARM_mve_type_int16_t_ptr, \ - int16_t const *: __ARM_mve_type_int16_t_ptr, \ - int16x8_t: __ARM_mve_type_int16x8_t, \ - int16x8x2_t: __ARM_mve_type_int16x8x2_t, \ - int16x8x4_t: __ARM_mve_type_int16x8x4_t, \ - int32_t: __ARM_mve_type_int_n, \ - int32_t *: __ARM_mve_type_int32_t_ptr, \ - int32_t const *: __ARM_mve_type_int32_t_ptr, \ - int32x4_t: __ARM_mve_type_int32x4_t, \ - int32x4x2_t: __ARM_mve_type_int32x4x2_t, \ - int32x4x4_t: __ARM_mve_type_int32x4x4_t, \ - int64_t: 
__ARM_mve_type_int_n, \ - int64_t *: __ARM_mve_type_int64_t_ptr, \ - int64_t const *: __ARM_mve_type_int64_t_ptr, \ - int64x2_t: __ARM_mve_type_int64x2_t, \ - int8_t: __ARM_mve_type_int_n, \ - int8_t *: __ARM_mve_type_int8_t_ptr, \ - int8_t const *: __ARM_mve_type_int8_t_ptr, \ - int8x16_t: __ARM_mve_type_int8x16_t, \ - int8x16x2_t: __ARM_mve_type_int8x16x2_t, \ - int8x16x4_t: __ARM_mve_type_int8x16x4_t, \ - uint16_t: __ARM_mve_type_int_n, \ - uint16_t *: __ARM_mve_type_uint16_t_ptr, \ - uint16_t const *: __ARM_mve_type_uint16_t_ptr, \ - uint16x8_t: __ARM_mve_type_uint16x8_t, \ - uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \ - uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \ - uint32_t: __ARM_mve_type_int_n, \ - uint32_t *: __ARM_mve_type_uint32_t_ptr, \ - uint32_t const *: __ARM_mve_type_uint32_t_ptr, \ - uint32x4_t: __ARM_mve_type_uint32x4_t, \ - uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \ - uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \ - uint64_t: __ARM_mve_type_int_n, \ - uint64_t *: __ARM_mve_type_uint64_t_ptr, \ - uint64_t const *: __ARM_mve_type_uint64_t_ptr, \ - uint64x2_t: __ARM_mve_type_uint64x2_t, \ - uint8_t: __ARM_mve_type_int_n, \ - uint8_t *: __ARM_mve_type_uint8_t_ptr, \ - uint8_t const *: __ARM_mve_type_uint8_t_ptr, \ - uint8x16_t: __ARM_mve_type_uint8x16_t, \ - uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \ - uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \ - default: _Generic(x, \ - signed char: __ARM_mve_type_int_n, \ - short: __ARM_mve_type_int_n, \ - int: __ARM_mve_type_int_n, \ - long: __ARM_mve_type_int_n, \ - long long: __ARM_mve_type_int_n, \ - unsigned char: __ARM_mve_type_int_n, \ - unsigned short: __ARM_mve_type_int_n, \ - unsigned int: __ARM_mve_type_int_n, \ - unsigned long: __ARM_mve_type_int_n, \ - unsigned long long: __ARM_mve_type_int_n, \ - default: __ARM_mve_unsupported_type)) -#endif /* MVE Floating point. */ +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst4q (int16_t * __addr, int16x8x4_t __value) +{ + __arm_vst4q_s16 (__addr, __value); +} -extern void *__ARM_undef; -#define __ARM_mve_coerce(param, type) \ - _Generic(param, type: param, default: *(type *)__ARM_undef) -#define __ARM_mve_coerce1(param, type) \ - _Generic(param, type: param, const type: param, default: *(type *)__ARM_undef) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst4q (int32_t * __addr, int32x4x4_t __value) +{ + __arm_vst4q_s32 (__addr, __value); +} -#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. 
*/ +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst4q (uint8_t * __addr, uint8x16x4_t __value) +{ + __arm_vst4q_u8 (__addr, __value); +} -#define vst4q(p0,p1) __arm_vst4q(p0,p1) -#define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \ - __typeof(p1) __p1 = (p1); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \ - int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \ - int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \ - int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \ - int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \ - int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)), \ - int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x4_t]: __arm_vst4q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x4_t)), \ - int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x4_t]: __arm_vst4q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x4_t)));}) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst4q (uint16_t * __addr, uint16x8x4_t __value) +{ + __arm_vst4q_u16 (__addr, __value); +} -#define vrndxq(p0) __arm_vrndxq(p0) -#define __arm_vrndxq(p0) ({ __typeof(p0) __p0 = (p0); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ - int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ - int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst4q (uint32_t * __addr, uint32x4x4_t __value) +{ + __arm_vst4q_u32 (__addr, __value); +} -#define vrndq(p0) __arm_vrndq(p0) -#define __arm_vrndq(p0) ({ __typeof(p0) __p0 = (p0); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ - int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ - int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_n (int8_t __a) +{ + return __arm_vdupq_n_s8 (__a); +} -#define vrndpq(p0) __arm_vrndpq(p0) -#define __arm_vrndpq(p0) ({ __typeof(p0) __p0 = (p0); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ - int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ - int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_n (int16_t __a) +{ + return __arm_vdupq_n_s16 (__a); +} -#define vrndnq(p0) __arm_vrndnq(p0) -#define __arm_vrndnq(p0) ({ 
__typeof(p0) __p0 = (p0); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ - int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ - int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_n (int32_t __a) +{ + return __arm_vdupq_n_s32 (__a); +} -#define vrndmq(p0) __arm_vrndmq(p0) -#define __arm_vrndmq(p0) ({ __typeof(p0) __p0 = (p0); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ - int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ - int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq (int8x16_t __a) +{ + return __arm_vabsq_s8 (__a); +} -#define vrndaq(p0) __arm_vrndaq(p0) -#define __arm_vrndaq(p0) ({ __typeof(p0) __p0 = (p0); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ - int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq (int16x8_t __a) +{ + return __arm_vabsq_s16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq (int32x4_t __a) +{ + return __arm_vabsq_s32 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq (int8x16_t __a) +{ + return __arm_vclsq_s8 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq (int16x8_t __a) +{ + return __arm_vclsq_s16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq (int32x4_t __a) +{ + return __arm_vclsq_s32 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq (int8x16_t __a) +{ + return __arm_vclzq_s8 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq (int16x8_t __a) +{ + return __arm_vclzq_s16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq (int32x4_t __a) +{ + return __arm_vclzq_s32 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq (int8x16_t __a) +{ + return __arm_vnegq_s8 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq (int16x8_t __a) +{ + return __arm_vnegq_s16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq (int32x4_t __a) +{ + return __arm_vnegq_s32 (__a); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddlvq (int32x4_t __a) +{ + return __arm_vaddlvq_s32 (__a); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq (int8x16_t __a) +{ + return __arm_vaddvq_s8 (__a); +} + +__extension__ extern 
__inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq (int16x8_t __a) +{ + return __arm_vaddvq_s16 (__a); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq (int32x4_t __a) +{ + return __arm_vaddvq_s32 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq (int8x16_t __a) +{ + return __arm_vmovlbq_s8 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq (int16x8_t __a) +{ + return __arm_vmovlbq_s16 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq (int8x16_t __a) +{ + return __arm_vmovltq_s8 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq (int16x8_t __a) +{ + return __arm_vmovltq_s16 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq (int8x16_t __a) +{ + return __arm_vmvnq_s8 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq (int16x8_t __a) +{ + return __arm_vmvnq_s16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq (int32x4_t __a) +{ + return __arm_vmvnq_s32 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev16q (int8x16_t __a) +{ + return __arm_vrev16q_s8 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q (int8x16_t __a) +{ + return __arm_vrev32q_s8 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q (int16x8_t __a) +{ + return __arm_vrev32q_s16 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q (int8x16_t __a) +{ + return __arm_vrev64q_s8 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q (int16x8_t __a) +{ + return __arm_vrev64q_s16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q (int32x4_t __a) +{ + return __arm_vrev64q_s32 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqabsq (int8x16_t __a) +{ + return __arm_vqabsq_s8 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqabsq (int16x8_t __a) +{ + return __arm_vqabsq_s16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqabsq (int32x4_t __a) +{ + return __arm_vqabsq_s32 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqnegq (int8x16_t __a) +{ + return __arm_vqnegq_s8 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqnegq (int16x8_t 
__a) +{ + return __arm_vqnegq_s16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqnegq (int32x4_t __a) +{ + return __arm_vqnegq_s32 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q (uint8x16_t __a) +{ + return __arm_vrev64q_u8 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q (uint16x8_t __a) +{ + return __arm_vrev64q_u16 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q (uint32x4_t __a) +{ + return __arm_vrev64q_u32 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq (uint8x16_t __a) +{ + return __arm_vmvnq_u8 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq (uint16x8_t __a) +{ + return __arm_vmvnq_u16 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq (uint32x4_t __a) +{ + return __arm_vmvnq_u32 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_n (uint8_t __a) +{ + return __arm_vdupq_n_u8 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_n (uint16_t __a) +{ + return __arm_vdupq_n_u16 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_n (uint32_t __a) +{ + return __arm_vdupq_n_u32 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq (uint8x16_t __a) +{ + return __arm_vclzq_u8 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq (uint16x8_t __a) +{ + return __arm_vclzq_u16 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq (uint32x4_t __a) +{ + return __arm_vclzq_u32 (__a); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq (uint8x16_t __a) +{ + return __arm_vaddvq_u8 (__a); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq (uint16x8_t __a) +{ + return __arm_vaddvq_u16 (__a); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq (uint32x4_t __a) +{ + return __arm_vaddvq_u32 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q (uint8x16_t __a) +{ + return __arm_vrev32q_u8 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q (uint16x8_t __a) +{ + return __arm_vrev32q_u16 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq (uint8x16_t __a) +{ + return __arm_vmovltq_u8 (__a); +} + +__extension__ extern __inline uint32x4_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq (uint16x8_t __a) +{ + return __arm_vmovltq_u16 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq (uint8x16_t __a) +{ + return __arm_vmovlbq_u8 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq (uint16x8_t __a) +{ + return __arm_vmovlbq_u16 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev16q (uint8x16_t __a) +{ + return __arm_vrev16q_u8 (__a); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddlvq (uint32x4_t __a) +{ + return __arm_vaddlvq_u32 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq (int8x16_t __a, const int __imm) +{ + return __arm_vshrq_n_s8 (__a, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq (int16x8_t __a, const int __imm) +{ + return __arm_vshrq_n_s16 (__a, __imm); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq (int32x4_t __a, const int __imm) +{ + return __arm_vshrq_n_s32 (__a, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq (uint8x16_t __a, const int __imm) +{ + return __arm_vshrq_n_u8 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq (uint16x8_t __a, const int __imm) +{ + return __arm_vshrq_n_u16 (__a, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq (uint32x4_t __a, const int __imm) +{ + return __arm_vshrq_n_u32 (__a, __imm); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddlvq_p (int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vaddlvq_p_s32 (__a, __p); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddlvq_p (uint32x4_t __a, mve_pred16_t __p) +{ + return __arm_vaddlvq_p_u32 (__a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vcmpneq_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vcmpneq_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vcmpneq_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vcmpneq_u8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vcmpneq_u16 (__a, __b); 
+} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vcmpneq_u32 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vshlq_s8 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vshlq_s16 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vshlq_s32 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq (uint8x16_t __a, int8x16_t __b) +{ + return __arm_vshlq_u8 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq (uint16x8_t __a, int16x8_t __b) +{ + return __arm_vshlq_u16 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq (uint32x4_t __a, int32x4_t __b) +{ + return __arm_vshlq_u32 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vsubq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vsubq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vrmulhq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vrhaddq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vqsubq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vqsubq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vqaddq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vqaddq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vorrq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vornq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+__arm_vmulq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vmulq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vmulq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vmulltq_int_u8 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vmullbq_int_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vmulhq_u8 (__a, __b); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vmladavq_u8 (__a, __b); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq (uint8_t __a, uint8x16_t __b) +{ + return __arm_vminvq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vminq_u8 (__a, __b); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq (uint8_t __a, uint8x16_t __b) +{ + return __arm_vmaxvq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vmaxq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vhsubq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vhsubq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vhaddq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vhaddq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_veorq_u8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vcmpneq_n_u8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vcmphiq_u8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vcmphiq_n_u8 (__a, __b); +} + 
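[Editorial illustration, not part of the patch.] With the overloads above, a C++ translation unit that includes arm_mve.h and targets MVE (e.g. -march=armv8.1-m.main+mve) can use one spelling per operation and let the argument types select the type-specific intrinsic; the unprefixed names (vsubq, vshrq, ...) map onto these via the macros near the top of the header when __ARM_MVE_PRESERVE_USER_NAMESPACE is not defined. A minimal sketch under those assumptions, with an invented helper name:

#include "arm_mve.h"

/* The same polymorphic spelling resolves to the vector/vector, vector/scalar
   and immediate-shift variants purely from the argument types.  */
uint8x16_t
example_sub_and_shift (uint8x16_t a, uint8x16_t b, uint8_t c)
{
  uint8x16_t d = __arm_vsubq (a, b);   /* resolves to __arm_vsubq_u8 */
  uint8x16_t e = __arm_vsubq (d, c);   /* resolves to __arm_vsubq_n_u8 */
  return __arm_vshrq (e, 2);           /* resolves to __arm_vshrq_n_u8 */
}
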
+__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vcmpeqq_u8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vcmpeqq_n_u8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vcmpcsq_u8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vcmpcsq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90 (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vcaddq_rot90_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270 (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vcaddq_rot270_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vbicq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vandq_u8 (__a, __b); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq_p (uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vaddvq_p_u8 (__a, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq (uint32_t __a, uint8x16_t __b) +{ + return __arm_vaddvaq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (uint8x16_t __a, uint8_t __b) +{ + return __arm_vaddq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vabdq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_r (uint8x16_t __a, int32_t __b) +{ + return __arm_vshlq_r_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (uint8x16_t __a, int8x16_t __b) +{ + return __arm_vrshlq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (uint8x16_t __a, int32_t __b) +{ + return __arm_vrshlq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq (uint8x16_t __a, int8x16_t __b) +{ + return __arm_vqshlq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_r (uint8x16_t __a, int32_t __b) +{ + return __arm_vqshlq_r_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (uint8x16_t __a, int8x16_t __b) +{ + return __arm_vqrshlq_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (uint8x16_t __a, int32_t __b) +{ + return __arm_vqrshlq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminavq (uint8_t __a, int8x16_t __b) +{ + return __arm_vminavq_s8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminaq (uint8x16_t __a, int8x16_t __b) +{ + return __arm_vminaq_s8 (__a, __b); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxavq (uint8_t __a, int8x16_t __b) +{ + return __arm_vmaxavq_s8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxaq (uint8x16_t __a, int8x16_t __b) +{ + return __arm_vmaxaq_s8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq (uint8x16_t __a, int32_t __b) +{ + return __arm_vbrsrq_n_u8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_n (uint8x16_t __a, const int __imm) +{ + return __arm_vshlq_n_u8 (__a, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq (uint8x16_t __a, const int __imm) +{ + return __arm_vrshrq_n_u8 (__a, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_n (uint8x16_t __a, const int __imm) +{ + return __arm_vqshlq_n_u8 (__a, __imm); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (int8x16_t __a, int8_t __b) +{ + return __arm_vcmpneq_n_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vcmpltq_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq (int8x16_t __a, int8_t __b) +{ + return __arm_vcmpltq_n_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vcmpleq_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq (int8x16_t __a, int8_t __b) +{ + return __arm_vcmpleq_n_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vcmpgtq_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq (int8x16_t __a, int8_t __b) +{ + return __arm_vcmpgtq_n_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq (int8x16_t __a, 
int8x16_t __b) +{ + return __arm_vcmpgeq_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq (int8x16_t __a, int8_t __b) +{ + return __arm_vcmpgeq_n_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vcmpeqq_s8 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (int8x16_t __a, int8_t __b) +{ + return __arm_vcmpeqq_n_s8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshluq (int8x16_t __a, const int __imm) +{ + return __arm_vqshluq_n_s8 (__a, __imm); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq_p (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vaddvq_p_s8 (__a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vsubq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (int8x16_t __a, int8_t __b) +{ + return __arm_vsubq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_r (int8x16_t __a, int32_t __b) +{ + return __arm_vshlq_r_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vrshlq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (int8x16_t __a, int32_t __b) +{ + return __arm_vrshlq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vrmulhq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vrhaddq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vqsubq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (int8x16_t __a, int8_t __b) +{ + return __arm_vqsubq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vqshlq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_r (int8x16_t __a, int32_t __b) +{ + return __arm_vqshlq_r_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vqrshlq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (int8x16_t __a, int32_t __b) +{ + return __arm_vqrshlq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vqrdmulhq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq (int8x16_t __a, int8_t __b) +{ + return __arm_vqrdmulhq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vqdmulhq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq (int8x16_t __a, int8_t __b) +{ + return __arm_vqdmulhq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vqaddq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (int8x16_t __a, int8_t __b) +{ + return __arm_vqaddq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vorrq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vornq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vmulq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (int8x16_t __a, int8_t __b) +{ + return __arm_vmulq_n_s8 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int (int8x16_t __a, int8x16_t __b) +{ + return __arm_vmulltq_int_s8 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int (int8x16_t __a, int8x16_t __b) +{ + return __arm_vmullbq_int_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vmulhq_s8 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavxq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vmlsdavxq_s8 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vmlsdavq_s8 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavxq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vmladavxq_s8 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq (int8x16_t __a, int8x16_t __b) +{ + return 
__arm_vmladavq_s8 (__a, __b); +} + +__extension__ extern __inline int8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq (int8_t __a, int8x16_t __b) +{ + return __arm_vminvq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vminq_s8 (__a, __b); +} + +__extension__ extern __inline int8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq (int8_t __a, int8x16_t __b) +{ + return __arm_vmaxvq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vmaxq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vhsubq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (int8x16_t __a, int8_t __b) +{ + return __arm_vhsubq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot90 (int8x16_t __a, int8x16_t __b) +{ + return __arm_vhcaddq_rot90_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot270 (int8x16_t __a, int8x16_t __b) +{ + return __arm_vhcaddq_rot270_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vhaddq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (int8x16_t __a, int8_t __b) +{ + return __arm_vhaddq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq (int8x16_t __a, int8x16_t __b) +{ + return __arm_veorq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90 (int8x16_t __a, int8x16_t __b) +{ + return __arm_vcaddq_rot90_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270 (int8x16_t __a, int8x16_t __b) +{ + return __arm_vcaddq_rot270_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq (int8x16_t __a, int32_t __b) +{ + return __arm_vbrsrq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vbicq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vandq_s8 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq (int32_t __a, int8x16_t __b) +{ + return __arm_vaddvaq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vaddq (int8x16_t __a, int8_t __b) +{ + return __arm_vaddq_n_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vabdq_s8 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_n (int8x16_t __a, const int __imm) +{ + return __arm_vshlq_n_s8 (__a, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq (int8x16_t __a, const int __imm) +{ + return __arm_vrshrq_n_s8 (__a, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_n (int8x16_t __a, const int __imm) +{ + return __arm_vqshlq_n_s8 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vsubq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vsubq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vrmulhq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vrhaddq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vqsubq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vqsubq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vqaddq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vqaddq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vorrq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vornq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vmulq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vmulq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int (uint16x8_t __a, uint16x8_t __b) +{ + return 
__arm_vmulltq_int_u16 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vmullbq_int_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vmulhq_u16 (__a, __b); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vmladavq_u16 (__a, __b); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq (uint16_t __a, uint16x8_t __b) +{ + return __arm_vminvq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vminq_u16 (__a, __b); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq (uint16_t __a, uint16x8_t __b) +{ + return __arm_vmaxvq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vmaxq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vhsubq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vhsubq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vhaddq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vhaddq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_veorq_u16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vcmpneq_n_u16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vcmphiq_u16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vcmphiq_n_u16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vcmpeqq_u16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vcmpeqq_n_u16 (__a, __b); +} + +__extension__ extern 
__inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vcmpcsq_u16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vcmpcsq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90 (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vcaddq_rot90_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270 (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vcaddq_rot270_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vbicq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vandq_u16 (__a, __b); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq_p (uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vaddvq_p_u16 (__a, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq (uint32_t __a, uint16x8_t __b) +{ + return __arm_vaddvaq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (uint16x8_t __a, uint16_t __b) +{ + return __arm_vaddq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vabdq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_r (uint16x8_t __a, int32_t __b) +{ + return __arm_vshlq_r_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (uint16x8_t __a, int16x8_t __b) +{ + return __arm_vrshlq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (uint16x8_t __a, int32_t __b) +{ + return __arm_vrshlq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq (uint16x8_t __a, int16x8_t __b) +{ + return __arm_vqshlq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_r (uint16x8_t __a, int32_t __b) +{ + return __arm_vqshlq_r_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (uint16x8_t __a, int16x8_t __b) +{ + return __arm_vqrshlq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (uint16x8_t __a, int32_t __b) +{ + return __arm_vqrshlq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vminavq (uint16_t __a, int16x8_t __b) +{ + return __arm_vminavq_s16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminaq (uint16x8_t __a, int16x8_t __b) +{ + return __arm_vminaq_s16 (__a, __b); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxavq (uint16_t __a, int16x8_t __b) +{ + return __arm_vmaxavq_s16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxaq (uint16x8_t __a, int16x8_t __b) +{ + return __arm_vmaxaq_s16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq (uint16x8_t __a, int32_t __b) +{ + return __arm_vbrsrq_n_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_n (uint16x8_t __a, const int __imm) +{ + return __arm_vshlq_n_u16 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq (uint16x8_t __a, const int __imm) +{ + return __arm_vrshrq_n_u16 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_n (uint16x8_t __a, const int __imm) +{ + return __arm_vqshlq_n_u16 (__a, __imm); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (int16x8_t __a, int16_t __b) +{ + return __arm_vcmpneq_n_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vcmpltq_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq (int16x8_t __a, int16_t __b) +{ + return __arm_vcmpltq_n_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vcmpleq_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq (int16x8_t __a, int16_t __b) +{ + return __arm_vcmpleq_n_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vcmpgtq_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq (int16x8_t __a, int16_t __b) +{ + return __arm_vcmpgtq_n_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vcmpgeq_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq (int16x8_t __a, int16_t __b) +{ + return __arm_vcmpgeq_n_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq 
(int16x8_t __a, int16x8_t __b) +{ + return __arm_vcmpeqq_s16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (int16x8_t __a, int16_t __b) +{ + return __arm_vcmpeqq_n_s16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshluq (int16x8_t __a, const int __imm) +{ + return __arm_vqshluq_n_s16 (__a, __imm); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq_p (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vaddvq_p_s16 (__a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vsubq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (int16x8_t __a, int16_t __b) +{ + return __arm_vsubq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_r (int16x8_t __a, int32_t __b) +{ + return __arm_vshlq_r_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vrshlq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (int16x8_t __a, int32_t __b) +{ + return __arm_vrshlq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vrmulhq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vrhaddq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vqsubq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (int16x8_t __a, int16_t __b) +{ + return __arm_vqsubq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vqshlq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_r (int16x8_t __a, int32_t __b) +{ + return __arm_vqshlq_r_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vqrshlq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (int16x8_t __a, int32_t __b) +{ + return __arm_vqrshlq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vqrdmulhq_s16 (__a, __b); +} + +__extension__ extern 
__inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq (int16x8_t __a, int16_t __b) +{ + return __arm_vqrdmulhq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vqdmulhq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq (int16x8_t __a, int16_t __b) +{ + return __arm_vqdmulhq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vqaddq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (int16x8_t __a, int16_t __b) +{ + return __arm_vqaddq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vorrq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vornq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmulq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (int16x8_t __a, int16_t __b) +{ + return __arm_vmulq_n_s16 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmulltq_int_s16 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmullbq_int_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmulhq_s16 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavxq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmlsdavxq_s16 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmlsdavq_s16 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavxq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmladavxq_s16 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmladavq_s16 (__a, __b); +} + +__extension__ extern __inline int16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq (int16_t __a, int16x8_t __b) +{ + return __arm_vminvq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq 
(int16x8_t __a, int16x8_t __b) +{ + return __arm_vminq_s16 (__a, __b); +} + +__extension__ extern __inline int16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq (int16_t __a, int16x8_t __b) +{ + return __arm_vmaxvq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmaxq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vhsubq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (int16x8_t __a, int16_t __b) +{ + return __arm_vhsubq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot90 (int16x8_t __a, int16x8_t __b) +{ + return __arm_vhcaddq_rot90_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot270 (int16x8_t __a, int16x8_t __b) +{ + return __arm_vhcaddq_rot270_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vhaddq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (int16x8_t __a, int16_t __b) +{ + return __arm_vhaddq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq (int16x8_t __a, int16x8_t __b) +{ + return __arm_veorq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90 (int16x8_t __a, int16x8_t __b) +{ + return __arm_vcaddq_rot90_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270 (int16x8_t __a, int16x8_t __b) +{ + return __arm_vcaddq_rot270_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq (int16x8_t __a, int32_t __b) +{ + return __arm_vbrsrq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vbicq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vandq_s16 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq (int32_t __a, int16x8_t __b) +{ + return __arm_vaddvaq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (int16x8_t __a, int16_t __b) +{ + return __arm_vaddq_n_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vabdq_s16 (__a, __b); +} + +__extension__ extern 
__inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_n (int16x8_t __a, const int __imm) +{ + return __arm_vshlq_n_s16 (__a, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq (int16x8_t __a, const int __imm) +{ + return __arm_vrshrq_n_s16 (__a, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_n (int16x8_t __a, const int __imm) +{ + return __arm_vqshlq_n_s16 (__a, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vsubq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (uint32x4_t __a, uint32_t __b) +{ + return __arm_vsubq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vrmulhq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vrhaddq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vqsubq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (uint32x4_t __a, uint32_t __b) +{ + return __arm_vqsubq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vqaddq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (uint32x4_t __a, uint32_t __b) +{ + return __arm_vqaddq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vorrq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vornq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vmulq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (uint32x4_t __a, uint32_t __b) +{ + return __arm_vmulq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vmulltq_int_u32 (__a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vmullbq_int_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vmulhq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vmulhq_u32 (__a, __b); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vmladavq_u32 (__a, __b); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq (uint32_t __a, uint32x4_t __b) +{ + return __arm_vminvq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vminq_u32 (__a, __b); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq (uint32_t __a, uint32x4_t __b) +{ + return __arm_vmaxvq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vmaxq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vhsubq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (uint32x4_t __a, uint32_t __b) +{ + return __arm_vhsubq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vhaddq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (uint32x4_t __a, uint32_t __b) +{ + return __arm_vhaddq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_veorq_u32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (uint32x4_t __a, uint32_t __b) +{ + return __arm_vcmpneq_n_u32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vcmphiq_u32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq (uint32x4_t __a, uint32_t __b) +{ + return __arm_vcmphiq_n_u32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vcmpeqq_u32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (uint32x4_t __a, uint32_t __b) +{ + return __arm_vcmpeqq_n_u32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vcmpcsq_u32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq (uint32x4_t __a, 
uint32_t __b) +{ + return __arm_vcmpcsq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90 (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vcaddq_rot90_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270 (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vcaddq_rot270_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vbicq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vandq_u32 (__a, __b); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq_p (uint32x4_t __a, mve_pred16_t __p) +{ + return __arm_vaddvq_p_u32 (__a, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq (uint32_t __a, uint32x4_t __b) +{ + return __arm_vaddvaq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (uint32x4_t __a, uint32_t __b) +{ + return __arm_vaddq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vabdq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_r (uint32x4_t __a, int32_t __b) +{ + return __arm_vshlq_r_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (uint32x4_t __a, int32x4_t __b) +{ + return __arm_vrshlq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (uint32x4_t __a, int32_t __b) +{ + return __arm_vrshlq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq (uint32x4_t __a, int32x4_t __b) +{ + return __arm_vqshlq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_r (uint32x4_t __a, int32_t __b) +{ + return __arm_vqshlq_r_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (uint32x4_t __a, int32x4_t __b) +{ + return __arm_vqrshlq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (uint32x4_t __a, int32_t __b) +{ + return __arm_vqrshlq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminavq (uint32_t __a, int32x4_t __b) +{ + return __arm_vminavq_s32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminaq (uint32x4_t __a, int32x4_t __b) +{ + return __arm_vminaq_s32 (__a, __b); +} + 
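The overloads above are what give the unprefixed names their polymorphism when the header is compiled as C++: the same spelling, for example vqaddq or vsubq, resolves to the vector-vector built-in or to the vector-scalar '_n' built-in purely by ordinary overload resolution on the second argument. A minimal usage sketch, assuming an MVE-enabled C++ compile (for instance -march=armv8.1-m.main+mve -mfloat-abi=hard); the function and variable names are illustrative only, not part of the patch:

  #include <arm_mve.h>

  uint32x4_t saturating_scale (uint32x4_t v, uint32_t bias)
  {
    /* vector + vector: resolves to __arm_vqaddq_u32 */
    uint32x4_t doubled = vqaddq (v, v);
    /* vector + scalar: resolves to __arm_vqaddq_n_u32 */
    return vqaddq (doubled, bias);
  }
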
+__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxavq (uint32_t __a, int32x4_t __b) +{ + return __arm_vmaxavq_s32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxaq (uint32x4_t __a, int32x4_t __b) +{ + return __arm_vmaxaq_s32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq (uint32x4_t __a, int32_t __b) +{ + return __arm_vbrsrq_n_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_n (uint32x4_t __a, const int __imm) +{ + return __arm_vshlq_n_u32 (__a, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq (uint32x4_t __a, const int __imm) +{ + return __arm_vrshrq_n_u32 (__a, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_n (uint32x4_t __a, const int __imm) +{ + return __arm_vqshlq_n_u32 (__a, __imm); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (int32x4_t __a, int32_t __b) +{ + return __arm_vcmpneq_n_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vcmpltq_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq (int32x4_t __a, int32_t __b) +{ + return __arm_vcmpltq_n_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vcmpleq_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq (int32x4_t __a, int32_t __b) +{ + return __arm_vcmpleq_n_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vcmpgtq_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq (int32x4_t __a, int32_t __b) +{ + return __arm_vcmpgtq_n_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vcmpgeq_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq (int32x4_t __a, int32_t __b) +{ + return __arm_vcmpgeq_n_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vcmpeqq_s32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (int32x4_t __a, int32_t __b) +{ + return __arm_vcmpeqq_n_s32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshluq (int32x4_t __a, const int __imm) +{ + return __arm_vqshluq_n_s32 (__a, __imm); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvq_p (int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vaddvq_p_s32 (__a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vsubq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (int32x4_t __a, int32_t __b) +{ + return __arm_vsubq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_r (int32x4_t __a, int32_t __b) +{ + return __arm_vshlq_r_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vrshlq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq (int32x4_t __a, int32_t __b) +{ + return __arm_vrshlq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vrmulhq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vrhaddq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vqsubq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq (int32x4_t __a, int32_t __b) +{ + return __arm_vqsubq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vqshlq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_r (int32x4_t __a, int32_t __b) +{ + return __arm_vqshlq_r_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vqrshlq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq (int32x4_t __a, int32_t __b) +{ + return __arm_vqrshlq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vqrdmulhq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq (int32x4_t __a, int32_t __b) +{ + return __arm_vqrdmulhq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq (int32x4_t __a, 
int32x4_t __b) +{ + return __arm_vqdmulhq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq (int32x4_t __a, int32_t __b) +{ + return __arm_vqdmulhq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vqaddq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq (int32x4_t __a, int32_t __b) +{ + return __arm_vqaddq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vorrq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vornq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmulq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (int32x4_t __a, int32_t __b) +{ + return __arm_vmulq_n_s32 (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmulltq_int_s32 (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmullbq_int_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmulhq_s32 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavxq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmlsdavxq_s32 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmlsdavq_s32 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavxq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmladavxq_s32 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmladavq_s32 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq (int32_t __a, int32x4_t __b) +{ + return __arm_vminvq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vminq_s32 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq (int32_t __a, int32x4_t __b) +{ + return __arm_vmaxvq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmaxq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vhsubq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq (int32x4_t __a, int32_t __b) +{ + return __arm_vhsubq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot90 (int32x4_t __a, int32x4_t __b) +{ + return __arm_vhcaddq_rot90_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot270 (int32x4_t __a, int32x4_t __b) +{ + return __arm_vhcaddq_rot270_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vhaddq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq (int32x4_t __a, int32_t __b) +{ + return __arm_vhaddq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq (int32x4_t __a, int32x4_t __b) +{ + return __arm_veorq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90 (int32x4_t __a, int32x4_t __b) +{ + return __arm_vcaddq_rot90_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270 (int32x4_t __a, int32x4_t __b) +{ + return __arm_vcaddq_rot270_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq (int32x4_t __a, int32_t __b) +{ + return __arm_vbrsrq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vbicq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vandq_s32 (__a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq (int32_t __a, int32x4_t __b) +{ + return __arm_vaddvaq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (int32x4_t __a, int32_t __b) +{ + return __arm_vaddq_n_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vabdq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_n (int32x4_t __a, const int __imm) +{ + return __arm_vshlq_n_s32 (__a, __imm); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq (int32x4_t __a, const 
int __imm) +{ + return __arm_vrshrq_n_s32 (__a, __imm); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_n (int32x4_t __a, const int __imm) +{ + return __arm_vqshlq_n_s32 (__a, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovntq (uint8x16_t __a, uint16x8_t __b) +{ + return __arm_vqmovntq_u16 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovnbq (uint8x16_t __a, uint16x8_t __b) +{ + return __arm_vqmovnbq_u16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_poly (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vmulltq_poly_p8 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_poly (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vmullbq_poly_p8 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovntq (uint8x16_t __a, uint16x8_t __b) +{ + return __arm_vmovntq_u16 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovnbq (uint8x16_t __a, uint16x8_t __b) +{ + return __arm_vmovnbq_u16 (__a, __b); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vmlaldavq_u16 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovuntq (uint8x16_t __a, int16x8_t __b) +{ + return __arm_vqmovuntq_s16 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovunbq (uint8x16_t __a, int16x8_t __b) +{ + return __arm_vqmovunbq_s16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlltq (uint8x16_t __a, const int __imm) +{ + return __arm_vshlltq_n_u8 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshllbq (uint8x16_t __a, const int __imm) +{ + return __arm_vshllbq_n_u8 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (uint16x8_t __a, const int __imm) +{ + return __arm_vorrq_n_u16 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (uint16x8_t __a, const int __imm) +{ + return __arm_vbicq_n_u16 (__a, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovntq (int8x16_t __a, int16x8_t __b) +{ + return __arm_vqmovntq_s16 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovnbq (int8x16_t __a, int16x8_t __b) +{ + return __arm_vqmovnbq_s16 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulltq (int16x8_t __a, int16x8_t 
__b) +{ + return __arm_vqdmulltq_s16 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulltq (int16x8_t __a, int16_t __b) +{ + return __arm_vqdmulltq_n_s16 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmullbq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vqdmullbq_s16 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmullbq (int16x8_t __a, int16_t __b) +{ + return __arm_vqdmullbq_n_s16 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovntq (int8x16_t __a, int16x8_t __b) +{ + return __arm_vmovntq_s16 (__a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovnbq (int8x16_t __a, int16x8_t __b) +{ + return __arm_vmovnbq_s16 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavxq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmlsldavxq_s16 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmlsldavq_s16 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavxq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmlaldavxq_s16 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vmlaldavq_s16 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlltq (int8x16_t __a, const int __imm) +{ + return __arm_vshlltq_n_s8 (__a, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshllbq (int8x16_t __a, const int __imm) +{ + return __arm_vshllbq_n_s8 (__a, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (int16x8_t __a, const int __imm) +{ + return __arm_vorrq_n_s16 (__a, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (int16x8_t __a, const int __imm) +{ + return __arm_vbicq_n_s16 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovntq (uint16x8_t __a, uint32x4_t __b) +{ + return __arm_vqmovntq_u32 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovnbq (uint16x8_t __a, uint32x4_t __b) +{ + return __arm_vqmovnbq_u32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_poly (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vmulltq_poly_p16 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_poly (uint16x8_t __a, uint16x8_t __b) +{ + return 
__arm_vmullbq_poly_p16 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovntq (uint16x8_t __a, uint32x4_t __b) +{ + return __arm_vmovntq_u32 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovnbq (uint16x8_t __a, uint32x4_t __b) +{ + return __arm_vmovnbq_u32 (__a, __b); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vmlaldavq_u32 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovuntq (uint16x8_t __a, int32x4_t __b) +{ + return __arm_vqmovuntq_s32 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovunbq (uint16x8_t __a, int32x4_t __b) +{ + return __arm_vqmovunbq_s32 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlltq (uint16x8_t __a, const int __imm) +{ + return __arm_vshlltq_n_u16 (__a, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshllbq (uint16x8_t __a, const int __imm) +{ + return __arm_vshllbq_n_u16 (__a, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (uint32x4_t __a, const int __imm) +{ + return __arm_vorrq_n_u32 (__a, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (uint32x4_t __a, const int __imm) +{ + return __arm_vbicq_n_u32 (__a, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovntq (int16x8_t __a, int32x4_t __b) +{ + return __arm_vqmovntq_s32 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovnbq (int16x8_t __a, int32x4_t __b) +{ + return __arm_vqmovnbq_s32 (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulltq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vqdmulltq_s32 (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulltq (int32x4_t __a, int32_t __b) +{ + return __arm_vqdmulltq_n_s32 (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmullbq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vqdmullbq_s32 (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmullbq (int32x4_t __a, int32_t __b) +{ + return __arm_vqdmullbq_n_s32 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovntq (int16x8_t __a, int32x4_t __b) +{ + return __arm_vmovntq_s32 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovnbq (int16x8_t __a, int32x4_t __b) +{ + return __arm_vmovnbq_s32 (__a, __b); 
+} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavxq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmlsldavxq_s32 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmlsldavq_s32 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavxq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmlaldavxq_s32 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vmlaldavq_s32 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlltq (int16x8_t __a, const int __imm) +{ + return __arm_vshlltq_n_s16 (__a, __imm); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshllbq (int16x8_t __a, const int __imm) +{ + return __arm_vshllbq_n_s16 (__a, __imm); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (int32x4_t __a, const int __imm) +{ + return __arm_vorrq_n_s32 (__a, __imm); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (int32x4_t __a, const int __imm) +{ + return __arm_vbicq_n_s32 (__a, __imm); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlaldavhq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vrmlaldavhq_u32 (__a, __b); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddlvaq (uint64_t __a, uint32x4_t __b) +{ + return __arm_vaddlvaq_u32 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlsldavhxq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vrmlsldavhxq_s32 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlsldavhq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vrmlsldavhq_s32 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlaldavhxq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vrmlaldavhxq_s32 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlaldavhq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vrmlaldavhq_s32 (__a, __b); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddlvaq (int64_t __a, int32x4_t __b) +{ + return __arm_vaddlvaq_s32 (__a, __b); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq (uint32_t __a, int8x16_t __b, int8x16_t __c) +{ + return __arm_vabavq_s8 (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq (uint32_t __a, int16x8_t __b, int16x8_t __c) +{ + return __arm_vabavq_s16 (__a, __b, __c); +} + 
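The widening, narrowing and accumulating forms above follow the same pattern even though their result types differ from the element type of the inputs: vmlaldavq on two int32x4_t vectors yields a 64-bit accumulator, and vmovnbq narrows an int32x4_t into the even (bottom) lanes of an int16x8_t destination. A short sketch of how these read in user code, again assuming an MVE-enabled C++ compile; names are illustrative only, not part of the patch:

  #include <arm_mve.h>

  int64_t dot_product (int32x4_t a, int32x4_t b)
  {
    /* both operands are int32x4_t, so this resolves to __arm_vmlaldavq_s32 */
    return vmlaldavq (a, b);
  }

  int16x8_t narrow_bottom (int16x8_t dst, int32x4_t v)
  {
    /* an int16x8_t destination with an int32x4_t source selects __arm_vmovnbq_s32 */
    return vmovnbq (dst, v);
  }
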
+__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq (uint32_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vabavq_s32 (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq (uint32_t __a, uint8x16_t __b, uint8x16_t __c) +{ + return __arm_vabavq_u8 (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq (uint32_t __a, uint16x8_t __b, uint16x8_t __c) +{ + return __arm_vabavq_u16 (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq (uint32_t __a, uint32x4_t __b, uint32x4_t __c) +{ + return __arm_vabavq_u32 (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m_n (int16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vbicq_m_n_s16 (__a, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m_n (int32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vbicq_m_n_s32 (__a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m_n (uint16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vbicq_m_n_u16 (__a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m_n (uint32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vbicq_m_n_u32 (__a, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrnbq (int8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vqrshrnbq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm) +{ + return __arm_vqrshrnbq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrnbq (int16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vqrshrnbq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm) +{ + return __arm_vqrshrnbq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrunbq (uint8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vqrshrunbq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrunbq (uint16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vqrshrunbq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlaldavhaq (int64_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vrmlaldavhaq_s32 (__a, __b, __c); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vrmlaldavhaq (uint64_t __a, uint32x4_t __b, uint32x4_t __c) +{ + return __arm_vrmlaldavhaq_u32 (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq (int8x16_t __a, uint32_t * __b, const int __imm) +{ + return __arm_vshlcq_s8 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq (uint8x16_t __a, uint32_t * __b, const int __imm) +{ + return __arm_vshlcq_u8 (__a, __b, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq (int16x8_t __a, uint32_t * __b, const int __imm) +{ + return __arm_vshlcq_s16 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq (uint16x8_t __a, uint32_t * __b, const int __imm) +{ + return __arm_vshlcq_u16 (__a, __b, __imm); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq (int32x4_t __a, uint32_t * __b, const int __imm) +{ + return __arm_vshlcq_s32 (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq (uint32x4_t __a, uint32_t * __b, const int __imm) +{ + return __arm_vshlcq_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vpselq_u8 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vpselq_s8 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_m_u8 (__inactive, __a, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq (uint8x16_t __a, uint8x16_t __b, uint8_t __c) +{ + return __arm_vqrdmlashq_n_u8 (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq (uint8x16_t __a, uint8x16_t __b, uint8_t __c) +{ + return __arm_vqrdmlahq_n_u8 (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq (uint8x16_t __a, uint8x16_t __b, uint8_t __c) +{ + return __arm_vqdmlahq_n_u8 (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_m_u8 (__inactive, __a, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq (uint8x16_t __a, uint8x16_t __b, uint8_t __c) +{ + return __arm_vmlasq_n_u8 (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq (uint8x16_t __a, uint8x16_t __b, uint8_t __c) +{ + return __arm_vmlaq_n_u8 (__a, 
__b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmladavq_p_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq (uint32_t __a, uint8x16_t __b, uint8x16_t __c) +{ + return __arm_vmladavaq_u8 (__a, __b, __c); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p (uint8_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vminvq_p_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p (uint8_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmaxvq_p_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p) +{ + return __arm_vdupq_m_n_u8 (__inactive, __a, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_u8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcmphiq_m_u8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vcmphiq_m_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_u8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpcsq_m_u8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpcsq_m_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_m_u8 (__inactive, __a, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p (uint32_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vaddvaq_p_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq (uint8x16_t __a, uint8x16_t __b, const int __imm) +{ + return __arm_vsriq_n_u8 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq (uint8x16_t __a, uint8x16_t __b, const int __imm) +{ + return __arm_vsliq_n_u8 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_r_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_r_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vminavq_p_s8 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminaq_m (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vminaq_m_s8 (__a, __b, __p); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmaxavq_p_s8 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxaq_m (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmaxaq_m_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpltq_m_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpltq_m_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpleq_m_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m (int8x16_t __a, int8_t 
__b, mve_pred16_t __p) +{ + return __arm_vcmpleq_m_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgtq_m_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgtq_m_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgeq_m_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgeq_m_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_s8 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_r_s8 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_r_s8 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vqnegq_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqabsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vqabsq_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vnegq_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return 
__arm_vmvnq_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavxq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmlsdavxq_p_s8 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmlsdavq_p_s8 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavxq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmladavxq_p_s8 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmladavq_p_s8 (__a, __b, __p); +} + +__extension__ extern __inline int8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p (int8_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vminvq_p_s8 (__a, __b, __p); +} + +__extension__ extern __inline int8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p (int8_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmaxvq_p_s8 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m (int8x16_t __inactive, int8_t __a, mve_pred16_t __p) +{ + return __arm_vdupq_m_n_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vclsq_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p (int32_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vaddvaq_p_s8 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vabsq_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __arm_vqrdmlsdhxq_s8 (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __arm_vqrdmlsdhq_s8 (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq (int8x16_t __a, int8x16_t __b, int8_t __c) +{ + return __arm_vqrdmlashq_n_s8 (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq (int8x16_t __a, int8x16_t __b, int8_t __c) +{ + return __arm_vqrdmlahq_n_s8 (__a, __b, __c); +} + +__extension__ extern 
__inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __arm_vqrdmladhxq_s8 (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __arm_vqrdmladhq_s8 (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __arm_vqdmlsdhxq_s8 (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __arm_vqdmlsdhq_s8 (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq (int8x16_t __a, int8x16_t __b, int8_t __c) +{ + return __arm_vqdmlahq_n_s8 (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __arm_vqdmladhxq_s8 (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __arm_vqdmladhq_s8 (__inactive, __a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaxq (int32_t __a, int8x16_t __b, int8x16_t __c) +{ + return __arm_vmlsdavaxq_s8 (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaq (int32_t __a, int8x16_t __b, int8x16_t __c) +{ + return __arm_vmlsdavaq_s8 (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq (int8x16_t __a, int8x16_t __b, int8_t __c) +{ + return __arm_vmlasq_n_s8 (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq (int8x16_t __a, int8x16_t __b, int8_t __c) +{ + return __arm_vmlaq_n_s8 (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaxq (int32_t __a, int8x16_t __b, int8x16_t __c) +{ + return __arm_vmladavaxq_s8 (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq (int32_t __a, int8x16_t __b, int8x16_t __c) +{ + return __arm_vmladavaq_s8 (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq (int8x16_t __a, int8x16_t __b, const int __imm) +{ + return __arm_vsriq_n_s8 (__a, __b, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq (int8x16_t __a, int8x16_t __b, const int __imm) +{ + return __arm_vsliq_n_s8 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq 
(uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vpselq_u16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vpselq_s16 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_m_u16 (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return __arm_vqrdmlashq_n_u16 (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return __arm_vqrdmlahq_n_u16 (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return __arm_vqdmlahq_n_u16 (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_m_u16 (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return __arm_vmlasq_n_u16 (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return __arm_vmlaq_n_u16 (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmladavq_p_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq (uint32_t __a, uint16x8_t __b, uint16x8_t __c) +{ + return __arm_vmladavaq_u16 (__a, __b, __c); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p (uint16_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminvq_p_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p (uint16_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxvq_p_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p) +{ + return __arm_vdupq_m_n_u16 (__inactive, __a, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_u16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return 
__arm_vcmpneq_m_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmphiq_m_u16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vcmphiq_m_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_u16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpcsq_m_u16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpcsq_m_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_m_u16 (__inactive, __a, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p (uint32_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vaddvaq_p_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq (uint16x8_t __a, uint16x8_t __b, const int __imm) +{ + return __arm_vsriq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq (uint16x8_t __a, uint16x8_t __b, const int __imm) +{ + return __arm_vsliq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_r_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_r_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminavq_p_s16 (__a, __b, __p); +} + +__extension__ 
extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminaq_m (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminaq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxavq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxaq_m (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxaq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpltq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpltq_m_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpleq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpleq_m_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgtq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgtq_m_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgeq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgeq_m_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_r_s16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_r_s16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vqnegq_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqabsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vqabsq_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vnegq_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmlsdavxq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmlsdavq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmladavxq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmladavq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p (int16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminvq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p (int16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxvq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m 
(int16x8_t __inactive, int16_t __a, mve_pred16_t __p) +{ + return __arm_vdupq_m_n_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vclsq_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p (int32_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vaddvaq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vabsq_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __arm_vqrdmlsdhxq_s16 (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __arm_vqrdmlsdhq_s16 (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return __arm_vqrdmlashq_n_s16 (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return __arm_vqrdmlahq_n_s16 (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __arm_vqrdmladhxq_s16 (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __arm_vqrdmladhq_s16 (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __arm_vqdmlsdhxq_s16 (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __arm_vqdmlsdhq_s16 (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return __arm_vqdmlahq_n_s16 (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __arm_vqdmladhxq_s16 (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+__arm_vqdmladhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __arm_vqdmladhq_s16 (__inactive, __a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaxq (int32_t __a, int16x8_t __b, int16x8_t __c) +{ + return __arm_vmlsdavaxq_s16 (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaq (int32_t __a, int16x8_t __b, int16x8_t __c) +{ + return __arm_vmlsdavaq_s16 (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return __arm_vmlasq_n_s16 (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return __arm_vmlaq_n_s16 (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaxq (int32_t __a, int16x8_t __b, int16x8_t __c) +{ + return __arm_vmladavaxq_s16 (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq (int32_t __a, int16x8_t __b, int16x8_t __c) +{ + return __arm_vmladavaq_s16 (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq (int16x8_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vsriq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq (int16x8_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vsliq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vpselq_u32 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vpselq_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_m_u32 (__inactive, __a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return __arm_vqrdmlashq_n_u32 (__a, __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return __arm_vqrdmlahq_n_u32 (__a, __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return __arm_vqdmlahq_n_u32 (__a, __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_m_u32 (__inactive, __a, __p); +} + 
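(Illustrative aside, a minimal sketch rather than part of the patch: the function name cmp_eq_u16 and the compile flags shown are assumptions.) With overloads such as the __arm_vcmpeqq_m definitions above, a C++ caller can use the unsuffixed intrinsic name and let overload resolution pick the type-specific builtin:

  /* Sketch only; assumes an MVE-enabled compile, e.g.
     -march=armv8.1-m.main+mve -mfloat-abi=hard, and C++.  */
  #include <arm_mve.h>

  mve_pred16_t
  cmp_eq_u16 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
  {
    /* Resolves to the (uint16x8_t, uint16x8_t, mve_pred16_t) overload of
       __arm_vcmpeqq_m defined above, i.e. __arm_vcmpeqq_m_u16, without
       spelling the _u16 suffix at the call site.  */
    return vcmpeqq_m (a, b, p);
  }
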
+__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return __arm_vmlasq_n_u32 (__a, __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return __arm_vmlaq_n_u32 (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmladavq_p_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq (uint32_t __a, uint32x4_t __b, uint32x4_t __c) +{ + return __arm_vmladavaq_u32 (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminvq_p_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxvq_p_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p) +{ + return __arm_vdupq_m_n_u32 (__inactive, __a, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_u32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmphiq_m_u32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vcmphiq_m_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_u32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpcsq_m_u32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpcsq_m_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_m_u32 (__inactive, __a, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vaddvaq_p_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq (uint32x4_t __a, uint32x4_t __b, const int __imm) +{ + return __arm_vsriq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq (uint32x4_t __a, uint32x4_t __b, const int __imm) +{ + return __arm_vsliq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_r_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_r_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminavq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminaq_m (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminaq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxavq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxaq_m (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxaq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpltq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m (int32x4_t 
__a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpltq_m_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpleq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpleq_m_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgtq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgtq_m_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgeq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgeq_m_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_r_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_m_s32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_r_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vqnegq_m_s32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqabsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + 
return __arm_vqabsq_m_s32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vnegq_m_s32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_m_s32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmlsdavxq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmlsdavq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmladavxq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmladavq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminvq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxvq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m (int32x4_t __inactive, int32_t __a, mve_pred16_t __p) +{ + return __arm_vdupq_m_n_s32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_m_s32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vclsq_m_s32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vaddvaq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vabsq_m_s32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __arm_vqrdmlsdhxq_s32 (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __arm_vqrdmlsdhq_s32 
(__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return __arm_vqrdmlashq_n_s32 (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return __arm_vqrdmlahq_n_s32 (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __arm_vqrdmladhxq_s32 (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __arm_vqrdmladhq_s32 (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __arm_vqdmlsdhxq_s32 (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __arm_vqdmlsdhq_s32 (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return __arm_vqdmlahq_n_s32 (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __arm_vqdmladhxq_s32 (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __arm_vqdmladhq_s32 (__inactive, __a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaxq (int32_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vmlsdavaxq_s32 (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaq (int32_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vmlsdavaq_s32 (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return __arm_vmlasq_n_s32 (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return __arm_vmlaq_n_s32 (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaxq (int32_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vmladavaxq_s32 (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq (int32_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vmladavaq_s32 (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq (int32x4_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vsriq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq (int32x4_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vsliq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p) +{ + return __arm_vpselq_u64 (__a, __b, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq (int64x2_t __a, int64x2_t __b, mve_pred16_t __p) +{ + return __arm_vpselq_s64 (__a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlaldavhaxq (int64_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vrmlaldavhaxq_s32 (__a, __b, __c); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlsldavhaq (int64_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vrmlsldavhaq_s32 (__a, __b, __c); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlsldavhaxq (int64_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vrmlsldavhaxq_s32 (__a, __b, __c); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddlvaq_p (int64_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vaddlvaq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev16q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev16q_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlaldavhq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrmlaldavhq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlaldavhxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrmlaldavhxq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlsldavhq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrmlsldavhq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlsldavhxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrmlsldavhxq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddlvaq_p (uint64_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vaddlvaq_p_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev16q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev16q_m_u8 (__inactive, __a, __p); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vrmlaldavhq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrmlaldavhq_p_u32 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m (int16x8_t __inactive, const int __imm, mve_pred16_t __p) +{ + return __arm_vmvnq_m_n_s16 (__inactive, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m_n (int16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vorrq_m_n_s16 (__a, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrntq (int8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vqrshrntq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrnbq (int8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vqshrnbq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrntq (int8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vqshrntq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrnbq (int8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vrshrnbq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrntq (int8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vrshrntq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrnbq (int8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vshrnbq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrntq (int8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vshrntq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaq (int64_t __a, int16x8_t __b, int16x8_t __c) +{ + return __arm_vmlaldavaq_s16 (__a, __b, __c); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaxq (int64_t __a, int16x8_t __b, int16x8_t __c) +{ + return __arm_vmlaldavaxq_s16 (__a, __b, __c); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavaq (int64_t __a, int16x8_t __b, int16x8_t __c) +{ + return __arm_vmlsldavaq_s16 (__a, __b, __c); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavaxq (int64_t __a, int16x8_t __b, int16x8_t __c) +{ + return __arm_vmlsldavaxq_s16 (__a, __b, __c); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmlaldavq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) 
+{ + return __arm_vmlaldavxq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmlsldavq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmlsldavxq_p_s16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq_m (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmovlbq_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq_m (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmovltq_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovnbq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmovnbq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovntq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmovntq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovnbq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqmovnbq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovntq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqmovntq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev32q_m_s8 (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m (uint16x8_t __inactive, const int __imm, mve_pred16_t __p) +{ + return __arm_vmvnq_m_n_u16 (__inactive, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m_n (uint16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vorrq_m_n_u16 (__a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshruntq (uint8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vqrshruntq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrunbq (uint8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vqshrunbq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshruntq (uint8x16_t __a, int16x8_t __b, const int __imm) +{ + return __arm_vqshruntq_n_s16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovunbq_m (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + 
return __arm_vqmovunbq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovuntq_m (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqmovuntq_m_s16 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm) +{ + return __arm_vqrshrntq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm) +{ + return __arm_vqshrnbq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm) +{ + return __arm_vqshrntq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm) +{ + return __arm_vrshrnbq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm) +{ + return __arm_vrshrntq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm) +{ + return __arm_vshrnbq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm) +{ + return __arm_vshrntq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaq (uint64_t __a, uint16x8_t __b, uint16x8_t __c) +{ + return __arm_vmlaldavaq_u16 (__a, __b, __c); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavq_p (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmlaldavq_p_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq_m (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmovlbq_m_u8 (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq_m (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmovltq_m_u8 (__inactive, __a, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovnbq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmovnbq_m_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovntq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmovntq_m_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovnbq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return 
__arm_vqmovnbq_m_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovntq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqmovntq_m_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev32q_m_u8 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m (int32x4_t __inactive, const int __imm, mve_pred16_t __p) +{ + return __arm_vmvnq_m_n_s32 (__inactive, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m_n (int32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vorrq_m_n_s32 (__a, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrntq (int16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vqrshrntq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrnbq (int16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vqshrnbq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrntq (int16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vqshrntq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrnbq (int16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vrshrnbq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrntq (int16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vrshrntq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrnbq (int16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vshrnbq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrntq (int16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vshrntq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaq (int64_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vmlaldavaq_s32 (__a, __b, __c); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaxq (int64_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vmlaldavaxq_s32 (__a, __b, __c); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavaq (int64_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vmlsldavaq_s32 (__a, __b, __c); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavaxq (int64_t __a, int32x4_t __b, int32x4_t __c) +{ + return __arm_vmlsldavaxq_s32 (__a, __b, __c); +} + +__extension__ extern __inline 
int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmlaldavq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmlaldavxq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmlsldavq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmlsldavxq_p_s32 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq_m (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmovlbq_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq_m (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmovltq_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovnbq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmovnbq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovntq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmovntq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovnbq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqmovnbq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovntq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqmovntq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev32q_m_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m (uint32x4_t __inactive, const int __imm, mve_pred16_t __p) +{ + return __arm_vmvnq_m_n_u32 (__inactive, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m_n (uint32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vorrq_m_n_u32 (__a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshruntq (uint16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vqrshruntq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrunbq (uint16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vqshrunbq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshruntq (uint16x8_t __a, int32x4_t __b, const int __imm) +{ + return __arm_vqshruntq_n_s32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovunbq_m (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqmovunbq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovuntq_m (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqmovuntq_m_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm) +{ + return __arm_vqrshrntq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm) +{ + return __arm_vqshrnbq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm) +{ + return __arm_vqshrntq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm) +{ + return __arm_vrshrnbq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm) +{ + return __arm_vrshrntq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm) +{ + return __arm_vshrnbq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm) +{ + return __arm_vshrntq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaq (uint64_t __a, uint32x4_t __b, uint32x4_t __c) +{ + return __arm_vmlaldavaq_u32 (__a, __b, __c); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmlaldavq_p_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq_m (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmovlbq_m_u16 (__inactive, __a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq_m (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmovltq_m_u16 (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovnbq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmovnbq_m_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovntq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmovntq_m_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovnbq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqmovnbq_m_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqmovntq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqmovntq_m_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev32q_m_u16 (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsriq_m_n_s8 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshluq_m (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshluq_m_n_s8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq_p (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) +{ + return __arm_vabavq_p_s8 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_m (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsriq_m_n_u8 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p) +{ + return __arm_vabavq_p_u8 (__a, __b, __c, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsriq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshluq_m (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshluq_m_n_s16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq_p (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) +{ + return __arm_vabavq_p_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsriq_m_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) +{ + return __arm_vabavq_p_u16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsriq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshluq_m (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshluq_m_n_s32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq_p (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vabavq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_m (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsriq_m_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline 
uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabavq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) +{ + return __arm_vabavq_p_u32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vshlq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_n_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_n_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + 
return __arm_vaddq_m_n_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_n_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vandq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vandq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vandq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vandq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vandq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vandq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vbicq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_m (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_m_n_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_m (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_m (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_m_n_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_m (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_m_n_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_m (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_m_n_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ 
+ return __arm_vcaddq_rot270_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_veorq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_veorq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) 
+{ + return __arm_veorq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_veorq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_veorq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_veorq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_n_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_n_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_n_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_n_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot270_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot270_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot270_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot270_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot270_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot270_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot90_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot90_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot90_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot90_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot90_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot90_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_n_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_n_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_n_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vhsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_n_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vminq_m_s8 
(__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vminq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) +{ + return __arm_vmladavaq_p_s8 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmladavaq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) +{ + return __arm_vmladavaq_p_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p) +{ + return __arm_vmladavaq_p_u8 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmladavaq_p_u32 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) +{ + return __arm_vmladavaq_p_u16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaxq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) +{ + return __arm_vmladavaxq_p_s8 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaxq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmladavaxq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaxq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) +{ + 
return __arm_vmladavaxq_p_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) +{ + return __arm_vmlaq_m_n_s8 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) +{ + return __arm_vmlaq_m_n_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) +{ + return __arm_vmlaq_m_n_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_m (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p) +{ + return __arm_vmlaq_m_n_u8 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_m (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p) +{ + return __arm_vmlaq_m_n_u32 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_m (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p) +{ + return __arm_vmlaq_m_n_u16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) +{ + return __arm_vmlasq_m_n_s8 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) +{ + return __arm_vmlasq_m_n_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) +{ + return __arm_vmlasq_m_n_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_m (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p) +{ + return __arm_vmlasq_m_n_u8 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_m (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p) +{ + return __arm_vmlasq_m_n_u32 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_m (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p) +{ + return __arm_vmlasq_m_n_u16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) +{ + return __arm_vmlsdavaq_p_s8 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmlsdavaq_p_s32 (__a, __b, __c, __p); +} + +__extension__ 
extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) +{ + return __arm_vmlsdavaq_p_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaxq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p) +{ + return __arm_vmlsdavaxq_p_s8 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaxq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmlsdavaxq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaxq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) +{ + return __arm_vmlsdavaxq_p_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_m (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_m (uint16x8_t __inactive, uint8x16_t 
__a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_m (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_m (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_m (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_n_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_n_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return 
__arm_vmulq_m_n_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_n_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vornq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vornq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vornq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vornq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vornq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vornq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vorrq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_n_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_n_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_n_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_n_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_s32 
(__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqaddq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqdmladhq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqdmladhq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqdmladhq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqdmladhxq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqdmladhxq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqdmladhxq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) +{ + return __arm_vqdmlahq_m_n_s8 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) +{ + return __arm_vqdmlahq_m_n_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) +{ + return __arm_vqdmlahq_m_n_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vqdmlsdhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqdmlsdhq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqdmlsdhq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqdmlsdhq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqdmlsdhxq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqdmlsdhxq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqdmlsdhxq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vqdmulhq_m_n_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqdmulhq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vqdmulhq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqdmulhq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqdmulhq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqdmulhq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmladhq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhq_m (int32x4_t __inactive, int32x4_t __a, 
int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmladhq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmladhq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmladhxq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmladhxq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmladhxq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) +{ + return __arm_vqrdmlahq_m_n_s8 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) +{ + return __arm_vqrdmlahq_m_n_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) +{ + return __arm_vqrdmlahq_m_n_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p) +{ + return __arm_vqrdmlashq_m_n_s8 (__a, __b, __c, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p) +{ + return __arm_vqrdmlashq_m_n_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p) +{ + return __arm_vqrdmlashq_m_n_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmlsdhq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmlsdhq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmlsdhq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline 
int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmlsdhxq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmlsdhxq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmlsdhxq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmulhq_m_n_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmulhq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmulhq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmulhq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmulhq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqrdmulhq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vqrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqrshlq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshlq_m_n_s8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_n (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshlq_m_n_s32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_n (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshlq_m_n_s16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_n (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshlq_m_n_u8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_n (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshlq_m_n_u32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_n (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshlq_m_n_u16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m (uint16x8_t __inactive, uint16x8_t __a, 
int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqshlq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_n_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_n_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_n_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_n_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqsubq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline 
int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m (int16x8_t 
__inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_m_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrq_m_n_s8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq_m (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrq_m_n_s32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq_m (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrq_m_n_s16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq_m (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrq_m_n_u8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq_m (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrq_m_n_u32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrq_m (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrq_m_n_u16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlq_m_n_s8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_n (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlq_m_n_s32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_n (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlq_m_n_s16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_n (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return 
__arm_vshlq_m_n_u8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_n (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlq_m_n_u32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_n (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlq_m_n_u16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrq_m_n_s8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq_m (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrq_m_n_s32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq_m (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrq_m_n_s16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq_m (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrq_m_n_u8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq_m (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrq_m_n_u32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrq_m (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrq_m_n_u16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsliq_m_n_s8 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsliq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsliq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_m (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsliq_m_n_u8 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_m (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsliq_m_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vsliq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vsliq_m_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_n_s8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_n_u8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_n_u32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_n_u16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmlaldavaq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) +{ + return __arm_vmlaldavaq_p_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmlaldavaq_p_u32 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaq_p (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) +{ + return __arm_vmlaldavaq_p_u16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmlaldavaxq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaxq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) +{ + return __arm_vmlaldavaxq_p_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaxq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmlaldavaxq_p_u32 (__a, __b, __c, __p); +} + +__extension__ extern 
__inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaldavaxq_p (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p) +{ + return __arm_vmlaldavaxq_p_u16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmlsldavaq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavaq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) +{ + return __arm_vmlsldavaq_p_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vmlsldavaxq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsldavaxq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p) +{ + return __arm_vmlsldavaxq_p_s16 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_poly_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_poly_m_p8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_poly_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_poly_m_p16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_poly_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_poly_m_p8 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_poly_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_poly_m_p16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmullbq_m (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqdmullbq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmullbq_m (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vqdmullbq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmullbq_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqdmullbq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmullbq_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqdmullbq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+__arm_vqdmulltq_m (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vqdmulltq_m_n_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulltq_m (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vqdmulltq_m_n_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulltq_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vqdmulltq_m_s32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmulltq_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vqdmulltq_m_s16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshrnbq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshrnbq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshrnbq_m_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshrnbq_m_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshrntq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshrntq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshrntq_m_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshrntq_m_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrunbq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshrunbq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshrunbq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshrunbq_m_n_s16 
(__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshruntq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshruntq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshruntq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqrshruntq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshrnbq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshrnbq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshrnbq_m_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshrnbq_m_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshrntq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshrntq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshrntq_m_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshrntq_m_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrunbq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshrunbq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshrunbq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshrunbq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshruntq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshruntq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshruntq_m 
(uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vqshruntq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlaldavhaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vrmlaldavhaq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlaldavhaq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p) +{ + return __arm_vrmlaldavhaq_p_u32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlaldavhaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vrmlaldavhaxq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlsldavhaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vrmlsldavhaq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmlsldavhaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p) +{ + return __arm_vrmlsldavhaxq_p_s32 (__a, __b, __c, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrnbq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrnbq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrnbq_m_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrnbq_m_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrntq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrntq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrntq_m_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vrshrntq_m_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshllbq_m (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshllbq_m_n_s8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshllbq_m (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshllbq_m_n_s16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshllbq_m (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshllbq_m_n_u8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshllbq_m (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshllbq_m_n_u16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlltq_m (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlltq_m_n_s8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlltq_m (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlltq_m_n_s16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlltq_m (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlltq_m_n_u8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlltq_m (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlltq_m_n_u16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrnbq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrnbq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrnbq_m_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrnbq_m_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrntq_m_n_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrntq_m (int8x16_t __a, int16x8_t 
__b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrntq_m_n_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrntq_m_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshrntq_m_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset (int8_t * __base, uint8x16_t __offset, int8x16_t __value) +{ + __arm_vstrbq_scatter_offset_s8 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset (int8_t * __base, uint32x4_t __offset, int32x4_t __value) +{ + __arm_vstrbq_scatter_offset_s32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset (int8_t * __base, uint16x8_t __offset, int16x8_t __value) +{ + __arm_vstrbq_scatter_offset_s16 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value) +{ + __arm_vstrbq_scatter_offset_u8 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value) +{ + __arm_vstrbq_scatter_offset_u32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value) +{ + __arm_vstrbq_scatter_offset_u16 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq (int8_t * __addr, int8x16_t __value) +{ + __arm_vstrbq_s8 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq (int8_t * __addr, int32x4_t __value) +{ + __arm_vstrbq_s32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq (int8_t * __addr, int16x8_t __value) +{ + __arm_vstrbq_s16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq (uint8_t * __addr, uint8x16_t __value) +{ + __arm_vstrbq_u8 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq (uint8_t * __addr, uint32x4_t __value) +{ + __arm_vstrbq_u32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq (uint8_t * __addr, uint16x8_t __value) +{ + __arm_vstrbq_u16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base 
(uint32x4_t __addr, const int __offset, int32x4_t __value) +{ + __arm_vstrwq_scatter_base_s32 (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, uint32x4_t __value) +{ + __arm_vstrwq_scatter_base_u32 (__addr, __offset, __value); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset (uint8_t const * __base, uint8x16_t __offset) +{ + return __arm_vldrbq_gather_offset_u8 (__base, __offset); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset (int8_t const * __base, uint8x16_t __offset) +{ + return __arm_vldrbq_gather_offset_s8 (__base, __offset); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset (uint8_t const * __base, uint16x8_t __offset) +{ + return __arm_vldrbq_gather_offset_u16 (__base, __offset); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset (int8_t const * __base, uint16x8_t __offset) +{ + return __arm_vldrbq_gather_offset_s16 (__base, __offset); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset (uint8_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrbq_gather_offset_u32 (__base, __offset); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset (int8_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrbq_gather_offset_s32 (__base, __offset); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_p (int8_t * __addr, int8x16_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_p_s8 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_p (int8_t * __addr, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_p_s32 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_p (int8_t * __addr, int16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_p_s16 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_p (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_p_u8 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_p (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_p_u32 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_p (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_p_u16 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset_p (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_scatter_offset_p_s8 (__base, __offset, __value, 
__p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset_p (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_scatter_offset_p_s32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset_p (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_scatter_offset_p_s16 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset_p (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_scatter_offset_p_u8 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset_p (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_scatter_offset_p_u32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrbq_scatter_offset_p (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrbq_scatter_offset_p_u16 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_base_p_s32 (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_base_p_u32 (__addr, __offset, __value, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset_z (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p) +{ + return __arm_vldrbq_gather_offset_z_s8 (__base, __offset, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset_z (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrbq_gather_offset_z_s32 (__base, __offset, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset_z (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p) +{ + return __arm_vldrbq_gather_offset_z_s16 (__base, __offset, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset_z (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p) +{ + return __arm_vldrbq_gather_offset_z_u8 (__base, __offset, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrbq_gather_offset_z (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrbq_gather_offset_z_u32 (__base, __offset, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+__arm_vldrbq_gather_offset_z (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p) +{ + return __arm_vldrbq_gather_offset_z_u16 (__base, __offset, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q (int8_t const * __base) +{ + return __arm_vld1q_s8 (__base); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q (int32_t const * __base) +{ + return __arm_vld1q_s32 (__base); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q (int16_t const * __base) +{ + return __arm_vld1q_s16 (__base); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q (uint8_t const * __base) +{ + return __arm_vld1q_u8 (__base); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q (uint32_t const * __base) +{ + return __arm_vld1q_u32 (__base); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q (uint16_t const * __base) +{ + return __arm_vld1q_u16 (__base); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_offset (int16_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrhq_gather_offset_s32 (__base, __offset); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_offset (int16_t const * __base, uint16x8_t __offset) +{ + return __arm_vldrhq_gather_offset_s16 (__base, __offset); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_offset (uint16_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrhq_gather_offset_u32 (__base, __offset); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_offset (uint16_t const * __base, uint16x8_t __offset) +{ + return __arm_vldrhq_gather_offset_u16 (__base, __offset); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrhq_gather_offset_z_s32 (__base, __offset, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) +{ + return __arm_vldrhq_gather_offset_z_s16 (__base, __offset, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrhq_gather_offset_z_u32 (__base, __offset, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) +{ + return __arm_vldrhq_gather_offset_z_u16 (__base, __offset, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrhq_gather_shifted_offset_s32 (__base, __offset); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint16x8_t __offset) +{ + return __arm_vldrhq_gather_shifted_offset_s16 (__base, __offset); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrhq_gather_shifted_offset_u32 (__base, __offset); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint16x8_t __offset) +{ + return __arm_vldrhq_gather_shifted_offset_u16 (__base, __offset); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrhq_gather_shifted_offset_z_s32 (__base, __offset, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) +{ + return __arm_vldrhq_gather_shifted_offset_z_s16 (__base, __offset, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrhq_gather_shifted_offset_z_u32 (__base, __offset, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) +{ + return __arm_vldrhq_gather_shifted_offset_z_u16 (__base, __offset, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrdq_gather_offset (int64_t const * __base, uint64x2_t __offset) +{ + return __arm_vldrdq_gather_offset_s64 (__base, __offset); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrdq_gather_offset (uint64_t const * __base, uint64x2_t __offset) +{ + return __arm_vldrdq_gather_offset_u64 (__base, __offset); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrdq_gather_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p) +{ + return __arm_vldrdq_gather_offset_z_s64 (__base, __offset, __p); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrdq_gather_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p) +{ + return __arm_vldrdq_gather_offset_z_u64 (__base, __offset, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrdq_gather_shifted_offset (int64_t const * __base, uint64x2_t __offset) +{ + return __arm_vldrdq_gather_shifted_offset_s64 (__base, __offset); +} + +__extension__ extern __inline uint64x2_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrdq_gather_shifted_offset (uint64_t const * __base, uint64x2_t __offset) +{ + return __arm_vldrdq_gather_shifted_offset_u64 (__base, __offset); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrdq_gather_shifted_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p) +{ + return __arm_vldrdq_gather_shifted_offset_z_s64 (__base, __offset, __p); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrdq_gather_shifted_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p) +{ + return __arm_vldrdq_gather_shifted_offset_z_u64 (__base, __offset, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_offset (int32_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrwq_gather_offset_s32 (__base, __offset); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_offset (uint32_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrwq_gather_offset_u32 (__base, __offset); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrwq_gather_offset_z_s32 (__base, __offset, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrwq_gather_offset_z_u32 (__base, __offset, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_shifted_offset (int32_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrwq_gather_shifted_offset_s32 (__base, __offset); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_shifted_offset (uint32_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrwq_gather_shifted_offset_u32 (__base, __offset); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_shifted_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrwq_gather_shifted_offset_z_s32 (__base, __offset, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_shifted_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrwq_gather_shifted_offset_z_u32 (__base, __offset, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q (int8_t * __addr, int8x16_t __value) +{ + __arm_vst1q_s8 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q (int32_t * __addr, int32x4_t __value) +{ + __arm_vst1q_s32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q (int16_t * __addr, 
int16x8_t __value) +{ + __arm_vst1q_s16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q (uint8_t * __addr, uint8x16_t __value) +{ + __arm_vst1q_u8 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q (uint32_t * __addr, uint32x4_t __value) +{ + __arm_vst1q_u32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q (uint16_t * __addr, uint16x8_t __value) +{ + __arm_vst1q_u16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset (int16_t * __base, uint32x4_t __offset, int32x4_t __value) +{ + __arm_vstrhq_scatter_offset_s32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset (int16_t * __base, uint16x8_t __offset, int16x8_t __value) +{ + __arm_vstrhq_scatter_offset_s16 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value) +{ + __arm_vstrhq_scatter_offset_u32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value) +{ + __arm_vstrhq_scatter_offset_u16 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset_p (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_scatter_offset_p_s32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset_p (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_scatter_offset_p_s16 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset_p (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_scatter_offset_p_u32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset_p (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_scatter_offset_p_u16 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset (int16_t * __base, uint32x4_t __offset, int32x4_t __value) +{ + __arm_vstrhq_scatter_shifted_offset_s32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset (int16_t * __base, uint16x8_t __offset, int16x8_t __value) +{ + __arm_vstrhq_scatter_shifted_offset_s16 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vstrhq_scatter_shifted_offset (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value) +{ + __arm_vstrhq_scatter_shifted_offset_u32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value) +{ + __arm_vstrhq_scatter_shifted_offset_u16 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset_p (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_scatter_shifted_offset_p_s32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset_p (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_scatter_shifted_offset_p_s16 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset_p (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_scatter_shifted_offset_p_u32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset_p (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_scatter_shifted_offset_p_u16 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq (int16_t * __addr, int32x4_t __value) +{ + __arm_vstrhq_s32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq (int16_t * __addr, int16x8_t __value) +{ + __arm_vstrhq_s16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq (uint16_t * __addr, uint32x4_t __value) +{ + __arm_vstrhq_u32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq (uint16_t * __addr, uint16x8_t __value) +{ + __arm_vstrhq_u16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_p (int16_t * __addr, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_p_s32 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_p (int16_t * __addr, int16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_p_s16 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_p (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_p_u32 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_p (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_p_u16 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+__arm_vstrwq (int32_t * __addr, int32x4_t __value) +{ + __arm_vstrwq_s32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq (uint32_t * __addr, uint32x4_t __value) +{ + __arm_vstrwq_u32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_p (int32_t * __addr, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_p_s32 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_p (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_p_u32 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base_p (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p) +{ + __arm_vstrdq_scatter_base_p_s64 (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base_p (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p) +{ + __arm_vstrdq_scatter_base_p_u64 (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base (uint64x2_t __addr, const int __offset, int64x2_t __value) +{ + __arm_vstrdq_scatter_base_s64 (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base (uint64x2_t __addr, const int __offset, uint64x2_t __value) +{ + __arm_vstrdq_scatter_base_u64 (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_offset_p (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p) +{ + __arm_vstrdq_scatter_offset_p_s64 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_offset_p (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p) +{ + __arm_vstrdq_scatter_offset_p_u64 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_offset (int64_t * __base, uint64x2_t __offset, int64x2_t __value) +{ + __arm_vstrdq_scatter_offset_s64 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_offset (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value) +{ + __arm_vstrdq_scatter_offset_u64 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_shifted_offset_p (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p) +{ + __arm_vstrdq_scatter_shifted_offset_p_s64 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_shifted_offset_p (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p) +{ + __arm_vstrdq_scatter_shifted_offset_p_u64 
(__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_shifted_offset (int64_t * __base, uint64x2_t __offset, int64x2_t __value) +{ + __arm_vstrdq_scatter_shifted_offset_s64 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_shifted_offset (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value) +{ + __arm_vstrdq_scatter_shifted_offset_u64 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset_p (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_offset_p_s32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset_p (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_offset_p_u32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset (int32_t * __base, uint32x4_t __offset, int32x4_t __value) +{ + __arm_vstrwq_scatter_offset_s32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value) +{ + __arm_vstrwq_scatter_offset_u32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset_p (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_shifted_offset_p_s32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset_p (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_shifted_offset_p_u32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset (int32_t * __base, uint32x4_t __offset, int32x4_t __value) +{ + __arm_vstrwq_scatter_shifted_offset_s32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value) +{ + __arm_vstrwq_scatter_shifted_offset_u32 (__base, __offset, __value); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (int8x16_t __a, int8x16_t __b) +{ + return __arm_vaddq_s8 (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (int16x8_t __a, int16x8_t __b) +{ + return __arm_vaddq_s16 (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (int32x4_t __a, int32x4_t __b) +{ + return __arm_vaddq_s32 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (uint8x16_t __a, uint8x16_t __b) +{ + return __arm_vaddq_u8 (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (uint16x8_t __a, uint16x8_t __b) +{ + return __arm_vaddq_u16 (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (uint32x4_t __a, uint32x4_t __b) +{ + return __arm_vaddq_u32 (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vuninitializedq (uint8x16_t /* __v ATTRIBUTE UNUSED */) +{ + return __arm_vuninitializedq_u8 (); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vuninitializedq (uint16x8_t /* __v ATTRIBUTE UNUSED */) +{ + return __arm_vuninitializedq_u16 (); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vuninitializedq (uint32x4_t /* __v ATTRIBUTE UNUSED */) +{ + return __arm_vuninitializedq_u32 (); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vuninitializedq (uint64x2_t /* __v ATTRIBUTE UNUSED */) +{ + return __arm_vuninitializedq_u64 (); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vuninitializedq (int8x16_t /* __v ATTRIBUTE UNUSED */) +{ + return __arm_vuninitializedq_s8 (); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vuninitializedq (int16x8_t /* __v ATTRIBUTE UNUSED */) +{ + return __arm_vuninitializedq_s16 (); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vuninitializedq (int32x4_t /* __v ATTRIBUTE UNUSED */) +{ + return __arm_vuninitializedq_s32 (); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vuninitializedq (int64x2_t /* __v ATTRIBUTE UNUSED */) +{ + return __arm_vuninitializedq_s64 (); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s16 (int32x4_t __a) +{ + return __arm_vreinterpretq_s16_s32 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s16 (int64x2_t __a) +{ + return __arm_vreinterpretq_s16_s64 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s16 (int8x16_t __a) +{ + return __arm_vreinterpretq_s16_s8 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s16 (uint16x8_t __a) +{ + return __arm_vreinterpretq_s16_u16 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s16 (uint32x4_t __a) +{ + return __arm_vreinterpretq_s16_u32 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s16 (uint64x2_t __a) +{ + return __arm_vreinterpretq_s16_u64 (__a); +} + +__extension__ extern 
__inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s16 (uint8x16_t __a) +{ + return __arm_vreinterpretq_s16_u8 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s32 (int16x8_t __a) +{ + return __arm_vreinterpretq_s32_s16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s32 (int64x2_t __a) +{ + return __arm_vreinterpretq_s32_s64 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s32 (int8x16_t __a) +{ + return __arm_vreinterpretq_s32_s8 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s32 (uint16x8_t __a) +{ + return __arm_vreinterpretq_s32_u16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s32 (uint32x4_t __a) +{ + return __arm_vreinterpretq_s32_u32 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s32 (uint64x2_t __a) +{ + return __arm_vreinterpretq_s32_u64 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s32 (uint8x16_t __a) +{ + return __arm_vreinterpretq_s32_u8 (__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s64 (int16x8_t __a) +{ + return __arm_vreinterpretq_s64_s16 (__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s64 (int32x4_t __a) +{ + return __arm_vreinterpretq_s64_s32 (__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s64 (int8x16_t __a) +{ + return __arm_vreinterpretq_s64_s8 (__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s64 (uint16x8_t __a) +{ + return __arm_vreinterpretq_s64_u16 (__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s64 (uint32x4_t __a) +{ + return __arm_vreinterpretq_s64_u32 (__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s64 (uint64x2_t __a) +{ + return __arm_vreinterpretq_s64_u64 (__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s64 (uint8x16_t __a) +{ + return __arm_vreinterpretq_s64_u8 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s8 (int16x8_t __a) +{ + return __arm_vreinterpretq_s8_s16 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s8 (int32x4_t __a) +{ + return __arm_vreinterpretq_s8_s32 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+__arm_vreinterpretq_s8 (int64x2_t __a) +{ + return __arm_vreinterpretq_s8_s64 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s8 (uint16x8_t __a) +{ + return __arm_vreinterpretq_s8_u16 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s8 (uint32x4_t __a) +{ + return __arm_vreinterpretq_s8_u32 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s8 (uint64x2_t __a) +{ + return __arm_vreinterpretq_s8_u64 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s8 (uint8x16_t __a) +{ + return __arm_vreinterpretq_s8_u8 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u16 (int16x8_t __a) +{ + return __arm_vreinterpretq_u16_s16 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u16 (int32x4_t __a) +{ + return __arm_vreinterpretq_u16_s32 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u16 (int64x2_t __a) +{ + return __arm_vreinterpretq_u16_s64 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u16 (int8x16_t __a) +{ + return __arm_vreinterpretq_u16_s8 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u16 (uint32x4_t __a) +{ + return __arm_vreinterpretq_u16_u32 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u16 (uint64x2_t __a) +{ + return __arm_vreinterpretq_u16_u64 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u16 (uint8x16_t __a) +{ + return __arm_vreinterpretq_u16_u8 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u32 (int16x8_t __a) +{ + return __arm_vreinterpretq_u32_s16 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u32 (int32x4_t __a) +{ + return __arm_vreinterpretq_u32_s32 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u32 (int64x2_t __a) +{ + return __arm_vreinterpretq_u32_s64 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u32 (int8x16_t __a) +{ + return __arm_vreinterpretq_u32_s8 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u32 (uint16x8_t __a) +{ + return __arm_vreinterpretq_u32_u16 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u32 (uint64x2_t __a) +{ + return __arm_vreinterpretq_u32_u64 (__a); +} + 
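(Illustrative usage, not part of the patch: a minimal sketch of how the C++ overloads above let __arm_vreinterpretq_u32 and __arm_vaddq resolve on argument type instead of requiring the type-suffixed names. The function name and compiler flags are invented for the example; it assumes an MVE-enabled C++ target, e.g. -march=armv8.1-m.main+mve -mfloat-abi=hard.)

#include <arm_mve.h>

uint32x4_t
reinterpret_and_add (int16x8_t s16, uint64x2_t u64)
{
  /* Overload resolution picks the matching type-suffixed intrinsic.  */
  uint32x4_t a = __arm_vreinterpretq_u32 (s16);  /* -> __arm_vreinterpretq_u32_s16 */
  uint32x4_t b = __arm_vreinterpretq_u32 (u64);  /* -> __arm_vreinterpretq_u32_u64 */
  return __arm_vaddq (a, b);                     /* -> __arm_vaddq_u32 */
}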
+__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u32 (uint8x16_t __a) +{ + return __arm_vreinterpretq_u32_u8 (__a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u64 (int16x8_t __a) +{ + return __arm_vreinterpretq_u64_s16 (__a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u64 (int32x4_t __a) +{ + return __arm_vreinterpretq_u64_s32 (__a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u64 (int64x2_t __a) +{ + return __arm_vreinterpretq_u64_s64 (__a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u64 (int8x16_t __a) +{ + return __arm_vreinterpretq_u64_s8 (__a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u64 (uint16x8_t __a) +{ + return __arm_vreinterpretq_u64_u16 (__a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u64 (uint32x4_t __a) +{ + return __arm_vreinterpretq_u64_u32 (__a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u64 (uint8x16_t __a) +{ + return __arm_vreinterpretq_u64_u8 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u8 (int16x8_t __a) +{ + return __arm_vreinterpretq_u8_s16 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u8 (int32x4_t __a) +{ + return __arm_vreinterpretq_u8_s32 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u8 (int64x2_t __a) +{ + return __arm_vreinterpretq_u8_s64 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u8 (int8x16_t __a) +{ + return __arm_vreinterpretq_u8_s8 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u8 (uint16x8_t __a) +{ + return __arm_vreinterpretq_u8_u16 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u8 (uint32x4_t __a) +{ + return __arm_vreinterpretq_u8_u32 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u8 (uint64x2_t __a) +{ + return __arm_vreinterpretq_u8_u64 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_m_n_u8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_m (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_m_n_u32 
(__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_m (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_m_n_u16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_m (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_m_wb_u8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_m (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_m_wb_u16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_m (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_m_wb_u32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_u8 (uint32_t __a, const int __imm) +{ + return __arm_vddupq_n_u8 (__a, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_u32 (uint32_t __a, const int __imm) +{ + return __arm_vddupq_n_u32 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_u16 (uint32_t __a, const int __imm) +{ + return __arm_vddupq_n_u16 (__a, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_m (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_m_n_u8 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_m (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_m_n_u32 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_m (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_m_n_u16 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_m (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_m_wb_u8 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_m (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_m_wb_u32 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_m (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_m_wb_u16 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_u8 (uint32_t __a, uint32_t __b, const int __imm) +{ + return __arm_vdwdupq_n_u8 (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_u32 (uint32_t __a, uint32_t __b, const int __imm) +{ + return __arm_vdwdupq_n_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_u16 (uint32_t __a, uint32_t __b, const int __imm) +{ + return __arm_vdwdupq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_u8 (uint32_t * __a, uint32_t __b, const int __imm) +{ + return __arm_vdwdupq_wb_u8 (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_u32 (uint32_t * __a, uint32_t __b, const int __imm) +{ + return __arm_vdwdupq_wb_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_u16 (uint32_t * __a, uint32_t __b, const int __imm) +{ + return __arm_vdwdupq_wb_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_m_n_u8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_m (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_m_n_u32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_m (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_m_n_u16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_u8 (uint32_t __a, const int __imm) +{ + return __arm_vidupq_n_u8 (__a, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_m (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_m_wb_u8 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_m (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_m_wb_u16 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_m (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_m_wb_u32 (__inactive, __a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_u32 (uint32_t __a, const int __imm) +{ + return __arm_vidupq_n_u32 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_u16 (uint32_t __a, const int __imm) +{ + return 
__arm_vidupq_n_u16 (__a, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_u8 (uint32_t * __a, const int __imm) +{ + return __arm_vidupq_wb_u8 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_u16 (uint32_t * __a, const int __imm) +{ + return __arm_vidupq_wb_u16 (__a, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_u32 (uint32_t * __a, const int __imm) +{ + return __arm_vidupq_wb_u32 (__a, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_u8 (uint32_t * __a, const int __imm) +{ + return __arm_vddupq_wb_u8 (__a, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_u16 (uint32_t * __a, const int __imm) +{ + return __arm_vddupq_wb_u16 (__a, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_u32 (uint32_t * __a, const int __imm) +{ + return __arm_vddupq_wb_u32 (__a, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_m (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_m_n_u8 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_m (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_m_n_u32 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_m (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_m_n_u16 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_m (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_m_wb_u8 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_m (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_m_wb_u32 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_m (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_m_wb_u16 (__inactive, __a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_u8 (uint32_t __a, uint32_t __b, const int __imm) +{ + return __arm_viwdupq_n_u8 (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_u32 (uint32_t __a, uint32_t __b, const int __imm) +{ + return __arm_viwdupq_n_u32 (__a, __b, __imm); +} + +__extension__ extern 
__inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_u16 (uint32_t __a, uint32_t __b, const int __imm) +{ + return __arm_viwdupq_n_u16 (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_u8 (uint32_t * __a, uint32_t __b, const int __imm) +{ + return __arm_viwdupq_wb_u8 (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_u32 (uint32_t * __a, uint32_t __b, const int __imm) +{ + return __arm_viwdupq_wb_u32 (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_u16 (uint32_t * __a, uint32_t __b, const int __imm) +{ + return __arm_viwdupq_wb_u16 (__a, __b, __imm); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base_wb (uint64x2_t * __addr, const int __offset, int64x2_t __value) +{ + __arm_vstrdq_scatter_base_wb_s64 (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base_wb (uint64x2_t * __addr, const int __offset, uint64x2_t __value) +{ + __arm_vstrdq_scatter_base_wb_u64 (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base_wb_p (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p) +{ + __arm_vstrdq_scatter_base_wb_p_s64 (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base_wb_p (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p) +{ + __arm_vstrdq_scatter_base_wb_p_u64 (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_base_wb_p_s32 (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_base_wb_p_u32 (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, int32x4_t __value) +{ + __arm_vstrwq_scatter_base_wb_s32 (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, uint32x4_t __value) +{ + __arm_vstrwq_scatter_base_wb_u32 (__addr, __offset, __value); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_x_u8 (uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_x_n_u8 (__a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_x_u16 (uint32_t __a, 
const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_x_n_u16 (__a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_x_u32 (uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_x_n_u32 (__a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_x_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_x_wb_u8 (__a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_x_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_x_wb_u16 (__a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vddupq_x_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p) +{ + return __arm_vddupq_x_wb_u32 (__a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_x_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_x_n_u8 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_x_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_x_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_x_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_x_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_x_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_x_wb_u8 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_x_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_x_wb_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdwdupq_x_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vdwdupq_x_wb_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_x_u8 (uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_x_n_u8 (__a, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_x_u16 (uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_x_n_u16 (__a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_x_u32 (uint32_t __a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_x_n_u32 (__a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_x_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_x_wb_u8 (__a, __imm, __p); +} + 
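(Illustrative usage, not part of the patch: a minimal sketch of the predicated "_x" vidup/vddup forms defined above, assuming an MVE-enabled target. The helper names and values here are made up for the example; the step immediate is restricted to 1, 2, 4 or 8.)

#include <arm_mve.h>

uint8x16_t
idup_sketch (uint32_t start, mve_pred16_t p)
{
  /* Incrementing sequence from 'start' in steps of 4; lanes where the
     predicate is false are "don't care" with the _x variants.  */
  return __arm_vidupq_x_u8 (start, 4, p);
}

uint8x16_t
ddup_wb_sketch (uint32_t *start, mve_pred16_t p)
{
  /* Pointer overload: also writes the updated start value back through
     the pointer (the _wb form underneath).  */
  return __arm_vddupq_x_u8 (start, 4, p);
}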
+__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_x_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_x_wb_u16 (__a, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vidupq_x_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p) +{ + return __arm_vidupq_x_wb_u32 (__a, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_x_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_x_n_u8 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_x_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_x_n_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_x_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_x_n_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_x_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_x_wb_u8 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_x_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_x_wb_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_viwdupq_x_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p) +{ + return __arm_viwdupq_x_wb_u32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vminq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vminq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_x (int8x16_t __a, int8x16_t __b, 
mve_pred16_t __p) +{ + return __arm_vmaxq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_x (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vabsq_x_s8 (__a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_x (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vabsq_x_s16 (__a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_x (int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vabsq_x_s32 (__a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (int16x8_t __a, 
int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq_x (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vclsq_x_s8 (__a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq_x (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vclsq_x_s16 (__a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq_x (int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vclsq_x_s32 (__a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_x (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_x_s8 (__a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_x (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_x_s16 (__a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_x (int32x4_t __a, mve_pred16_t __p) +{ + 
return __arm_vclzq_x_s32 (__a, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_x (uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_x_u8 (__a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_x (uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_x_u16 (__a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_x (uint32x4_t __a, mve_pred16_t __p) +{ + return __arm_vclzq_x_u32 (__a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_x (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vnegq_x_s8 (__a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_x (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vnegq_x_s16 (__a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_x (int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vnegq_x_s32 (__a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulhq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_poly_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_poly_x_p8 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_poly_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_poly_x_p16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_x (int16x8_t __a, int16x8_t __b, mve_pred16_t 
__p) +{ + return __arm_vmullbq_int_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmullbq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmullbq_int_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_poly_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_poly_x_p8 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_poly_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_poly_x_p16 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulltq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulltq_int_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (int32x4_t __a, int32x4_t __b, 
mve_pred16_t __p) +{ + return __arm_vmulq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhaddq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot90_x (int8x16_t __a, 
int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot90_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot90_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot90_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot90_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot90_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot270_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot270_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot270_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot270_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhcaddq_rot270_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhcaddq_rot270_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return 
__arm_vhsubq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vhsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vhsubq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrhaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrhaddq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrmulhq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vandq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vandq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vandq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vandq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vandq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vandq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_x (int8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_x_n_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_x (int16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_x_n_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_x_n_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_x (uint8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_x_n_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_x (uint16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + 
return __arm_vbrsrq_x_n_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_x (uint32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_x_n_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_veorq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_veorq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_veorq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_veorq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_veorq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_veorq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq_x (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmovlbq_x_s8 (__a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq_x (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmovlbq_x_s16 (__a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq_x (uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmovlbq_x_u8 (__a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovlbq_x (uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmovlbq_x_u16 (__a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq_x (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmovltq_x_s8 (__a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq_x (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmovltq_x_s16 (__a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq_x (uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmovltq_x_u8 (__a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmovltq_x (uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmovltq_x_u16 (__a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_x (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_x_s8 (__a, __p); +} + +__extension__ extern 
__inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_x (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_x_s16 (__a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_x (int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_x_s32 (__a, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_x (uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_x_u8 (__a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_x (uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_x_u16 (__a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_x (uint32x4_t __a, mve_pred16_t __p) +{ + return __arm_vmvnq_x_u32 (__a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vornq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vornq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vornq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vornq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vornq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vornq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_x_u8 (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_x_u16 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_x_u32 (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev16q_x (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev16q_x_s8 (__a, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev16q_x (uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev16q_x_u8 (__a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q_x (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev32q_x_s8 (__a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q_x (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev32q_x_s16 (__a, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q_x (uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev32q_x_u8 (__a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q_x (uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev32q_x_u16 (__a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_x (int8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_x_s8 (__a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_x (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_x_s16 (__a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_x (int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_x_s32 (__a, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_x (uint8x16_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_x_u8 (__a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_x (uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_x_u16 (__a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_x (uint32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_x_u32 (__a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_x_s8 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_x_s16 (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __arm_vrshlq_x_s32 (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_x (uint8x16_t __a, int8x16_t __b, 
mve_pred16_t __p)
+{
+  return __arm_vrshlq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+  return __arm_vrshlq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+  return __arm_vrshlq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshllbq_x_n_s8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshllbq_x_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshllbq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshllbq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshlltq_x_n_s8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshlltq_x_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshlltq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshlltq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_u16 (__a, __b, __p);
+}
+
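/* Editor's note -- illustrative only, not part of the original patch.
   A minimal sketch of how the polymorphic "_x" (predicated, with undefined
   inactive lanes) intrinsics above are intended to be used: in C++ the
   generic spelling resolves through the overloads added here, while in C it
   goes through the existing macro-based selection.  Assumes
   __ARM_MVE_PRESERVE_USER_NAMESPACE is not defined (so the unprefixed
   aliases such as vshlq_x and vshllbq_x are available) and that MVE is
   enabled; the function below is a hypothetical example, not part of
   arm_mve.h.  */
#if 0
#include <arm_mve.h>

uint16x8_t
widen_low_halves (uint8x16_t a, int8x16_t shifts, mve_pred16_t p)
{
  /* Resolution picks __arm_vshlq_x_u8 and __arm_vshllbq_x_n_u8 from the
     argument types; lanes where the predicate p is false end up with
     undefined values ("_x" variants).  */
  uint8x16_t shifted = vshlq_x (a, shifts, p);
  return vshllbq_x (shifted, 1, p);
}
#endif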
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_n_s8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_n_s32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshlq_x_n_u32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vrshrq_x_n_s8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vrshrq_x_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vrshrq_x_n_s32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vrshrq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vrshrq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vrshrq_x_n_u32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshrq_x_n_s8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshrq_x_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshrq_x_n_s32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshrq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshrq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+  return __arm_vshrq_x_n_u32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
+{
+  return __arm_vadciq_s32 (__a, __b, __carry_out);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
+{
+  return __arm_vadciq_u32 (__a, __b, __carry_out);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
+{
+  return __arm_vadciq_m_s32 (__inactive, __a, __b, __carry_out, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
+{
+  return __arm_vadciq_m_u32 (__inactive, __a, __b, __carry_out, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq (int32x4_t __a, int32x4_t __b, unsigned * __carry)
+{
+  return __arm_vadcq_s32 (__a, __b, __carry);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
+{
+  return __arm_vadcq_u32 (__a, __b, __carry);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+{
+  return __arm_vadcq_m_s32 (__inactive, __a, __b, __carry, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+{
+  return __arm_vadcq_m_u32 (__inactive, __a, __b, __carry, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
+{
+  return __arm_vsbciq_s32 (__a, __b, __carry_out);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
+{
+  return __arm_vsbciq_u32 (__a, __b, __carry_out);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) +{ + return __arm_vsbciq_m_s32 (__inactive, __a, __b, __carry_out, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p) +{ + return __arm_vsbciq_m_u32 (__inactive, __a, __b, __carry_out, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq (int32x4_t __a, int32x4_t __b, unsigned * __carry) +{ + return __arm_vsbcq_s32 (__a, __b, __carry); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry) +{ + return __arm_vsbcq_u32 (__a, __b, __carry); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p) +{ + return __arm_vsbcq_m_s32 (__inactive, __a, __b, __carry, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsbcq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p) +{ + return __arm_vsbcq_m_u32 (__inactive, __a, __b, __carry, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q_p (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p) +{ + __arm_vst1q_p_u8 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q_p (int8_t * __addr, int8x16_t __value, mve_pred16_t __p) +{ + __arm_vst1q_p_s8 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst2q (int8_t * __addr, int8x16x2_t __value) +{ + __arm_vst2q_s8 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst2q (uint8_t * __addr, uint8x16x2_t __value) +{ + __arm_vst2q_u8 (__addr, __value); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q_z (uint8_t const *__base, mve_pred16_t __p) +{ + return __arm_vld1q_z_u8 (__base, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q_z (int8_t const *__base, mve_pred16_t __p) +{ + return __arm_vld1q_z_s8 (__base, __p); +} + +__extension__ extern __inline int8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld2q (int8_t const * __addr) +{ + return __arm_vld2q_s8 (__addr); +} + +__extension__ extern __inline uint8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld2q (uint8_t const * __addr) +{ + return __arm_vld2q_u8 (__addr); +} + +__extension__ extern __inline int8x16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld4q (int8_t const * __addr) +{ + return __arm_vld4q_s8 (__addr); +} + +__extension__ extern __inline uint8x16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vld4q (uint8_t const * __addr) +{ + return __arm_vld4q_u8 (__addr); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q_p (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p) +{ + __arm_vst1q_p_u16 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q_p (int16_t * __addr, int16x8_t __value, mve_pred16_t __p) +{ + __arm_vst1q_p_s16 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst2q (int16_t * __addr, int16x8x2_t __value) +{ + __arm_vst2q_s16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst2q (uint16_t * __addr, uint16x8x2_t __value) +{ + __arm_vst2q_u16 (__addr, __value); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q_z (uint16_t const *__base, mve_pred16_t __p) +{ + return __arm_vld1q_z_u16 (__base, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q_z (int16_t const *__base, mve_pred16_t __p) +{ + return __arm_vld1q_z_s16 (__base, __p); +} + +__extension__ extern __inline int16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld2q (int16_t const * __addr) +{ + return __arm_vld2q_s16 (__addr); +} + +__extension__ extern __inline uint16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld2q (uint16_t const * __addr) +{ + return __arm_vld2q_u16 (__addr); +} + +__extension__ extern __inline int16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld4q (int16_t const * __addr) +{ + return __arm_vld4q_s16 (__addr); +} + +__extension__ extern __inline uint16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld4q (uint16_t const * __addr) +{ + return __arm_vld4q_u16 (__addr); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q_p (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p) +{ + __arm_vst1q_p_u32 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q_p (int32_t * __addr, int32x4_t __value, mve_pred16_t __p) +{ + __arm_vst1q_p_s32 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst2q (int32_t * __addr, int32x4x2_t __value) +{ + __arm_vst2q_s32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst2q (uint32_t * __addr, uint32x4x2_t __value) +{ + __arm_vst2q_u32 (__addr, __value); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q_z (uint32_t const *__base, mve_pred16_t __p) +{ + return __arm_vld1q_z_u32 (__base, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q_z (int32_t const *__base, mve_pred16_t __p) +{ + return __arm_vld1q_z_s32 (__base, __p); +} + +__extension__ extern __inline int32x4x2_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld2q (int32_t const * __addr) +{ + return __arm_vld2q_s32 (__addr); +} + +__extension__ extern __inline uint32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld2q (uint32_t const * __addr) +{ + return __arm_vld2q_u32 (__addr); +} + +__extension__ extern __inline int32x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld4q (int32_t const * __addr) +{ + return __arm_vld4q_s32 (__addr); +} + +__extension__ extern __inline uint32x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld4q (uint32_t const * __addr) +{ + return __arm_vld4q_u32 (__addr); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsetq_lane (int16_t __a, int16x8_t __b, const int __idx) +{ + return __arm_vsetq_lane_s16 (__a, __b, __idx); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsetq_lane (int32_t __a, int32x4_t __b, const int __idx) +{ + return __arm_vsetq_lane_s32 (__a, __b, __idx); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsetq_lane (int8_t __a, int8x16_t __b, const int __idx) +{ + return __arm_vsetq_lane_s8 (__a, __b, __idx); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsetq_lane (int64_t __a, int64x2_t __b, const int __idx) +{ + return __arm_vsetq_lane_s64 (__a, __b, __idx); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsetq_lane (uint8_t __a, uint8x16_t __b, const int __idx) +{ + return __arm_vsetq_lane_u8 (__a, __b, __idx); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsetq_lane (uint16_t __a, uint16x8_t __b, const int __idx) +{ + return __arm_vsetq_lane_u16 (__a, __b, __idx); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsetq_lane (uint32_t __a, uint32x4_t __b, const int __idx) +{ + return __arm_vsetq_lane_u32 (__a, __b, __idx); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsetq_lane (uint64_t __a, uint64x2_t __b, const int __idx) +{ + return __arm_vsetq_lane_u64 (__a, __b, __idx); +} + +__extension__ extern __inline int16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vgetq_lane (int16x8_t __a, const int __idx) +{ + return __arm_vgetq_lane_s16 (__a, __idx); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vgetq_lane (int32x4_t __a, const int __idx) +{ + return __arm_vgetq_lane_s32 (__a, __idx); +} + +__extension__ extern __inline int8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vgetq_lane (int8x16_t __a, const int __idx) +{ + return __arm_vgetq_lane_s8 (__a, __idx); +} + +__extension__ extern __inline int64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vgetq_lane (int64x2_t __a, const int __idx) +{ + return __arm_vgetq_lane_s64 (__a, __idx); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vgetq_lane (uint8x16_t 
__a, const int __idx) +{ + return __arm_vgetq_lane_u8 (__a, __idx); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vgetq_lane (uint16x8_t __a, const int __idx) +{ + return __arm_vgetq_lane_u16 (__a, __idx); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vgetq_lane (uint32x4_t __a, const int __idx) +{ + return __arm_vgetq_lane_u32 (__a, __idx); +} + +__extension__ extern __inline uint64_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vgetq_lane (uint64x2_t __a, const int __idx) +{ + return __arm_vgetq_lane_u64 (__a, __idx); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq_m (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlcq_m_s8 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq_m (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlcq_m_u8 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq_m (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlcq_m_s16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq_m (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlcq_m_u16 (__a, __b, __imm, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq_m (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlcq_m_s32 (__a, __b, __imm, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlcq_m (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p) +{ + return __arm_vshlcq_m_u32 (__a, __b, __imm, __p); +} + +#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. 
*/ + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst4q (float16_t * __addr, float16x8x4_t __value) +{ + __arm_vst4q_f16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst4q (float32_t * __addr, float32x4x4_t __value) +{ + __arm_vst4q_f32 (__addr, __value); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndxq (float16x8_t __a) +{ + return __arm_vrndxq_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndxq (float32x4_t __a) +{ + return __arm_vrndxq_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndq (float16x8_t __a) +{ + return __arm_vrndq_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndq (float32x4_t __a) +{ + return __arm_vrndq_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndpq (float16x8_t __a) +{ + return __arm_vrndpq_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndpq (float32x4_t __a) +{ + return __arm_vrndpq_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndnq (float16x8_t __a) +{ + return __arm_vrndnq_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndnq (float32x4_t __a) +{ + return __arm_vrndnq_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndmq (float16x8_t __a) +{ + return __arm_vrndmq_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndmq (float32x4_t __a) +{ + return __arm_vrndmq_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndaq (float16x8_t __a) +{ + return __arm_vrndaq_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndaq (float32x4_t __a) +{ + return __arm_vrndaq_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q (float16x8_t __a) +{ + return __arm_vrev64q_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q (float32x4_t __a) +{ + return __arm_vrev64q_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq (float16x8_t __a) +{ + return __arm_vnegq_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq (float32x4_t __a) +{ + return __arm_vnegq_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_n (float16_t __a) +{ + return __arm_vdupq_n_f16 (__a); 
+} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_n (float32_t __a) +{ + return __arm_vdupq_n_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq (float16x8_t __a) +{ + return __arm_vabsq_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq (float32x4_t __a) +{ + return __arm_vabsq_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q (float16x8_t __a) +{ + return __arm_vrev32q_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvttq_f32 (float16x8_t __a) +{ + return __arm_vcvttq_f32_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtbq_f32 (float16x8_t __a) +{ + return __arm_vcvtbq_f32_f16 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq (int16x8_t __a) +{ + return __arm_vcvtq_f16_s16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq (int32x4_t __a) +{ + return __arm_vcvtq_f32_s32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq (uint16x8_t __a) +{ + return __arm_vcvtq_f16_u16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq (uint32x4_t __a) +{ + return __arm_vcvtq_f32_u32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (float16x8_t __a, float16_t __b) +{ + return __arm_vsubq_n_f16 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (float32x4_t __a, float32_t __b) +{ + return __arm_vsubq_n_f32 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq (float16x8_t __a, int32_t __b) +{ + return __arm_vbrsrq_n_f16 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq (float32x4_t __a, int32_t __b) +{ + return __arm_vbrsrq_n_f32 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_n (int16x8_t __a, const int __imm6) +{ + return __arm_vcvtq_n_f16_s16 (__a, __imm6); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_n (int32x4_t __a, const int __imm6) +{ + return __arm_vcvtq_n_f32_s32 (__a, __imm6); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_n (uint16x8_t __a, const int __imm6) +{ + return __arm_vcvtq_n_f16_u16 (__a, __imm6); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_n (uint32x4_t __a, const int __imm6) +{ + return __arm_vcvtq_n_f32_u32 (__a, __imm6); +} + 
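As an aside for reviewers (illustrative only, not part of the patch): with the overloads above, the generic vcvtq/vcvtq_n names resolve on the argument type when the header is compiled as C++, matching the existing C behaviour.  A minimal sketch, assuming an MVE floating-point target (e.g. -march=armv8.1-m.main+mve.fp -mfloat-abi=hard) and that __ARM_MVE_PRESERVE_USER_NAMESPACE is not defined so the short names are available:

#include <arm_mve.h>

float16x8_t
scale_q3 (int16x8_t fixed)
{
  /* int16x8_t operand selects __arm_vcvtq_n_f16_s16; the immediate is
     the number of fractional bits.  */
  return vcvtq_n (fixed, 3);
}

float32x4_t
to_float (uint32x4_t u)
{
  /* uint32x4_t operand selects __arm_vcvtq_f32_u32.  */
  return vcvtq (u);
}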
+__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (float16x8_t __a, float16_t __b) +{ + return __arm_vcmpneq_n_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcmpneq_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq (float16x8_t __a, float16_t __b) +{ + return __arm_vcmpltq_n_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcmpltq_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq (float16x8_t __a, float16_t __b) +{ + return __arm_vcmpleq_n_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcmpleq_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq (float16x8_t __a, float16_t __b) +{ + return __arm_vcmpgtq_n_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcmpgtq_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq (float16x8_t __a, float16_t __b) +{ + return __arm_vcmpgeq_n_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcmpgeq_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (float16x8_t __a, float16_t __b) +{ + return __arm_vcmpeqq_n_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcmpeqq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vsubq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vorrq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vornq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (float16x8_t __a, float16_t __b) +{ + return __arm_vmulq_n_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vmulq_f16 (__a, __b); +} + 
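A second sketch (again illustrative, not part of the patch): the same generic comparison name dispatches to the _n variant when the second operand is a scalar and to the vector variant otherwise, under the same target assumptions as above:

#include <arm_mve.h>

mve_pred16_t
eq_lanes (float16x8_t a, float16x8_t b)
{
  /* Vector second operand selects __arm_vcmpeqq_f16.  */
  return vcmpeqq (a, b);
}

mve_pred16_t
eq_scalar (float16x8_t a, float16_t b)
{
  /* Scalar second operand selects __arm_vcmpeqq_n_f16.  */
  return vcmpeqq (a, b);
}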
+__extension__ extern __inline float16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmvq (float16_t __a, float16x8_t __b) +{ + return __arm_vminnmvq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vminnmq_f16 (__a, __b); +} + +__extension__ extern __inline float16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmavq (float16_t __a, float16x8_t __b) +{ + return __arm_vminnmavq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmaq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vminnmaq_f16 (__a, __b); +} + +__extension__ extern __inline float16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmvq (float16_t __a, float16x8_t __b) +{ + return __arm_vmaxnmvq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vmaxnmq_f16 (__a, __b); +} + +__extension__ extern __inline float16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmavq (float16_t __a, float16x8_t __b) +{ + return __arm_vmaxnmavq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmaq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vmaxnmaq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq (float16x8_t __a, float16x8_t __b) +{ + return __arm_veorq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot90 (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcmulq_rot90_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot270 (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcmulq_rot270_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot180 (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcmulq_rot180_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcmulq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90 (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcaddq_rot90_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270 (float16x8_t __a, float16x8_t __b) +{ + return __arm_vcaddq_rot270_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vbicq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq (float16x8_t __a, float16x8_t __b) +{ + 
return __arm_vandq_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (float16x8_t __a, float16_t __b) +{ + return __arm_vaddq_n_f16 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vabdq_f16 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (float32x4_t __a, float32_t __b) +{ + return __arm_vcmpneq_n_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcmpneq_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq (float32x4_t __a, float32_t __b) +{ + return __arm_vcmpltq_n_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcmpltq_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq (float32x4_t __a, float32_t __b) +{ + return __arm_vcmpleq_n_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcmpleq_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq (float32x4_t __a, float32_t __b) +{ + return __arm_vcmpgtq_n_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcmpgtq_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq (float32x4_t __a, float32_t __b) +{ + return __arm_vcmpgeq_n_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcmpgeq_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (float32x4_t __a, float32_t __b) +{ + return __arm_vcmpeqq_n_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcmpeqq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vsubq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vorrq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq (float32x4_t __a, float32x4_t __b) +{ + return 
__arm_vornq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (float32x4_t __a, float32_t __b) +{ + return __arm_vmulq_n_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vmulq_f32 (__a, __b); +} + +__extension__ extern __inline float32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmvq (float32_t __a, float32x4_t __b) +{ + return __arm_vminnmvq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vminnmq_f32 (__a, __b); +} + +__extension__ extern __inline float32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmavq (float32_t __a, float32x4_t __b) +{ + return __arm_vminnmavq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmaq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vminnmaq_f32 (__a, __b); +} + +__extension__ extern __inline float32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmvq (float32_t __a, float32x4_t __b) +{ + return __arm_vmaxnmvq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vmaxnmq_f32 (__a, __b); +} + +__extension__ extern __inline float32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmavq (float32_t __a, float32x4_t __b) +{ + return __arm_vmaxnmavq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmaq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vmaxnmaq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq (float32x4_t __a, float32x4_t __b) +{ + return __arm_veorq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot90 (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcmulq_rot90_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot270 (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcmulq_rot270_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot180 (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcmulq_rot180_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcmulq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90 (float32x4_t __a, float32x4_t __b) +{ + return __arm_vcaddq_rot90_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270 (float32x4_t 
__a, float32x4_t __b) +{ + return __arm_vcaddq_rot270_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vbicq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vandq_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (float32x4_t __a, float32_t __b) +{ + return __arm_vaddq_n_f32 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vabdq_f32 (__a, __b); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_f32 (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtaq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtaq_m_s16_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtaq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtaq_m_u16_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtaq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtaq_m_s32_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtaq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtaq_m_u32_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_m_f16_s16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_m_f16_u16 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_m_f32_s32 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_m_f32_u32 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtbq_m (float16x8_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return 
__arm_vcvtbq_m_f16_f32 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtbq_m (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtbq_m_f32_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvttq_m (float16x8_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcvttq_m_f16_f32 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvttq_m (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvttq_m_f32_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev32q_m_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq (float16x8_t __a, float16x8_t __b, float16x8_t __c) +{ + return __arm_vcmlaq_f16 (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot180 (float16x8_t __a, float16x8_t __b, float16x8_t __c) +{ + return __arm_vcmlaq_rot180_f16 (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot270 (float16x8_t __a, float16x8_t __b, float16x8_t __c) +{ + return __arm_vcmlaq_rot270_f16 (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot90 (float16x8_t __a, float16x8_t __b, float16x8_t __c) +{ + return __arm_vcmlaq_rot90_f16 (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmaq (float16x8_t __a, float16x8_t __b, float16x8_t __c) +{ + return __arm_vfmaq_f16 (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmaq (float16x8_t __a, float16x8_t __b, float16_t __c) +{ + return __arm_vfmaq_n_f16 (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmasq (float16x8_t __a, float16x8_t __b, float16_t __c) +{ + return __arm_vfmasq_n_f16 (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmsq (float16x8_t __a, float16x8_t __b, float16x8_t __c) +{ + return __arm_vfmsq_f16 (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vabsq_m_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtmq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtmq_m_s16_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtnq_m (int16x8_t __inactive, 
float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtnq_m_s16_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtpq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtpq_m_s16_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_m_s16_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m (float16x8_t __inactive, float16_t __a, mve_pred16_t __p) +{ + return __arm_vdupq_m_n_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmaq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxnmaq_m_f16 (__a, __b, __p); +} + +__extension__ extern __inline float16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmavq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxnmavq_p_f16 (__a, __b, __p); +} + +__extension__ extern __inline float16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmvq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxnmvq_p_f16 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmaq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminnmaq_m_f16 (__a, __b, __p); +} + +__extension__ extern __inline float16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmavq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminnmavq_p_f16 (__a, __b, __p); +} + +__extension__ extern __inline float16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmvq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminnmvq_p_f16 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vnegq_m_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vpselq_f16 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_m_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndaq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndaq_m_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndmq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndmq_m_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndnq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndnq_m_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndpq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndpq_m_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndq_m_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndxq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndxq_m_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_n_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgeq_m_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgeq_m_n_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgtq_m_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgtq_m_n_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpleq_m_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpleq_m_n_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpltq_m_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpltq_m_n_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_f16 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_n_f16 (__a, __b, __p); +} + +__extension__ 
extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtmq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtmq_m_u16_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtnq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtnq_m_u16_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtpq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtpq_m_u16_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_m_u16_f16 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq (float32x4_t __a, float32x4_t __b, float32x4_t __c) +{ + return __arm_vcmlaq_f32 (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot180 (float32x4_t __a, float32x4_t __b, float32x4_t __c) +{ + return __arm_vcmlaq_rot180_f32 (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot270 (float32x4_t __a, float32x4_t __b, float32x4_t __c) +{ + return __arm_vcmlaq_rot270_f32 (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot90 (float32x4_t __a, float32x4_t __b, float32x4_t __c) +{ + return __arm_vcmlaq_rot90_f32 (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmaq (float32x4_t __a, float32x4_t __b, float32x4_t __c) +{ + return __arm_vfmaq_f32 (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmaq (float32x4_t __a, float32x4_t __b, float32_t __c) +{ + return __arm_vfmaq_n_f32 (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmasq (float32x4_t __a, float32x4_t __b, float32_t __c) +{ + return __arm_vfmasq_n_f32 (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmsq (float32x4_t __a, float32x4_t __b, float32x4_t __c) +{ + return __arm_vfmsq_f32 (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vabsq_m_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtmq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtmq_m_s32_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtnq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return 
__arm_vcvtnq_m_s32_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtpq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtpq_m_s32_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_m_s32_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m (float32x4_t __inactive, float32_t __a, mve_pred16_t __p) +{ + return __arm_vdupq_m_n_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmaq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxnmaq_m_f32 (__a, __b, __p); +} + +__extension__ extern __inline float32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmavq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxnmavq_p_f32 (__a, __b, __p); +} + +__extension__ extern __inline float32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmvq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxnmvq_p_f32 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmaq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminnmaq_m_f32 (__a, __b, __p); +} + +__extension__ extern __inline float32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmavq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminnmavq_p_f32 (__a, __b, __p); +} + +__extension__ extern __inline float32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmvq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminnmvq_p_f32 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vnegq_m_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vpselq_f32 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_m_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndaq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndaq_m_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndmq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndmq_m_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+__arm_vrndnq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndnq_m_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndpq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndpq_m_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndq_m_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndxq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndxq_m_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpeqq_m_n_f32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgeq_m_f32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgeq_m_n_f32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgtq_m_f32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpgtq_m_n_f32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpleq_m_f32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpleq_m_n_f32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpltq_m_f32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpltq_m_n_f32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_f32 (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vcmpneq_m_n_f32 (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtmq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtmq_m_u32_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtnq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtnq_m_u32_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtpq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtpq_m_u32_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_m_u32_f32 (__inactive, __a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m_n (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_m_n_f16_u16 (__inactive, __a, __imm6, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m_n (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_m_n_f16_s16 (__inactive, __a, __imm6, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m_n (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_m_n_f32_u32 (__inactive, __a, __imm6, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m_n (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_m_n_f32_s32 (__inactive, __a, __imm6, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_n_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_m (float16x8_t __inactive, 
float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_m_n_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vandq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vandq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_m (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_m_n_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_m (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_m_n_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) +{ + return __arm_vcmlaq_m_f32 (__a, __b, __c, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) +{ + return __arm_vcmlaq_m_f16 (__a, __b, __c, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot180_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, 
mve_pred16_t __p) +{ + return __arm_vcmlaq_rot180_m_f32 (__a, __b, __c, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot180_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) +{ + return __arm_vcmlaq_rot180_m_f16 (__a, __b, __c, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot270_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) +{ + return __arm_vcmlaq_rot270_m_f32 (__a, __b, __c, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot270_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) +{ + return __arm_vcmlaq_rot270_m_f16 (__a, __b, __c, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot90_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) +{ + return __arm_vcmlaq_rot90_m_f32 (__a, __b, __c, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmlaq_rot90_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) +{ + return __arm_vcmlaq_rot90_m_f16 (__a, __b, __c, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot180_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot180_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot180_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot180_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot270_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot270_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot270_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot270_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot90_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot90_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot90_m (float16x8_t __inactive, 
float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot90_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m_n (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_m_n_s32_f32 (__inactive, __a, __imm6, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m_n (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_m_n_s16_f16 (__inactive, __a, __imm6, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m_n (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_m_n_u32_f32 (__inactive, __a, __imm6, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_m_n (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_m_n_u16_f16 (__inactive, __a, __imm6, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_veorq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_veorq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) +{ + return __arm_vfmaq_m_f32 (__a, __b, __c, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmaq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) +{ + return __arm_vfmaq_m_f16 (__a, __b, __c, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p) +{ + return __arm_vfmaq_m_n_f32 (__a, __b, __c, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmaq_m (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p) +{ + return __arm_vfmaq_m_n_f16 (__a, __b, __c, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmasq_m (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p) +{ + return __arm_vfmasq_m_n_f32 (__a, __b, __c, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmasq_m (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p) +{ + return __arm_vfmasq_m_n_f16 (__a, __b, __c, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmsq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p) +{ + return __arm_vfmsq_m_f32 (__a, __b, __c, __p); +} + 
+__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vfmsq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p) +{ + return __arm_vfmsq_m_f16 (__a, __b, __c, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxnmq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxnmq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminnmq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminnmq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_n_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_m_n_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vornq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vornq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_n_f32 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_m_n_f16 (__inactive, __a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q (float32_t const * __base) +{ + return __arm_vld1q_f32 (__base); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q (float16_t const * __base) +{ + return __arm_vld1q_f16 (__base); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_offset (float16_t const * __base, uint16x8_t __offset) +{ + return __arm_vldrhq_gather_offset_f16 (__base, __offset); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) +{ + return __arm_vldrhq_gather_offset_z_f16 (__base, __offset, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_shifted_offset (float16_t const * __base, uint16x8_t __offset) +{ + return __arm_vldrhq_gather_shifted_offset_f16 (__base, __offset); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrhq_gather_shifted_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p) +{ + return __arm_vldrhq_gather_shifted_offset_z_f16 (__base, __offset, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_offset (float32_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrwq_gather_offset_f32 (__base, __offset); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return __arm_vldrwq_gather_offset_z_f32 (__base, __offset, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_shifted_offset (float32_t const * __base, uint32x4_t __offset) +{ + return __arm_vldrwq_gather_shifted_offset_f32 (__base, __offset); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vldrwq_gather_shifted_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p) +{ + return 
__arm_vldrwq_gather_shifted_offset_z_f32 (__base, __offset, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_p (float32_t * __addr, float32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_p_f32 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq (float32_t * __addr, float32x4_t __value) +{ + __arm_vstrwq_f32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q (float32_t * __addr, float32x4_t __value) +{ + __arm_vst1q_f32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q (float16_t * __addr, float16x8_t __value) +{ + __arm_vst1q_f16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq (float16_t * __addr, float16x8_t __value) +{ + __arm_vstrhq_f16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_p (float16_t * __addr, float16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_p_f16 (__addr, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset (float16_t * __base, uint16x8_t __offset, float16x8_t __value) +{ + __arm_vstrhq_scatter_offset_f16 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset_p (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_scatter_offset_p_f16 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset (float16_t * __base, uint16x8_t __offset, float16x8_t __value) +{ + __arm_vstrhq_scatter_shifted_offset_f16 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset_p (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p) +{ + __arm_vstrhq_scatter_shifted_offset_p_f16 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, float32x4_t __value) +{ + __arm_vstrwq_scatter_base_f32 (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_base_p_f32 (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset (float32_t * __base, uint32x4_t __offset, float32x4_t __value) +{ + __arm_vstrwq_scatter_offset_f32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset_p (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t 
__p) +{ + __arm_vstrwq_scatter_offset_p_f32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset (float32_t * __base, uint32x4_t __offset, float32x4_t __value) +{ + __arm_vstrwq_scatter_shifted_offset_f32 (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset_p (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_shifted_offset_p_f32 (__base, __offset, __value, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (float16x8_t __a, float16x8_t __b) +{ + return __arm_vaddq_f16 (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq (float32x4_t __a, float32x4_t __b) +{ + return __arm_vaddq_f32 (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vuninitializedq (float16x8_t /* __v ATTRIBUTE UNUSED */) +{ + return __arm_vuninitializedq_f16 (); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vuninitializedq (float32x4_t /* __v ATTRIBUTE UNUSED */) +{ + return __arm_vuninitializedq_f32 (); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s32 (float16x8_t __a) +{ + return __arm_vreinterpretq_s32_f16 (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s32 (float32x4_t __a) +{ + return __arm_vreinterpretq_s32_f32 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s16 (float16x8_t __a) +{ + return __arm_vreinterpretq_s16_f16 (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s16 (float32x4_t __a) +{ + return __arm_vreinterpretq_s16_f32 (__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s64 (float16x8_t __a) +{ + return __arm_vreinterpretq_s64_f16 (__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s64 (float32x4_t __a) +{ + return __arm_vreinterpretq_s64_f32 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s8 (float16x8_t __a) +{ + return __arm_vreinterpretq_s8_f16 (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_s8 (float32x4_t __a) +{ + return __arm_vreinterpretq_s8_f32 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u16 (float16x8_t __a) +{ + return __arm_vreinterpretq_u16_f16 (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u16 (float32x4_t __a) +{ + return 
__arm_vreinterpretq_u16_f32 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u32 (float16x8_t __a) +{ + return __arm_vreinterpretq_u32_f16 (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u32 (float32x4_t __a) +{ + return __arm_vreinterpretq_u32_f32 (__a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u64 (float16x8_t __a) +{ + return __arm_vreinterpretq_u64_f16 (__a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u64 (float32x4_t __a) +{ + return __arm_vreinterpretq_u64_f32 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u8 (float16x8_t __a) +{ + return __arm_vreinterpretq_u8_f16 (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_u8 (float32x4_t __a) +{ + return __arm_vreinterpretq_u8_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f16 (float32x4_t __a) +{ + return __arm_vreinterpretq_f16_f32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f16 (int16x8_t __a) +{ + return __arm_vreinterpretq_f16_s16 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f16 (int32x4_t __a) +{ + return __arm_vreinterpretq_f16_s32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f16 (int64x2_t __a) +{ + return __arm_vreinterpretq_f16_s64 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f16 (int8x16_t __a) +{ + return __arm_vreinterpretq_f16_s8 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f16 (uint16x8_t __a) +{ + return __arm_vreinterpretq_f16_u16 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f16 (uint32x4_t __a) +{ + return __arm_vreinterpretq_f16_u32 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f16 (uint64x2_t __a) +{ + return __arm_vreinterpretq_f16_u64 (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f16 (uint8x16_t __a) +{ + return __arm_vreinterpretq_f16_u8 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f32 (float16x8_t __a) +{ + return __arm_vreinterpretq_f32_f16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f32 (int16x8_t __a) +{ + return __arm_vreinterpretq_f32_s16 (__a); +} + +__extension__ extern 
__inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f32 (int32x4_t __a) +{ + return __arm_vreinterpretq_f32_s32 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f32 (int64x2_t __a) +{ + return __arm_vreinterpretq_f32_s64 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f32 (int8x16_t __a) +{ + return __arm_vreinterpretq_f32_s8 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f32 (uint16x8_t __a) +{ + return __arm_vreinterpretq_f32_u16 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f32 (uint32x4_t __a) +{ + return __arm_vreinterpretq_f32_u32 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f32 (uint64x2_t __a) +{ + return __arm_vreinterpretq_f32_u64 (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vreinterpretq_f32 (uint8x16_t __a) +{ + return __arm_vreinterpretq_f32_u8 (__a); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, float32x4_t __value) +{ + __arm_vstrwq_scatter_base_wb_f32 (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p) +{ + __arm_vstrwq_scatter_base_wb_p_f32 (__addr, __offset, __value, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vminnmq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminnmq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vminnmq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmaxnmq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxnmq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmaxnmq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabdq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vabdq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_x (float16x8_t __a, mve_pred16_t __p) 
+{ + return __arm_vabsq_x_f16 (__a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_x (float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vabsq_x_f32 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_n_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vaddq_x_n_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_x (float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vnegq_x_f16 (__a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_x (float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vnegq_x_f32 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_n_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmulq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vmulq_x_n_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_n_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsubq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p) +{ + return __arm_vsubq_x_n_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot90_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot90_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcaddq_rot270_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcaddq_rot270_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot90_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot90_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot90_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot180_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot180_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot180_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot180_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot270_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot270_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmulq_rot270_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vcmulq_rot270_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_x (uint16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_x_f16_u16 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_x (int16x8_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_x_f16_s16 (__a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_x (int32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_x_f32_s32 (__a, __p); +} + +__extension__ extern __inline float32x4_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_x (uint32x4_t __a, mve_pred16_t __p) +{ + return __arm_vcvtq_x_f32_u32 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_x_n (int16x8_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_x_n_f16_s16 (__a, __imm6, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_x_n (uint16x8_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_x_n_f16_u16 (__a, __imm6, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_x_n (int32x4_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_x_n_f32_s32 (__a, __imm6, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcvtq_x_n (uint32x4_t __a, const int __imm6, mve_pred16_t __p) +{ + return __arm_vcvtq_x_n_f32_u32 (__a, __imm6, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndq_x (float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndq_x_f16 (__a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndq_x (float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndq_x_f32 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndnq_x (float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndnq_x_f16 (__a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndnq_x (float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndnq_x_f32 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndmq_x (float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndmq_x_f16 (__a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndmq_x (float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndmq_x_f32 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndpq_x (float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndpq_x_f16 (__a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndpq_x (float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndpq_x_f32 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndaq_x (float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndaq_x_f16 (__a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndaq_x (float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndaq_x_f32 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrndxq_x (float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrndxq_x_f16 (__a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vrndxq_x (float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrndxq_x_f32 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vandq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vandq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vandq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbicq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vbicq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_x (float16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_x_n_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vbrsrq_x (float32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __arm_vbrsrq_x_n_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_veorq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_veorq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_veorq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vornq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vornq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vornq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_x_f16 (__a, __b, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vorrq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p) +{ + return __arm_vorrq_x_f32 (__a, __b, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev32q_x (float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev32q_x_f16 (__a, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_x (float16x8_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_x_f16 (__a, __p); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_x (float32x4_t __a, mve_pred16_t __p) +{ + return __arm_vrev64q_x_f32 (__a, __p); 
+} + +__extension__ extern __inline float16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld4q (float16_t const * __addr) +{ + return __arm_vld4q_f16 (__addr); +} + +__extension__ extern __inline float16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld2q (float16_t const * __addr) +{ + return __arm_vld2q_f16 (__addr); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q_z (float16_t const *__base, mve_pred16_t __p) +{ + return __arm_vld1q_z_f16 (__base, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst2q (float16_t * __addr, float16x8x2_t __value) +{ + __arm_vst2q_f16 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q_p (float16_t * __addr, float16x8_t __value, mve_pred16_t __p) +{ + __arm_vst1q_p_f16 (__addr, __value, __p); +} + +__extension__ extern __inline float32x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld4q (float32_t const * __addr) +{ + return __arm_vld4q_f32 (__addr); +} + +__extension__ extern __inline float32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld2q (float32_t const * __addr) +{ + return __arm_vld2q_f32 (__addr); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vld1q_z (float32_t const *__base, mve_pred16_t __p) +{ + return __arm_vld1q_z_f32 (__base, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst2q (float32_t * __addr, float32x4x2_t __value) +{ + __arm_vst2q_f32 (__addr, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vst1q_p (float32_t * __addr, float32x4_t __value, mve_pred16_t __p) +{ + __arm_vst1q_p_f32 (__addr, __value, __p); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsetq_lane (float16_t __a, float16x8_t __b, const int __idx) +{ + return __arm_vsetq_lane_f16 (__a, __b, __idx); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsetq_lane (float32_t __a, float32x4_t __b, const int __idx) +{ + return __arm_vsetq_lane_f32 (__a, __b, __idx); +} + +__extension__ extern __inline float16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vgetq_lane (float16x8_t __a, const int __idx) +{ + return __arm_vgetq_lane_f16 (__a, __idx); +} + +__extension__ extern __inline float32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vgetq_lane (float32x4_t __a, const int __idx) +{ + return __arm_vgetq_lane_f32 (__a, __idx); +} +#endif /* MVE Floating point. 
*/ + +#else +enum { + __ARM_mve_type_fp_n = 1, + __ARM_mve_type_int_n, + __ARM_mve_type_float16_t_ptr, + __ARM_mve_type_float16x8_t, + __ARM_mve_type_float16x8x2_t, + __ARM_mve_type_float16x8x4_t, + __ARM_mve_type_float32_t_ptr, + __ARM_mve_type_float32x4_t, + __ARM_mve_type_float32x4x2_t, + __ARM_mve_type_float32x4x4_t, + __ARM_mve_type_int16_t_ptr, + __ARM_mve_type_int16x8_t, + __ARM_mve_type_int16x8x2_t, + __ARM_mve_type_int16x8x4_t, + __ARM_mve_type_int32_t_ptr, + __ARM_mve_type_int32x4_t, + __ARM_mve_type_int32x4x2_t, + __ARM_mve_type_int32x4x4_t, + __ARM_mve_type_int64_t_ptr, + __ARM_mve_type_int64x2_t, + __ARM_mve_type_int8_t_ptr, + __ARM_mve_type_int8x16_t, + __ARM_mve_type_int8x16x2_t, + __ARM_mve_type_int8x16x4_t, + __ARM_mve_type_uint16_t_ptr, + __ARM_mve_type_uint16x8_t, + __ARM_mve_type_uint16x8x2_t, + __ARM_mve_type_uint16x8x4_t, + __ARM_mve_type_uint32_t_ptr, + __ARM_mve_type_uint32x4_t, + __ARM_mve_type_uint32x4x2_t, + __ARM_mve_type_uint32x4x4_t, + __ARM_mve_type_uint64_t_ptr, + __ARM_mve_type_uint64x2_t, + __ARM_mve_type_uint8_t_ptr, + __ARM_mve_type_uint8x16_t, + __ARM_mve_type_uint8x16x2_t, + __ARM_mve_type_uint8x16x4_t, + __ARM_mve_unsupported_type +}; + +#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ +#define __ARM_mve_typeid(x) _Generic(x, \ + float16_t: __ARM_mve_type_fp_n, \ + float16_t *: __ARM_mve_type_float16_t_ptr, \ + float16_t const *: __ARM_mve_type_float16_t_ptr, \ + float16x8_t: __ARM_mve_type_float16x8_t, \ + float16x8x2_t: __ARM_mve_type_float16x8x2_t, \ + float16x8x4_t: __ARM_mve_type_float16x8x4_t, \ + float32_t: __ARM_mve_type_fp_n, \ + float32_t *: __ARM_mve_type_float32_t_ptr, \ + float32_t const *: __ARM_mve_type_float32_t_ptr, \ + float32x4_t: __ARM_mve_type_float32x4_t, \ + float32x4x2_t: __ARM_mve_type_float32x4x2_t, \ + float32x4x4_t: __ARM_mve_type_float32x4x4_t, \ + int16_t: __ARM_mve_type_int_n, \ + int16_t *: __ARM_mve_type_int16_t_ptr, \ + int16_t const *: __ARM_mve_type_int16_t_ptr, \ + int16x8_t: __ARM_mve_type_int16x8_t, \ + int16x8x2_t: __ARM_mve_type_int16x8x2_t, \ + int16x8x4_t: __ARM_mve_type_int16x8x4_t, \ + int32_t: __ARM_mve_type_int_n, \ + int32_t *: __ARM_mve_type_int32_t_ptr, \ + int32_t const *: __ARM_mve_type_int32_t_ptr, \ + int32x4_t: __ARM_mve_type_int32x4_t, \ + int32x4x2_t: __ARM_mve_type_int32x4x2_t, \ + int32x4x4_t: __ARM_mve_type_int32x4x4_t, \ + int64_t: __ARM_mve_type_int_n, \ + int64_t *: __ARM_mve_type_int64_t_ptr, \ + int64_t const *: __ARM_mve_type_int64_t_ptr, \ + int64x2_t: __ARM_mve_type_int64x2_t, \ + int8_t: __ARM_mve_type_int_n, \ + int8_t *: __ARM_mve_type_int8_t_ptr, \ + int8_t const *: __ARM_mve_type_int8_t_ptr, \ + int8x16_t: __ARM_mve_type_int8x16_t, \ + int8x16x2_t: __ARM_mve_type_int8x16x2_t, \ + int8x16x4_t: __ARM_mve_type_int8x16x4_t, \ + uint16_t: __ARM_mve_type_int_n, \ + uint16_t *: __ARM_mve_type_uint16_t_ptr, \ + uint16_t const *: __ARM_mve_type_uint16_t_ptr, \ + uint16x8_t: __ARM_mve_type_uint16x8_t, \ + uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \ + uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \ + uint32_t: __ARM_mve_type_int_n, \ + uint32_t *: __ARM_mve_type_uint32_t_ptr, \ + uint32_t const *: __ARM_mve_type_uint32_t_ptr, \ + uint32x4_t: __ARM_mve_type_uint32x4_t, \ + uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \ + uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \ + uint64_t: __ARM_mve_type_int_n, \ + uint64_t *: __ARM_mve_type_uint64_t_ptr, \ + uint64_t const *: __ARM_mve_type_uint64_t_ptr, \ + uint64x2_t: __ARM_mve_type_uint64x2_t, \ + uint8_t: __ARM_mve_type_int_n, \ + uint8_t *: 
__ARM_mve_type_uint8_t_ptr, \ + uint8_t const *: __ARM_mve_type_uint8_t_ptr, \ + uint8x16_t: __ARM_mve_type_uint8x16_t, \ + uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \ + uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \ + default: _Generic(x, \ + signed char: __ARM_mve_type_int_n, \ + short: __ARM_mve_type_int_n, \ + int: __ARM_mve_type_int_n, \ + long: __ARM_mve_type_int_n, \ + long long: __ARM_mve_type_int_n, \ + unsigned char: __ARM_mve_type_int_n, \ + unsigned short: __ARM_mve_type_int_n, \ + unsigned int: __ARM_mve_type_int_n, \ + unsigned long: __ARM_mve_type_int_n, \ + unsigned long long: __ARM_mve_type_int_n, \ + default: __ARM_mve_unsupported_type)) +#else +#define __ARM_mve_typeid(x) _Generic(x, \ + int16_t: __ARM_mve_type_int_n, \ + int16_t *: __ARM_mve_type_int16_t_ptr, \ + int16_t const *: __ARM_mve_type_int16_t_ptr, \ + int16x8_t: __ARM_mve_type_int16x8_t, \ + int16x8x2_t: __ARM_mve_type_int16x8x2_t, \ + int16x8x4_t: __ARM_mve_type_int16x8x4_t, \ + int32_t: __ARM_mve_type_int_n, \ + int32_t *: __ARM_mve_type_int32_t_ptr, \ + int32_t const *: __ARM_mve_type_int32_t_ptr, \ + int32x4_t: __ARM_mve_type_int32x4_t, \ + int32x4x2_t: __ARM_mve_type_int32x4x2_t, \ + int32x4x4_t: __ARM_mve_type_int32x4x4_t, \ + int64_t: __ARM_mve_type_int_n, \ + int64_t *: __ARM_mve_type_int64_t_ptr, \ + int64_t const *: __ARM_mve_type_int64_t_ptr, \ + int64x2_t: __ARM_mve_type_int64x2_t, \ + int8_t: __ARM_mve_type_int_n, \ + int8_t *: __ARM_mve_type_int8_t_ptr, \ + int8_t const *: __ARM_mve_type_int8_t_ptr, \ + int8x16_t: __ARM_mve_type_int8x16_t, \ + int8x16x2_t: __ARM_mve_type_int8x16x2_t, \ + int8x16x4_t: __ARM_mve_type_int8x16x4_t, \ + uint16_t: __ARM_mve_type_int_n, \ + uint16_t *: __ARM_mve_type_uint16_t_ptr, \ + uint16_t const *: __ARM_mve_type_uint16_t_ptr, \ + uint16x8_t: __ARM_mve_type_uint16x8_t, \ + uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \ + uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \ + uint32_t: __ARM_mve_type_int_n, \ + uint32_t *: __ARM_mve_type_uint32_t_ptr, \ + uint32_t const *: __ARM_mve_type_uint32_t_ptr, \ + uint32x4_t: __ARM_mve_type_uint32x4_t, \ + uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \ + uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \ + uint64_t: __ARM_mve_type_int_n, \ + uint64_t *: __ARM_mve_type_uint64_t_ptr, \ + uint64_t const *: __ARM_mve_type_uint64_t_ptr, \ + uint64x2_t: __ARM_mve_type_uint64x2_t, \ + uint8_t: __ARM_mve_type_int_n, \ + uint8_t *: __ARM_mve_type_uint8_t_ptr, \ + uint8_t const *: __ARM_mve_type_uint8_t_ptr, \ + uint8x16_t: __ARM_mve_type_uint8x16_t, \ + uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \ + uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \ + default: _Generic(x, \ + signed char: __ARM_mve_type_int_n, \ + short: __ARM_mve_type_int_n, \ + int: __ARM_mve_type_int_n, \ + long: __ARM_mve_type_int_n, \ + long long: __ARM_mve_type_int_n, \ + unsigned char: __ARM_mve_type_int_n, \ + unsigned short: __ARM_mve_type_int_n, \ + unsigned int: __ARM_mve_type_int_n, \ + unsigned long: __ARM_mve_type_int_n, \ + unsigned long long: __ARM_mve_type_int_n, \ + default: __ARM_mve_unsupported_type)) +#endif /* MVE Floating point. */ + +extern void *__ARM_undef; +#define __ARM_mve_coerce(param, type) \ + _Generic(param, type: param, default: *(type *)__ARM_undef) +#define __ARM_mve_coerce1(param, type) \ + _Generic(param, type: param, const type: param, default: *(type *)__ARM_undef) + +#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. 
*/ + +#define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \ + int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)), \ + int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x4_t]: __arm_vst4q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x4_t)), \ + int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x4_t]: __arm_vst4q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x4_t)));}) + +#define __arm_vrndxq(p0) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ + int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) + +#define __arm_vrndq(p0) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ + int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) + +#define __arm_vrndpq(p0) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ + int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) + +#define __arm_vrndnq(p0) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ + int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) + +#define __arm_vrndmq(p0) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ + int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) + +#define __arm_vrndaq(p0) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vrev64q(p0) __arm_vrev64q(p0) #define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -20475,7 +37236,6 @@ extern void 
*__ARM_undef; int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vnegq(p0) __arm_vnegq(p0) #define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -20484,13 +37244,11 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vdupq_n(p0) __arm_vdupq_n(p0) #define __arm_vdupq_n(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vdupq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vdupq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vabsq(p0) __arm_vabsq(p0) #define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -20499,7 +37257,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vrev32q(p0) __arm_vrev32q(p0) #define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -20508,37 +37265,31 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_f16 (__ARM_mve_coerce(__p0, float16x8_t)));}) -#define vcvtbq_f32(p0) __arm_vcvtbq_f32(p0) #define __arm_vcvtbq_f32(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvtbq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));}) -#define vcvttq_f32(p0) __arm_vcvttq_f32(p0) #define __arm_vcvttq_f32(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvttq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));}) -#define vrev16q(p0) __arm_vrev16q(p0) #define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));}) -#define vqabsq(p0) __arm_vqabsq(p0) #define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) -#define vqnegq(p0) __arm_vqnegq(p0) #define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ int 
(*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) -#define vmvnq(p0) __arm_vmvnq(p0) #define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -20548,7 +37299,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) -#define vmovlbq(p0) __arm_vmovlbq(p0) #define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -20556,7 +37306,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));}) -#define vmovltq(p0) __arm_vmovltq(p0) #define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -20564,7 +37313,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));}) -#define vclzq(p0) __arm_vclzq(p0) #define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -20574,14 +37322,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) -#define vclsq(p0) __arm_vclsq(p0) #define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) -#define vcvtq(p0) __arm_vcvtq(p0) #define __arm_vcvtq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ @@ -20589,7 +37335,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) -#define vshlq(p0,p1) __arm_vshlq(p0,p1) #define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20600,7 +37345,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vshrq(p0,p1) __arm_vshrq(p0,p1) #define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int 
(*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -20610,7 +37354,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vcvtq_n(p0,p1) __arm_vcvtq_n(p0,p1) #define __arm_vcvtq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_n_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ @@ -20618,7 +37361,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_n_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_n_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vorrq(p0,p1) __arm_vorrq(p0,p1) #define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20631,7 +37373,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vabdq(p0,p1) __arm_vabdq(p0,p1) #define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20644,7 +37385,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vaddq(p0,p1) __arm_vaddq(p0,p1) #define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20665,7 +37405,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));}) -#define vandq(p0,p1) __arm_vandq(p0,p1) #define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20678,7 +37417,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vbicq(p0,p1) __arm_vbicq(p0,p1) #define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20695,7 +37433,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int 
(*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vornq(p0,p1) __arm_vornq(p0,p1) #define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20708,7 +37445,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vmulq(p0,p1) __arm_vmulq(p0,p1) #define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20729,7 +37465,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vcaddq_rot270(p0,p1) __arm_vcaddq_rot270(p0,p1) #define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20742,7 +37477,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vcmpeqq(p0,p1) __arm_vcmpeqq(p0,p1) #define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20763,7 +37497,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vcaddq_rot90(p0,p1) __arm_vcaddq_rot90(p0,p1) #define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20776,17 +37509,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vcmpgeq_n(p0,p1) __arm_vcmpgeq_n(p0,p1) -#define __arm_vcmpgeq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \ - __typeof(p1) __p1 = (p1); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, 
int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ - int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ - int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));}) - -#define vcmpeqq_m(p0,p1,p2) __arm_vcmpeqq_m(p0,p1,p2) #define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20807,7 +37529,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));}) -#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1) #define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20822,7 +37543,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));}) -#define vcmpleq(p0,p1) __arm_vcmpleq(p0,p1) #define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20837,7 +37557,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));}) -#define vcmpltq(p0,p1) __arm_vcmpltq(p0,p1) #define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20852,7 +37571,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));}) -#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1) #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20873,35 +37591,30 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vcmulq(p0,p1) __arm_vcmulq(p0,p1) #define __arm_vcmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vcmulq_rot180(p0,p1) __arm_vcmulq_rot180(p0,p1) #define __arm_vcmulq_rot180(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vcmulq_rot270(p0,p1) __arm_vcmulq_rot270(p0,p1) #define __arm_vcmulq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vcmulq_rot90(p0,p1) __arm_vcmulq_rot90(p0,p1) #define __arm_vcmulq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define veorq(p0,p1) __arm_veorq(p0,p1) #define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -20914,56 +37627,48 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vmaxnmaq(p0,p1) __arm_vmaxnmaq(p0,p1) #define __arm_vmaxnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vmaxnmavq(p0,p1) __arm_vmaxnmavq(p0,p1) #define __arm_vmaxnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int 
(*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vmaxnmq(p0,p1) __arm_vmaxnmq(p0,p1) #define __arm_vmaxnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vmaxnmvq(p0,p1) __arm_vmaxnmvq(p0,p1) #define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vmaxnmvq(p0,p1) __arm_vmaxnmvq(p0,p1) #define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vminnmaq(p0,p1) __arm_vminnmaq(p0,p1) #define __arm_vminnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vminnmavq(p0,p1) __arm_vminnmavq(p0,p1) #define __arm_vminnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vbrsrq(p0,p1) __arm_vbrsrq(p0,p1) #define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -20975,14 +37680,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));}) -#define vminnmq(p0,p1) __arm_vminnmq(p0,p1) #define __arm_vminnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_f16 
(__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vsubq(p0,p1) __arm_vsubq(p0,p1) #define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21003,29 +37706,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vminnmvq(p0,p1) __arm_vminnmvq(p0,p1) #define __arm_vminnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1) -#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ - __typeof(p1) __p1 = (p1); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ - int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ - int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \ - int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ - int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));}) - -#define vshlq_r(p0,p1) __arm_vshlq_r(p0,p1) #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -21035,7 +37721,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 
(__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vshlq_n(p0,p1) __arm_vshlq_n(p0,p1) #define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -21045,7 +37730,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vshlltq(p0,p1) __arm_vshlltq(p0,p1) #define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -21053,7 +37737,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));}) -#define vshllbq(p0,p1) __arm_vshllbq(p0,p1) #define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -21061,7 +37744,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));}) -#define vrshrq(p0,p1) __arm_vrshrq(p0,p1) #define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -21071,7 +37753,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vrshrq(p0,p1) __arm_vrshrq(p0,p1) #define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -21081,7 +37762,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vrshlq(p0,p1) __arm_vrshlq(p0,p1) #define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21098,7 +37778,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vrmulhq(p0,p1) __arm_vrmulhq(p0,p1) #define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21109,7 +37788,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int 
(*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vrhaddq(p0,p1) __arm_vrhaddq(p0,p1) #define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21120,7 +37798,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vqsubq(p0,p1) __arm_vqsubq(p0,p1) #define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21137,14 +37814,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vqshluq(p0,p1) __arm_vqshluq(p0,p1) #define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));}) -#define vqshlq(p0,p1) __arm_vqshlq(p0,p1) #define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21155,7 +37830,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqshlq_r(p0,p1) __arm_vqshlq_r(p0,p1) #define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -21165,7 +37839,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vqshlq_n(p0,p1) __arm_vqshlq_n(p0,p1) #define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -21175,7 +37848,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vqrshlq(p0,p1) __arm_vqrshlq(p0,p1) #define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21192,7 +37864,6 @@ extern void 
*__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) -#define vqrdmulhq(p0,p1) __arm_vqrdmulhq(p0,p1) #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21203,21 +37874,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) -#define vmlaldavxq(p0,p1) __arm_vmlaldavxq(p0,p1) #define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqmovuntq(p0,p1) __arm_vqmovuntq(p0,p1) #define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqmovntq(p0,p1) __arm_vqmovntq(p0,p1) #define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21226,7 +37894,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vqmovnbq(p0,p1) __arm_vqmovnbq(p0,p1) #define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21235,7 +37902,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vqdmulltq(p0,p1) __arm_vqdmulltq(p0,p1) #define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21244,14 +37910,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int 
(*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqmovunbq(p0,p1) __arm_vqmovunbq(p0,p1) #define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqdmullbq(p0,p1) __arm_vqdmullbq(p0,p1) #define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21260,7 +37924,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqdmulhq(p0,p1) __arm_vqdmulhq(p0,p1) #define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21271,7 +37934,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqaddq(p0,p1) __arm_vqaddq(p0,p1) #define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21288,21 +37950,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmulltq_poly(p0,p1) __arm_vmulltq_poly(p0,p1) #define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));}) -#define vmullbq_poly(p0,p1) __arm_vmullbq_poly(p0,p1) #define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));}) -#define vmulltq_int(p0,p1) __arm_vmulltq_int(p0,p1) #define 
__arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21313,7 +37972,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vhaddq(p0,p1) __arm_vhaddq(p0,p1) #define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21330,7 +37988,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vhcaddq_rot270(p0,p1) __arm_vhcaddq_rot270(p0,p1) #define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21338,7 +37995,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vhcaddq_rot90(p0,p1) __arm_vhcaddq_rot90(p0,p1) #define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21346,7 +38002,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vhsubq(p0,p1) __arm_vhsubq(p0,p1) #define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21363,7 +38018,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vminq(p0,p1) __arm_vminq(p0,p1) #define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21374,7 +38028,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vminaq(p0,p1) __arm_vminaq(p0,p1) #define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21382,7 +38035,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vmaxq(p0,p1) __arm_vmaxq(p0,p1) #define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21393,7 +38045,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmaxaq(p0,p1) __arm_vmaxaq(p0,p1) #define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21401,7 +38052,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vmovntq(p0,p1) __arm_vmovntq(p0,p1) #define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21410,7 +38060,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmovnbq(p0,p1) __arm_vmovnbq(p0,p1) #define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21419,7 +38068,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmulhq(p0,p1) __arm_vmulhq(p0,p1) #define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21430,7 +38078,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmullbq_int(p0,p1) __arm_vmullbq_int(p0,p1) #define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21441,7 +38088,6 @@ extern void *__ARM_undef; int 
(*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vbicq_m_n(p0,p1,p2) __arm_vbicq_m_n(p0,p1,p2) #define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ @@ -21449,7 +38095,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vqrshrnbq(p0,p1,p2) __arm_vqrshrnbq(p0,p1,p2) #define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21458,14 +38103,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqrshrunbq(p0,p1,p2) __arm_vqrshrunbq(p0,p1,p2) #define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vshlcq(p0,p1,p2) __arm_vshlcq(p0,p1,p2) #define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ @@ -21475,7 +38118,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vclsq_m(p0,p1,p2) __arm_vclsq_m(p0,p1,p2) #define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21483,7 +38125,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vclzq_m(p0,p1,p2) __arm_vclzq_m(p0,p1,p2) #define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21494,7 +38135,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int 
(*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vmaxaq_m(p0,p1,p2) __arm_vmaxaq_m(p0,p1,p2) #define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21502,7 +38142,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vminaq_m(p0,p1,p2) __arm_vminaq_m(p0,p1,p2) #define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21510,7 +38149,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vmlaq(p0,p1,p2) __arm_vmlaq(p0,p1,p2) #define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21522,7 +38160,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) -#define vsriq(p0,p1,p2) __arm_vsriq(p0,p1,p2) #define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21533,7 +38170,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vsliq(p0,p1,p2) __arm_vsliq(p0,p1,p2) #define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21544,7 +38180,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2) #define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ @@ -21554,7 +38189,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, 
p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vrshlq_m_n(p0,p1,p2) __arm_vrshlq_m_n(p0,p1,p2) #define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ @@ -21565,7 +38199,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));}) -#define vqshlq_m_r(p0,p1,p2) __arm_vqshlq_m_r(p0,p1,p2) #define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ @@ -21575,7 +38208,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vqrshlq_m_n(p0,p1,p2) __arm_vqrshlq_m_n(p0,p1,p2) #define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ @@ -21585,7 +38217,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vqrdmlsdhxq(p0,p1,p2) __arm_vqrdmlsdhxq(p0,p1,p2) #define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21594,7 +38225,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqrdmlsdhq(p0,p1,p2) __arm_vqrdmlsdhq(p0,p1,p2) #define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21603,7 +38233,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqrdmlashq(p0,p1,p2) __arm_vqrdmlashq(p0,p1,p2) #define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21615,7 +38244,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_u32 
(__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) -#define vqrdmlahq(p0,p1,p2) __arm_vqrdmlahq(p0,p1,p2) #define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21627,7 +38255,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) -#define vmlasq(p0,p1,p2) __arm_vmlasq(p0,p1,p2) #define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21639,7 +38266,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) -#define vqdmlahq(p0,p1,p2) __arm_vqdmlahq(p0,p1,p2) #define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21651,7 +38277,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) -#define vqrdmladhxq(p0,p1,p2) __arm_vqrdmladhxq(p0,p1,p2) #define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21660,7 +38285,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqrdmladhq(p0,p1,p2) __arm_vqrdmladhq(p0,p1,p2) #define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21669,7 +38293,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqnegq_m(p0,p1,p2) __arm_vqnegq_m(p0,p1,p2) #define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ 
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21677,7 +38300,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqdmlsdhxq(p0,p1,p2) __arm_vqdmlsdhxq(p0,p1,p2) #define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21686,7 +38308,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqdmlsdhq(p0,p1,p2) __arm_vqdmlsdhq(p0,p1,p2) #define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21695,7 +38316,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqdmladhxq(p0,p1,p2) __arm_vqdmladhxq(p0,p1,p2) #define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21704,7 +38324,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqdmladhq(p0,p1,p2) __arm_vqdmladhq(p0,p1,p2) #define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21713,7 +38332,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vmovlbq_m(p0,p1,p2) __arm_vmovlbq_m(p0,p1,p2) #define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21722,7 +38340,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ int 
(*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) -#define vmovnbq_m(p0,p1,p2) __arm_vmovnbq_m(p0,p1,p2) #define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21731,7 +38348,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vmovntq_m(p0,p1,p2) __arm_vmovntq_m(p0,p1,p2) #define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21740,7 +38356,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vmovltq_m(p0,p1,p2) __arm_vmovltq_m(p0,p1,p2) #define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21749,16 +38364,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) -#define vshrntq(p0,p1,p2) __arm_vshrntq(p0,p1,p2) -#define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ - __typeof(p1) __p1 = (p1); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) - -#define vshrnbq(p0,p1,p2) __arm_vshrnbq(p0,p1,p2) #define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21767,16 +38372,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vrshrntq(p0,p1,p2) __arm_vrshrntq(p0,p1,p2) -#define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ - __typeof(p1) __p1 
= (p1); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) - -#define vcvtaq_m(p0,p1,p2) __arm_vcvtaq_m(p0,p1,p2) #define __arm_vcvtaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21785,7 +38380,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcvtq_m(p0,p1,p2) __arm_vcvtq_m(p0,p1,p2) #define __arm_vcvtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21798,7 +38392,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcvtq_m_n(p0,p1,p2,p3) __arm_vcvtq_m_n(p0,p1,p2,p3) #define __arm_vcvtq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21811,7 +38404,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_n_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_n_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vabsq_m(p0,p1,p2) __arm_vabsq_m(p0,p1,p2) #define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21821,7 +38413,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcmlaq(p0,p1,p2) __arm_vcmlaq(p0,p1,p2) #define __arm_vcmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21829,7 +38420,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, 
float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vcmlaq_rot180(p0,p1,p2) __arm_vcmlaq_rot180(p0,p1,p2) #define __arm_vcmlaq_rot180(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21837,7 +38427,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vcmlaq_rot270(p0,p1,p2) __arm_vcmlaq_rot270(p0,p1,p2) #define __arm_vcmlaq_rot270(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21845,7 +38434,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vcmlaq_rot90(p0,p1,p2) __arm_vcmlaq_rot90(p0,p1,p2) #define __arm_vcmlaq_rot90(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -21853,28 +38441,24 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vrndxq_m(p0,p1,p2) __arm_vrndxq_m(p0,p1,p2) #define __arm_vrndxq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndxq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndxq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrndq_m(p0,p1,p2) __arm_vrndq_m(p0,p1,p2) #define __arm_vrndq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrndpq_m(p0,p1,p2) __arm_vrndpq_m(p0,p1,p2) #define 
__arm_vrndpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndpq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndpq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcmpgtq_m(p0,p1,p2) __arm_vcmpgtq_m(p0,p1,p2) #define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21889,7 +38473,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcmpleq_m(p0,p1,p2) __arm_vcmpleq_m(p0,p1,p2) #define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21904,7 +38487,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));}) -#define vcmpltq_m(p0,p1,p2) __arm_vcmpltq_m(p0,p1,p2) #define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21919,7 +38501,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));}) -#define vcmpneq_m(p0,p1,p2) __arm_vcmpneq_m(p0,p1,p2) #define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21940,21 +38521,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));}) -#define vcvtbq_m(p0,p1,p2) __arm_vcvtbq_m(p0,p1,p2) #define __arm_vcvtbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvtbq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvtbq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcvttq_m(p0,p1,p2) __arm_vcvttq_m(p0,p1,p2) #define __arm_vcvttq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ 
__typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvttq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvttq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcvtmq_m(p0,p1,p2) __arm_vcvtmq_m(p0,p1,p2) #define __arm_vcvtmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21963,7 +38541,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcvtnq_m(p0,p1,p2) __arm_vcvtnq_m(p0,p1,p2) #define __arm_vcvtnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21972,7 +38549,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcvtpq_m(p0,p1,p2) __arm_vcvtpq_m(p0,p1,p2) #define __arm_vcvtpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21981,7 +38557,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vdupq_m(p0,p1,p2) __arm_vdupq_m(p0,p1,p2) #define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -21994,7 +38569,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vdupq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vdupq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));}) -#define vfmaq(p0,p1,p2) __arm_vfmaq(p0,p1,p2) #define __arm_vfmaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22004,7 +38578,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vfmsq(p0,p1,p2) __arm_vfmsq(p0,p1,p2) #define 
__arm_vfmsq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22012,7 +38585,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vfmasq(p0,p1,p2) __arm_vfmasq(p0,p1,p2) #define __arm_vfmasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22020,63 +38592,54 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t)));}) -#define vmaxnmaq_m(p0,p1,p2) __arm_vmaxnmaq_m(p0,p1,p2) #define __arm_vmaxnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vmaxnmavq_m(p0,p1,p2) __arm_vmaxnmavq_m(p0,p1,p2) #define __arm_vmaxnmavq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vmaxnmvq_m(p0,p1,p2) __arm_vmaxnmvq_m(p0,p1,p2) #define __arm_vmaxnmvq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vmaxnmavq_p(p0,p1,p2) __arm_vmaxnmavq_p(p0,p1,p2) #define __arm_vmaxnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vmaxnmvq_p(p0,p1,p2) __arm_vmaxnmvq_p(p0,p1,p2) 
#define __arm_vmaxnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vminnmaq_m(p0,p1,p2) __arm_vminnmaq_m(p0,p1,p2) #define __arm_vminnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vminnmavq_p(p0,p1,p2) __arm_vminnmavq_p(p0,p1,p2) #define __arm_vminnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vminnmvq_p(p0,p1,p2) __arm_vminnmvq_p(p0,p1,p2) #define __arm_vminnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_p_f16 (__ARM_mve_coerce(__p0, float16_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_p_f32 (__ARM_mve_coerce(__p0, float32_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrndnq_m(p0,p1,p2) __arm_vrndnq_m(p0,p1,p2) #define __arm_vrndnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22084,21 +38647,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndnq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndnq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __p2));}) -#define vrndaq_m(p0,p1,p2) __arm_vrndaq_m(p0,p1,p2) #define __arm_vrndaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrndmq_m(p0,p1,p2) __arm_vrndmq_m(p0,p1,p2) #define __arm_vrndmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndmq_m_f16 
(__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrev64q_m(p0,p1,p2) __arm_vrev64q_m(p0,p1,p2) #define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22111,7 +38671,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev64q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrev64q_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrev32q_m(p0,p1,p2) __arm_vrev32q_m(p0,p1,p2) #define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22121,7 +38680,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev32q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2));}) -#define vpselq(p0,p1,p2) __arm_vpselq(p0,p1,p2) #define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22136,7 +38694,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vpselq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vpselq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1) #define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22151,7 +38708,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));}) -#define vrshrnbq(p0,p1,p2) __arm_vrshrnbq(p0,p1,p2) #define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22160,28 +38716,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vrev16q_m(p0,p1,p2) __arm_vrev16q_m(p0,p1,p2) #define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), 
__ARM_mve_coerce(__p1, int8x16_t), p2), \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));}) -#define vqshruntq(p0,p1,p2) __arm_vqshruntq(p0,p1,p2) #define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqshrunbq_n(p0,p1,p2) __arm_vqshrunbq_n(p0,p1,p2) -#define __arm_vqshrunbq_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ - __typeof(p1) __p1 = (p1); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) - -#define vqshrnbq(p0,p1,p2) __arm_vqshrnbq(p0,p1,p2) #define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22190,7 +38736,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqshrntq(p0,p1,p2) __arm_vqshrntq(p0,p1,p2) #define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22199,14 +38744,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqrshruntq(p0,p1,p2) __arm_vqrshruntq(p0,p1,p2) #define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqmovnbq_m(p0,p1,p2) __arm_vqmovnbq_m(p0,p1,p2) #define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22215,7 +38758,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int 
(*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqmovntq_m(p0,p1,p2) __arm_vqmovntq_m(p0,p1,p2) #define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22224,21 +38766,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqmovunbq_m(p0,p1,p2) __arm_vqmovunbq_m(p0,p1,p2) #define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqmovuntq_m(p0,p1,p2) __arm_vqmovuntq_m(p0,p1,p2) #define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqrshrntq(p0,p1,p2) __arm_vqrshrntq(p0,p1,p2) #define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22247,14 +38786,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqrshruntq(p0,p1,p2) __arm_vqrshruntq(p0,p1,p2) #define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vnegq_m(p0,p1,p2) __arm_vnegq_m(p0,p1,p2) #define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22264,7 +38801,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vnegq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vnegq_m_f32 
(__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vcmpgeq_m(p0,p1,p2) __arm_vcmpgeq_m(p0,p1,p2) #define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22279,7 +38815,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vabdq_m(p0,p1,p2,p3) __arm_vabdq_m(p0,p1,p2,p3) #define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22293,7 +38828,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vaddq_m(p0,p1,p2,p3) __arm_vaddq_m(p0,p1,p2,p3) #define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22315,7 +38849,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));}) -#define vandq_m(p0,p1,p2,p3) __arm_vandq_m(p0,p1,p2,p3) #define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22329,7 +38862,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vbicq_m(p0,p1,p2,p3) __arm_vbicq_m(p0,p1,p2,p3) #define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22343,7 +38875,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vbrsrq_m(p0,p1,p2,p3) __arm_vbrsrq_m(p0,p1,p2,p3) #define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = 
(p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -22356,7 +38887,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbrsrq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbrsrq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3));}) -#define vcaddq_rot270_m(p0,p1,p2,p3) __arm_vcaddq_rot270_m(p0,p1,p2,p3) #define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22370,7 +38900,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcaddq_rot90_m(p0,p1,p2,p3) __arm_vcaddq_rot90_m(p0,p1,p2,p3) #define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22384,7 +38913,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmlaq_m(p0,p1,p2,p3) __arm_vcmlaq_m(p0,p1,p2,p3) #define __arm_vcmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22392,7 +38920,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmlaq_rot180_m(p0,p1,p2,p3) __arm_vcmlaq_rot180_m(p0,p1,p2,p3) #define __arm_vcmlaq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22400,7 +38927,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmlaq_rot270_m(p0,p1,p2,p3) __arm_vcmlaq_rot270_m(p0,p1,p2,p3) #define __arm_vcmlaq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = 
(p1); \ __typeof(p2) __p2 = (p2); \ @@ -22408,7 +38934,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmlaq_rot90_m(p0,p1,p2,p3) __arm_vcmlaq_rot90_m(p0,p1,p2,p3) #define __arm_vcmlaq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22416,7 +38941,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmulq_m(p0,p1,p2,p3) __arm_vcmulq_m(p0,p1,p2,p3) #define __arm_vcmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22424,7 +38948,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmulq_rot180_m(p0,p1,p2,p3) __arm_vcmulq_rot180_m(p0,p1,p2,p3) #define __arm_vcmulq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22432,7 +38955,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmulq_rot270_m(p0,p1,p2,p3) __arm_vcmulq_rot270_m(p0,p1,p2,p3) #define __arm_vcmulq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22440,7 +38962,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmulq_rot90_m(p0,p1,p2,p3) __arm_vcmulq_rot90_m(p0,p1,p2,p3) #define 
__arm_vcmulq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22448,7 +38969,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_m_f16(__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_m_f32(__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define veorq_m(p0,p1,p2,p3) __arm_veorq_m(p0,p1,p2,p3) #define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22462,7 +38982,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vfmaq_m(p0,p1,p2,p3) __arm_vfmaq_m(p0,p1,p2,p3) #define __arm_vfmaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22472,7 +38991,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));}) -#define vfmasq_m(p0,p1,p2,p3) __arm_vfmasq_m(p0,p1,p2,p3) #define __arm_vfmasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22480,7 +38998,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));}) -#define vfmsq_m(p0,p1,p2,p3) __arm_vfmsq_m(p0,p1,p2,p3) #define __arm_vfmsq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22488,7 +39005,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vmaxnmq_m(p0,p1,p2,p3) __arm_vmaxnmq_m(p0,p1,p2,p3) #define __arm_vmaxnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) 
__p2 = (p2); \ @@ -22496,7 +39012,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vminnmq_m(p0,p1,p2,p3) __arm_vminnmq_m(p0,p1,p2,p3) #define __arm_vminnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22504,7 +39019,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vmulq_m(p0,p1,p2,p3) __arm_vmulq_m(p0,p1,p2,p3) #define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22526,7 +39040,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));}) -#define vornq_m(p0,p1,p2,p3) __arm_vornq_m(p0,p1,p2,p3) #define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22540,7 +39053,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vsubq_m(p0,p1,p2,p3) __arm_vsubq_m(p0,p1,p2,p3) #define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22562,7 +39074,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));}) -#define vorrq_m(p0,p1,p2,p3) __arm_vorrq_m(p0,p1,p2,p3) #define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -22576,7 +39087,6 @@ extern void *__ARM_undef; int 
(*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vld1q(p0) __arm_vld1q(p0) #define __arm_vld1q(p0) (\ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \ @@ -22588,7 +39098,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_f16 (__ARM_mve_coerce(p0, float16_t const *)), \ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_f32 (__ARM_mve_coerce(p0, float32_t const *)))) -#define vld1q_z(p0,p1) __arm_vld1q_z(p0, p1) #define __arm_vld1q_z(p0,p1) ( \ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce(p0, int8_t const *), p1), \ @@ -22600,7 +39109,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_z_f16 (__ARM_mve_coerce(p0, float16_t const *), p1), \ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_z_f32 (__ARM_mve_coerce(p0, float32_t const *), p1))) -#define vld2q(p0) __arm_vld2q(p0) #define __arm_vld2q(p0) ( \ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \ @@ -22612,7 +39120,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld2q_f16 (__ARM_mve_coerce(p0, float16_t const *)), \ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld2q_f32 (__ARM_mve_coerce(p0, float32_t const *)))) -#define vld4q(p0) __arm_vld4q(p0) #define __arm_vld4q(p0) ( \ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \ @@ -22624,7 +39131,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld4q_f16 (__ARM_mve_coerce(p0, float16_t const *)), \ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld4q_f32 (__ARM_mve_coerce(p0, float32_t const *)))) -#define vldrhq_gather_offset(p0,p1) __arm_vldrhq_gather_offset(p0,p1) #define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce(p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \ @@ -22633,7 +39139,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_f16 (__ARM_mve_coerce(p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)));}) -#define vldrhq_gather_offset_z(p0,p1,p2) __arm_vldrhq_gather_offset_z(p0,p1,p2) #define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce(p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ @@ -22642,7 +39147,6 @@ extern void *__ARM_undef; int 
(*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_f16 (__ARM_mve_coerce(p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) -#define vldrhq_gather_shifted_offset(p0,p1) __arm_vldrhq_gather_shifted_offset(p0,p1) #define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \ @@ -22651,7 +39155,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_f16 (__ARM_mve_coerce(p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)));}) -#define vldrhq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) #define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce(p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ @@ -22660,35 +39163,30 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_f16 (__ARM_mve_coerce(p0, float16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) -#define vldrwq_gather_offset(p0,p1) __arm_vldrwq_gather_offset(p0,p1) #define __arm_vldrwq_gather_offset(p0,p1) ( \ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce(p0, int32_t const *), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce(p0, uint32_t const *), p1), \ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_f32 (__ARM_mve_coerce(p0, float32_t const *), p1))) -#define vldrwq_gather_offset_z(p0,p1,p2) __arm_vldrwq_gather_offset_z(p0,p1,p2) #define __arm_vldrwq_gather_offset_z(p0,p1,p2) ( \ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce(p0, int32_t const *), p1, p2), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce(p0, uint32_t const *), p1, p2), \ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_z_f32 (__ARM_mve_coerce(p0, float32_t const *), p1, p2))) -#define vldrwq_gather_shifted_offset(p0,p1) __arm_vldrwq_gather_shifted_offset(p0,p1) #define __arm_vldrwq_gather_shifted_offset(p0,p1) ( \ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t const *), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce(p0, 
uint32_t const *), p1), \ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_f32 (__ARM_mve_coerce(p0, float32_t const *), p1))) -#define vldrwq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) #define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ( \ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(p0, int32_t const *), p1, p2), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(p0, uint32_t const *), p1, p2), \ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce(p0, float32_t const *), p1, p2))) -#define vst1q_p(p0,p1,p2) __arm_vst1q_p(p0,p1,p2) #define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -22700,7 +39198,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_p_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vst2q(p0,p1) __arm_vst2q(p0,p1) #define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \ @@ -22712,7 +39209,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x2_t]: __arm_vst2q_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x2_t)), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x2_t]: __arm_vst2q_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x2_t)));}) -#define vst1q(p0,p1) __arm_vst1q(p0,p1) #define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \ @@ -22724,7 +39220,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vstrhq(p0,p1) __arm_vstrhq(p0,p1) #define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \ @@ -22733,7 +39228,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)));}) -#define 
vstrhq_p(p0,p1,p2) __arm_vstrhq_p(p0,p1,p2) #define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \ @@ -22742,7 +39236,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2));}) -#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -22752,7 +39245,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));}) -#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2) #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -22762,7 +39254,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));}) -#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -22772,7 +39263,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));}) -#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -22782,21 +39272,18 @@ extern void 
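/* Illustrative aside, not part of the patch: for the load/store macros
   above, the _Generic dispatch keys off the element type of the pointer
   argument, so one spelling covers all the suffixed forms, including the
   narrowing stores.  A minimal sketch (same build assumptions and
   #include <arm_mve.h> as the sketch above; helper names hypothetical):  */

void
copy_f32 (const float32_t *src, float32_t *dst, mve_pred16_t p)
{
  float32x4_t v = vld1q (src);	/* __arm_vld1q_f32 */
  vst1q_p (dst, v, p);		/* __arm_vst1q_p_f32 */
}

void
store_low_halves (uint16_t *dst, uint32x4_t v)
{
  vstrhq (dst, v);		/* __arm_vstrhq_u32: stores the low 16 bits of each lane */
}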
*__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));}) -#define vstrwq_p(p0,p1,p2) __arm_vstrwq_p(p0,p1,p2) #define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_p_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vstrwq(p0,p1) __arm_vstrwq(p0,p1) #define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));}) -#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2) #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -22806,7 +39293,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));}) -#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -22816,7 +39302,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));}) -#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) #define 
__arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -22826,7 +39311,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));}) -#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -22836,21 +39320,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));}) -#define vstrwq_scatter_base(p0,p1,p2) __arm_vstrwq_scatter_base(p0,p1,p2) #define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vstrwq_scatter_base_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) #define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_p_f32(p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2) #define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ @@ -22858,7 +39339,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) #define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ 
__typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ @@ -22866,7 +39346,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) #define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ @@ -22874,7 +39353,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2) #define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ @@ -22882,7 +39360,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) #define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ @@ -22890,7 +39367,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) #define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ @@ -22898,7 +39374,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) #define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int 
(*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ @@ -22906,7 +39381,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) #define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ @@ -22914,7 +39388,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vuninitializedq(p0) __arm_vuninitializedq(p0) #define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \ @@ -22928,7 +39401,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t]: __arm_vuninitializedq_f16 (), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vuninitializedq_f32 ());}) -#define vreinterpretq_f16(p0) __arm_vreinterpretq_f16(p0) #define __arm_vreinterpretq_f16(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -22941,7 +39413,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_f16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vreinterpretq_f32(p0) __arm_vreinterpretq_f32(p0) #define __arm_vreinterpretq_f32(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -22954,7 +39425,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));}) -#define vreinterpretq_s16(p0) __arm_vreinterpretq_s16(p0) #define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ @@ -22967,7 +39437,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vreinterpretq_s32(p0) __arm_vreinterpretq_s32(p0) #define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ @@ -22980,7 +39449,6 @@ extern void 
*__ARM_undef; int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vreinterpretq_s64(p0) __arm_vreinterpretq_s64(p0) #define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ @@ -22993,7 +39461,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vreinterpretq_s8(p0) __arm_vreinterpretq_s8(p0) #define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ @@ -23006,7 +39473,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vreinterpretq_u16(p0) __arm_vreinterpretq_u16(p0) #define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ @@ -23019,7 +39485,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vreinterpretq_u32(p0) __arm_vreinterpretq_u32(p0) #define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ @@ -23032,7 +39497,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vreinterpretq_u64(p0) __arm_vreinterpretq_u64(p0) #define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ @@ -23045,7 +39509,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vreinterpretq_u8(p0) __arm_vreinterpretq_u8(p0) #define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \ @@ -23058,21 +39521,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));}) -#define vstrwq_scatter_base_wb(p0,p1,p2) __arm_vstrwq_scatter_base_wb(p0,p1,p2) #define 
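/* Illustrative aside, not part of the patch: the vreinterpretq_* macros
   above only relabel the 128-bit register contents; no value conversion is
   performed.  A minimal sketch (same assumptions as above):  */

uint32x4_t
float_bits (float32x4_t x)
{
  /* Resolves to __arm_vreinterpretq_u32_f32.  */
  return vreinterpretq_u32 (x);
}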
__arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));}) -#define vstrwq_scatter_base_wb_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) #define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_p_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vabdq_x(p1,p2,p3) __arm_vabdq_x(p1,p2,p3) #define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -23085,7 +39545,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vabsq_x(p1,p2) __arm_vabsq_x(p1,p2) #define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -23094,7 +39553,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vaddq_x(p1,p2,p3) __arm_vaddq_x(p1,p2,p3) #define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -23115,7 +39573,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));}) -#define vandq_x(p1,p2,p3) __arm_vandq_x(p1,p2,p3) #define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -23128,7 +39585,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vbicq_x(p1,p2,p3) __arm_vbicq_x(p1,p2,p3) #define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ 
-23141,7 +39597,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vbrsrq_x(p1,p2,p3) __arm_vbrsrq_x(p1,p2,p3) #define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ @@ -23153,7 +39608,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2, p3), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2, p3));}) -#define vcaddq_rot270_x(p1,p2,p3) __arm_vcaddq_rot270_x(p1,p2,p3) #define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -23166,7 +39620,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcaddq_rot90_x(p1,p2,p3) __arm_vcaddq_rot90_x(p1,p2,p3) #define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -23179,28 +39632,24 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmulq_rot180_x(p1,p2,p3) __arm_vcmulq_rot180_x(p1,p2,p3) #define __arm_vcmulq_rot180_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmulq_rot270_x(p1,p2,p3) __arm_vcmulq_rot270_x(p1,p2,p3) #define __arm_vcmulq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcmulq_x(p1,p2,p3) __arm_vcmulq_x(p1,p2,p3) #define __arm_vcmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int 
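/* Illustrative aside, not part of the patch: the _x variants in this block
   are predicated like the _m forms but take no `inactive' argument; lanes
   whose predicate bit is clear are left with an unspecified value.  A
   minimal sketch (same assumptions as above):  */

float16x8_t
add_where (float16x8_t a, float16x8_t b, mve_pred16_t p)
{
  /* Resolves to __arm_vaddq_x_f16; false-predicated lanes are "don't care".  */
  return vaddq_x (a, b, p);
}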
(*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vcvtq_x(p1,p2) __arm_vcvtq_x(p1,p2) #define __arm_vcvtq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ @@ -23208,7 +39657,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vcvtq_x_n(p1,p2,p3) __arm_vcvtq_x_n(p1,p2,p3) #define __arm_vcvtq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_n_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ @@ -23216,7 +39664,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_n_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_n_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define veorq_x(p1,p2,p3) __arm_veorq_x(p1,p2,p3) #define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -23229,21 +39676,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vmaxnmq_x(p1,p2,p3) __arm_vmaxnmq_x(p1,p2,p3) #define __arm_vmaxnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vminnmq_x(p1,p2,p3) __arm_vminnmq_x(p1,p2,p3) #define __arm_vminnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vmulq_x(p1,p2,p3) __arm_vmulq_x(p1,p2,p3) #define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -23264,7 +39708,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, 
float16_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));}) -#define vnegq_x(p1,p2) __arm_vnegq_x(p1,p2) #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -23273,7 +39716,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vornq_x(p1,p2,p3) __arm_vornq_x(p1,p2,p3) #define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -23286,7 +39728,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vorrq_x(p1,p2,p3) __arm_vorrq_x(p1,p2,p3) #define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -23299,7 +39740,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vrev32q_x(p1,p2) __arm_vrev32q_x(p1,p2) #define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -23308,7 +39748,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2));}) -#define vrev64q_x(p1,p2) __arm_vrev64q_x(p1,p2) #define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -23320,43 +39759,36 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrndaq_x(p1,p2) __arm_vrndaq_x(p1,p2) #define __arm_vrndaq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrndmq_x(p1,p2) __arm_vrndmq_x(p1,p2) #define __arm_vrndmq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t]: 
__arm_vrndmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrndnq_x(p1,p2) __arm_vrndnq_x(p1,p2) #define __arm_vrndnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrndpq_x(p1,p2) __arm_vrndpq_x(p1,p2) #define __arm_vrndpq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrndq_x(p1,p2) __arm_vrndq_x(p1,p2) #define __arm_vrndq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vrndxq_x(p1,p2) __arm_vrndxq_x(p1,p2) #define __arm_vrndxq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));}) -#define vsubq_x(p1,p2,p3) __arm_vsubq_x(p1,p2,p3) #define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -23365,14 +39797,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));}) -#define vcmulq_rot90_x(p1,p2,p3) __arm_vcmulq_rot90_x(p1,p2,p3) #define __arm_vcmulq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));}) -#define vgetq_lane(p0,p1) __arm_vgetq_lane(p0,p1) #define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -23386,7 +39816,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_float16x8_t]: __arm_vgetq_lane_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \ int (*)[__ARM_mve_type_float32x4_t]: __arm_vgetq_lane_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));}) -#define vsetq_lane(p0,p1,p2) __arm_vsetq_lane(p0,p1,p2) #define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23403,19 +39832,16 @@ extern void *__ARM_undef; #else /* MVE Integer. 
*/ -#define vstrwq_scatter_base_wb(p0,p1,p2) __arm_vstrwq_scatter_base_wb(p0,p1,p2) #define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vstrwq_scatter_base_wb_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) #define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vst4q(p0,p1) __arm_vst4q(p0,p1) #define __arm_vst4q(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \ @@ -23425,21 +39851,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)));}) -#define vabsq(p0) __arm_vabsq(p0) #define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) -#define vclsq(p0) __arm_vclsq(p0) #define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) -#define vclzq(p0) __arm_vclzq(p0) #define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -23449,14 +39872,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) -#define vnegq(p0) __arm_vnegq(p0) #define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) -#define vmovlbq(p0) __arm_vmovlbq(p0) #define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -23464,7 +39885,6 @@ 
extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));}) -#define vmovltq(p0) __arm_vmovltq(p0) #define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -23472,7 +39892,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));}) -#define vmvnq(p0) __arm_vmvnq(p0) #define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -23482,13 +39901,11 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) -#define vrev16q(p0) __arm_vrev16q(p0) #define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));}) -#define vrev32q(p0) __arm_vrev32q(p0) #define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -23496,7 +39913,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));}) -#define vrev64q(p0) __arm_vrev64q(p0) #define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -23506,21 +39922,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) -#define vqabsq(p0) __arm_vqabsq(p0) #define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) -#define vqnegq(p0) __arm_vqnegq(p0) #define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));}) -#define vshrq(p0,p1) __arm_vshrq(p0,p1) #define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -23530,7 +39943,6 @@ extern void *__ARM_undef; int 
(*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1) #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23541,7 +39953,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vshlq(p0,p1) __arm_vshlq(p0,p1) #define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23552,7 +39963,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vsubq(p0,p1) __arm_vsubq(p0,p1) #define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23569,7 +39979,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));}) -#define vshlq_r(p0,p1) __arm_vshlq_r(p0,p1) #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -23579,7 +39988,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vrshlq(p0,p1) __arm_vrshlq(p0,p1) #define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23596,7 +40004,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vrmulhq(p0,p1) __arm_vrmulhq(p0,p1) #define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23607,7 +40014,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vrhaddq(p0,p1) __arm_vrhaddq(p0,p1) #define 
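/* Illustrative aside, not part of the patch: several binary macros above,
   such as vsubq, accept either a vector or a plain scalar second operand;
   the __ARM_mve_type_int_n case routes a scalar to the matching _n
   intrinsic.  A minimal sketch (same assumptions as above):  */

uint32x4_t
sub_examples (uint32x4_t a, uint32x4_t b)
{
  uint32x4_t v = vsubq (a, b);	/* vector - vector: __arm_vsubq_u32 */
  return vsubq (v, 1);		/* vector - scalar: __arm_vsubq_n_u32 */
}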
__arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23618,7 +40024,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vqsubq(p0,p1) __arm_vqsubq(p0,p1) #define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23635,7 +40040,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vqshlq(p0,p1) __arm_vqshlq(p0,p1) #define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23646,7 +40050,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqshlq_r(p0,p1) __arm_vqshlq_r(p0,p1) #define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -23656,14 +40059,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vqshluq(p0,p1) __arm_vqshluq(p0,p1) #define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));}) -#define vrshrq(p0,p1) __arm_vrshrq(p0,p1) #define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -23673,7 +40074,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vshlq_n(p0,p1) __arm_vshlq_n(p0,p1) #define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -23683,7 +40083,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 
(__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vqshlq_n(p0,p1) __arm_vqshlq_n(p0,p1) #define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -23693,7 +40092,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vqrshlq(p0,p1) __arm_vqrshlq(p0,p1) #define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23710,7 +40108,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) -#define vqrdmulhq(p0,p1) __arm_vqrdmulhq(p0,p1) #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23721,7 +40118,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) -#define vqdmulhq(p0,p1) __arm_vqdmulhq(p0,p1) #define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23732,7 +40128,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqaddq(p0,p1) __arm_vqaddq(p0,p1) #define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23749,7 +40144,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vorrq(p0,p1) __arm_vorrq(p0,p1) #define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23760,7 +40154,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vornq(p0,p1) __arm_vornq(p0,p1) #define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23771,7 +40164,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmulq(p0,p1) __arm_vmulq(p0,p1) #define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23788,7 +40180,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmulltq_int(p0,p1) __arm_vmulltq_int(p0,p1) #define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23799,7 +40190,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmullbq_int(p0,p1) __arm_vmullbq_int(p0,p1) #define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23810,7 +40200,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmulhq(p0,p1) __arm_vmulhq(p0,p1) #define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23821,7 +40210,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vminq(p0,p1) __arm_vminq(p0,p1) #define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23832,7 +40220,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vminaq(p0,p1) __arm_vminaq(p0,p1) #define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23840,7 +40227,6 @@ extern void *__ARM_undef; int 
(*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vmaxq(p0,p1) __arm_vmaxq(p0,p1) #define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23851,7 +40237,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmaxaq(p0,p1) __arm_vmaxaq(p0,p1) #define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23859,7 +40244,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vhsubq(p0,p1) __arm_vhsubq(p0,p1) #define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23876,7 +40260,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vhcaddq_rot90(p0,p1) __arm_vhcaddq_rot90(p0,p1) #define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23884,7 +40267,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vhcaddq_rot270(p0,p1) __arm_vhcaddq_rot270(p0,p1) #define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23892,7 +40274,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vhaddq(p0,p1) __arm_vhaddq(p0,p1) #define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23909,7 +40290,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), 
__ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define veorq(p0,p1) __arm_veorq(p0,p1) #define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23920,7 +40300,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vcaddq_rot90(p0,p1) __arm_vcaddq_rot90(p0,p1) #define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23931,7 +40310,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vcaddq_rot270(p0,p1) __arm_vcaddq_rot270(p0,p1) #define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23942,7 +40320,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vbrsrq(p0,p1) __arm_vbrsrq(p0,p1) #define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -23952,7 +40329,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vbicq(p0,p1) __arm_vbicq(p0,p1) #define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23967,7 +40343,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vaddq(p0,p1) __arm_vaddq(p0,p1) #define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23984,7 +40359,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) 
-#define vandq(p0,p1) __arm_vandq(p0,p1) #define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -23995,7 +40369,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vabdq(p0,p1) __arm_vabdq(p0,p1) #define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24006,7 +40379,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vcmpeqq(p0,p1) __arm_vcmpeqq(p0,p1) #define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24023,7 +40395,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));}) -#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1) #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24041,7 +40412,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));}) -#define vqmovntq(p0,p1) __arm_vqmovntq(p0,p1) #define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24050,7 +40420,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vqmovnbq(p0,p1) __arm_vqmovnbq(p0,p1) #define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24059,21 +40428,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmulltq_poly(p0,p1) __arm_vmulltq_poly(p0,p1) #define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 
(__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));}) -#define vmullbq_poly(p0,p1) __arm_vmullbq_poly(p0,p1) #define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));}) -#define vmovntq(p0,p1) __arm_vmovntq(p0,p1) #define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24082,7 +40448,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmovnbq(p0,p1) __arm_vmovnbq(p0,p1) #define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24091,21 +40456,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmlaldavxq(p0,p1) __arm_vmlaldavxq(p0,p1) #define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqmovuntq(p0,p1) __arm_vqmovuntq(p0,p1) #define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vshlltq(p0,p1) __arm_vshlltq(p0,p1) #define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -24113,7 +40475,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));}) -#define vshllbq(p0,p1) __arm_vshllbq(p0,p1) #define __arm_vshllbq(p0,p1) ({ 
__typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -24121,14 +40482,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));}) -#define vqmovunbq(p0,p1) __arm_vqmovunbq(p0,p1) #define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqdmulltq(p0,p1) __arm_vqdmulltq(p0,p1) #define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24137,7 +40496,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vqdmullbq(p0,p1) __arm_vqdmullbq(p0,p1) #define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24146,15 +40504,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vcmpgeq_n(p0,p1) __arm_vcmpgeq_n(p0,p1) -#define __arm_vcmpgeq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \ - __typeof(p1) __p1 = (p1); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) - -#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1) #define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24165,7 +40514,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) -#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1) #define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24176,7 +40524,6 @@ 
extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) -#define vcmpleq(p0,p1) __arm_vcmpleq(p0,p1) #define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24187,7 +40534,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) -#define vcmpltq(p0,p1) __arm_vcmpltq(p0,p1) #define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24198,7 +40544,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) -#define vcmpneq_m(p0,p1,p2) __arm_vcmpneq_m(p0,p1,p2) #define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24215,7 +40560,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1) #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24232,7 +40576,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));}) -#define vshlcq(p0,p1,p2) __arm_vshlcq(p0,p1,p2) #define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ @@ -24242,7 +40585,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vcmpeqq_m(p0,p1,p2) __arm_vcmpeqq_m(p0,p1,p2) #define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24259,7 +40601,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \ int 
(*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));}) -#define vbicq_m_n(p0,p1,p2) __arm_vbicq_m_n(p0,p1,p2) #define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ @@ -24267,7 +40608,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vqrshrnbq(p0,p1,p2) __arm_vqrshrnbq(p0,p1,p2) #define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24276,14 +40616,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqrshrunbq(p0,p1,p2) __arm_vqrshrunbq(p0,p1,p2) #define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqrdmlsdhq(p0,p1,p2) __arm_vqrdmlsdhq(p0,p1,p2) #define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24292,7 +40630,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqrdmlsdhxq(p0,p1,p2) __arm_vqrdmlsdhxq(p0,p1,p2) #define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24301,7 +40638,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqrshlq_m_n(p0,p1,p2) __arm_vqrshlq_m_n(p0,p1,p2) #define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ @@ -24311,7 +40647,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: 
__arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vqshlq_m_r(p0,p1,p2) __arm_vqshlq_m_r(p0,p1,p2) #define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ @@ -24321,7 +40656,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vrev64q_m(p0,p1,p2) __arm_vrev64q_m(p0,p1,p2) #define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24332,7 +40666,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vrshlq_m_n(p0,p1,p2) __arm_vrshlq_m_n(p0,p1,p2) #define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ @@ -24343,7 +40676,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));}) -#define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2) #define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ @@ -24353,7 +40685,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vsliq(p0,p1,p2) __arm_vsliq(p0,p1,p2) #define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24364,7 +40695,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vsriq(p0,p1,p2) __arm_vsriq(p0,p1,p2) #define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24375,7 +40705,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqrdmlashq(p0,p1,p2) __arm_vqrdmlashq(p0,p1,p2) #define __arm_vqrdmlashq(p0,p1,p2) ({ 
__typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24387,7 +40716,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) -#define vqrdmlahq(p0,p1,p2) __arm_vqrdmlahq(p0,p1,p2) #define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24399,7 +40727,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) -#define vqrdmladhxq(p0,p1,p2) __arm_vqrdmladhxq(p0,p1,p2) #define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24408,7 +40735,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqrdmladhq(p0,p1,p2) __arm_vqrdmladhq(p0,p1,p2) #define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24417,7 +40743,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqnegq_m(p0,p1,p2) __arm_vqnegq_m(p0,p1,p2) #define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24425,7 +40750,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqdmlsdhxq(p0,p1,p2) __arm_vqdmlsdhxq(p0,p1,p2) #define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24434,7 +40758,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, 
int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vabsq_m(p0,p1,p2) __arm_vabsq_m(p0,p1,p2) #define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24442,7 +40765,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vclsq_m(p0,p1,p2) __arm_vclsq_m(p0,p1,p2) #define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24450,7 +40772,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vclzq_m(p0,p1,p2) __arm_vclzq_m(p0,p1,p2) #define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24461,7 +40782,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vcmpgeq_m(p0,p1,p2) __arm_vcmpgeq_m(p0,p1,p2) #define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24472,7 +40792,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));}) -#define vdupq_m(p0,p1,p2) __arm_vdupq_m(p0,p1,p2) #define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24483,7 +40802,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));}) -#define vmaxaq_m(p0,p1,p2) __arm_vmaxaq_m(p0,p1,p2) #define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24491,7 +40809,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, 
uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vmlaq(p0,p1,p2) __arm_vmlaq(p0,p1,p2) #define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24503,7 +40820,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) -#define vmlasq(p0,p1,p2) __arm_vmlasq(p0,p1,p2) #define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24515,7 +40831,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) -#define vnegq_m(p0,p1,p2) __arm_vnegq_m(p0,p1,p2) #define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24523,7 +40838,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vpselq(p0,p1,p2) __arm_vpselq(p0,p1,p2) #define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24536,7 +40850,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));}) -#define vqdmlahq(p0,p1,p2) __arm_vqdmlahq(p0,p1,p2) #define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24548,7 +40861,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) -#define vqdmlsdhq(p0,p1,p2) __arm_vqdmlsdhq(p0,p1,p2) #define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24557,7 +40869,6 @@ extern void *__ARM_undef; int 
(*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqdmladhxq(p0,p1,p2) __arm_vqdmladhxq(p0,p1,p2) #define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24566,7 +40877,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqdmladhq(p0,p1,p2) __arm_vqdmladhq(p0,p1,p2) #define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24575,7 +40885,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vminaq_m(p0,p1,p2) __arm_vminaq_m(p0,p1,p2) #define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24583,8 +40892,7 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vcmpltq_m(p0,p1,p2) __arm_vcmpltq_m(p0,p1,p2) -#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ +#define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -24594,8 +40902,7 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));}) -#define vcmpleq_m(p0,p1,p2) __arm_vcmpleq_m(p0,p1,p2) -#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ +#define __arm_vmlsdavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), 
__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -24605,8 +40912,7 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));}) -#define vcmpgtq_m(p0,p1,p2) __arm_vcmpgtq_m(p0,p1,p2) -#define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ +#define __arm_vmlsdavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -24616,8 +40922,7 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));}) -#define vshrntq(p0,p1,p2) __arm_vshrntq(p0,p1,p2) -#define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ +#define __arm_vmlsdavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ @@ -24625,8 +40930,7 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vrshrntq(p0,p1,p2) __arm_vrshrntq(p0,p1,p2) -#define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ +#define __arm_vmlsdavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ @@ -24634,7 +40938,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vmovlbq_m(p0,p1,p2) __arm_vmovlbq_m(p0,p1,p2) #define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24643,7 +40946,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) -#define vmovnbq_m(p0,p1,p2) __arm_vmovnbq_m(p0,p1,p2) #define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) 
__p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24652,7 +40954,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vmovntq_m(p0,p1,p2) __arm_vmovntq_m(p0,p1,p2) #define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24661,7 +40962,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vshrnbq(p0,p1,p2) __arm_vshrnbq(p0,p1,p2) #define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24670,7 +40970,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vrshrnbq(p0,p1,p2) __arm_vrshrnbq(p0,p1,p2) #define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24679,7 +40978,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vrev32q_m(p0,p1,p2) __arm_vrev32q_m(p0,p1,p2) #define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24688,21 +40986,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) -#define vqshruntq(p0,p1,p2) __arm_vqshruntq(p0,p1,p2) #define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vrev16q_m(p0,p1,p2) __arm_vrev16q_m(p0,p1,p2) #define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) 
__p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));}) -#define vqshrntq(p0,p1,p2) __arm_vqshrntq(p0,p1,p2) #define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24711,14 +41006,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqrshruntq(p0,p1,p2) __arm_vqrshruntq(p0,p1,p2) #define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqrshrntq(p0,p1,p2) __arm_vqrshrntq(p0,p1,p2) #define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24727,7 +41020,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqshrnbq(p0,p1,p2) __arm_vqshrnbq(p0,p1,p2) #define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24736,14 +41028,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqmovuntq_m(p0,p1,p2) __arm_vqmovuntq_m(p0,p1,p2) #define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqmovntq_m(p0,p1,p2) __arm_vqmovntq_m(p0,p1,p2) #define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24752,7 +41042,6 
@@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqmovnbq_m(p0,p1,p2) __arm_vqmovnbq_m(p0,p1,p2) #define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24761,7 +41050,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vmovltq_m(p0,p1,p2) __arm_vmovltq_m(p0,p1,p2) #define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -24770,14 +41058,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));}) -#define vqmovunbq_m(p0,p1,p2) __arm_vqmovunbq_m(p0,p1,p2) #define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vsubq_m(p0,p1,p2,p3) __arm_vsubq_m(p0,p1,p2,p3) #define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24795,7 +41081,17 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vabdq_m(p0,p1,p2,p3) __arm_vabdq_m(p0,p1,p2,p3) +#define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int 
(*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + #define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24807,7 +41103,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vandq_m(p0,p1,p2,p3) __arm_vandq_m(p0,p1,p2,p3) #define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24819,7 +41114,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vbicq_m(p0,p1,p2,p3) __arm_vbicq_m(p0,p1,p2,p3) #define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24831,7 +41125,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vbrsrq_m(p0,p1,p2,p3) __arm_vbrsrq_m(p0,p1,p2,p3) #define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24843,7 +41136,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __p2, p3));}) -#define vcaddq_rot270_m(p0,p1,p2,p3) __arm_vcaddq_rot270_m(p0,p1,p2,p3) #define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24855,7 +41147,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int 
(*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vcaddq_rot90_m(p0,p1,p2,p3) __arm_vcaddq_rot90_m(p0,p1,p2,p3) #define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24867,7 +41158,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define veorq_m(p0,p1,p2,p3) __arm_veorq_m(p0,p1,p2,p3) #define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24879,7 +41169,17 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vornq_m(p0,p1,p2,p3) __arm_vornq_m(p0,p1,p2,p3) +#define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ + int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ + int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + #define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24891,7 +41191,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), 
__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vorrq_m(p0,p1,p2,p3) __arm_vorrq_m(p0,p1,p2,p3) #define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24903,7 +41202,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vaddq_m(p0,p1,p2,p3) __arm_vaddq_m(p0,p1,p2,p3) #define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24921,7 +41219,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmulq_m(p0,p1,p2,p3) __arm_vmulq_m(p0,p1,p2,p3) #define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -24939,7 +41236,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vstrbq(p0,p1) __arm_vstrbq(p0,p1) #define __arm_vstrbq(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \ @@ -24949,13 +41245,22 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vstrwq_scatter_base(p0,p1,p2) __arm_vstrwq_scatter_base(p0,p1,p2) +#define __arm_vstrbq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, 
int8x16_t)), \ + int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \ + int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ + int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) + #define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vldrbq_gather_offset(p0,p1) __arm_vldrbq_gather_offset(p0,p1) #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \ @@ -24965,13 +41270,32 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vstrwq_scatter_base_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) +#define __arm_vstrbq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + 
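A minimal usage sketch (illustrative only, not part of this patch): with __ARM_MVE_PRESERVE_USER_NAMESPACE undefined, the unprefixed vstrbq_p expands to __arm_vstrbq_p above, and the _Generic selection picks the type-suffixed intrinsic from the argument types.

#include <arm_mve.h>

/* Predicated narrowing store: writes the low byte of each of the first
   n lanes of DATA to DST.  The polymorphic vstrbq_p resolves to
   __arm_vstrbq_p_u16 via the _Generic table above.  */
void store_low_bytes (uint8_t *dst, uint16x8_t data, uint32_t n)
{
  mve_pred16_t p = vctp16q (n);
  vstrbq_p (dst, data, p);
}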
+#define __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ + int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ + int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + #define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vld1q(p0) __arm_vld1q(p0) #define __arm_vld1q(p0) (_Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce(p0, int16_t const *)), \ @@ -24980,7 +41304,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce(p0, uint16_t const *)), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce(p0, uint32_t const *)))) -#define vldrhq_gather_offset(p0,p1) __arm_vldrhq_gather_offset(p0,p1) #define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce(p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \ @@ -24988,7 +41311,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vldrhq_gather_offset_z(p0,p1,p2) __arm_vldrhq_gather_offset_z(p0,p1,p2) #define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int 
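A similar sketch (illustrative only) for the predicated scatter store just added: the pointer, offset-vector and data-vector types select __arm_vstrbq_scatter_offset_p_u32.

#include <arm_mve.h>

/* Scatter the low byte of each active lane of DATA to BASE[OFFSETS[i]].  */
void scatter_bytes (uint8_t *base, uint32x4_t offsets, uint32x4_t data,
                    mve_pred16_t p)
{
  vstrbq_scatter_offset_p (base, offsets, data, p);
}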
(*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce(p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ @@ -24996,7 +41318,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vldrhq_gather_shifted_offset(p0,p1) __arm_vldrhq_gather_shifted_offset(p0,p1) #define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \ @@ -25004,7 +41325,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vldrhq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) #define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce(p0, int16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ @@ -25012,31 +41332,26 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(p0, uint16_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vldrwq_gather_offset(p0,p1) __arm_vldrwq_gather_offset(p0,p1) #define __arm_vldrwq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1));}) -#define vldrwq_gather_offset_z(p0,p1,p2) __arm_vldrwq_gather_offset_z(p0,p1,p2) #define __arm_vldrwq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2));}) -#define vldrwq_gather_shifted_offset(p0,p1) __arm_vldrwq_gather_shifted_offset(p0,p1) #define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 
(__ARM_mve_coerce(__p0, int32_t const *), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1));}) -#define vldrwq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) #define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce(__p0, int32_t const *), p1, p2), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce(__p0, uint32_t const *), p1, p2));}) -#define vst1q(p0,p1) __arm_vst1q(p0,p1) #define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \ @@ -25046,7 +41361,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vst1q_p(p0,p1,p2) __arm_vst1q_p(p0,p1,p2) #define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -25056,7 +41370,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vst2q(p0,p1) __arm_vst2q(p0,p1) #define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \ @@ -25066,7 +41379,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)));}) -#define vstrhq(p0,p1) __arm_vstrhq(p0,p1) #define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \ @@ -25074,7 +41386,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vstrhq_p(p0,p1,p2) __arm_vstrhq_p(p0,p1,p2) #define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); 
\ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \ @@ -25082,7 +41393,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25091,7 +41401,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2) #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25100,7 +41409,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25109,7 +41417,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25119,19 +41426,54 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: 
__arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vstrwq(p0,p1) __arm_vstrwq(p0,p1) #define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vstrwq_p(p0,p1,p2) __arm_vstrwq_p(p0,p1,p2) #define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2) +#define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \ + int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));}) + +#define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \ + int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));}) + +#define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \ + int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));}) + +#define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \ + int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));}) + +#define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \ + int 
(*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));}) + +#define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \ + int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));}) + #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25140,7 +41482,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25149,7 +41490,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25158,7 +41498,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25167,49 +41506,42 @@ extern void *__ARM_undef; int 
(*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2) #define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) #define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) #define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2) #define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) #define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, 
uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) #define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vuninitializedq(p0) __arm_vuninitializedq(p0) #define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \ @@ -25221,7 +41553,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 ());}) -#define vreinterpretq_s16(p0) __arm_vreinterpretq_s16(p0) #define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -25232,7 +41563,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) -#define vreinterpretq_s32(p0) __arm_vreinterpretq_s32(p0) #define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ @@ -25243,7 +41573,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) -#define vreinterpretq_s64(p0) __arm_vreinterpretq_s64(p0) #define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ @@ -25254,7 +41583,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) -#define vreinterpretq_s8(p0) __arm_vreinterpretq_s8(p0) #define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ @@ -25265,7 +41593,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) -#define vreinterpretq_u16(p0) __arm_vreinterpretq_u16(p0) #define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, 
int8x16_t)), \ @@ -25276,7 +41603,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) -#define vreinterpretq_u32(p0) __arm_vreinterpretq_u32(p0) #define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ @@ -25287,7 +41613,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) -#define vreinterpretq_u64(p0) __arm_vreinterpretq_u64(p0) #define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ @@ -25298,7 +41623,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)));}) -#define vreinterpretq_u8(p0) __arm_vreinterpretq_u8(p0) #define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \ @@ -25309,14 +41633,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));}) -#define vabsq_x(p1,p2) __arm_vabsq_x(p1,p2) #define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vaddq_x(p1,p2,p3) __arm_vaddq_x(p1,p2,p3) #define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25333,7 +41655,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));}) -#define vcaddq_rot270_x(p1,p2,p3) __arm_vcaddq_rot270_x(p1,p2,p3) #define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25344,7 +41665,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vcaddq_rot90_x(p1,p2,p3) 
__arm_vcaddq_rot90_x(p1,p2,p3) #define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25355,7 +41675,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define veorq_x(p1,p2,p3) __arm_veorq_x(p1,p2,p3) #define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25366,7 +41685,88 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmulq_x(p1,p2,p3) __arm_vmulq_x(p1,p2,p3) +#define __arm_vmaxq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + +#define __arm_vminq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_x_s8 
(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + +#define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));}) + +#define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));}) + +#define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + +#define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, 
int16x8_t), p3), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + +#define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));}) + +#define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + +#define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));}) + #define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25383,14 +41783,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));}) -#define vnegq_x(p1,p2) __arm_vnegq_x(p1,p2) #define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int 
(*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vornq_x(p1,p2,p3) __arm_vornq_x(p1,p2,p3) #define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25401,7 +41799,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vorrq_x(p1,p2,p3) __arm_vorrq_x(p1,p2,p3) #define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25412,7 +41809,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vrev32q_x(p1,p2) __arm_vrev32q_x(p1,p2) #define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -25420,7 +41816,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));}) -#define vrev64q_x(p1,p2) __arm_vrev64q_x(p1,p2) #define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -25430,7 +41825,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vabdq_x(p1,p2,p3) __arm_vabdq_x(p1,p2,p3) #define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25441,7 +41835,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vandq_x(p1,p2,p3) __arm_vandq_x(p1,p2,p3) #define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25452,7 +41845,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int 
(*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vbicq_x(p1,p2,p3) __arm_vbicq_x(p1,p2,p3) #define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25463,7 +41855,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vbrsrq_x(p1,p2,p3) __arm_vbrsrq_x(p1,p2,p3) #define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ @@ -25473,7 +41864,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vld1q_z(p0,p1) __arm_vld1q_z(p0, p1) #define __arm_vld1q_z(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce(p0, int8_t const *), p1), \ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce(p0, int16_t const *), p1), \ @@ -25482,7 +41872,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce(p0, uint16_t const *), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce(p0, uint32_t const *), p1))) -#define vld2q(p0) __arm_vld2q(p0) #define __arm_vld2q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce(p0, int16_t const *)), \ @@ -25492,7 +41881,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce(p0, uint32_t const *)))) -#define vld4q(p0) __arm_vld4q(p0) #define __arm_vld4q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce(p0, int16_t const *)), \ @@ -25501,7 +41889,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce(p0, uint16_t const *)), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce(p0, uint32_t const *)))) -#define vgetq_lane(p0,p1) __arm_vgetq_lane(p0,p1) #define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -25513,7 +41900,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1));}) -#define vsetq_lane(p0,p1,p2) __arm_vsetq_lane(p0,p1,p2) #define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -25528,7 +41914,24 @@ extern void *__ARM_undef; #endif /* MVE Integer. */ -#define vmvnq_x(p1,p2) __arm_vmvnq_x(p1,p2) +#define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + + +#define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + + #define __arm_vmvnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -25538,13 +41941,11 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vrev16q_x(p1,p2) __arm_vrev16q_x(p1,p2) #define __arm_vrev16q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2));}) -#define vrhaddq_x(p1,p2,p3) __arm_vrhaddq_x(p1,p2,p3) #define __arm_vrhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25555,7 +41956,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vshlq_x(p0,p1,p2,p3) __arm_vshlq_x(p0,p1,p2,p3) #define __arm_vshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25566,7 +41966,6 @@ extern void *__ARM_undef; int 
(*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vrmulhq_x(p1,p2,p3) __arm_vrmulhq_x(p1,p2,p3) #define __arm_vrmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25577,7 +41976,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vrshlq_x(p1,p2,p3) __arm_vrshlq_x(p1,p2,p3) #define __arm_vrshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25588,7 +41986,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vrshrq_x(p1,p2,p3) __arm_vrshrq_x(p1,p2,p3) #define __arm_vrshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ @@ -25598,7 +41995,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vshllbq_x(p1,p2,p3) __arm_vshllbq_x(p1,p2,p3) #define __arm_vshllbq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ @@ -25606,7 +42002,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));}) -#define vshlltq_x(p1,p2,p3) __arm_vshlltq_x(p1,p2,p3) #define __arm_vshlltq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ @@ -25614,7 +42009,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));}) -#define vshlq_x_n(p1,p2,p3) __arm_vshlq_x_n(p1,p2,p3) #define __arm_vshlq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ @@ -25624,80 +42018,66 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t]: 
__arm_vshlq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vdwdupq_x_u8(p1,p2,p3,p4) __arm_vdwdupq_x_u8(p1,p2,p3,p4) #define __arm_vdwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) -#define vdwdupq_x_u16(p1,p2,p3,p4) __arm_vdwdupq_x_u16(p1,p2,p3,p4) #define __arm_vdwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) -#define vdwdupq_x_u32(p1,p2,p3,p4) __arm_vdwdupq_x_u32(p1,p2,p3,p4) #define __arm_vdwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) -#define viwdupq_x_u8(p1,p2,p3,p4) __arm_viwdupq_x_u8(p1,p2,p3,p4) #define __arm_viwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) -#define viwdupq_x_u16(p1,p2,p3,p4) __arm_viwdupq_x_u16(p1,p2,p3,p4) #define __arm_viwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) -#define viwdupq_x_u32(p1,p2,p3,p4) __arm_viwdupq_x_u32(p1,p2,p3,p4) #define __arm_viwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) -#define vidupq_x_u8(p1,p2,p3) __arm_vidupq_x_u8(p1,p2,p3) #define __arm_vidupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) -#define vddupq_x_u8(p1,p2,p3) __arm_vddupq_x_u8(p1,p2,p3) #define __arm_vddupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) -#define vidupq_x_u16(p1,p2,p3) __arm_vidupq_x_u16(p1,p2,p3) #define __arm_vidupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \ int 
(*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) -#define vddupq_x_u16(p1,p2,p3) __arm_vddupq_x_u16(p1,p2,p3) #define __arm_vddupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) -#define vidupq_x_u32(p1,p2,p3) __arm_vidupq_x_u32(p1,p2,p3) #define __arm_vidupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) -#define vddupq_x_u32(p1,p2,p3) __arm_vddupq_x_u32(p1,p2,p3) #define __arm_vddupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) -#define vhaddq_x(p1,p2,p3) __arm_vhaddq_x(p1,p2,p3) -#define vshrq_x(p1,p2,p3) __arm_vshrq_x(p1,p2,p3) #define __arm_vshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \ @@ -25723,7 +42103,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vhcaddq_rot270_x(p1,p2,p3) __arm_vhcaddq_rot270_x(p1,p2,p3) #define __arm_vhcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25731,7 +42110,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vhcaddq_rot90_x(p1,p2,p3) __arm_vhcaddq_rot90_x(p1,p2,p3) #define __arm_vhcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25739,7 +42117,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vhsubq_x(p1,p2,p3) __arm_vhsubq_x(p1,p2,p3) #define __arm_vhsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -25756,14 +42133,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_x_u16 
(__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vclsq_x(p1,p2) __arm_vclsq_x(p1,p2) #define __arm_vclsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vclzq_x(p1,p2) __arm_vclzq_x(p1,p2) #define __arm_vclzq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -25773,46 +42148,38 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vadciq(p0,p1,p2) __arm_vadciq(p0,p1,p2) #define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vstrdq_scatter_base_wb_p(p0,p1,p2,p3) __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) #define __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));}) -#define vstrdq_scatter_base_wb(p0,p1,p2) __arm_vstrdq_scatter_base_wb(p0,p1,p2) #define __arm_vstrdq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));}) -#define vldrdq_gather_offset(p0,p1) __arm_vldrdq_gather_offset(p0,p1) #define __arm_vldrdq_gather_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_s64 (__ARM_mve_coerce(p0, int64_t const *), p1), \ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_u64 (__ARM_mve_coerce(p0, uint64_t const *), p1))) -#define vldrdq_gather_offset_z(p0,p1,p2) __arm_vldrdq_gather_offset_z(p0,p1,p2) #define __arm_vldrdq_gather_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_z_s64 (__ARM_mve_coerce(p0, int64_t const *), p1, p2), \ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_z_u64 (__ARM_mve_coerce(p0, uint64_t const *), p1, p2))) -#define vldrdq_gather_shifted_offset(p0,p1) __arm_vldrdq_gather_shifted_offset(p0,p1) #define 
__arm_vldrdq_gather_shifted_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_s64 (__ARM_mve_coerce(p0, int64_t const *), p1), \ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_u64 (__ARM_mve_coerce(p0, uint64_t const *), p1))) -#define vldrdq_gather_shifted_offset_z(p0,p1,p2) __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2) #define __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce(p0, int64_t const *), p1, p2), \ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce(p0, uint64_t const *), p1, p2))) -#define vadciq_m(p0,p1,p2,p3,p4) __arm_vadciq_m(p0,p1,p2,p3,p4) #define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25820,14 +42187,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) -#define vadciq(p0,p1,p2) __arm_vadciq(p0,p1,p2) #define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vadcq_m(p0,p1,p2,p3,p4) __arm_vadcq_m(p0,p1,p2,p3,p4) #define __arm_vadcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25835,14 +42200,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) -#define vadcq(p0,p1,p2) __arm_vadcq(p0,p1,p2) #define __arm_vadcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vsbciq_m(p0,p1,p2,p3,p4) __arm_vsbciq_m(p0,p1,p2,p3,p4) #define __arm_vsbciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25850,14 +42213,12 @@ extern void *__ARM_undef; int 
(*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) -#define vsbciq(p0,p1,p2) __arm_vsbciq(p0,p1,p2) #define __arm_vsbciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vsbcq_m(p0,p1,p2,p3,p4) __arm_vsbcq_m(p0,p1,p2,p3,p4) #define __arm_vsbcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25865,14 +42226,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));}) -#define vsbcq(p0,p1,p2) __arm_vsbcq(p0,p1,p2) #define __arm_vsbcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vldrbq_gather_offset_z(p0,p1,p2) __arm_vldrbq_gather_offset_z(p0,p1,p2) #define __arm_vldrbq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_s8 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ @@ -25882,7 +42241,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_u16 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_u32 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vqrdmlahq_m(p0,p1,p2,p3) __arm_vqrdmlahq_m(p0,p1,p2,p3) #define __arm_vqrdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25891,7 +42249,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: 
__arm_vqrdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));}) -#define vqrdmlashq_m(p0,p1,p2,p3) __arm_vqrdmlashq_m(p0,p1,p2,p3) #define __arm_vqrdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25900,7 +42257,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));}) -#define vqrshlq_m(p0,p1,p2,p3) __arm_vqrshlq_m(p0,p1,p2,p3) #define __arm_vqrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25912,7 +42268,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vqshlq_m_n(p0,p1,p2,p3) __arm_vqshlq_m_n(p0,p1,p2,p3) #define __arm_vqshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -25923,7 +42278,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vqshlq_m(p0,p1,p2,p3) __arm_vqshlq_m(p0,p1,p2,p3) #define __arm_vqshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25935,7 +42289,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vrhaddq_m(p0,p1,p2,p3) __arm_vrhaddq_m(p0,p1,p2,p3) #define __arm_vrhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25947,7 +42300,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vrmulhq_m(p0,p1,p2,p3) __arm_vrmulhq_m(p0,p1,p2,p3) 
#define __arm_vrmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25959,7 +42311,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vrshlq_m(p0,p1,p2,p3) __arm_vrshlq_m(p0,p1,p2,p3) #define __arm_vrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -25971,7 +42322,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vrshrq_m(p0,p1,p2,p3) __arm_vrshrq_m(p0,p1,p2,p3) #define __arm_vrshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -25982,7 +42332,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vshrq_m(p0,p1,p2,p3) __arm_vshrq_m(p0,p1,p2,p3) #define __arm_vshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -25993,7 +42342,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vsliq_m(p0,p1,p2,p3) __arm_vsliq_m(p0,p1,p2,p3) #define __arm_vsliq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26004,7 +42352,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vqsubq_m(p0,p1,p2,p3) __arm_vqsubq_m(p0,p1,p2,p3) #define __arm_vqsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26022,7 +42369,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), 
__ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vqrdmulhq_m(p0,p1,p2,p3) __arm_vqrdmulhq_m(p0,p1,p2,p3) #define __arm_vqrdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26034,7 +42380,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));}) -#define vqrdmlsdhxq_m(p0,p1,p2,p3) __arm_vqrdmlsdhxq_m(p0,p1,p2,p3) #define __arm_vqrdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26043,7 +42388,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vqrdmlsdhq_m(p0,p1,p2,p3) __arm_vqrdmlsdhq_m(p0,p1,p2,p3) #define __arm_vqrdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26052,7 +42396,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vshllbq_m(p0,p1,p2,p3) __arm_vshllbq_m(p0,p1,p2,p3) #define __arm_vshllbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26061,7 +42404,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshllbq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshllbq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));}) -#define vshrntq_m(p0,p1,p2,p3) __arm_vshrntq_m(p0,p1,p2,p3) #define __arm_vshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26070,7 +42412,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, 
uint32x4_t), p2, p3));}) -#define vshrnbq_m(p0,p1,p2,p3) __arm_vshrnbq_m(p0,p1,p2,p3) #define __arm_vshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26079,7 +42420,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vshlltq_m(p0,p1,p2,p3) __arm_vshlltq_m(p0,p1,p2,p3) #define __arm_vshlltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26088,7 +42428,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshlltq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshlltq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));}) -#define vrshrntq_m(p0,p1,p2,p3) __arm_vrshrntq_m(p0,p1,p2,p3) #define __arm_vrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26097,21 +42436,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vqshruntq_m(p0,p1,p2,p3) __arm_vqshruntq_m(p0,p1,p2,p3) #define __arm_vqshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));}) -#define vqshrunbq_m(p0,p1,p2,p3) __arm_vqshrunbq_m(p0,p1,p2,p3) #define __arm_vqshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));}) -#define vqrshrnbq_m(p0,p1,p2,p3) __arm_vqrshrnbq_m(p0,p1,p2,p3) #define __arm_vqrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26120,7 +42456,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: 
__arm_vqrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vqrshrntq_m(p0,p1,p2,p3) __arm_vqrshrntq_m(p0,p1,p2,p3) #define __arm_vqrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26129,21 +42464,18 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vqrshrunbq_m(p0,p1,p2,p3) __arm_vqrshrunbq_m(p0,p1,p2,p3) #define __arm_vqrshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));}) -#define vqrshruntq_m(p0,p1,p2,p3) __arm_vqrshruntq_m(p0,p1,p2,p3) #define __arm_vqrshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));}) -#define vqshrnbq_m(p0,p1,p2,p3) __arm_vqshrnbq_m(p0,p1,p2,p3) #define __arm_vqshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26152,7 +42484,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vqshrntq_m(p0,p1,p2,p3) __arm_vqshrntq_m(p0,p1,p2,p3) #define __arm_vqshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26161,7 +42492,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vrshrnbq_m(p0,p1,p2,p3) __arm_vrshrnbq_m(p0,p1,p2,p3) #define __arm_vrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26170,7 +42500,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), 
__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vmlaldavaq_p(p0,p1,p2,p3) __arm_vmlaldavaq_p(p0,p1,p2,p3) #define __arm_vmlaldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26180,7 +42509,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmlaldavaxq_p(p0,p1,p2,p3) __arm_vmlaldavaxq_p(p0,p1,p2,p3) #define __arm_vmlaldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26190,7 +42518,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaxq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaxq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmlsldavaq_p(p0,p1,p2,p3) __arm_vmlsldavaq_p(p0,p1,p2,p3) #define __arm_vmlsldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26198,7 +42525,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vmlsldavaxq_p(p0,p1,p2,p3) __arm_vmlsldavaxq_p(p0,p1,p2,p3) #define __arm_vmlsldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26206,7 +42532,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vrmlaldavhaq_p(p0,p1,p2,p3) __arm_vrmlaldavhaq_p(p0,p1,p2,p3) #define __arm_vrmlaldavhaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26214,16 +42539,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p(p0,p1,p2,p3) #define 
__arm_vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p_s32(p0,p1,p2,p3) -#define vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p(p0,p1,p2,p3) #define __arm_vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p_s32(p0,p1,p2,p3) -#define vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p(p0,p1,p2,p3) #define __arm_vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p_s32(p0,p1,p2,p3) -#define vqdmladhq_m(p0,p1,p2,p3) __arm_vqdmladhq_m(p0,p1,p2,p3) #define __arm_vqdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26232,7 +42553,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vqdmladhxq_m(p0,p1,p2,p3) __arm_vqdmladhxq_m(p0,p1,p2,p3) #define __arm_vqdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26241,7 +42561,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vqdmlsdhq_m(p0,p1,p2,p3) __arm_vqdmlsdhq_m(p0,p1,p2,p3) #define __arm_vqdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26250,7 +42569,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vqdmlsdhxq_m(p0,p1,p2,p3) __arm_vqdmlsdhxq_m(p0,p1,p2,p3) #define __arm_vqdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26259,7 +42577,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vqabsq_m(p0,p1,p2) __arm_vqabsq_m(p0,p1,p2) #define __arm_vqabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26267,7 +42584,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), 
p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vmvnq_m(p0,p1,p2) __arm_vmvnq_m(p0,p1,p2) #define __arm_vmvnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26282,7 +42598,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1(__p1, int) , p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1(__p1, int) , p2));}) -#define vorrq_m_n(p0,p1,p2) __arm_vorrq_m_n(p0,p1,p2) #define __arm_vorrq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int16x8_t]: __arm_vorrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ @@ -26290,14 +42605,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vqshrunbq(p0,p1,p2) __arm_vqshrunbq(p0,p1,p2) #define __arm_vqshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqshluq_m(p0,p1,p2,p3) __arm_vqshluq_m(p0,p1,p2,p3) #define __arm_vqshluq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26305,7 +42618,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshluq_m_n_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshluq_m_n_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));}) -#define vshlq_m(p0,p1,p2,p3) __arm_vshlq_m(p0,p1,p2,p3) #define __arm_vshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26317,7 +42629,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vshlq_m_n(p0,p1,p2,p3) __arm_vshlq_m_n(p0,p1,p2,p3) #define __arm_vshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26328,7 +42639,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int 
(*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2) #define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ @@ -26338,7 +42648,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) -#define vsriq_m(p0,p1,p2,p3) __arm_vsriq_m(p0,p1,p2,p3) #define __arm_vsriq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26349,7 +42658,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));}) -#define vhaddq_m(p0,p1,p2,p3) __arm_vhaddq_m(p0,p1,p2,p3) #define __arm_vhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26367,7 +42675,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vhcaddq_rot270_m(p0,p1,p2,p3) __arm_vhcaddq_rot270_m(p0,p1,p2,p3) #define __arm_vhcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26376,7 +42683,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vhcaddq_rot90_m(p0,p1,p2,p3) __arm_vhcaddq_rot90_m(p0,p1,p2,p3) #define __arm_vhcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26385,7 +42691,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vhsubq_m(p0,p1,p2,p3) __arm_vhsubq_m(p0,p1,p2,p3) #define __arm_vhsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ 
__typeof(p2) __p2 = (p2); \ @@ -26403,7 +42708,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));}) -#define vmaxq_m(p0,p1,p2,p3) __arm_vmaxq_m(p0,p1,p2,p3) #define __arm_vmaxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26415,7 +42719,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vminq_m(p0,p1,p2,p3) __arm_vminq_m(p0,p1,p2,p3) #define __arm_vminq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26427,7 +42730,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmlaq_m(p0,p1,p2,p3) __arm_vmlaq_m(p0,p1,p2,p3) #define __arm_vmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26439,7 +42741,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));}) -#define vmlasq_m(p0,p1,p2,p3) __arm_vmlasq_m(p0,p1,p2,p3) #define __arm_vmlasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26451,7 +42752,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));}) -#define vmulhq_m(p0,p1,p2,p3) __arm_vmulhq_m(p0,p1,p2,p3) #define __arm_vmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26463,7 +42763,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: 
__arm_vmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmullbq_int_m(p0,p1,p2,p3) __arm_vmullbq_int_m(p0,p1,p2,p3) #define __arm_vmullbq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26475,7 +42774,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmulltq_int_m(p0,p1,p2,p3) __arm_vmulltq_int_m(p0,p1,p2,p3) #define __arm_vmulltq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26487,7 +42785,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmulltq_poly_m(p0,p1,p2,p3) __arm_vmulltq_poly_m(p0,p1,p2,p3) #define __arm_vmulltq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26495,7 +42792,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));}) -#define vqaddq_m(p0,p1,p2,p3) __arm_vqaddq_m(p0,p1,p2,p3) #define __arm_vqaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26513,7 +42809,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vqdmlahq_m(p0,p1,p2,p3) __arm_vqdmlahq_m(p0,p1,p2,p3) #define __arm_vqdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26522,7 +42817,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), 
__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));}) -#define vqdmulhq_m(p0,p1,p2,p3) __arm_vqdmulhq_m(p0,p1,p2,p3) #define __arm_vqdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26534,7 +42828,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vqdmullbq_m(p0,p1,p2,p3) __arm_vqdmullbq_m(p0,p1,p2,p3) #define __arm_vqdmullbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26544,7 +42837,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));}) -#define vqdmulltq_m(p0,p1,p2,p3) __arm_vqdmulltq_m(p0,p1,p2,p3) #define __arm_vqdmulltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26554,7 +42846,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vqrdmladhq_m(p0,p1,p2,p3) __arm_vqrdmladhq_m(p0,p1,p2,p3) #define __arm_vqrdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26563,7 +42854,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vqrdmladhxq_m(p0,p1,p2,p3) __arm_vqrdmladhxq_m(p0,p1,p2,p3) #define __arm_vqrdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26572,7 +42862,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int 
(*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vmlsdavaxq_p(p0,p1,p2,p3) __arm_vmlsdavaxq_p(p0,p1,p2,p3) #define __arm_vmlsdavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26581,7 +42870,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vmlsdavaq_p(p0,p1,p2,p3) __arm_vmlsdavaq_p(p0,p1,p2,p3) #define __arm_vmlsdavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26590,7 +42878,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vmladavaxq_p(p0,p1,p2,p3) __arm_vmladavaxq_p(p0,p1,p2,p3) #define __arm_vmladavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26599,7 +42886,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));}) -#define vmullbq_poly_m(p0,p1,p2,p3) __arm_vmullbq_poly_m(p0,p1,p2,p3) #define __arm_vmullbq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26607,7 +42893,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));}) -#define vldrbq_gather_offset(p0,p1) __arm_vldrbq_gather_offset(p0,p1) #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \ @@ -26617,7 +42902,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));}) 
-#define vidupq_m(p0,p1,p2,p3) __arm_vidupq_m(p0,p1,p2,p3) #define __arm_vidupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26628,7 +42912,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) -#define vddupq_m(p0,p1,p2,p3) __arm_vddupq_m(p0,p1,p2,p3) #define __arm_vddupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26639,43 +42922,36 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));}) -#define vidupq_u16(p0,p1) __arm_vidupq_u16(p0,p1) #define __arm_vidupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) -#define vidupq_u32(p0,p1) __arm_vidupq_u32(p0,p1) #define __arm_vidupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) -#define vidupq_u8(p0,p1) __arm_vidupq_u8(p0,p1) #define __arm_vidupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) -#define vddupq_u16(p0,p1) __arm_vddupq_u16(p0,p1) #define __arm_vddupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) -#define vddupq_u32(p0,p1) __arm_vddupq_u32(p0,p1) #define __arm_vddupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) -#define vddupq_u8(p0,p1) __arm_vddupq_u8(p0,p1) #define __arm_vddupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));}) -#define viwdupq_m(p0,p1,p2,p3,p4) __arm_viwdupq_m(p0,p1,p2,p3,p4) #define __arm_viwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26686,25 +42962,21 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) -#define viwdupq_u16(p0,p1,p2) __arm_viwdupq_u16(p0,p1,p2) #define __arm_viwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) -#define viwdupq_u32(p0,p1,p2) __arm_viwdupq_u32(p0,p1,p2) #define __arm_viwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) -#define viwdupq_u8(p0,p1,p2) __arm_viwdupq_u8(p0,p1,p2) #define __arm_viwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) -#define vdwdupq_m(p0,p1,p2,p3,p4) __arm_vdwdupq_m(p0,p1,p2,p3,p4) #define __arm_vdwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26715,25 +42987,21 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));}) -#define vdwdupq_u16(p0,p1,p2) __arm_vdwdupq_u16(p0,p1,p2) #define __arm_vdwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) -#define vdwdupq_u32(p0,p1,p2) __arm_vdwdupq_u32(p0,p1,p2) #define __arm_vdwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) -#define vdwdupq_u8(p0,p1,p2) __arm_vdwdupq_u8(p0,p1,p2) #define __arm_vdwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));}) -#define vshlcq_m(p0,p1,p2,p3) __arm_vshlcq_m(p0,p1,p2,p3) #define __arm_vshlcq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int 
(*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2, p3), \ @@ -26743,7 +43011,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2, p3), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2, p3));}) -#define vabavq(p0,p1,p2) __arm_vabavq(p0,p1,p2) #define __arm_vabavq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26755,7 +43022,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vabavq_p(p0,p1,p2,p3) __arm_vabavq_p(p0,p1,p2,p3) #define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26767,33 +43033,28 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vaddlvaq(p0,p1) __arm_vaddlvaq(p0,p1) #define __arm_vaddlvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t)), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vaddlvaq_p(p0,p1,p2) __arm_vaddlvaq_p(p0,p1,p2) #define __arm_vaddlvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vaddlvq(p0) __arm_vaddlvq(p0) #define __arm_vaddlvq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) -#define vaddlvq_p(p0,p1) __arm_vaddlvq_p(p0,p1) #define __arm_vaddlvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vaddvaq(p0,p1) __arm_vaddvaq(p0,p1) #define __arm_vaddvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26804,7 +43065,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_u16 
(__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vaddvaq_p(p0,p1,p2) __arm_vaddvaq_p(p0,p1,p2) #define __arm_vaddvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26815,7 +43075,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vaddvq(p0) __arm_vaddvq(p0) #define __arm_vaddvq(p0) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \ @@ -26825,7 +43084,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) -#define vaddvq_p(p0,p1) __arm_vaddvq_p(p0,p1) #define __arm_vaddvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \ @@ -26835,7 +43093,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) -#define vcmpcsq(p0,p1) __arm_vcmpcsq(p0,p1) #define __arm_vcmpcsq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26846,7 +43103,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));}) -#define vcmpcsq_m(p0,p1,p2) __arm_vcmpcsq_m(p0,p1,p2) #define __arm_vcmpcsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26857,7 +43113,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));}) -#define vcmphiq(p0,p1) __arm_vcmphiq(p0,p1) #define __arm_vcmphiq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26868,7 +43123,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));}) -#define vcmphiq_m(p0,p1,p2) __arm_vcmphiq_m(p0,p1,p2) #define 
__arm_vcmphiq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26879,7 +43133,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vmaxavq(p0,p1) __arm_vmaxavq(p0,p1) #define __arm_vmaxavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26887,7 +43140,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vmaxavq_p(p0,p1,p2) __arm_vmaxavq_p(p0,p1,p2) #define __arm_vmaxavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26895,7 +43147,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_p_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_p_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vmaxq_x(p1,p2,p3) __arm_vmaxq_x(p1,p2,p3) #define __arm_vmaxq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -26906,7 +43157,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmaxvq(p0,p1) __arm_vmaxvq(p0,p1) #define __arm_vmaxvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26917,7 +43167,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmaxvq_p(p0,p1,p2) __arm_vmaxvq_p(p0,p1,p2) #define __arm_vmaxvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26928,7 +43177,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_p_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vminavq(p0,p1) __arm_vminavq(p0,p1) #define __arm_vminavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26936,7 +43184,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminavq_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminavq_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vminavq_p(p0,p1,p2) __arm_vminavq_p(p0,p1,p2) #define __arm_vminavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26944,7 +43191,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminavq_p_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminavq_p_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vminq_x(p1,p2,p3) __arm_vminq_x(p1,p2,p3) #define __arm_vminq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -26955,7 +43201,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vminvq(p0,p1) __arm_vminvq(p0,p1) #define __arm_vminvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26966,7 +43211,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vminvq_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vminvq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vminvq_p(p0,p1,p2) __arm_vminvq_p(p0,p1,p2) #define __arm_vminvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -26977,7 +43221,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vminvq_p_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vminvq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vmladavaq(p0,p1,p2) __arm_vmladavaq(p0,p1,p2) #define __arm_vmladavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -26989,7 +43232,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vmladavaq_p(p0,p1,p2,p3) __arm_vmladavaq_p(p0,p1,p2,p3) #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ 
-27001,7 +43243,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmladavaxq(p0,p1,p2) __arm_vmladavaxq(p0,p1,p2) #define __arm_vmladavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -27013,7 +43254,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaxq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaxq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vmladavq(p0,p1) __arm_vmladavq(p0,p1) #define __arm_vmladavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -27024,7 +43264,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmladavq_p(p0,p1,p2) __arm_vmladavq_p(p0,p1,p2) #define __arm_vmladavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -27035,7 +43274,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vmladavxq(p0,p1) __arm_vmladavxq(p0,p1) #define __arm_vmladavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -27046,7 +43284,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmladavxq_p(p0,p1,p2) __arm_vmladavxq_p(p0,p1,p2) #define __arm_vmladavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -27054,7 +43291,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vmlaldavaq(p0,p1,p2) 
__arm_vmlaldavaq(p0,p1,p2) #define __arm_vmlaldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -27064,7 +43300,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vmlaldavaxq(p0,p1,p2) __arm_vmlaldavaxq(p0,p1,p2) #define __arm_vmlaldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -27072,7 +43307,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vmlaldavq(p0,p1) __arm_vmlaldavq(p0,p1) #define __arm_vmlaldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -27081,7 +43315,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vmlaldavq_p(p0,p1,p2) __arm_vmlaldavq_p(p0,p1,p2) #define __arm_vmlaldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -27090,14 +43323,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vmlaldavxq_p(p0,p1,p2) __arm_vmlaldavxq_p(p0,p1,p2) #define __arm_vmlaldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vmlsdavaq(p0,p1,p2) __arm_vmlsdavaq(p0,p1,p2) #define __arm_vmlsdavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -27106,7 +43337,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), 
__ARM_mve_coerce(__p2, int32x4_t)));}) -#define vmlsdavaxq(p0,p1,p2) __arm_vmlsdavaxq(p0,p1,p2) #define __arm_vmlsdavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -27115,7 +43345,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vmlsdavq(p0,p1) __arm_vmlsdavq(p0,p1) #define __arm_vmlsdavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -27123,7 +43352,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vmlsdavq_p(p0,p1,p2) __arm_vmlsdavq_p(p0,p1,p2) #define __arm_vmlsdavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -27131,7 +43359,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vmlsdavxq(p0,p1) __arm_vmlsdavxq(p0,p1) #define __arm_vmlsdavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -27139,7 +43366,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vmlsdavxq_p(p0,p1,p2) __arm_vmlsdavxq_p(p0,p1,p2) #define __arm_vmlsdavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ @@ -27147,7 +43373,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vmlsldavaq(p0,p1,p2) __arm_vmlsldavaq(p0,p1,p2) #define __arm_vmlsldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -27155,7 +43380,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vmlsldavaxq(p0,p1,p2) __arm_vmlsldavaxq(p0,p1,p2) #define 
__arm_vmlsldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -27163,35 +43387,30 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vmlsldavq(p0,p1) __arm_vmlsldavq(p0,p1) #define __arm_vmlsldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vmlsldavq_p(p0,p1,p2) __arm_vmlsldavq_p(p0,p1,p2) #define __arm_vmlsldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vmlsldavxq(p0,p1) __arm_vmlsldavxq(p0,p1) #define __arm_vmlsldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));}) -#define vmlsldavxq_p(p0,p1,p2) __arm_vmlsldavxq_p(p0,p1,p2) #define __arm_vmlsldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vmovlbq_x(p1,p2) __arm_vmovlbq_x(p1,p2) #define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -27199,7 +43418,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));}) -#define vmovltq_x(p1,p2) __arm_vmovltq_x(p1,p2) #define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -27207,7 +43425,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \ 
int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));}) -#define vmulhq_x(p1,p2,p3) __arm_vmulhq_x(p1,p2,p3) #define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -27218,7 +43435,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmullbq_int_x(p1,p2,p3) __arm_vmullbq_int_x(p1,p2,p3) #define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -27229,14 +43445,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmullbq_poly_x(p1,p2,p3) __arm_vmullbq_poly_x(p1,p2,p3) #define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));}) -#define vmulltq_int_x(p1,p2,p3) __arm_vmulltq_int_x(p1,p2,p3) #define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ @@ -27247,14 +43461,12 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) -#define vmulltq_poly_x(p1,p2,p3) __arm_vmulltq_poly_x(p1,p2,p3) #define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));}) -#define vrmlaldavhaq(p0,p1,p2) __arm_vrmlaldavhaq(p0,p1,p2) #define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ @@ -27262,48 +43474,36 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ int 
(*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) -#define vrmlaldavhaxq(p0,p1,p2) __arm_vrmlaldavhaxq(p0,p1,p2) #define __arm_vrmlaldavhaxq(p0,p1,p2) __arm_vrmlaldavhaxq_s32(p0,p1,p2) -#define vrmlaldavhq(p0,p1) __arm_vrmlaldavhq(p0,p1) #define __arm_vrmlaldavhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vrmlaldavhq_p(p0,p1,p2) __arm_vrmlaldavhq_p(p0,p1,p2) #define __arm_vrmlaldavhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vrmlaldavhxq(p0,p1) __arm_vrmlaldavhxq(p0,p1) #define __arm_vrmlaldavhxq(p0,p1) __arm_vrmlaldavhxq_s32(p0,p1) -#define vrmlaldavhxq_p(p0,p1,p2) __arm_vrmlaldavhxq_p(p0,p1,p2) #define __arm_vrmlaldavhxq_p(p0,p1,p2) __arm_vrmlaldavhxq_p_s32(p0,p1,p2) -#define vrmlsldavhaq(p0,p1,p2) __arm_vrmlsldavhaq(p0,p1,p2) #define __arm_vrmlsldavhaq(p0,p1,p2) __arm_vrmlsldavhaq_s32(p0,p1,p2) -#define vrmlsldavhaxq(p0,p1,p2) __arm_vrmlsldavhaxq(p0,p1,p2) #define __arm_vrmlsldavhaxq(p0,p1,p2) __arm_vrmlsldavhaxq_s32(p0,p1,p2) -#define vrmlsldavhq(p0,p1) __arm_vrmlsldavhq(p0,p1) #define __arm_vrmlsldavhq(p0,p1) __arm_vrmlsldavhq_s32(p0,p1) -#define vrmlsldavhq_p(p0,p1,p2) __arm_vrmlsldavhq_p(p0,p1,p2) #define __arm_vrmlsldavhq_p(p0,p1,p2) __arm_vrmlsldavhq_p_s32(p0,p1,p2) -#define vrmlsldavhxq(p0,p1) __arm_vrmlsldavhxq(p0,p1) #define __arm_vrmlsldavhxq(p0,p1) __arm_vrmlsldavhxq_s32(p0,p1) -#define vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p(p0,p1,p2) #define __arm_vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p_s32(p0,p1,p2) -#define vstrbq(p0,p1) __arm_vstrbq(p0,p1) #define __arm_vstrbq(p0,p1) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \ @@ -27313,7 +43513,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));}) -#define vstrbq_p(p0,p1,p2) __arm_vstrbq_p(p0,p1,p2) #define __arm_vstrbq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \ @@ -27323,7 +43522,6 @@ extern void *__ARM_undef; 
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

-#define vstrbq_scatter_offset(p0,p1,p2) __arm_vstrbq_scatter_offset(p0,p1,p2)
#define __arm_vstrbq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
@@ -27335,7 +43533,6 @@ extern void *__ARM_undef;
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

-#define vstrbq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3) ({__typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
@@ -27346,49 +43543,40 @@ extern void *__ARM_undef;
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

-#define vstrdq_scatter_base(p0,p1,p2) __arm_vstrdq_scatter_base(p0,p1,p2)
#define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})

-#define vstrdq_scatter_base_p(p0,p1,p2,p3) __arm_vstrdq_scatter_base_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

-#define vstrdq_scatter_offset(p0,p1,p2) __arm_vstrdq_scatter_offset(p0,p1,p2)
#define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce(p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce(p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));})

-#define vstrdq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce(p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce(p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

-#define vstrdq_scatter_shifted_offset(p0,p1,p2) __arm_vstrdq_scatter_shifted_offset(p0,p1,p2)
#define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce(p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce(p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));})

-#define vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce(p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce(p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

-#ifdef __cplusplus
-}
-#endif
-
+#endif /* __cplusplus */
#endif /* __ARM_FEATURE_MVE */

#endif /* _GCC_ARM_MVE_H. */
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 75a2d3dc69d..4d547a5bade 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,9 @@
+2020-04-07  Andre Vieira
+
+	* g++.target/arm/mve.exp: New.
+	* gcc.target/arm/mve/intrinsics/vcmpneq_n_f16: Fix testism.
+	* gcc.target/arm/mve/intrinsics/vcmpneq_n_f32: Likewise.
+
 2020-04-07  Andre Vieira

	* gcc.target/arm/mve/intrinsics/vuninitializedq_float.c: Fix testism.
diff --git a/gcc/testsuite/g++.target/arm/mve.exp b/gcc/testsuite/g++.target/arm/mve.exp
new file mode 100644
index 00000000000..08f8d4d87f6
--- /dev/null
+++ b/gcc/testsuite/g++.target/arm/mve.exp
@@ -0,0 +1,49 @@
+# Copyright (C) 2019-2020 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3.  If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an ARM target.
+if ![istarget arm*-*-*] then {
+  return
+}
+
+# Load support procs.
+load_lib g++-dg.exp
+
+# If a testcase doesn't have special options, use these.
+global DEFAULT_CXXFLAGS
+if ![info exists DEFAULT_CXXFLAGS] then {
+  set DEFAULT_CXXFLAGS " -pedantic-errors"
+}
+
+# This variable should only apply to tests called in this exp file.
+global dg_runtest_extra_prunes
+set dg_runtest_extra_prunes ""
+lappend dg_runtest_extra_prunes "warning: switch -m(cpu|arch)=.* conflicts with -m(cpu|arch)=.* switch"
+
+set dg-do-what-default "assemble"
+
+# Initialize `dg'.
+dg-init
+
+# Main loop.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/../../gcc.target/arm/mve/intrinsics/*.\[cCS\]]] \
+	"" $DEFAULT_CXXFLAGS
+
+# All done.
+set dg_runtest_extra_prunes ""
+dg-finish
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16.c
index 569207086b0..907fa5d50f6 100644
--- a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16.c
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f16.c
@@ -15,7 +15,7 @@ foo (float16x8_t a, float16_t b)
 mve_pred16_t
 foo1 (float16x8_t a, float16_t b)
 {
-  return vcmpgeq_n (a, b);
+  return vcmpgeq (a, b);
 }

 /* { dg-final { scan-assembler "vcmp.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32.c
index 4df0941264d..e4d1406c049 100644
--- a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32.c
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_f32.c
@@ -15,7 +15,7 @@ foo (float32x4_t a, float32_t b)
 mve_pred16_t
 foo1 (float32x4_t a, float32_t b)
 {
-  return vcmpgeq_n (a, b);
+  return vcmpgeq (a, b);
 }

 /* { dg-final { scan-assembler "vcmp.f32" } } */
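
For reference, a minimal usage sketch (not part of the patch) of the polymorphic spelling the adjusted tests now exercise. In C the generic name goes through the _Generic macros kept above; under C++, where that macro block now ends at the `#endif /* __cplusplus */` guard, the overloaded `__arm_` functions are used instead. The function name and compile options below are illustrative assumptions, not taken from the patch.

    /* Sketch only: assumes MVE FP options such as
       -march=armv8.1-m.main+mve.fp -mfloat-abi=hard.  */
    #include <arm_mve.h>

    mve_pred16_t
    cmp_ge_f16 (float16x8_t a, float16_t b)
    {
      /* Generic name, no _n suffix; for a vector/scalar pair like this it
         should resolve to the f16 "_n" variant, as in the fixed foo1 tests.  */
      return vcmpgeq (a, b);
    }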