From 8165795c1555c83c0c6c68650321540f9253d461 Mon Sep 17 00:00:00 2001
From: Srinath Parvathaneni
Date: Wed, 18 Mar 2020 16:26:53 +0000
Subject: [PATCH] [ARM][GCC][2/3x]: MVE intrinsics with ternary operands.

This patch supports the following MVE ACLE intrinsics with ternary operands.

vpselq_u8, vpselq_s8, vrev64q_m_u8, vqrdmlashq_n_u8, vqrdmlahq_n_u8, vqdmlahq_n_u8, vmvnq_m_u8, vmlasq_n_u8, vmlaq_n_u8, vmladavq_p_u8, vmladavaq_u8, vminvq_p_u8, vmaxvq_p_u8, vdupq_m_n_u8, vcmpneq_m_u8, vcmpneq_m_n_u8, vcmphiq_m_u8, vcmphiq_m_n_u8, vcmpeqq_m_u8, vcmpeqq_m_n_u8, vcmpcsq_m_u8, vcmpcsq_m_n_u8, vclzq_m_u8, vaddvaq_p_u8, vsriq_n_u8, vsliq_n_u8, vshlq_m_r_u8, vrshlq_m_n_u8, vqshlq_m_r_u8, vqrshlq_m_n_u8, vminavq_p_s8, vminaq_m_s8, vmaxavq_p_s8, vmaxaq_m_s8, vcmpneq_m_s8, vcmpneq_m_n_s8, vcmpltq_m_s8, vcmpltq_m_n_s8, vcmpleq_m_s8, vcmpleq_m_n_s8, vcmpgtq_m_s8, vcmpgtq_m_n_s8, vcmpgeq_m_s8, vcmpgeq_m_n_s8, vcmpeqq_m_s8, vcmpeqq_m_n_s8, vshlq_m_r_s8, vrshlq_m_n_s8, vrev64q_m_s8, vqshlq_m_r_s8, vqrshlq_m_n_s8, vqnegq_m_s8, vqabsq_m_s8, vnegq_m_s8, vmvnq_m_s8, vmlsdavxq_p_s8, vmlsdavq_p_s8, vmladavxq_p_s8, vmladavq_p_s8, vminvq_p_s8, vmaxvq_p_s8, vdupq_m_n_s8, vclzq_m_s8, vclsq_m_s8, vaddvaq_p_s8, vabsq_m_s8, vqrdmlsdhxq_s8, vqrdmlsdhq_s8, vqrdmlashq_n_s8, vqrdmlahq_n_s8, vqrdmladhxq_s8, vqrdmladhq_s8, vqdmlsdhxq_s8, vqdmlsdhq_s8, vqdmlahq_n_s8, vqdmladhxq_s8, vqdmladhq_s8, vmlsdavaxq_s8, vmlsdavaq_s8, vmlasq_n_s8, vmlaq_n_s8, vmladavaxq_s8, vmladavaq_s8, vsriq_n_s8, vsliq_n_s8, vpselq_u16, vpselq_s16, vrev64q_m_u16, vqrdmlashq_n_u16, vqrdmlahq_n_u16, vqdmlahq_n_u16, vmvnq_m_u16, vmlasq_n_u16, vmlaq_n_u16, vmladavq_p_u16, vmladavaq_u16, vminvq_p_u16, vmaxvq_p_u16, vdupq_m_n_u16, vcmpneq_m_u16, vcmpneq_m_n_u16, vcmphiq_m_u16, vcmphiq_m_n_u16, vcmpeqq_m_u16, vcmpeqq_m_n_u16, vcmpcsq_m_u16, vcmpcsq_m_n_u16, vclzq_m_u16, vaddvaq_p_u16, vsriq_n_u16, vsliq_n_u16, vshlq_m_r_u16, vrshlq_m_n_u16, vqshlq_m_r_u16, vqrshlq_m_n_u16, vminavq_p_s16, vminaq_m_s16, vmaxavq_p_s16, vmaxaq_m_s16, vcmpneq_m_s16, vcmpneq_m_n_s16, vcmpltq_m_s16, vcmpltq_m_n_s16, vcmpleq_m_s16, vcmpleq_m_n_s16, vcmpgtq_m_s16, vcmpgtq_m_n_s16, vcmpgeq_m_s16, vcmpgeq_m_n_s16, vcmpeqq_m_s16, vcmpeqq_m_n_s16, vshlq_m_r_s16, vrshlq_m_n_s16, vrev64q_m_s16, vqshlq_m_r_s16, vqrshlq_m_n_s16, vqnegq_m_s16, vqabsq_m_s16, vnegq_m_s16, vmvnq_m_s16, vmlsdavxq_p_s16, vmlsdavq_p_s16, vmladavxq_p_s16, vmladavq_p_s16, vminvq_p_s16, vmaxvq_p_s16, vdupq_m_n_s16, vclzq_m_s16, vclsq_m_s16, vaddvaq_p_s16, vabsq_m_s16, vqrdmlsdhxq_s16, vqrdmlsdhq_s16, vqrdmlashq_n_s16, vqrdmlahq_n_s16, vqrdmladhxq_s16, vqrdmladhq_s16, vqdmlsdhxq_s16, vqdmlsdhq_s16, vqdmlahq_n_s16, vqdmladhxq_s16, vqdmladhq_s16, vmlsdavaxq_s16, vmlsdavaq_s16, vmlasq_n_s16, vmlaq_n_s16, vmladavaxq_s16, vmladavaq_s16, vsriq_n_s16, vsliq_n_s16, vpselq_u32, vpselq_s32, vrev64q_m_u32, vqrdmlashq_n_u32, vqrdmlahq_n_u32, vqdmlahq_n_u32, vmvnq_m_u32, vmlasq_n_u32, vmlaq_n_u32, vmladavq_p_u32, vmladavaq_u32, vminvq_p_u32, vmaxvq_p_u32, vdupq_m_n_u32, vcmpneq_m_u32, vcmpneq_m_n_u32, vcmphiq_m_u32, vcmphiq_m_n_u32, vcmpeqq_m_u32, vcmpeqq_m_n_u32, vcmpcsq_m_u32, vcmpcsq_m_n_u32, vclzq_m_u32, vaddvaq_p_u32, vsriq_n_u32, vsliq_n_u32, vshlq_m_r_u32, vrshlq_m_n_u32, vqshlq_m_r_u32, vqrshlq_m_n_u32, vminavq_p_s32, vminaq_m_s32, vmaxavq_p_s32, vmaxaq_m_s32, vcmpneq_m_s32, vcmpneq_m_n_s32, vcmpltq_m_s32, vcmpltq_m_n_s32, vcmpleq_m_s32, vcmpleq_m_n_s32, vcmpgtq_m_s32, vcmpgtq_m_n_s32, vcmpgeq_m_s32, vcmpgeq_m_n_s32, vcmpeqq_m_s32, vcmpeqq_m_n_s32, vshlq_m_r_s32, vrshlq_m_n_s32, vrev64q_m_s32, vqshlq_m_r_s32, vqrshlq_m_n_s32, vqnegq_m_s32, vqabsq_m_s32, vnegq_m_s32, vmvnq_m_s32, vmlsdavxq_p_s32, vmlsdavq_p_s32, vmladavxq_p_s32, vmladavq_p_s32, vminvq_p_s32, vmaxvq_p_s32, vdupq_m_n_s32, vclzq_m_s32, vclsq_m_s32, vaddvaq_p_s32, vabsq_m_s32, vqrdmlsdhxq_s32, vqrdmlsdhq_s32, vqrdmlashq_n_s32, vqrdmlahq_n_s32, vqrdmladhxq_s32, vqrdmladhq_s32, vqdmlsdhxq_s32, vqdmlsdhq_s32, vqdmlahq_n_s32, vqdmladhxq_s32, vqdmladhq_s32, vmlsdavaxq_s32, vmlsdavaq_s32, vmlasq_n_s32, vmlaq_n_s32, vmladavaxq_s32, vmladavaq_s32, vsriq_n_s32, vsliq_n_s32, vpselq_u64, vpselq_s64.

Please refer to M-profile Vector Extension (MVE) intrinsics [1] for more details.

[1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics

In this patch new constraints "Rc" and "Re" are added, which check that the constant is within the range of 0 to 15 and 0 to 31 respectively. Also, new predicates "mve_imm_15" and "mve_imm_31" are added to check the matching constraints Rc and Re respectively. (A brief usage sketch of a few of these intrinsics follows the diffstat below.)

2020-03-18 Andre Vieira Mihail Ionescu Srinath Parvathaneni

* config/arm/arm_mve.h (vpselq_u8): Define macro. (vpselq_s8): Likewise. (vrev64q_m_u8): Likewise. (vqrdmlashq_n_u8): Likewise. (vqrdmlahq_n_u8): Likewise. (vqdmlahq_n_u8): Likewise. (vmvnq_m_u8): Likewise. (vmlasq_n_u8): Likewise. (vmlaq_n_u8): Likewise. (vmladavq_p_u8): Likewise. (vmladavaq_u8): Likewise. (vminvq_p_u8): Likewise. (vmaxvq_p_u8): Likewise. (vdupq_m_n_u8): Likewise. (vcmpneq_m_u8): Likewise. (vcmpneq_m_n_u8): Likewise. (vcmphiq_m_u8): Likewise. (vcmphiq_m_n_u8): Likewise. (vcmpeqq_m_u8): Likewise. (vcmpeqq_m_n_u8): Likewise. (vcmpcsq_m_u8): Likewise. (vcmpcsq_m_n_u8): Likewise. (vclzq_m_u8): Likewise. (vaddvaq_p_u8): Likewise. (vsriq_n_u8): Likewise. (vsliq_n_u8): Likewise. (vshlq_m_r_u8): Likewise. (vrshlq_m_n_u8): Likewise. (vqshlq_m_r_u8): Likewise. (vqrshlq_m_n_u8): Likewise. (vminavq_p_s8): Likewise. (vminaq_m_s8): Likewise. (vmaxavq_p_s8): Likewise. (vmaxaq_m_s8): Likewise. (vcmpneq_m_s8): Likewise. (vcmpneq_m_n_s8): Likewise. (vcmpltq_m_s8): Likewise. (vcmpltq_m_n_s8): Likewise. (vcmpleq_m_s8): Likewise. (vcmpleq_m_n_s8): Likewise. (vcmpgtq_m_s8): Likewise. (vcmpgtq_m_n_s8): Likewise. (vcmpgeq_m_s8): Likewise. (vcmpgeq_m_n_s8): Likewise. (vcmpeqq_m_s8): Likewise. (vcmpeqq_m_n_s8): Likewise. (vshlq_m_r_s8): Likewise. (vrshlq_m_n_s8): Likewise. (vrev64q_m_s8): Likewise. (vqshlq_m_r_s8): Likewise. (vqrshlq_m_n_s8): Likewise. (vqnegq_m_s8): Likewise. (vqabsq_m_s8): Likewise. (vnegq_m_s8): Likewise. (vmvnq_m_s8): Likewise. (vmlsdavxq_p_s8): Likewise. (vmlsdavq_p_s8): Likewise. (vmladavxq_p_s8): Likewise. (vmladavq_p_s8): Likewise. (vminvq_p_s8): Likewise. (vmaxvq_p_s8): Likewise. (vdupq_m_n_s8): Likewise. (vclzq_m_s8): Likewise. (vclsq_m_s8): Likewise. (vaddvaq_p_s8): Likewise. (vabsq_m_s8): Likewise. (vqrdmlsdhxq_s8): Likewise. (vqrdmlsdhq_s8): Likewise. (vqrdmlashq_n_s8): Likewise. (vqrdmlahq_n_s8): Likewise. (vqrdmladhxq_s8): Likewise. (vqrdmladhq_s8): Likewise. (vqdmlsdhxq_s8): Likewise. (vqdmlsdhq_s8): Likewise. (vqdmlahq_n_s8): Likewise. (vqdmladhxq_s8): Likewise. (vqdmladhq_s8): Likewise. (vmlsdavaxq_s8): Likewise. (vmlsdavaq_s8): Likewise. (vmlasq_n_s8): Likewise. (vmlaq_n_s8): Likewise. (vmladavaxq_s8): Likewise. (vmladavaq_s8): Likewise. (vsriq_n_s8): Likewise. (vsliq_n_s8): Likewise. (vpselq_u16): Likewise. (vpselq_s16): Likewise. (vrev64q_m_u16): Likewise. (vqrdmlashq_n_u16): Likewise. (vqrdmlahq_n_u16): Likewise. (vqdmlahq_n_u16): Likewise. (vmvnq_m_u16): Likewise. (vmlasq_n_u16): Likewise. (vmlaq_n_u16): Likewise.
(vmladavq_p_u16): Likewise. (vmladavaq_u16): Likewise. (vminvq_p_u16): Likewise. (vmaxvq_p_u16): Likewise. (vdupq_m_n_u16): Likewise. (vcmpneq_m_u16): Likewise. (vcmpneq_m_n_u16): Likewise. (vcmphiq_m_u16): Likewise. (vcmphiq_m_n_u16): Likewise. (vcmpeqq_m_u16): Likewise. (vcmpeqq_m_n_u16): Likewise. (vcmpcsq_m_u16): Likewise. (vcmpcsq_m_n_u16): Likewise. (vclzq_m_u16): Likewise. (vaddvaq_p_u16): Likewise. (vsriq_n_u16): Likewise. (vsliq_n_u16): Likewise. (vshlq_m_r_u16): Likewise. (vrshlq_m_n_u16): Likewise. (vqshlq_m_r_u16): Likewise. (vqrshlq_m_n_u16): Likewise. (vminavq_p_s16): Likewise. (vminaq_m_s16): Likewise. (vmaxavq_p_s16): Likewise. (vmaxaq_m_s16): Likewise. (vcmpneq_m_s16): Likewise. (vcmpneq_m_n_s16): Likewise. (vcmpltq_m_s16): Likewise. (vcmpltq_m_n_s16): Likewise. (vcmpleq_m_s16): Likewise. (vcmpleq_m_n_s16): Likewise. (vcmpgtq_m_s16): Likewise. (vcmpgtq_m_n_s16): Likewise. (vcmpgeq_m_s16): Likewise. (vcmpgeq_m_n_s16): Likewise. (vcmpeqq_m_s16): Likewise. (vcmpeqq_m_n_s16): Likewise. (vshlq_m_r_s16): Likewise. (vrshlq_m_n_s16): Likewise. (vrev64q_m_s16): Likewise. (vqshlq_m_r_s16): Likewise. (vqrshlq_m_n_s16): Likewise. (vqnegq_m_s16): Likewise. (vqabsq_m_s16): Likewise. (vnegq_m_s16): Likewise. (vmvnq_m_s16): Likewise. (vmlsdavxq_p_s16): Likewise. (vmlsdavq_p_s16): Likewise. (vmladavxq_p_s16): Likewise. (vmladavq_p_s16): Likewise. (vminvq_p_s16): Likewise. (vmaxvq_p_s16): Likewise. (vdupq_m_n_s16): Likewise. (vclzq_m_s16): Likewise. (vclsq_m_s16): Likewise. (vaddvaq_p_s16): Likewise. (vabsq_m_s16): Likewise. (vqrdmlsdhxq_s16): Likewise. (vqrdmlsdhq_s16): Likewise. (vqrdmlashq_n_s16): Likewise. (vqrdmlahq_n_s16): Likewise. (vqrdmladhxq_s16): Likewise. (vqrdmladhq_s16): Likewise. (vqdmlsdhxq_s16): Likewise. (vqdmlsdhq_s16): Likewise. (vqdmlahq_n_s16): Likewise. (vqdmladhxq_s16): Likewise. (vqdmladhq_s16): Likewise. (vmlsdavaxq_s16): Likewise. (vmlsdavaq_s16): Likewise. (vmlasq_n_s16): Likewise. (vmlaq_n_s16): Likewise. (vmladavaxq_s16): Likewise. (vmladavaq_s16): Likewise. (vsriq_n_s16): Likewise. (vsliq_n_s16): Likewise. (vpselq_u32): Likewise. (vpselq_s32): Likewise. (vrev64q_m_u32): Likewise. (vqrdmlashq_n_u32): Likewise. (vqrdmlahq_n_u32): Likewise. (vqdmlahq_n_u32): Likewise. (vmvnq_m_u32): Likewise. (vmlasq_n_u32): Likewise. (vmlaq_n_u32): Likewise. (vmladavq_p_u32): Likewise. (vmladavaq_u32): Likewise. (vminvq_p_u32): Likewise. (vmaxvq_p_u32): Likewise. (vdupq_m_n_u32): Likewise. (vcmpneq_m_u32): Likewise. (vcmpneq_m_n_u32): Likewise. (vcmphiq_m_u32): Likewise. (vcmphiq_m_n_u32): Likewise. (vcmpeqq_m_u32): Likewise. (vcmpeqq_m_n_u32): Likewise. (vcmpcsq_m_u32): Likewise. (vcmpcsq_m_n_u32): Likewise. (vclzq_m_u32): Likewise. (vaddvaq_p_u32): Likewise. (vsriq_n_u32): Likewise. (vsliq_n_u32): Likewise. (vshlq_m_r_u32): Likewise. (vrshlq_m_n_u32): Likewise. (vqshlq_m_r_u32): Likewise. (vqrshlq_m_n_u32): Likewise. (vminavq_p_s32): Likewise. (vminaq_m_s32): Likewise. (vmaxavq_p_s32): Likewise. (vmaxaq_m_s32): Likewise. (vcmpneq_m_s32): Likewise. (vcmpneq_m_n_s32): Likewise. (vcmpltq_m_s32): Likewise. (vcmpltq_m_n_s32): Likewise. (vcmpleq_m_s32): Likewise. (vcmpleq_m_n_s32): Likewise. (vcmpgtq_m_s32): Likewise. (vcmpgtq_m_n_s32): Likewise. (vcmpgeq_m_s32): Likewise. (vcmpgeq_m_n_s32): Likewise. (vcmpeqq_m_s32): Likewise. (vcmpeqq_m_n_s32): Likewise. (vshlq_m_r_s32): Likewise. (vrshlq_m_n_s32): Likewise. (vrev64q_m_s32): Likewise. (vqshlq_m_r_s32): Likewise. (vqrshlq_m_n_s32): Likewise. (vqnegq_m_s32): Likewise. (vqabsq_m_s32): Likewise. (vnegq_m_s32): Likewise. 
(vmvnq_m_s32): Likewise. (vmlsdavxq_p_s32): Likewise. (vmlsdavq_p_s32): Likewise. (vmladavxq_p_s32): Likewise. (vmladavq_p_s32): Likewise. (vminvq_p_s32): Likewise. (vmaxvq_p_s32): Likewise. (vdupq_m_n_s32): Likewise. (vclzq_m_s32): Likewise. (vclsq_m_s32): Likewise. (vaddvaq_p_s32): Likewise. (vabsq_m_s32): Likewise. (vqrdmlsdhxq_s32): Likewise. (vqrdmlsdhq_s32): Likewise. (vqrdmlashq_n_s32): Likewise. (vqrdmlahq_n_s32): Likewise. (vqrdmladhxq_s32): Likewise. (vqrdmladhq_s32): Likewise. (vqdmlsdhxq_s32): Likewise. (vqdmlsdhq_s32): Likewise. (vqdmlahq_n_s32): Likewise. (vqdmladhxq_s32): Likewise. (vqdmladhq_s32): Likewise. (vmlsdavaxq_s32): Likewise. (vmlsdavaq_s32): Likewise. (vmlasq_n_s32): Likewise. (vmlaq_n_s32): Likewise. (vmladavaxq_s32): Likewise. (vmladavaq_s32): Likewise. (vsriq_n_s32): Likewise. (vsliq_n_s32): Likewise. (vpselq_u64): Likewise. (vpselq_s64): Likewise. (__arm_vpselq_u8): Define intrinsic. (__arm_vpselq_s8): Likewise. (__arm_vrev64q_m_u8): Likewise. (__arm_vqrdmlashq_n_u8): Likewise. (__arm_vqrdmlahq_n_u8): Likewise. (__arm_vqdmlahq_n_u8): Likewise. (__arm_vmvnq_m_u8): Likewise. (__arm_vmlasq_n_u8): Likewise. (__arm_vmlaq_n_u8): Likewise. (__arm_vmladavq_p_u8): Likewise. (__arm_vmladavaq_u8): Likewise. (__arm_vminvq_p_u8): Likewise. (__arm_vmaxvq_p_u8): Likewise. (__arm_vdupq_m_n_u8): Likewise. (__arm_vcmpneq_m_u8): Likewise. (__arm_vcmpneq_m_n_u8): Likewise. (__arm_vcmphiq_m_u8): Likewise. (__arm_vcmphiq_m_n_u8): Likewise. (__arm_vcmpeqq_m_u8): Likewise. (__arm_vcmpeqq_m_n_u8): Likewise. (__arm_vcmpcsq_m_u8): Likewise. (__arm_vcmpcsq_m_n_u8): Likewise. (__arm_vclzq_m_u8): Likewise. (__arm_vaddvaq_p_u8): Likewise. (__arm_vsriq_n_u8): Likewise. (__arm_vsliq_n_u8): Likewise. (__arm_vshlq_m_r_u8): Likewise. (__arm_vrshlq_m_n_u8): Likewise. (__arm_vqshlq_m_r_u8): Likewise. (__arm_vqrshlq_m_n_u8): Likewise. (__arm_vminavq_p_s8): Likewise. (__arm_vminaq_m_s8): Likewise. (__arm_vmaxavq_p_s8): Likewise. (__arm_vmaxaq_m_s8): Likewise. (__arm_vcmpneq_m_s8): Likewise. (__arm_vcmpneq_m_n_s8): Likewise. (__arm_vcmpltq_m_s8): Likewise. (__arm_vcmpltq_m_n_s8): Likewise. (__arm_vcmpleq_m_s8): Likewise. (__arm_vcmpleq_m_n_s8): Likewise. (__arm_vcmpgtq_m_s8): Likewise. (__arm_vcmpgtq_m_n_s8): Likewise. (__arm_vcmpgeq_m_s8): Likewise. (__arm_vcmpgeq_m_n_s8): Likewise. (__arm_vcmpeqq_m_s8): Likewise. (__arm_vcmpeqq_m_n_s8): Likewise. (__arm_vshlq_m_r_s8): Likewise. (__arm_vrshlq_m_n_s8): Likewise. (__arm_vrev64q_m_s8): Likewise. (__arm_vqshlq_m_r_s8): Likewise. (__arm_vqrshlq_m_n_s8): Likewise. (__arm_vqnegq_m_s8): Likewise. (__arm_vqabsq_m_s8): Likewise. (__arm_vnegq_m_s8): Likewise. (__arm_vmvnq_m_s8): Likewise. (__arm_vmlsdavxq_p_s8): Likewise. (__arm_vmlsdavq_p_s8): Likewise. (__arm_vmladavxq_p_s8): Likewise. (__arm_vmladavq_p_s8): Likewise. (__arm_vminvq_p_s8): Likewise. (__arm_vmaxvq_p_s8): Likewise. (__arm_vdupq_m_n_s8): Likewise. (__arm_vclzq_m_s8): Likewise. (__arm_vclsq_m_s8): Likewise. (__arm_vaddvaq_p_s8): Likewise. (__arm_vabsq_m_s8): Likewise. (__arm_vqrdmlsdhxq_s8): Likewise. (__arm_vqrdmlsdhq_s8): Likewise. (__arm_vqrdmlashq_n_s8): Likewise. (__arm_vqrdmlahq_n_s8): Likewise. (__arm_vqrdmladhxq_s8): Likewise. (__arm_vqrdmladhq_s8): Likewise. (__arm_vqdmlsdhxq_s8): Likewise. (__arm_vqdmlsdhq_s8): Likewise. (__arm_vqdmlahq_n_s8): Likewise. (__arm_vqdmladhxq_s8): Likewise. (__arm_vqdmladhq_s8): Likewise. (__arm_vmlsdavaxq_s8): Likewise. (__arm_vmlsdavaq_s8): Likewise. (__arm_vmlasq_n_s8): Likewise. (__arm_vmlaq_n_s8): Likewise. (__arm_vmladavaxq_s8): Likewise. 
(__arm_vmladavaq_s8): Likewise. (__arm_vsriq_n_s8): Likewise. (__arm_vsliq_n_s8): Likewise. (__arm_vpselq_u16): Likewise. (__arm_vpselq_s16): Likewise. (__arm_vrev64q_m_u16): Likewise. (__arm_vqrdmlashq_n_u16): Likewise. (__arm_vqrdmlahq_n_u16): Likewise. (__arm_vqdmlahq_n_u16): Likewise. (__arm_vmvnq_m_u16): Likewise. (__arm_vmlasq_n_u16): Likewise. (__arm_vmlaq_n_u16): Likewise. (__arm_vmladavq_p_u16): Likewise. (__arm_vmladavaq_u16): Likewise. (__arm_vminvq_p_u16): Likewise. (__arm_vmaxvq_p_u16): Likewise. (__arm_vdupq_m_n_u16): Likewise. (__arm_vcmpneq_m_u16): Likewise. (__arm_vcmpneq_m_n_u16): Likewise. (__arm_vcmphiq_m_u16): Likewise. (__arm_vcmphiq_m_n_u16): Likewise. (__arm_vcmpeqq_m_u16): Likewise. (__arm_vcmpeqq_m_n_u16): Likewise. (__arm_vcmpcsq_m_u16): Likewise. (__arm_vcmpcsq_m_n_u16): Likewise. (__arm_vclzq_m_u16): Likewise. (__arm_vaddvaq_p_u16): Likewise. (__arm_vsriq_n_u16): Likewise. (__arm_vsliq_n_u16): Likewise. (__arm_vshlq_m_r_u16): Likewise. (__arm_vrshlq_m_n_u16): Likewise. (__arm_vqshlq_m_r_u16): Likewise. (__arm_vqrshlq_m_n_u16): Likewise. (__arm_vminavq_p_s16): Likewise. (__arm_vminaq_m_s16): Likewise. (__arm_vmaxavq_p_s16): Likewise. (__arm_vmaxaq_m_s16): Likewise. (__arm_vcmpneq_m_s16): Likewise. (__arm_vcmpneq_m_n_s16): Likewise. (__arm_vcmpltq_m_s16): Likewise. (__arm_vcmpltq_m_n_s16): Likewise. (__arm_vcmpleq_m_s16): Likewise. (__arm_vcmpleq_m_n_s16): Likewise. (__arm_vcmpgtq_m_s16): Likewise. (__arm_vcmpgtq_m_n_s16): Likewise. (__arm_vcmpgeq_m_s16): Likewise. (__arm_vcmpgeq_m_n_s16): Likewise. (__arm_vcmpeqq_m_s16): Likewise. (__arm_vcmpeqq_m_n_s16): Likewise. (__arm_vshlq_m_r_s16): Likewise. (__arm_vrshlq_m_n_s16): Likewise. (__arm_vrev64q_m_s16): Likewise. (__arm_vqshlq_m_r_s16): Likewise. (__arm_vqrshlq_m_n_s16): Likewise. (__arm_vqnegq_m_s16): Likewise. (__arm_vqabsq_m_s16): Likewise. (__arm_vnegq_m_s16): Likewise. (__arm_vmvnq_m_s16): Likewise. (__arm_vmlsdavxq_p_s16): Likewise. (__arm_vmlsdavq_p_s16): Likewise. (__arm_vmladavxq_p_s16): Likewise. (__arm_vmladavq_p_s16): Likewise. (__arm_vminvq_p_s16): Likewise. (__arm_vmaxvq_p_s16): Likewise. (__arm_vdupq_m_n_s16): Likewise. (__arm_vclzq_m_s16): Likewise. (__arm_vclsq_m_s16): Likewise. (__arm_vaddvaq_p_s16): Likewise. (__arm_vabsq_m_s16): Likewise. (__arm_vqrdmlsdhxq_s16): Likewise. (__arm_vqrdmlsdhq_s16): Likewise. (__arm_vqrdmlashq_n_s16): Likewise. (__arm_vqrdmlahq_n_s16): Likewise. (__arm_vqrdmladhxq_s16): Likewise. (__arm_vqrdmladhq_s16): Likewise. (__arm_vqdmlsdhxq_s16): Likewise. (__arm_vqdmlsdhq_s16): Likewise. (__arm_vqdmlahq_n_s16): Likewise. (__arm_vqdmladhxq_s16): Likewise. (__arm_vqdmladhq_s16): Likewise. (__arm_vmlsdavaxq_s16): Likewise. (__arm_vmlsdavaq_s16): Likewise. (__arm_vmlasq_n_s16): Likewise. (__arm_vmlaq_n_s16): Likewise. (__arm_vmladavaxq_s16): Likewise. (__arm_vmladavaq_s16): Likewise. (__arm_vsriq_n_s16): Likewise. (__arm_vsliq_n_s16): Likewise. (__arm_vpselq_u32): Likewise. (__arm_vpselq_s32): Likewise. (__arm_vrev64q_m_u32): Likewise. (__arm_vqrdmlashq_n_u32): Likewise. (__arm_vqrdmlahq_n_u32): Likewise. (__arm_vqdmlahq_n_u32): Likewise. (__arm_vmvnq_m_u32): Likewise. (__arm_vmlasq_n_u32): Likewise. (__arm_vmlaq_n_u32): Likewise. (__arm_vmladavq_p_u32): Likewise. (__arm_vmladavaq_u32): Likewise. (__arm_vminvq_p_u32): Likewise. (__arm_vmaxvq_p_u32): Likewise. (__arm_vdupq_m_n_u32): Likewise. (__arm_vcmpneq_m_u32): Likewise. (__arm_vcmpneq_m_n_u32): Likewise. (__arm_vcmphiq_m_u32): Likewise. (__arm_vcmphiq_m_n_u32): Likewise. (__arm_vcmpeqq_m_u32): Likewise. 
(__arm_vcmpeqq_m_n_u32): Likewise. (__arm_vcmpcsq_m_u32): Likewise. (__arm_vcmpcsq_m_n_u32): Likewise. (__arm_vclzq_m_u32): Likewise. (__arm_vaddvaq_p_u32): Likewise. (__arm_vsriq_n_u32): Likewise. (__arm_vsliq_n_u32): Likewise. (__arm_vshlq_m_r_u32): Likewise. (__arm_vrshlq_m_n_u32): Likewise. (__arm_vqshlq_m_r_u32): Likewise. (__arm_vqrshlq_m_n_u32): Likewise. (__arm_vminavq_p_s32): Likewise. (__arm_vminaq_m_s32): Likewise. (__arm_vmaxavq_p_s32): Likewise. (__arm_vmaxaq_m_s32): Likewise. (__arm_vcmpneq_m_s32): Likewise. (__arm_vcmpneq_m_n_s32): Likewise. (__arm_vcmpltq_m_s32): Likewise. (__arm_vcmpltq_m_n_s32): Likewise. (__arm_vcmpleq_m_s32): Likewise. (__arm_vcmpleq_m_n_s32): Likewise. (__arm_vcmpgtq_m_s32): Likewise. (__arm_vcmpgtq_m_n_s32): Likewise. (__arm_vcmpgeq_m_s32): Likewise. (__arm_vcmpgeq_m_n_s32): Likewise. (__arm_vcmpeqq_m_s32): Likewise. (__arm_vcmpeqq_m_n_s32): Likewise. (__arm_vshlq_m_r_s32): Likewise. (__arm_vrshlq_m_n_s32): Likewise. (__arm_vrev64q_m_s32): Likewise. (__arm_vqshlq_m_r_s32): Likewise. (__arm_vqrshlq_m_n_s32): Likewise. (__arm_vqnegq_m_s32): Likewise. (__arm_vqabsq_m_s32): Likewise. (__arm_vnegq_m_s32): Likewise. (__arm_vmvnq_m_s32): Likewise. (__arm_vmlsdavxq_p_s32): Likewise. (__arm_vmlsdavq_p_s32): Likewise. (__arm_vmladavxq_p_s32): Likewise. (__arm_vmladavq_p_s32): Likewise. (__arm_vminvq_p_s32): Likewise. (__arm_vmaxvq_p_s32): Likewise. (__arm_vdupq_m_n_s32): Likewise. (__arm_vclzq_m_s32): Likewise. (__arm_vclsq_m_s32): Likewise. (__arm_vaddvaq_p_s32): Likewise. (__arm_vabsq_m_s32): Likewise. (__arm_vqrdmlsdhxq_s32): Likewise. (__arm_vqrdmlsdhq_s32): Likewise. (__arm_vqrdmlashq_n_s32): Likewise. (__arm_vqrdmlahq_n_s32): Likewise. (__arm_vqrdmladhxq_s32): Likewise. (__arm_vqrdmladhq_s32): Likewise. (__arm_vqdmlsdhxq_s32): Likewise. (__arm_vqdmlsdhq_s32): Likewise. (__arm_vqdmlahq_n_s32): Likewise. (__arm_vqdmladhxq_s32): Likewise. (__arm_vqdmladhq_s32): Likewise. (__arm_vmlsdavaxq_s32): Likewise. (__arm_vmlsdavaq_s32): Likewise. (__arm_vmlasq_n_s32): Likewise. (__arm_vmlaq_n_s32): Likewise. (__arm_vmladavaxq_s32): Likewise. (__arm_vmladavaq_s32): Likewise. (__arm_vsriq_n_s32): Likewise. (__arm_vsliq_n_s32): Likewise. (__arm_vpselq_u64): Likewise. (__arm_vpselq_s64): Likewise. (vcmpneq_m_n): Define polymorphic variant. (vcmpneq_m): Likewise. (vqrdmlsdhq): Likewise. (vqrdmlsdhxq): Likewise. (vqrshlq_m_n): Likewise. (vqshlq_m_r): Likewise. (vrev64q_m): Likewise. (vrshlq_m_n): Likewise. (vshlq_m_r): Likewise. (vsliq_n): Likewise. (vsriq_n): Likewise. (vqrdmlashq_n): Likewise. (vqrdmlahq): Likewise. (vqrdmladhxq): Likewise. (vqrdmladhq): Likewise. (vqnegq_m): Likewise. (vqdmlsdhxq): Likewise. (vabsq_m): Likewise. (vclsq_m): Likewise. (vclzq_m): Likewise. (vcmpgeq_m): Likewise. (vcmpgeq_m_n): Likewise. (vdupq_m_n): Likewise. (vmaxaq_m): Likewise. (vmlaq_n): Likewise. (vmlasq_n): Likewise. (vmvnq_m): Likewise. (vnegq_m): Likewise. (vpselq): Likewise. (vqdmlahq_n): Likewise. (vqrdmlahq_n): Likewise. (vqdmlsdhq): Likewise. (vqdmladhq): Likewise. (vqabsq_m): Likewise. (vminaq_m): Likewise. (vrmlaldavhaq): Likewise. (vmlsdavxq_p): Likewise. (vmlsdavq_p): Likewise. (vmlsdavaxq): Likewise. (vmlsdavaq): Likewise. (vaddvaq_p): Likewise. (vcmpcsq_m_n): Likewise. (vcmpcsq_m): Likewise. (vcmpeqq_m_n): Likewise. (vcmpeqq_m): Likewise. (vmladavxq_p): Likewise. (vmladavq_p): Likewise. (vmladavaxq): Likewise. (vmladavaq): Likewise. (vminvq_p): Likewise. (vminavq_p): Likewise. (vmaxvq_p): Likewise. (vmaxavq_p): Likewise. (vcmpltq_m_n): Likewise. (vcmpltq_m): Likewise. 
(vcmpleq_m): Likewise. (vcmpleq_m_n): Likewise. (vcmphiq_m_n): Likewise. (vcmphiq_m): Likewise. (vcmpgtq_m_n): Likewise. (vcmpgtq_m): Likewise. * config/arm/arm_mve_builtins.def (TERNOP_NONE_NONE_NONE_IMM): Use builtin qualifier. (TERNOP_NONE_NONE_NONE_NONE): Likewise. (TERNOP_NONE_NONE_NONE_UNONE): Likewise. (TERNOP_UNONE_NONE_NONE_UNONE): Likewise. (TERNOP_UNONE_UNONE_NONE_UNONE): Likewise. (TERNOP_UNONE_UNONE_UNONE_IMM): Likewise. (TERNOP_UNONE_UNONE_UNONE_UNONE): Likewise. * config/arm/constraints.md (Rc): Define constraint to check constant is in the range of 0 to 15. (Re): Define constraint to check constant is in the range of 0 to 31. * config/arm/mve.md (VADDVAQ_P): Define iterator. (VCLZQ_M): Likewise. (VCMPEQQ_M_N): Likewise. (VCMPEQQ_M): Likewise. (VCMPNEQ_M_N): Likewise. (VCMPNEQ_M): Likewise. (VDUPQ_M_N): Likewise. (VMAXVQ_P): Likewise. (VMINVQ_P): Likewise. (VMLADAVAQ): Likewise. (VMLADAVQ_P): Likewise. (VMLAQ_N): Likewise. (VMLASQ_N): Likewise. (VMVNQ_M): Likewise. (VPSELQ): Likewise. (VQDMLAHQ_N): Likewise. (VQRDMLAHQ_N): Likewise. (VQRDMLASHQ_N): Likewise. (VQRSHLQ_M_N): Likewise. (VQSHLQ_M_R): Likewise. (VREV64Q_M): Likewise. (VRSHLQ_M_N): Likewise. (VSHLQ_M_R): Likewise. (VSLIQ_N): Likewise. (VSRIQ_N): Likewise. (mve_vabsq_m_s): Define RTL pattern. (mve_vaddvaq_p_): Likewise. (mve_vclsq_m_s): Likewise. (mve_vclzq_m_): Likewise. (mve_vcmpcsq_m_n_u): Likewise. (mve_vcmpcsq_m_u): Likewise. (mve_vcmpeqq_m_n_): Likewise. (mve_vcmpeqq_m_): Likewise. (mve_vcmpgeq_m_n_s): Likewise. (mve_vcmpgeq_m_s): Likewise. (mve_vcmpgtq_m_n_s): Likewise. (mve_vcmpgtq_m_s): Likewise. (mve_vcmphiq_m_n_u): Likewise. (mve_vcmphiq_m_u): Likewise. (mve_vcmpleq_m_n_s): Likewise. (mve_vcmpleq_m_s): Likewise. (mve_vcmpltq_m_n_s): Likewise. (mve_vcmpltq_m_s): Likewise. (mve_vcmpneq_m_n_): Likewise. (mve_vcmpneq_m_): Likewise. (mve_vdupq_m_n_): Likewise. (mve_vmaxaq_m_s): Likewise. (mve_vmaxavq_p_s): Likewise. (mve_vmaxvq_p_): Likewise. (mve_vminaq_m_s): Likewise. (mve_vminavq_p_s): Likewise. (mve_vminvq_p_): Likewise. (mve_vmladavaq_): Likewise. (mve_vmladavq_p_): Likewise. (mve_vmladavxq_p_s): Likewise. (mve_vmlaq_n_): Likewise. (mve_vmlasq_n_): Likewise. (mve_vmlsdavq_p_s): Likewise. (mve_vmlsdavxq_p_s): Likewise. (mve_vmvnq_m_): Likewise. (mve_vnegq_m_s): Likewise. (mve_vpselq_): Likewise. (mve_vqabsq_m_s): Likewise. (mve_vqdmlahq_n_): Likewise. (mve_vqnegq_m_s): Likewise. (mve_vqrdmladhq_s): Likewise. (mve_vqrdmladhxq_s): Likewise. (mve_vqrdmlahq_n_): Likewise. (mve_vqrdmlashq_n_): Likewise. (mve_vqrdmlsdhq_s): Likewise. (mve_vqrdmlsdhxq_s): Likewise. (mve_vqrshlq_m_n_): Likewise. (mve_vqshlq_m_r_): Likewise. (mve_vrev64q_m_): Likewise. (mve_vrshlq_m_n_): Likewise. (mve_vshlq_m_r_): Likewise. (mve_vsliq_n_): Likewise. (mve_vsriq_n_): Likewise. (mve_vqdmlsdhxq_s): Likewise. (mve_vqdmlsdhq_s): Likewise. (mve_vqdmladhxq_s): Likewise. (mve_vqdmladhq_s): Likewise. (mve_vmlsdavaxq_s): Likewise. (mve_vmlsdavaq_s): Likewise. (mve_vmladavaxq_s): Likewise. * config/arm/predicates.md (mve_imm_15):Define predicate to check the matching constraint Rc. (mve_imm_31): Define predicate to check the matching constraint Re. gcc/testsuite/ChangeLog: 2020-03-18 Andre Vieira Mihail Ionescu Srinath Parvathaneni * gcc.target/arm/mve/intrinsics/vabsq_m_s16.c: New test. * gcc.target/arm/mve/intrinsics/vabsq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vabsq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_p_s32.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vaddvaq_p_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_p_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_p_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_p_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vclsq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vclsq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vclsq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_m_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_m_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_m_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_m_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_m_s32.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vcmpltq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_m_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxaq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxaq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxaq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxavq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxavq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxavq_p_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_p_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_p_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_p_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_p_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminaq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminaq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminaq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminavq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminavq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminavq_p_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_p_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_p_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_p_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_p_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavaq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavaq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavaq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavaq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavaq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavaq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavaxq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavaxq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavaxq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_p_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_p_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_p_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_p_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavxq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavxq_p_s32.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vmladavxq_p_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlaq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlasq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlasq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlasq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlasq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlasq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlasq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavaq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavaq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavaq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavaxq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavaxq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavaxq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavq_p_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_m_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_m_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vnegq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vnegq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vnegq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vpselq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vpselq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vpselq_s64.c: Likewise. * gcc.target/arm/mve/intrinsics/vpselq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vpselq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vpselq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vpselq_u64.c: Likewise. * gcc.target/arm/mve/intrinsics/vpselq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqabsq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqabsq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqabsq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmladhq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmladhq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmladhq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmladhxq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmladhxq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmladhxq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlahq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlahq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlahq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlsdhq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlsdhq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlsdhq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s16.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqnegq_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqnegq_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqnegq_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmladhq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmladhq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmladhq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_m_r_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_m_r_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_m_r_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_m_r_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_m_r_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_m_r_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_m_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_m_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_m_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_m_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_m_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_m_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_m_r_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_m_r_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_m_r_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_m_r_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_m_r_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_m_r_u8.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vsliq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsliq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsliq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsliq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsliq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsliq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsriq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsriq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsriq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsriq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsriq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsriq_n_u8.c: Likewise. --- gcc/ChangeLog | 679 ++++ gcc/config/arm/arm_mve.h | 2966 ++++++++++++++++- gcc/config/arm/arm_mve_builtins.def | 85 + gcc/config/arm/constraints.md | 10 + gcc/config/arm/mve.md | 1032 +++++- gcc/config/arm/predicates.md | 8 + gcc/testsuite/ChangeLog | 262 ++ .../arm/mve/intrinsics/vabsq_m_s16.c | 23 + .../arm/mve/intrinsics/vabsq_m_s32.c | 23 + .../arm/mve/intrinsics/vabsq_m_s8.c | 23 + .../arm/mve/intrinsics/vaddvaq_p_s16.c | 22 + .../arm/mve/intrinsics/vaddvaq_p_s32.c | 22 + .../arm/mve/intrinsics/vaddvaq_p_s8.c | 22 + .../arm/mve/intrinsics/vaddvaq_p_u16.c | 22 + .../arm/mve/intrinsics/vaddvaq_p_u32.c | 22 + .../arm/mve/intrinsics/vaddvaq_p_u8.c | 22 + .../arm/mve/intrinsics/vclsq_m_s16.c | 23 + .../arm/mve/intrinsics/vclsq_m_s32.c | 23 + .../arm/mve/intrinsics/vclsq_m_s8.c | 23 + .../arm/mve/intrinsics/vclzq_m_s16.c | 23 + .../arm/mve/intrinsics/vclzq_m_s32.c | 23 + .../arm/mve/intrinsics/vclzq_m_s8.c | 23 + .../arm/mve/intrinsics/vclzq_m_u16.c | 23 + .../arm/mve/intrinsics/vclzq_m_u32.c | 23 + .../arm/mve/intrinsics/vclzq_m_u8.c | 23 + .../arm/mve/intrinsics/vcmpcsq_m_n_u16.c | 23 + .../arm/mve/intrinsics/vcmpcsq_m_n_u32.c | 23 + .../arm/mve/intrinsics/vcmpcsq_m_n_u8.c | 23 + .../arm/mve/intrinsics/vcmpcsq_m_u16.c | 23 + .../arm/mve/intrinsics/vcmpcsq_m_u32.c | 23 + .../arm/mve/intrinsics/vcmpcsq_m_u8.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_n_s16.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_n_s32.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_n_s8.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_n_u16.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_n_u32.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_n_u8.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_s16.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_s32.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_s8.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_u16.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_u32.c | 23 + .../arm/mve/intrinsics/vcmpeqq_m_u8.c | 23 + .../arm/mve/intrinsics/vcmpgeq_m_n_s16.c | 23 + .../arm/mve/intrinsics/vcmpgeq_m_n_s32.c | 23 + .../arm/mve/intrinsics/vcmpgeq_m_n_s8.c | 23 + .../arm/mve/intrinsics/vcmpgeq_m_s16.c | 23 + .../arm/mve/intrinsics/vcmpgeq_m_s32.c | 23 + .../arm/mve/intrinsics/vcmpgeq_m_s8.c | 23 + .../arm/mve/intrinsics/vcmpgtq_m_n_s16.c | 23 + .../arm/mve/intrinsics/vcmpgtq_m_n_s32.c | 23 + .../arm/mve/intrinsics/vcmpgtq_m_n_s8.c | 23 + .../arm/mve/intrinsics/vcmpgtq_m_s16.c | 23 + .../arm/mve/intrinsics/vcmpgtq_m_s32.c | 23 + .../arm/mve/intrinsics/vcmpgtq_m_s8.c | 23 + .../arm/mve/intrinsics/vcmphiq_m_n_u16.c | 23 + .../arm/mve/intrinsics/vcmphiq_m_n_u32.c | 23 + .../arm/mve/intrinsics/vcmphiq_m_n_u8.c | 23 + .../arm/mve/intrinsics/vcmphiq_m_u16.c | 23 + .../arm/mve/intrinsics/vcmphiq_m_u32.c | 23 + .../arm/mve/intrinsics/vcmphiq_m_u8.c | 23 + .../arm/mve/intrinsics/vcmpleq_m_n_s16.c | 23 + .../arm/mve/intrinsics/vcmpleq_m_n_s32.c | 23 + 
.../arm/mve/intrinsics/vcmpleq_m_n_s8.c | 23 + .../arm/mve/intrinsics/vcmpleq_m_s16.c | 23 + .../arm/mve/intrinsics/vcmpleq_m_s32.c | 23 + .../arm/mve/intrinsics/vcmpleq_m_s8.c | 23 + .../arm/mve/intrinsics/vcmpltq_m_n_s16.c | 23 + .../arm/mve/intrinsics/vcmpltq_m_n_s32.c | 23 + .../arm/mve/intrinsics/vcmpltq_m_n_s8.c | 23 + .../arm/mve/intrinsics/vcmpltq_m_s16.c | 23 + .../arm/mve/intrinsics/vcmpltq_m_s32.c | 23 + .../arm/mve/intrinsics/vcmpltq_m_s8.c | 23 + .../arm/mve/intrinsics/vcmpltq_n_f16.c | 2 +- .../arm/mve/intrinsics/vcmpltq_n_f32.c | 2 +- .../arm/mve/intrinsics/vcmpneq_m_n_s16.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_n_s32.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_n_s8.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_n_u16.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_n_u32.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_n_u8.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_s16.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_s32.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_s8.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_u16.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_u32.c | 23 + .../arm/mve/intrinsics/vcmpneq_m_u8.c | 23 + .../arm/mve/intrinsics/vdupq_m_n_s16.c | 23 + .../arm/mve/intrinsics/vdupq_m_n_s32.c | 23 + .../arm/mve/intrinsics/vdupq_m_n_s8.c | 23 + .../arm/mve/intrinsics/vdupq_m_n_u16.c | 23 + .../arm/mve/intrinsics/vdupq_m_n_u32.c | 23 + .../arm/mve/intrinsics/vdupq_m_n_u8.c | 23 + .../arm/mve/intrinsics/vmaxaq_m_s16.c | 23 + .../arm/mve/intrinsics/vmaxaq_m_s32.c | 23 + .../arm/mve/intrinsics/vmaxaq_m_s8.c | 23 + .../arm/mve/intrinsics/vmaxavq_p_s16.c | 22 + .../arm/mve/intrinsics/vmaxavq_p_s32.c | 22 + .../arm/mve/intrinsics/vmaxavq_p_s8.c | 22 + .../arm/mve/intrinsics/vmaxvq_p_s16.c | 22 + .../arm/mve/intrinsics/vmaxvq_p_s32.c | 22 + .../arm/mve/intrinsics/vmaxvq_p_s8.c | 22 + .../arm/mve/intrinsics/vmaxvq_p_u16.c | 22 + .../arm/mve/intrinsics/vmaxvq_p_u32.c | 22 + .../arm/mve/intrinsics/vmaxvq_p_u8.c | 22 + .../arm/mve/intrinsics/vminaq_m_s16.c | 23 + .../arm/mve/intrinsics/vminaq_m_s32.c | 23 + .../arm/mve/intrinsics/vminaq_m_s8.c | 23 + .../arm/mve/intrinsics/vminavq_p_s16.c | 22 + .../arm/mve/intrinsics/vminavq_p_s32.c | 22 + .../arm/mve/intrinsics/vminavq_p_s8.c | 22 + .../arm/mve/intrinsics/vminvq_p_s16.c | 22 + .../arm/mve/intrinsics/vminvq_p_s32.c | 22 + .../arm/mve/intrinsics/vminvq_p_s8.c | 22 + .../arm/mve/intrinsics/vminvq_p_u16.c | 22 + .../arm/mve/intrinsics/vminvq_p_u32.c | 22 + .../arm/mve/intrinsics/vminvq_p_u8.c | 22 + .../arm/mve/intrinsics/vmladavaq_s16.c | 22 + .../arm/mve/intrinsics/vmladavaq_s32.c | 22 + .../arm/mve/intrinsics/vmladavaq_s8.c | 22 + .../arm/mve/intrinsics/vmladavaq_u16.c | 22 + .../arm/mve/intrinsics/vmladavaq_u32.c | 22 + .../arm/mve/intrinsics/vmladavaq_u8.c | 22 + .../arm/mve/intrinsics/vmladavaxq_s16.c | 22 + .../arm/mve/intrinsics/vmladavaxq_s32.c | 22 + .../arm/mve/intrinsics/vmladavaxq_s8.c | 22 + .../arm/mve/intrinsics/vmladavq_p_s16.c | 22 + .../arm/mve/intrinsics/vmladavq_p_s32.c | 22 + .../arm/mve/intrinsics/vmladavq_p_s8.c | 22 + .../arm/mve/intrinsics/vmladavq_p_u16.c | 22 + .../arm/mve/intrinsics/vmladavq_p_u32.c | 22 + .../arm/mve/intrinsics/vmladavq_p_u8.c | 22 + .../arm/mve/intrinsics/vmladavxq_p_s16.c | 22 + .../arm/mve/intrinsics/vmladavxq_p_s32.c | 22 + .../arm/mve/intrinsics/vmladavxq_p_s8.c | 22 + .../arm/mve/intrinsics/vmlaq_n_s16.c | 22 + .../arm/mve/intrinsics/vmlaq_n_s32.c | 22 + .../arm/mve/intrinsics/vmlaq_n_s8.c | 22 + .../arm/mve/intrinsics/vmlaq_n_u16.c | 22 + .../arm/mve/intrinsics/vmlaq_n_u32.c | 22 + 
.../arm/mve/intrinsics/vmlaq_n_u8.c | 22 + .../arm/mve/intrinsics/vmlasq_n_s16.c | 22 + .../arm/mve/intrinsics/vmlasq_n_s32.c | 22 + .../arm/mve/intrinsics/vmlasq_n_s8.c | 22 + .../arm/mve/intrinsics/vmlasq_n_u16.c | 22 + .../arm/mve/intrinsics/vmlasq_n_u32.c | 22 + .../arm/mve/intrinsics/vmlasq_n_u8.c | 22 + .../arm/mve/intrinsics/vmlsdavaq_s16.c | 22 + .../arm/mve/intrinsics/vmlsdavaq_s32.c | 22 + .../arm/mve/intrinsics/vmlsdavaq_s8.c | 22 + .../arm/mve/intrinsics/vmlsdavaxq_s16.c | 22 + .../arm/mve/intrinsics/vmlsdavaxq_s32.c | 22 + .../arm/mve/intrinsics/vmlsdavaxq_s8.c | 22 + .../arm/mve/intrinsics/vmlsdavq_p_s16.c | 22 + .../arm/mve/intrinsics/vmlsdavq_p_s32.c | 22 + .../arm/mve/intrinsics/vmlsdavq_p_s8.c | 22 + .../arm/mve/intrinsics/vmlsdavxq_p_s16.c | 22 + .../arm/mve/intrinsics/vmlsdavxq_p_s32.c | 22 + .../arm/mve/intrinsics/vmlsdavxq_p_s8.c | 22 + .../arm/mve/intrinsics/vmvnq_m_s16.c | 23 + .../arm/mve/intrinsics/vmvnq_m_s32.c | 23 + .../arm/mve/intrinsics/vmvnq_m_s8.c | 23 + .../arm/mve/intrinsics/vmvnq_m_u16.c | 23 + .../arm/mve/intrinsics/vmvnq_m_u32.c | 23 + .../arm/mve/intrinsics/vmvnq_m_u8.c | 23 + .../arm/mve/intrinsics/vnegq_m_s16.c | 23 + .../arm/mve/intrinsics/vnegq_m_s32.c | 23 + .../arm/mve/intrinsics/vnegq_m_s8.c | 23 + .../arm/mve/intrinsics/vpselq_s16.c | 22 + .../arm/mve/intrinsics/vpselq_s32.c | 22 + .../arm/mve/intrinsics/vpselq_s64.c | 22 + .../gcc.target/arm/mve/intrinsics/vpselq_s8.c | 22 + .../arm/mve/intrinsics/vpselq_u16.c | 22 + .../arm/mve/intrinsics/vpselq_u32.c | 22 + .../arm/mve/intrinsics/vpselq_u64.c | 22 + .../gcc.target/arm/mve/intrinsics/vpselq_u8.c | 22 + .../arm/mve/intrinsics/vqabsq_m_s16.c | 23 + .../arm/mve/intrinsics/vqabsq_m_s32.c | 23 + .../arm/mve/intrinsics/vqabsq_m_s8.c | 23 + .../arm/mve/intrinsics/vqdmladhq_s16.c | 22 + .../arm/mve/intrinsics/vqdmladhq_s32.c | 22 + .../arm/mve/intrinsics/vqdmladhq_s8.c | 22 + .../arm/mve/intrinsics/vqdmladhxq_s16.c | 22 + .../arm/mve/intrinsics/vqdmladhxq_s32.c | 22 + .../arm/mve/intrinsics/vqdmladhxq_s8.c | 22 + .../arm/mve/intrinsics/vqdmlahq_n_s16.c | 22 + .../arm/mve/intrinsics/vqdmlahq_n_s32.c | 22 + .../arm/mve/intrinsics/vqdmlahq_n_s8.c | 22 + .../arm/mve/intrinsics/vqdmlahq_n_u16.c | 22 + .../arm/mve/intrinsics/vqdmlahq_n_u32.c | 22 + .../arm/mve/intrinsics/vqdmlahq_n_u8.c | 22 + .../arm/mve/intrinsics/vqdmlsdhq_s16.c | 22 + .../arm/mve/intrinsics/vqdmlsdhq_s32.c | 22 + .../arm/mve/intrinsics/vqdmlsdhq_s8.c | 22 + .../arm/mve/intrinsics/vqdmlsdhxq_s16.c | 22 + .../arm/mve/intrinsics/vqdmlsdhxq_s32.c | 22 + .../arm/mve/intrinsics/vqdmlsdhxq_s8.c | 22 + .../arm/mve/intrinsics/vqnegq_m_s16.c | 23 + .../arm/mve/intrinsics/vqnegq_m_s32.c | 23 + .../arm/mve/intrinsics/vqnegq_m_s8.c | 23 + .../arm/mve/intrinsics/vqrdmladhq_s16.c | 22 + .../arm/mve/intrinsics/vqrdmladhq_s32.c | 22 + .../arm/mve/intrinsics/vqrdmladhq_s8.c | 22 + .../arm/mve/intrinsics/vqrdmladhxq_s16.c | 22 + .../arm/mve/intrinsics/vqrdmladhxq_s32.c | 22 + .../arm/mve/intrinsics/vqrdmladhxq_s8.c | 22 + .../arm/mve/intrinsics/vqrdmlahq_n_s16.c | 22 + .../arm/mve/intrinsics/vqrdmlahq_n_s32.c | 22 + .../arm/mve/intrinsics/vqrdmlahq_n_s8.c | 22 + .../arm/mve/intrinsics/vqrdmlahq_n_u16.c | 22 + .../arm/mve/intrinsics/vqrdmlahq_n_u32.c | 22 + .../arm/mve/intrinsics/vqrdmlahq_n_u8.c | 22 + .../arm/mve/intrinsics/vqrdmlashq_n_s16.c | 22 + .../arm/mve/intrinsics/vqrdmlashq_n_s32.c | 22 + .../arm/mve/intrinsics/vqrdmlashq_n_s8.c | 22 + .../arm/mve/intrinsics/vqrdmlashq_n_u16.c | 22 + .../arm/mve/intrinsics/vqrdmlashq_n_u32.c | 22 + 
.../arm/mve/intrinsics/vqrdmlashq_n_u8.c | 22 + .../arm/mve/intrinsics/vqrdmlsdhq_s16.c | 22 + .../arm/mve/intrinsics/vqrdmlsdhq_s32.c | 22 + .../arm/mve/intrinsics/vqrdmlsdhq_s8.c | 22 + .../arm/mve/intrinsics/vqrdmlsdhxq_s16.c | 22 + .../arm/mve/intrinsics/vqrdmlsdhxq_s32.c | 22 + .../arm/mve/intrinsics/vqrdmlsdhxq_s8.c | 22 + .../arm/mve/intrinsics/vqrshlq_m_n_s16.c | 23 + .../arm/mve/intrinsics/vqrshlq_m_n_s32.c | 23 + .../arm/mve/intrinsics/vqrshlq_m_n_s8.c | 23 + .../arm/mve/intrinsics/vqrshlq_m_n_u16.c | 23 + .../arm/mve/intrinsics/vqrshlq_m_n_u32.c | 23 + .../arm/mve/intrinsics/vqrshlq_m_n_u8.c | 23 + .../arm/mve/intrinsics/vqshlq_m_r_s16.c | 23 + .../arm/mve/intrinsics/vqshlq_m_r_s32.c | 23 + .../arm/mve/intrinsics/vqshlq_m_r_s8.c | 23 + .../arm/mve/intrinsics/vqshlq_m_r_u16.c | 23 + .../arm/mve/intrinsics/vqshlq_m_r_u32.c | 23 + .../arm/mve/intrinsics/vqshlq_m_r_u8.c | 23 + .../arm/mve/intrinsics/vrev64q_m_s16.c | 23 + .../arm/mve/intrinsics/vrev64q_m_s32.c | 23 + .../arm/mve/intrinsics/vrev64q_m_s8.c | 23 + .../arm/mve/intrinsics/vrev64q_m_u16.c | 23 + .../arm/mve/intrinsics/vrev64q_m_u32.c | 23 + .../arm/mve/intrinsics/vrev64q_m_u8.c | 23 + .../arm/mve/intrinsics/vrshlq_m_n_s16.c | 23 + .../arm/mve/intrinsics/vrshlq_m_n_s32.c | 23 + .../arm/mve/intrinsics/vrshlq_m_n_s8.c | 23 + .../arm/mve/intrinsics/vrshlq_m_n_u16.c | 23 + .../arm/mve/intrinsics/vrshlq_m_n_u32.c | 23 + .../arm/mve/intrinsics/vrshlq_m_n_u8.c | 23 + .../arm/mve/intrinsics/vshlq_m_r_s16.c | 23 + .../arm/mve/intrinsics/vshlq_m_r_s32.c | 23 + .../arm/mve/intrinsics/vshlq_m_r_s8.c | 23 + .../arm/mve/intrinsics/vshlq_m_r_u16.c | 23 + .../arm/mve/intrinsics/vshlq_m_r_u32.c | 23 + .../arm/mve/intrinsics/vshlq_m_r_u8.c | 23 + .../arm/mve/intrinsics/vsliq_n_s16.c | 22 + .../arm/mve/intrinsics/vsliq_n_s32.c | 22 + .../arm/mve/intrinsics/vsliq_n_s8.c | 22 + .../arm/mve/intrinsics/vsliq_n_u16.c | 22 + .../arm/mve/intrinsics/vsliq_n_u32.c | 22 + .../arm/mve/intrinsics/vsliq_n_u8.c | 22 + .../arm/mve/intrinsics/vsriq_n_s16.c | 22 + .../arm/mve/intrinsics/vsriq_n_s32.c | 22 + .../arm/mve/intrinsics/vsriq_n_s8.c | 22 + .../arm/mve/intrinsics/vsriq_n_u16.c | 22 + .../arm/mve/intrinsics/vsriq_n_u32.c | 22 + .../arm/mve/intrinsics/vsriq_n_u8.c | 22 + 266 files changed, 10779 insertions(+), 50 deletions(-) create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u16.c create 
mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s8.c create mode 100644 
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s8.c create mode 100644 
gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s8.c create mode 100644 
gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s64.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u64.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s16.c create mode 100644 
gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u8.c create mode 100644 
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s8.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u16.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u32.c create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u8.c diff --git a/gcc/ChangeLog b/gcc/ChangeLog index fcd02723866..ebbdb8eac5f 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,682 @@ +2020-03-18 Andre Vieira + Mihail Ionescu + Srinath Parvathaneni + + * config/arm/arm_mve.h (vpselq_u8): Define macro. + (vpselq_s8): Likewise. + (vrev64q_m_u8): Likewise. + (vqrdmlashq_n_u8): Likewise. + (vqrdmlahq_n_u8): Likewise. + (vqdmlahq_n_u8): Likewise. + (vmvnq_m_u8): Likewise. + (vmlasq_n_u8): Likewise. + (vmlaq_n_u8): Likewise. + (vmladavq_p_u8): Likewise. + (vmladavaq_u8): Likewise. + (vminvq_p_u8): Likewise. + (vmaxvq_p_u8): Likewise. + (vdupq_m_n_u8): Likewise. + (vcmpneq_m_u8): Likewise. + (vcmpneq_m_n_u8): Likewise. + (vcmphiq_m_u8): Likewise. + (vcmphiq_m_n_u8): Likewise. + (vcmpeqq_m_u8): Likewise. + (vcmpeqq_m_n_u8): Likewise. + (vcmpcsq_m_u8): Likewise. + (vcmpcsq_m_n_u8): Likewise. + (vclzq_m_u8): Likewise. + (vaddvaq_p_u8): Likewise. + (vsriq_n_u8): Likewise. + (vsliq_n_u8): Likewise. + (vshlq_m_r_u8): Likewise. + (vrshlq_m_n_u8): Likewise. + (vqshlq_m_r_u8): Likewise. + (vqrshlq_m_n_u8): Likewise. + (vminavq_p_s8): Likewise. + (vminaq_m_s8): Likewise. + (vmaxavq_p_s8): Likewise. + (vmaxaq_m_s8): Likewise. + (vcmpneq_m_s8): Likewise. + (vcmpneq_m_n_s8): Likewise. + (vcmpltq_m_s8): Likewise. + (vcmpltq_m_n_s8): Likewise. + (vcmpleq_m_s8): Likewise. + (vcmpleq_m_n_s8): Likewise. + (vcmpgtq_m_s8): Likewise. + (vcmpgtq_m_n_s8): Likewise. + (vcmpgeq_m_s8): Likewise. + (vcmpgeq_m_n_s8): Likewise. + (vcmpeqq_m_s8): Likewise. + (vcmpeqq_m_n_s8): Likewise. + (vshlq_m_r_s8): Likewise. + (vrshlq_m_n_s8): Likewise. + (vrev64q_m_s8): Likewise. + (vqshlq_m_r_s8): Likewise. + (vqrshlq_m_n_s8): Likewise. + (vqnegq_m_s8): Likewise. + (vqabsq_m_s8): Likewise. + (vnegq_m_s8): Likewise. + (vmvnq_m_s8): Likewise. + (vmlsdavxq_p_s8): Likewise. + (vmlsdavq_p_s8): Likewise. + (vmladavxq_p_s8): Likewise. + (vmladavq_p_s8): Likewise. + (vminvq_p_s8): Likewise. + (vmaxvq_p_s8): Likewise. + (vdupq_m_n_s8): Likewise. + (vclzq_m_s8): Likewise. + (vclsq_m_s8): Likewise. + (vaddvaq_p_s8): Likewise. + (vabsq_m_s8): Likewise. + (vqrdmlsdhxq_s8): Likewise. + (vqrdmlsdhq_s8): Likewise. + (vqrdmlashq_n_s8): Likewise. 
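[ Illustration, not part of the patch: a minimal sketch of how the new
  predicated compare and vector-select intrinsics combine, assuming an
  MVE-enabled toolchain (e.g. -march=armv8.1-m.main+mve -mfloat-abi=hard);
  the function name is invented. ]

#include "arm_mve.h"

/* Keep the lanes of A that compare equal to B and are active in the
   incoming predicate P; take the corresponding lane of B otherwise.  */
uint8x16_t
select_equal_lanes (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
{
  mve_pred16_t eq = vcmpeqq_m_u8 (a, b, p);
  return vpselq_u8 (a, b, eq);
}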
+ (vqrdmlahq_n_s8): Likewise. + (vqrdmladhxq_s8): Likewise. + (vqrdmladhq_s8): Likewise. + (vqdmlsdhxq_s8): Likewise. + (vqdmlsdhq_s8): Likewise. + (vqdmlahq_n_s8): Likewise. + (vqdmladhxq_s8): Likewise. + (vqdmladhq_s8): Likewise. + (vmlsdavaxq_s8): Likewise. + (vmlsdavaq_s8): Likewise. + (vmlasq_n_s8): Likewise. + (vmlaq_n_s8): Likewise. + (vmladavaxq_s8): Likewise. + (vmladavaq_s8): Likewise. + (vsriq_n_s8): Likewise. + (vsliq_n_s8): Likewise. + (vpselq_u16): Likewise. + (vpselq_s16): Likewise. + (vrev64q_m_u16): Likewise. + (vqrdmlashq_n_u16): Likewise. + (vqrdmlahq_n_u16): Likewise. + (vqdmlahq_n_u16): Likewise. + (vmvnq_m_u16): Likewise. + (vmlasq_n_u16): Likewise. + (vmlaq_n_u16): Likewise. + (vmladavq_p_u16): Likewise. + (vmladavaq_u16): Likewise. + (vminvq_p_u16): Likewise. + (vmaxvq_p_u16): Likewise. + (vdupq_m_n_u16): Likewise. + (vcmpneq_m_u16): Likewise. + (vcmpneq_m_n_u16): Likewise. + (vcmphiq_m_u16): Likewise. + (vcmphiq_m_n_u16): Likewise. + (vcmpeqq_m_u16): Likewise. + (vcmpeqq_m_n_u16): Likewise. + (vcmpcsq_m_u16): Likewise. + (vcmpcsq_m_n_u16): Likewise. + (vclzq_m_u16): Likewise. + (vaddvaq_p_u16): Likewise. + (vsriq_n_u16): Likewise. + (vsliq_n_u16): Likewise. + (vshlq_m_r_u16): Likewise. + (vrshlq_m_n_u16): Likewise. + (vqshlq_m_r_u16): Likewise. + (vqrshlq_m_n_u16): Likewise. + (vminavq_p_s16): Likewise. + (vminaq_m_s16): Likewise. + (vmaxavq_p_s16): Likewise. + (vmaxaq_m_s16): Likewise. + (vcmpneq_m_s16): Likewise. + (vcmpneq_m_n_s16): Likewise. + (vcmpltq_m_s16): Likewise. + (vcmpltq_m_n_s16): Likewise. + (vcmpleq_m_s16): Likewise. + (vcmpleq_m_n_s16): Likewise. + (vcmpgtq_m_s16): Likewise. + (vcmpgtq_m_n_s16): Likewise. + (vcmpgeq_m_s16): Likewise. + (vcmpgeq_m_n_s16): Likewise. + (vcmpeqq_m_s16): Likewise. + (vcmpeqq_m_n_s16): Likewise. + (vshlq_m_r_s16): Likewise. + (vrshlq_m_n_s16): Likewise. + (vrev64q_m_s16): Likewise. + (vqshlq_m_r_s16): Likewise. + (vqrshlq_m_n_s16): Likewise. + (vqnegq_m_s16): Likewise. + (vqabsq_m_s16): Likewise. + (vnegq_m_s16): Likewise. + (vmvnq_m_s16): Likewise. + (vmlsdavxq_p_s16): Likewise. + (vmlsdavq_p_s16): Likewise. + (vmladavxq_p_s16): Likewise. + (vmladavq_p_s16): Likewise. + (vminvq_p_s16): Likewise. + (vmaxvq_p_s16): Likewise. + (vdupq_m_n_s16): Likewise. + (vclzq_m_s16): Likewise. + (vclsq_m_s16): Likewise. + (vaddvaq_p_s16): Likewise. + (vabsq_m_s16): Likewise. + (vqrdmlsdhxq_s16): Likewise. + (vqrdmlsdhq_s16): Likewise. + (vqrdmlashq_n_s16): Likewise. + (vqrdmlahq_n_s16): Likewise. + (vqrdmladhxq_s16): Likewise. + (vqrdmladhq_s16): Likewise. + (vqdmlsdhxq_s16): Likewise. + (vqdmlsdhq_s16): Likewise. + (vqdmlahq_n_s16): Likewise. + (vqdmladhxq_s16): Likewise. + (vqdmladhq_s16): Likewise. + (vmlsdavaxq_s16): Likewise. + (vmlsdavaq_s16): Likewise. + (vmlasq_n_s16): Likewise. + (vmlaq_n_s16): Likewise. + (vmladavaxq_s16): Likewise. + (vmladavaq_s16): Likewise. + (vsriq_n_s16): Likewise. + (vsliq_n_s16): Likewise. + (vpselq_u32): Likewise. + (vpselq_s32): Likewise. + (vrev64q_m_u32): Likewise. + (vqrdmlashq_n_u32): Likewise. + (vqrdmlahq_n_u32): Likewise. + (vqdmlahq_n_u32): Likewise. + (vmvnq_m_u32): Likewise. + (vmlasq_n_u32): Likewise. + (vmlaq_n_u32): Likewise. + (vmladavq_p_u32): Likewise. + (vmladavaq_u32): Likewise. + (vminvq_p_u32): Likewise. + (vmaxvq_p_u32): Likewise. + (vdupq_m_n_u32): Likewise. + (vcmpneq_m_u32): Likewise. + (vcmpneq_m_n_u32): Likewise. + (vcmphiq_m_u32): Likewise. + (vcmphiq_m_n_u32): Likewise. + (vcmpeqq_m_u32): Likewise. + (vcmpeqq_m_n_u32): Likewise. + (vcmpcsq_m_u32): Likewise. 
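[ Illustration, not part of the patch: the shift-insert intrinsics take a
  compile-time constant shift count (0-15 for 16-bit lanes in the vsliq_n
  case), the same sort of small immediate the new constraints and
  predicates describe.  Invented function name, MVE-enabled toolchain
  assumed. ]

#include "arm_mve.h"

/* Shift each 16-bit lane of B left by three and insert it into the
   corresponding lane of A; the three least-significant bits of each
   lane of A are preserved (VSLI.16).  */
uint16x8_t
shift_left_insert (uint16x8_t a, uint16x8_t b)
{
  return vsliq_n_u16 (a, b, 3);
}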
+ (vcmpcsq_m_n_u32): Likewise. + (vclzq_m_u32): Likewise. + (vaddvaq_p_u32): Likewise. + (vsriq_n_u32): Likewise. + (vsliq_n_u32): Likewise. + (vshlq_m_r_u32): Likewise. + (vrshlq_m_n_u32): Likewise. + (vqshlq_m_r_u32): Likewise. + (vqrshlq_m_n_u32): Likewise. + (vminavq_p_s32): Likewise. + (vminaq_m_s32): Likewise. + (vmaxavq_p_s32): Likewise. + (vmaxaq_m_s32): Likewise. + (vcmpneq_m_s32): Likewise. + (vcmpneq_m_n_s32): Likewise. + (vcmpltq_m_s32): Likewise. + (vcmpltq_m_n_s32): Likewise. + (vcmpleq_m_s32): Likewise. + (vcmpleq_m_n_s32): Likewise. + (vcmpgtq_m_s32): Likewise. + (vcmpgtq_m_n_s32): Likewise. + (vcmpgeq_m_s32): Likewise. + (vcmpgeq_m_n_s32): Likewise. + (vcmpeqq_m_s32): Likewise. + (vcmpeqq_m_n_s32): Likewise. + (vshlq_m_r_s32): Likewise. + (vrshlq_m_n_s32): Likewise. + (vrev64q_m_s32): Likewise. + (vqshlq_m_r_s32): Likewise. + (vqrshlq_m_n_s32): Likewise. + (vqnegq_m_s32): Likewise. + (vqabsq_m_s32): Likewise. + (vnegq_m_s32): Likewise. + (vmvnq_m_s32): Likewise. + (vmlsdavxq_p_s32): Likewise. + (vmlsdavq_p_s32): Likewise. + (vmladavxq_p_s32): Likewise. + (vmladavq_p_s32): Likewise. + (vminvq_p_s32): Likewise. + (vmaxvq_p_s32): Likewise. + (vdupq_m_n_s32): Likewise. + (vclzq_m_s32): Likewise. + (vclsq_m_s32): Likewise. + (vaddvaq_p_s32): Likewise. + (vabsq_m_s32): Likewise. + (vqrdmlsdhxq_s32): Likewise. + (vqrdmlsdhq_s32): Likewise. + (vqrdmlashq_n_s32): Likewise. + (vqrdmlahq_n_s32): Likewise. + (vqrdmladhxq_s32): Likewise. + (vqrdmladhq_s32): Likewise. + (vqdmlsdhxq_s32): Likewise. + (vqdmlsdhq_s32): Likewise. + (vqdmlahq_n_s32): Likewise. + (vqdmladhxq_s32): Likewise. + (vqdmladhq_s32): Likewise. + (vmlsdavaxq_s32): Likewise. + (vmlsdavaq_s32): Likewise. + (vmlasq_n_s32): Likewise. + (vmlaq_n_s32): Likewise. + (vmladavaxq_s32): Likewise. + (vmladavaq_s32): Likewise. + (vsriq_n_s32): Likewise. + (vsliq_n_s32): Likewise. + (vpselq_u64): Likewise. + (vpselq_s64): Likewise. + (__arm_vpselq_u8): Define intrinsic. + (__arm_vpselq_s8): Likewise. + (__arm_vrev64q_m_u8): Likewise. + (__arm_vqrdmlashq_n_u8): Likewise. + (__arm_vqrdmlahq_n_u8): Likewise. + (__arm_vqdmlahq_n_u8): Likewise. + (__arm_vmvnq_m_u8): Likewise. + (__arm_vmlasq_n_u8): Likewise. + (__arm_vmlaq_n_u8): Likewise. + (__arm_vmladavq_p_u8): Likewise. + (__arm_vmladavaq_u8): Likewise. + (__arm_vminvq_p_u8): Likewise. + (__arm_vmaxvq_p_u8): Likewise. + (__arm_vdupq_m_n_u8): Likewise. + (__arm_vcmpneq_m_u8): Likewise. + (__arm_vcmpneq_m_n_u8): Likewise. + (__arm_vcmphiq_m_u8): Likewise. + (__arm_vcmphiq_m_n_u8): Likewise. + (__arm_vcmpeqq_m_u8): Likewise. + (__arm_vcmpeqq_m_n_u8): Likewise. + (__arm_vcmpcsq_m_u8): Likewise. + (__arm_vcmpcsq_m_n_u8): Likewise. + (__arm_vclzq_m_u8): Likewise. + (__arm_vaddvaq_p_u8): Likewise. + (__arm_vsriq_n_u8): Likewise. + (__arm_vsliq_n_u8): Likewise. + (__arm_vshlq_m_r_u8): Likewise. + (__arm_vrshlq_m_n_u8): Likewise. + (__arm_vqshlq_m_r_u8): Likewise. + (__arm_vqrshlq_m_n_u8): Likewise. + (__arm_vminavq_p_s8): Likewise. + (__arm_vminaq_m_s8): Likewise. + (__arm_vmaxavq_p_s8): Likewise. + (__arm_vmaxaq_m_s8): Likewise. + (__arm_vcmpneq_m_s8): Likewise. + (__arm_vcmpneq_m_n_s8): Likewise. + (__arm_vcmpltq_m_s8): Likewise. + (__arm_vcmpltq_m_n_s8): Likewise. + (__arm_vcmpleq_m_s8): Likewise. + (__arm_vcmpleq_m_n_s8): Likewise. + (__arm_vcmpgtq_m_s8): Likewise. + (__arm_vcmpgtq_m_n_s8): Likewise. + (__arm_vcmpgeq_m_s8): Likewise. + (__arm_vcmpgeq_m_n_s8): Likewise. + (__arm_vcmpeqq_m_s8): Likewise. + (__arm_vcmpeqq_m_n_s8): Likewise. + (__arm_vshlq_m_r_s8): Likewise. 
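[ Illustration, not part of the patch: the _m (merging) forms take an
  explicit inactive vector whose lanes are kept wherever the predicate
  bit is clear.  Invented function name, MVE-enabled toolchain assumed. ]

#include "arm_mve.h"

/* Broadcast VALUE into the lanes of DEST selected by P; lanes where P
   is clear keep their previous contents.  */
uint8x16_t
splat_selected_lanes (uint8x16_t dest, uint8_t value, mve_pred16_t p)
{
  return vdupq_m_n_u8 (dest, value, p);
}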
+ (__arm_vrshlq_m_n_s8): Likewise. + (__arm_vrev64q_m_s8): Likewise. + (__arm_vqshlq_m_r_s8): Likewise. + (__arm_vqrshlq_m_n_s8): Likewise. + (__arm_vqnegq_m_s8): Likewise. + (__arm_vqabsq_m_s8): Likewise. + (__arm_vnegq_m_s8): Likewise. + (__arm_vmvnq_m_s8): Likewise. + (__arm_vmlsdavxq_p_s8): Likewise. + (__arm_vmlsdavq_p_s8): Likewise. + (__arm_vmladavxq_p_s8): Likewise. + (__arm_vmladavq_p_s8): Likewise. + (__arm_vminvq_p_s8): Likewise. + (__arm_vmaxvq_p_s8): Likewise. + (__arm_vdupq_m_n_s8): Likewise. + (__arm_vclzq_m_s8): Likewise. + (__arm_vclsq_m_s8): Likewise. + (__arm_vaddvaq_p_s8): Likewise. + (__arm_vabsq_m_s8): Likewise. + (__arm_vqrdmlsdhxq_s8): Likewise. + (__arm_vqrdmlsdhq_s8): Likewise. + (__arm_vqrdmlashq_n_s8): Likewise. + (__arm_vqrdmlahq_n_s8): Likewise. + (__arm_vqrdmladhxq_s8): Likewise. + (__arm_vqrdmladhq_s8): Likewise. + (__arm_vqdmlsdhxq_s8): Likewise. + (__arm_vqdmlsdhq_s8): Likewise. + (__arm_vqdmlahq_n_s8): Likewise. + (__arm_vqdmladhxq_s8): Likewise. + (__arm_vqdmladhq_s8): Likewise. + (__arm_vmlsdavaxq_s8): Likewise. + (__arm_vmlsdavaq_s8): Likewise. + (__arm_vmlasq_n_s8): Likewise. + (__arm_vmlaq_n_s8): Likewise. + (__arm_vmladavaxq_s8): Likewise. + (__arm_vmladavaq_s8): Likewise. + (__arm_vsriq_n_s8): Likewise. + (__arm_vsliq_n_s8): Likewise. + (__arm_vpselq_u16): Likewise. + (__arm_vpselq_s16): Likewise. + (__arm_vrev64q_m_u16): Likewise. + (__arm_vqrdmlashq_n_u16): Likewise. + (__arm_vqrdmlahq_n_u16): Likewise. + (__arm_vqdmlahq_n_u16): Likewise. + (__arm_vmvnq_m_u16): Likewise. + (__arm_vmlasq_n_u16): Likewise. + (__arm_vmlaq_n_u16): Likewise. + (__arm_vmladavq_p_u16): Likewise. + (__arm_vmladavaq_u16): Likewise. + (__arm_vminvq_p_u16): Likewise. + (__arm_vmaxvq_p_u16): Likewise. + (__arm_vdupq_m_n_u16): Likewise. + (__arm_vcmpneq_m_u16): Likewise. + (__arm_vcmpneq_m_n_u16): Likewise. + (__arm_vcmphiq_m_u16): Likewise. + (__arm_vcmphiq_m_n_u16): Likewise. + (__arm_vcmpeqq_m_u16): Likewise. + (__arm_vcmpeqq_m_n_u16): Likewise. + (__arm_vcmpcsq_m_u16): Likewise. + (__arm_vcmpcsq_m_n_u16): Likewise. + (__arm_vclzq_m_u16): Likewise. + (__arm_vaddvaq_p_u16): Likewise. + (__arm_vsriq_n_u16): Likewise. + (__arm_vsliq_n_u16): Likewise. + (__arm_vshlq_m_r_u16): Likewise. + (__arm_vrshlq_m_n_u16): Likewise. + (__arm_vqshlq_m_r_u16): Likewise. + (__arm_vqrshlq_m_n_u16): Likewise. + (__arm_vminavq_p_s16): Likewise. + (__arm_vminaq_m_s16): Likewise. + (__arm_vmaxavq_p_s16): Likewise. + (__arm_vmaxaq_m_s16): Likewise. + (__arm_vcmpneq_m_s16): Likewise. + (__arm_vcmpneq_m_n_s16): Likewise. + (__arm_vcmpltq_m_s16): Likewise. + (__arm_vcmpltq_m_n_s16): Likewise. + (__arm_vcmpleq_m_s16): Likewise. + (__arm_vcmpleq_m_n_s16): Likewise. + (__arm_vcmpgtq_m_s16): Likewise. + (__arm_vcmpgtq_m_n_s16): Likewise. + (__arm_vcmpgeq_m_s16): Likewise. + (__arm_vcmpgeq_m_n_s16): Likewise. + (__arm_vcmpeqq_m_s16): Likewise. + (__arm_vcmpeqq_m_n_s16): Likewise. + (__arm_vshlq_m_r_s16): Likewise. + (__arm_vrshlq_m_n_s16): Likewise. + (__arm_vrev64q_m_s16): Likewise. + (__arm_vqshlq_m_r_s16): Likewise. + (__arm_vqrshlq_m_n_s16): Likewise. + (__arm_vqnegq_m_s16): Likewise. + (__arm_vqabsq_m_s16): Likewise. + (__arm_vnegq_m_s16): Likewise. + (__arm_vmvnq_m_s16): Likewise. + (__arm_vmlsdavxq_p_s16): Likewise. + (__arm_vmlsdavq_p_s16): Likewise. + (__arm_vmladavxq_p_s16): Likewise. + (__arm_vmladavq_p_s16): Likewise. + (__arm_vminvq_p_s16): Likewise. + (__arm_vmaxvq_p_s16): Likewise. + (__arm_vdupq_m_n_s16): Likewise. + (__arm_vclzq_m_s16): Likewise. + (__arm_vclsq_m_s16): Likewise. 
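[ Illustration, not part of the patch: the predicated reductions return a
  scalar and only consider the lanes enabled by the predicate; the
  min/max forms additionally carry a scalar running value in as their
  first argument.  Invented function names, MVE-enabled toolchain
  assumed. ]

#include "arm_mve.h"

/* Sum of A[i] * B[i] over the lanes enabled by P.  */
int32_t
masked_dot_product (int16x8_t a, int16x8_t b, mve_pred16_t p)
{
  return vmladavq_p_s16 (a, b, p);
}

/* Minimum of CUR and the active lanes of A.  */
int16_t
masked_min (int16_t cur, int16x8_t a, mve_pred16_t p)
{
  return vminvq_p_s16 (cur, a, p);
}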
+ (__arm_vaddvaq_p_s16): Likewise. + (__arm_vabsq_m_s16): Likewise. + (__arm_vqrdmlsdhxq_s16): Likewise. + (__arm_vqrdmlsdhq_s16): Likewise. + (__arm_vqrdmlashq_n_s16): Likewise. + (__arm_vqrdmlahq_n_s16): Likewise. + (__arm_vqrdmladhxq_s16): Likewise. + (__arm_vqrdmladhq_s16): Likewise. + (__arm_vqdmlsdhxq_s16): Likewise. + (__arm_vqdmlsdhq_s16): Likewise. + (__arm_vqdmlahq_n_s16): Likewise. + (__arm_vqdmladhxq_s16): Likewise. + (__arm_vqdmladhq_s16): Likewise. + (__arm_vmlsdavaxq_s16): Likewise. + (__arm_vmlsdavaq_s16): Likewise. + (__arm_vmlasq_n_s16): Likewise. + (__arm_vmlaq_n_s16): Likewise. + (__arm_vmladavaxq_s16): Likewise. + (__arm_vmladavaq_s16): Likewise. + (__arm_vsriq_n_s16): Likewise. + (__arm_vsliq_n_s16): Likewise. + (__arm_vpselq_u32): Likewise. + (__arm_vpselq_s32): Likewise. + (__arm_vrev64q_m_u32): Likewise. + (__arm_vqrdmlashq_n_u32): Likewise. + (__arm_vqrdmlahq_n_u32): Likewise. + (__arm_vqdmlahq_n_u32): Likewise. + (__arm_vmvnq_m_u32): Likewise. + (__arm_vmlasq_n_u32): Likewise. + (__arm_vmlaq_n_u32): Likewise. + (__arm_vmladavq_p_u32): Likewise. + (__arm_vmladavaq_u32): Likewise. + (__arm_vminvq_p_u32): Likewise. + (__arm_vmaxvq_p_u32): Likewise. + (__arm_vdupq_m_n_u32): Likewise. + (__arm_vcmpneq_m_u32): Likewise. + (__arm_vcmpneq_m_n_u32): Likewise. + (__arm_vcmphiq_m_u32): Likewise. + (__arm_vcmphiq_m_n_u32): Likewise. + (__arm_vcmpeqq_m_u32): Likewise. + (__arm_vcmpeqq_m_n_u32): Likewise. + (__arm_vcmpcsq_m_u32): Likewise. + (__arm_vcmpcsq_m_n_u32): Likewise. + (__arm_vclzq_m_u32): Likewise. + (__arm_vaddvaq_p_u32): Likewise. + (__arm_vsriq_n_u32): Likewise. + (__arm_vsliq_n_u32): Likewise. + (__arm_vshlq_m_r_u32): Likewise. + (__arm_vrshlq_m_n_u32): Likewise. + (__arm_vqshlq_m_r_u32): Likewise. + (__arm_vqrshlq_m_n_u32): Likewise. + (__arm_vminavq_p_s32): Likewise. + (__arm_vminaq_m_s32): Likewise. + (__arm_vmaxavq_p_s32): Likewise. + (__arm_vmaxaq_m_s32): Likewise. + (__arm_vcmpneq_m_s32): Likewise. + (__arm_vcmpneq_m_n_s32): Likewise. + (__arm_vcmpltq_m_s32): Likewise. + (__arm_vcmpltq_m_n_s32): Likewise. + (__arm_vcmpleq_m_s32): Likewise. + (__arm_vcmpleq_m_n_s32): Likewise. + (__arm_vcmpgtq_m_s32): Likewise. + (__arm_vcmpgtq_m_n_s32): Likewise. + (__arm_vcmpgeq_m_s32): Likewise. + (__arm_vcmpgeq_m_n_s32): Likewise. + (__arm_vcmpeqq_m_s32): Likewise. + (__arm_vcmpeqq_m_n_s32): Likewise. + (__arm_vshlq_m_r_s32): Likewise. + (__arm_vrshlq_m_n_s32): Likewise. + (__arm_vrev64q_m_s32): Likewise. + (__arm_vqshlq_m_r_s32): Likewise. + (__arm_vqrshlq_m_n_s32): Likewise. + (__arm_vqnegq_m_s32): Likewise. + (__arm_vqabsq_m_s32): Likewise. + (__arm_vnegq_m_s32): Likewise. + (__arm_vmvnq_m_s32): Likewise. + (__arm_vmlsdavxq_p_s32): Likewise. + (__arm_vmlsdavq_p_s32): Likewise. + (__arm_vmladavxq_p_s32): Likewise. + (__arm_vmladavq_p_s32): Likewise. + (__arm_vminvq_p_s32): Likewise. + (__arm_vmaxvq_p_s32): Likewise. + (__arm_vdupq_m_n_s32): Likewise. + (__arm_vclzq_m_s32): Likewise. + (__arm_vclsq_m_s32): Likewise. + (__arm_vaddvaq_p_s32): Likewise. + (__arm_vabsq_m_s32): Likewise. + (__arm_vqrdmlsdhxq_s32): Likewise. + (__arm_vqrdmlsdhq_s32): Likewise. + (__arm_vqrdmlashq_n_s32): Likewise. + (__arm_vqrdmlahq_n_s32): Likewise. + (__arm_vqrdmladhxq_s32): Likewise. + (__arm_vqrdmladhq_s32): Likewise. + (__arm_vqdmlsdhxq_s32): Likewise. + (__arm_vqdmlsdhq_s32): Likewise. + (__arm_vqdmlahq_n_s32): Likewise. + (__arm_vqdmladhxq_s32): Likewise. + (__arm_vqdmladhq_s32): Likewise. + (__arm_vmlsdavaxq_s32): Likewise. + (__arm_vmlsdavaq_s32): Likewise. 
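[ Illustration, not part of the patch: the accumulating multiply forms
  take the accumulator as their first argument.  Invented function
  names, MVE-enabled toolchain assumed. ]

#include "arm_mve.h"

/* Per-lane multiply-accumulate with a scalar: result lane i is
   ACC[i] + A[i] * SCALE (VMLA).  */
int16x8_t
scale_and_accumulate (int16x8_t acc, int16x8_t a, int16_t scale)
{
  return vmlaq_n_s16 (acc, a, scale);
}

/* Dot product folded into a scalar accumulator: ACC plus the sum of
   A[i] * B[i] over all lanes (VMLADAVA).  */
int32_t
dot_accumulate (int32_t acc, int16x8_t a, int16x8_t b)
{
  return vmladavaq_s16 (acc, a, b);
}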
+ (__arm_vmlasq_n_s32): Likewise. + (__arm_vmlaq_n_s32): Likewise. + (__arm_vmladavaxq_s32): Likewise. + (__arm_vmladavaq_s32): Likewise. + (__arm_vsriq_n_s32): Likewise. + (__arm_vsliq_n_s32): Likewise. + (__arm_vpselq_u64): Likewise. + (__arm_vpselq_s64): Likewise. + (vcmpneq_m_n): Define polymorphic variant. + (vcmpneq_m): Likewise. + (vqrdmlsdhq): Likewise. + (vqrdmlsdhxq): Likewise. + (vqrshlq_m_n): Likewise. + (vqshlq_m_r): Likewise. + (vrev64q_m): Likewise. + (vrshlq_m_n): Likewise. + (vshlq_m_r): Likewise. + (vsliq_n): Likewise. + (vsriq_n): Likewise. + (vqrdmlashq_n): Likewise. + (vqrdmlahq): Likewise. + (vqrdmladhxq): Likewise. + (vqrdmladhq): Likewise. + (vqnegq_m): Likewise. + (vqdmlsdhxq): Likewise. + (vabsq_m): Likewise. + (vclsq_m): Likewise. + (vclzq_m): Likewise. + (vcmpgeq_m): Likewise. + (vcmpgeq_m_n): Likewise. + (vdupq_m_n): Likewise. + (vmaxaq_m): Likewise. + (vmlaq_n): Likewise. + (vmlasq_n): Likewise. + (vmvnq_m): Likewise. + (vnegq_m): Likewise. + (vpselq): Likewise. + (vqdmlahq_n): Likewise. + (vqrdmlahq_n): Likewise. + (vqdmlsdhq): Likewise. + (vqdmladhq): Likewise. + (vqabsq_m): Likewise. + (vminaq_m): Likewise. + (vrmlaldavhaq): Likewise. + (vmlsdavxq_p): Likewise. + (vmlsdavq_p): Likewise. + (vmlsdavaxq): Likewise. + (vmlsdavaq): Likewise. + (vaddvaq_p): Likewise. + (vcmpcsq_m_n): Likewise. + (vcmpcsq_m): Likewise. + (vcmpeqq_m_n): Likewise. + (vcmpeqq_m): Likewise. + (vmladavxq_p): Likewise. + (vmladavq_p): Likewise. + (vmladavaxq): Likewise. + (vmladavaq): Likewise. + (vminvq_p): Likewise. + (vminavq_p): Likewise. + (vmaxvq_p): Likewise. + (vmaxavq_p): Likewise. + (vcmpltq_m_n): Likewise. + (vcmpltq_m): Likewise. + (vcmpleq_m): Likewise. + (vcmpleq_m_n): Likewise. + (vcmphiq_m_n): Likewise. + (vcmphiq_m): Likewise. + (vcmpgtq_m_n): Likewise. + (vcmpgtq_m): Likewise. + * config/arm/arm_mve_builtins.def (TERNOP_NONE_NONE_NONE_IMM): Use + builtin qualifier. + (TERNOP_NONE_NONE_NONE_NONE): Likewise. + (TERNOP_NONE_NONE_NONE_UNONE): Likewise. + (TERNOP_UNONE_NONE_NONE_UNONE): Likewise. + (TERNOP_UNONE_UNONE_NONE_UNONE): Likewise. + (TERNOP_UNONE_UNONE_UNONE_IMM): Likewise. + (TERNOP_UNONE_UNONE_UNONE_UNONE): Likewise. + * config/arm/constraints.md (Rc): Define constraint to check constant is + in the range of 0 to 15. + (Re): Define constraint to check constant is in the range of 0 to 31. + * config/arm/mve.md (VADDVAQ_P): Define iterator. + (VCLZQ_M): Likewise. + (VCMPEQQ_M_N): Likewise. + (VCMPEQQ_M): Likewise. + (VCMPNEQ_M_N): Likewise. + (VCMPNEQ_M): Likewise. + (VDUPQ_M_N): Likewise. + (VMAXVQ_P): Likewise. + (VMINVQ_P): Likewise. + (VMLADAVAQ): Likewise. + (VMLADAVQ_P): Likewise. + (VMLAQ_N): Likewise. + (VMLASQ_N): Likewise. + (VMVNQ_M): Likewise. + (VPSELQ): Likewise. + (VQDMLAHQ_N): Likewise. + (VQRDMLAHQ_N): Likewise. + (VQRDMLASHQ_N): Likewise. + (VQRSHLQ_M_N): Likewise. + (VQSHLQ_M_R): Likewise. + (VREV64Q_M): Likewise. + (VRSHLQ_M_N): Likewise. + (VSHLQ_M_R): Likewise. + (VSLIQ_N): Likewise. + (VSRIQ_N): Likewise. + (mve_vabsq_m_s): Define RTL pattern. + (mve_vaddvaq_p_): Likewise. + (mve_vclsq_m_s): Likewise. + (mve_vclzq_m_): Likewise. + (mve_vcmpcsq_m_n_u): Likewise. + (mve_vcmpcsq_m_u): Likewise. + (mve_vcmpeqq_m_n_): Likewise. + (mve_vcmpeqq_m_): Likewise. + (mve_vcmpgeq_m_n_s): Likewise. + (mve_vcmpgeq_m_s): Likewise. + (mve_vcmpgtq_m_n_s): Likewise. + (mve_vcmpgtq_m_s): Likewise. + (mve_vcmphiq_m_n_u): Likewise. + (mve_vcmphiq_m_u): Likewise. + (mve_vcmpleq_m_n_s): Likewise. + (mve_vcmpleq_m_s): Likewise. + (mve_vcmpltq_m_n_s): Likewise. 
): Likewise.
+ (mve_vcmpltq_m_s): Likewise. + (mve_vcmpneq_m_n_): Likewise. + (mve_vcmpneq_m_): Likewise. + (mve_vdupq_m_n_): Likewise. + (mve_vmaxaq_m_s): Likewise. + (mve_vmaxavq_p_s): Likewise. + (mve_vmaxvq_p_): Likewise. + (mve_vminaq_m_s): Likewise. + (mve_vminavq_p_s): Likewise. + (mve_vminvq_p_): Likewise. + (mve_vmladavaq_): Likewise. + (mve_vmladavq_p_): Likewise. + (mve_vmladavxq_p_s): Likewise. + (mve_vmlaq_n_): Likewise. + (mve_vmlasq_n_): Likewise. + (mve_vmlsdavq_p_s): Likewise. + (mve_vmlsdavxq_p_s): Likewise. + (mve_vmvnq_m_): Likewise. + (mve_vnegq_m_s): Likewise. + (mve_vpselq_): Likewise. + (mve_vqabsq_m_s): Likewise. + (mve_vqdmlahq_n_): Likewise. + (mve_vqnegq_m_s): Likewise. + (mve_vqrdmladhq_s): Likewise. + (mve_vqrdmladhxq_s): Likewise. + (mve_vqrdmlahq_n_): Likewise. + (mve_vqrdmlashq_n_): Likewise. + (mve_vqrdmlsdhq_s): Likewise. + (mve_vqrdmlsdhxq_s): Likewise. + (mve_vqrshlq_m_n_): Likewise. + (mve_vqshlq_m_r_): Likewise. + (mve_vrev64q_m_): Likewise. + (mve_vrshlq_m_n_): Likewise. + (mve_vshlq_m_r_): Likewise. + (mve_vsliq_n_): Likewise. + (mve_vsriq_n_): Likewise. + (mve_vqdmlsdhxq_s): Likewise. + (mve_vqdmlsdhq_s): Likewise. + (mve_vqdmladhxq_s): Likewise. + (mve_vqdmladhq_s): Likewise. + (mve_vmlsdavaxq_s): Likewise. + (mve_vmlsdavaq_s): Likewise. + (mve_vmladavaxq_s): Likewise. + * config/arm/predicates.md (mve_imm_15):Define predicate to check the + matching constraint Rc. + (mve_imm_31): Define predicate to check the matching constraint Re. + 2020-03-18 Andrew Stubbs * config/gcn/gcn-valu.md (vec_cmpdi): Set operand 1 to DImode. diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h index c15bb8b38fb..f852c68d9ae 100644 --- a/gcc/config/arm/arm_mve.h +++ b/gcc/config/arm/arm_mve.h @@ -776,6 +776,263 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t; #define vabavq_u8(__a, __b, __c) __arm_vabavq_u8(__a, __b, __c) #define vabavq_u16(__a, __b, __c) __arm_vabavq_u16(__a, __b, __c) #define vabavq_u32(__a, __b, __c) __arm_vabavq_u32(__a, __b, __c) +#define vpselq_u8(__a, __b, __p) __arm_vpselq_u8(__a, __b, __p) +#define vpselq_s8(__a, __b, __p) __arm_vpselq_s8(__a, __b, __p) +#define vrev64q_m_u8(__inactive, __a, __p) __arm_vrev64q_m_u8(__inactive, __a, __p) +#define vqrdmlashq_n_u8(__a, __b, __c) __arm_vqrdmlashq_n_u8(__a, __b, __c) +#define vqrdmlahq_n_u8(__a, __b, __c) __arm_vqrdmlahq_n_u8(__a, __b, __c) +#define vqdmlahq_n_u8(__a, __b, __c) __arm_vqdmlahq_n_u8(__a, __b, __c) +#define vmvnq_m_u8(__inactive, __a, __p) __arm_vmvnq_m_u8(__inactive, __a, __p) +#define vmlasq_n_u8(__a, __b, __c) __arm_vmlasq_n_u8(__a, __b, __c) +#define vmlaq_n_u8(__a, __b, __c) __arm_vmlaq_n_u8(__a, __b, __c) +#define vmladavq_p_u8(__a, __b, __p) __arm_vmladavq_p_u8(__a, __b, __p) +#define vmladavaq_u8(__a, __b, __c) __arm_vmladavaq_u8(__a, __b, __c) +#define vminvq_p_u8(__a, __b, __p) __arm_vminvq_p_u8(__a, __b, __p) +#define vmaxvq_p_u8(__a, __b, __p) __arm_vmaxvq_p_u8(__a, __b, __p) +#define vdupq_m_n_u8(__inactive, __a, __p) __arm_vdupq_m_n_u8(__inactive, __a, __p) +#define vcmpneq_m_u8(__a, __b, __p) __arm_vcmpneq_m_u8(__a, __b, __p) +#define vcmpneq_m_n_u8(__a, __b, __p) __arm_vcmpneq_m_n_u8(__a, __b, __p) +#define vcmphiq_m_u8(__a, __b, __p) __arm_vcmphiq_m_u8(__a, __b, __p) +#define vcmphiq_m_n_u8(__a, __b, __p) __arm_vcmphiq_m_n_u8(__a, __b, __p) +#define vcmpeqq_m_u8(__a, __b, __p) __arm_vcmpeqq_m_u8(__a, __b, __p) +#define vcmpeqq_m_n_u8(__a, __b, __p) __arm_vcmpeqq_m_n_u8(__a, __b, __p) +#define vcmpcsq_m_u8(__a, __b, __p) __arm_vcmpcsq_m_u8(__a, __b, __p) +#define 
vcmpcsq_m_n_u8(__a, __b, __p) __arm_vcmpcsq_m_n_u8(__a, __b, __p) +#define vclzq_m_u8(__inactive, __a, __p) __arm_vclzq_m_u8(__inactive, __a, __p) +#define vaddvaq_p_u8(__a, __b, __p) __arm_vaddvaq_p_u8(__a, __b, __p) +#define vsriq_n_u8(__a, __b, __imm) __arm_vsriq_n_u8(__a, __b, __imm) +#define vsliq_n_u8(__a, __b, __imm) __arm_vsliq_n_u8(__a, __b, __imm) +#define vshlq_m_r_u8(__a, __b, __p) __arm_vshlq_m_r_u8(__a, __b, __p) +#define vrshlq_m_n_u8(__a, __b, __p) __arm_vrshlq_m_n_u8(__a, __b, __p) +#define vqshlq_m_r_u8(__a, __b, __p) __arm_vqshlq_m_r_u8(__a, __b, __p) +#define vqrshlq_m_n_u8(__a, __b, __p) __arm_vqrshlq_m_n_u8(__a, __b, __p) +#define vminavq_p_s8(__a, __b, __p) __arm_vminavq_p_s8(__a, __b, __p) +#define vminaq_m_s8(__a, __b, __p) __arm_vminaq_m_s8(__a, __b, __p) +#define vmaxavq_p_s8(__a, __b, __p) __arm_vmaxavq_p_s8(__a, __b, __p) +#define vmaxaq_m_s8(__a, __b, __p) __arm_vmaxaq_m_s8(__a, __b, __p) +#define vcmpneq_m_s8(__a, __b, __p) __arm_vcmpneq_m_s8(__a, __b, __p) +#define vcmpneq_m_n_s8(__a, __b, __p) __arm_vcmpneq_m_n_s8(__a, __b, __p) +#define vcmpltq_m_s8(__a, __b, __p) __arm_vcmpltq_m_s8(__a, __b, __p) +#define vcmpltq_m_n_s8(__a, __b, __p) __arm_vcmpltq_m_n_s8(__a, __b, __p) +#define vcmpleq_m_s8(__a, __b, __p) __arm_vcmpleq_m_s8(__a, __b, __p) +#define vcmpleq_m_n_s8(__a, __b, __p) __arm_vcmpleq_m_n_s8(__a, __b, __p) +#define vcmpgtq_m_s8(__a, __b, __p) __arm_vcmpgtq_m_s8(__a, __b, __p) +#define vcmpgtq_m_n_s8(__a, __b, __p) __arm_vcmpgtq_m_n_s8(__a, __b, __p) +#define vcmpgeq_m_s8(__a, __b, __p) __arm_vcmpgeq_m_s8(__a, __b, __p) +#define vcmpgeq_m_n_s8(__a, __b, __p) __arm_vcmpgeq_m_n_s8(__a, __b, __p) +#define vcmpeqq_m_s8(__a, __b, __p) __arm_vcmpeqq_m_s8(__a, __b, __p) +#define vcmpeqq_m_n_s8(__a, __b, __p) __arm_vcmpeqq_m_n_s8(__a, __b, __p) +#define vshlq_m_r_s8(__a, __b, __p) __arm_vshlq_m_r_s8(__a, __b, __p) +#define vrshlq_m_n_s8(__a, __b, __p) __arm_vrshlq_m_n_s8(__a, __b, __p) +#define vrev64q_m_s8(__inactive, __a, __p) __arm_vrev64q_m_s8(__inactive, __a, __p) +#define vqshlq_m_r_s8(__a, __b, __p) __arm_vqshlq_m_r_s8(__a, __b, __p) +#define vqrshlq_m_n_s8(__a, __b, __p) __arm_vqrshlq_m_n_s8(__a, __b, __p) +#define vqnegq_m_s8(__inactive, __a, __p) __arm_vqnegq_m_s8(__inactive, __a, __p) +#define vqabsq_m_s8(__inactive, __a, __p) __arm_vqabsq_m_s8(__inactive, __a, __p) +#define vnegq_m_s8(__inactive, __a, __p) __arm_vnegq_m_s8(__inactive, __a, __p) +#define vmvnq_m_s8(__inactive, __a, __p) __arm_vmvnq_m_s8(__inactive, __a, __p) +#define vmlsdavxq_p_s8(__a, __b, __p) __arm_vmlsdavxq_p_s8(__a, __b, __p) +#define vmlsdavq_p_s8(__a, __b, __p) __arm_vmlsdavq_p_s8(__a, __b, __p) +#define vmladavxq_p_s8(__a, __b, __p) __arm_vmladavxq_p_s8(__a, __b, __p) +#define vmladavq_p_s8(__a, __b, __p) __arm_vmladavq_p_s8(__a, __b, __p) +#define vminvq_p_s8(__a, __b, __p) __arm_vminvq_p_s8(__a, __b, __p) +#define vmaxvq_p_s8(__a, __b, __p) __arm_vmaxvq_p_s8(__a, __b, __p) +#define vdupq_m_n_s8(__inactive, __a, __p) __arm_vdupq_m_n_s8(__inactive, __a, __p) +#define vclzq_m_s8(__inactive, __a, __p) __arm_vclzq_m_s8(__inactive, __a, __p) +#define vclsq_m_s8(__inactive, __a, __p) __arm_vclsq_m_s8(__inactive, __a, __p) +#define vaddvaq_p_s8(__a, __b, __p) __arm_vaddvaq_p_s8(__a, __b, __p) +#define vabsq_m_s8(__inactive, __a, __p) __arm_vabsq_m_s8(__inactive, __a, __p) +#define vqrdmlsdhxq_s8(__inactive, __a, __b) __arm_vqrdmlsdhxq_s8(__inactive, __a, __b) +#define vqrdmlsdhq_s8(__inactive, __a, __b) __arm_vqrdmlsdhq_s8(__inactive, __a, __b) +#define vqrdmlashq_n_s8(__a, 
__b, __c) __arm_vqrdmlashq_n_s8(__a, __b, __c) +#define vqrdmlahq_n_s8(__a, __b, __c) __arm_vqrdmlahq_n_s8(__a, __b, __c) +#define vqrdmladhxq_s8(__inactive, __a, __b) __arm_vqrdmladhxq_s8(__inactive, __a, __b) +#define vqrdmladhq_s8(__inactive, __a, __b) __arm_vqrdmladhq_s8(__inactive, __a, __b) +#define vqdmlsdhxq_s8(__inactive, __a, __b) __arm_vqdmlsdhxq_s8(__inactive, __a, __b) +#define vqdmlsdhq_s8(__inactive, __a, __b) __arm_vqdmlsdhq_s8(__inactive, __a, __b) +#define vqdmlahq_n_s8(__a, __b, __c) __arm_vqdmlahq_n_s8(__a, __b, __c) +#define vqdmladhxq_s8(__inactive, __a, __b) __arm_vqdmladhxq_s8(__inactive, __a, __b) +#define vqdmladhq_s8(__inactive, __a, __b) __arm_vqdmladhq_s8(__inactive, __a, __b) +#define vmlsdavaxq_s8(__a, __b, __c) __arm_vmlsdavaxq_s8(__a, __b, __c) +#define vmlsdavaq_s8(__a, __b, __c) __arm_vmlsdavaq_s8(__a, __b, __c) +#define vmlasq_n_s8(__a, __b, __c) __arm_vmlasq_n_s8(__a, __b, __c) +#define vmlaq_n_s8(__a, __b, __c) __arm_vmlaq_n_s8(__a, __b, __c) +#define vmladavaxq_s8(__a, __b, __c) __arm_vmladavaxq_s8(__a, __b, __c) +#define vmladavaq_s8(__a, __b, __c) __arm_vmladavaq_s8(__a, __b, __c) +#define vsriq_n_s8(__a, __b, __imm) __arm_vsriq_n_s8(__a, __b, __imm) +#define vsliq_n_s8(__a, __b, __imm) __arm_vsliq_n_s8(__a, __b, __imm) +#define vpselq_u16(__a, __b, __p) __arm_vpselq_u16(__a, __b, __p) +#define vpselq_s16(__a, __b, __p) __arm_vpselq_s16(__a, __b, __p) +#define vrev64q_m_u16(__inactive, __a, __p) __arm_vrev64q_m_u16(__inactive, __a, __p) +#define vqrdmlashq_n_u16(__a, __b, __c) __arm_vqrdmlashq_n_u16(__a, __b, __c) +#define vqrdmlahq_n_u16(__a, __b, __c) __arm_vqrdmlahq_n_u16(__a, __b, __c) +#define vqdmlahq_n_u16(__a, __b, __c) __arm_vqdmlahq_n_u16(__a, __b, __c) +#define vmvnq_m_u16(__inactive, __a, __p) __arm_vmvnq_m_u16(__inactive, __a, __p) +#define vmlasq_n_u16(__a, __b, __c) __arm_vmlasq_n_u16(__a, __b, __c) +#define vmlaq_n_u16(__a, __b, __c) __arm_vmlaq_n_u16(__a, __b, __c) +#define vmladavq_p_u16(__a, __b, __p) __arm_vmladavq_p_u16(__a, __b, __p) +#define vmladavaq_u16(__a, __b, __c) __arm_vmladavaq_u16(__a, __b, __c) +#define vminvq_p_u16(__a, __b, __p) __arm_vminvq_p_u16(__a, __b, __p) +#define vmaxvq_p_u16(__a, __b, __p) __arm_vmaxvq_p_u16(__a, __b, __p) +#define vdupq_m_n_u16(__inactive, __a, __p) __arm_vdupq_m_n_u16(__inactive, __a, __p) +#define vcmpneq_m_u16(__a, __b, __p) __arm_vcmpneq_m_u16(__a, __b, __p) +#define vcmpneq_m_n_u16(__a, __b, __p) __arm_vcmpneq_m_n_u16(__a, __b, __p) +#define vcmphiq_m_u16(__a, __b, __p) __arm_vcmphiq_m_u16(__a, __b, __p) +#define vcmphiq_m_n_u16(__a, __b, __p) __arm_vcmphiq_m_n_u16(__a, __b, __p) +#define vcmpeqq_m_u16(__a, __b, __p) __arm_vcmpeqq_m_u16(__a, __b, __p) +#define vcmpeqq_m_n_u16(__a, __b, __p) __arm_vcmpeqq_m_n_u16(__a, __b, __p) +#define vcmpcsq_m_u16(__a, __b, __p) __arm_vcmpcsq_m_u16(__a, __b, __p) +#define vcmpcsq_m_n_u16(__a, __b, __p) __arm_vcmpcsq_m_n_u16(__a, __b, __p) +#define vclzq_m_u16(__inactive, __a, __p) __arm_vclzq_m_u16(__inactive, __a, __p) +#define vaddvaq_p_u16(__a, __b, __p) __arm_vaddvaq_p_u16(__a, __b, __p) +#define vsriq_n_u16(__a, __b, __imm) __arm_vsriq_n_u16(__a, __b, __imm) +#define vsliq_n_u16(__a, __b, __imm) __arm_vsliq_n_u16(__a, __b, __imm) +#define vshlq_m_r_u16(__a, __b, __p) __arm_vshlq_m_r_u16(__a, __b, __p) +#define vrshlq_m_n_u16(__a, __b, __p) __arm_vrshlq_m_n_u16(__a, __b, __p) +#define vqshlq_m_r_u16(__a, __b, __p) __arm_vqshlq_m_r_u16(__a, __b, __p) +#define vqrshlq_m_n_u16(__a, __b, __p) __arm_vqrshlq_m_n_u16(__a, __b, __p) +#define 
vminavq_p_s16(__a, __b, __p) __arm_vminavq_p_s16(__a, __b, __p) +#define vminaq_m_s16(__a, __b, __p) __arm_vminaq_m_s16(__a, __b, __p) +#define vmaxavq_p_s16(__a, __b, __p) __arm_vmaxavq_p_s16(__a, __b, __p) +#define vmaxaq_m_s16(__a, __b, __p) __arm_vmaxaq_m_s16(__a, __b, __p) +#define vcmpneq_m_s16(__a, __b, __p) __arm_vcmpneq_m_s16(__a, __b, __p) +#define vcmpneq_m_n_s16(__a, __b, __p) __arm_vcmpneq_m_n_s16(__a, __b, __p) +#define vcmpltq_m_s16(__a, __b, __p) __arm_vcmpltq_m_s16(__a, __b, __p) +#define vcmpltq_m_n_s16(__a, __b, __p) __arm_vcmpltq_m_n_s16(__a, __b, __p) +#define vcmpleq_m_s16(__a, __b, __p) __arm_vcmpleq_m_s16(__a, __b, __p) +#define vcmpleq_m_n_s16(__a, __b, __p) __arm_vcmpleq_m_n_s16(__a, __b, __p) +#define vcmpgtq_m_s16(__a, __b, __p) __arm_vcmpgtq_m_s16(__a, __b, __p) +#define vcmpgtq_m_n_s16(__a, __b, __p) __arm_vcmpgtq_m_n_s16(__a, __b, __p) +#define vcmpgeq_m_s16(__a, __b, __p) __arm_vcmpgeq_m_s16(__a, __b, __p) +#define vcmpgeq_m_n_s16(__a, __b, __p) __arm_vcmpgeq_m_n_s16(__a, __b, __p) +#define vcmpeqq_m_s16(__a, __b, __p) __arm_vcmpeqq_m_s16(__a, __b, __p) +#define vcmpeqq_m_n_s16(__a, __b, __p) __arm_vcmpeqq_m_n_s16(__a, __b, __p) +#define vshlq_m_r_s16(__a, __b, __p) __arm_vshlq_m_r_s16(__a, __b, __p) +#define vrshlq_m_n_s16(__a, __b, __p) __arm_vrshlq_m_n_s16(__a, __b, __p) +#define vrev64q_m_s16(__inactive, __a, __p) __arm_vrev64q_m_s16(__inactive, __a, __p) +#define vqshlq_m_r_s16(__a, __b, __p) __arm_vqshlq_m_r_s16(__a, __b, __p) +#define vqrshlq_m_n_s16(__a, __b, __p) __arm_vqrshlq_m_n_s16(__a, __b, __p) +#define vqnegq_m_s16(__inactive, __a, __p) __arm_vqnegq_m_s16(__inactive, __a, __p) +#define vqabsq_m_s16(__inactive, __a, __p) __arm_vqabsq_m_s16(__inactive, __a, __p) +#define vnegq_m_s16(__inactive, __a, __p) __arm_vnegq_m_s16(__inactive, __a, __p) +#define vmvnq_m_s16(__inactive, __a, __p) __arm_vmvnq_m_s16(__inactive, __a, __p) +#define vmlsdavxq_p_s16(__a, __b, __p) __arm_vmlsdavxq_p_s16(__a, __b, __p) +#define vmlsdavq_p_s16(__a, __b, __p) __arm_vmlsdavq_p_s16(__a, __b, __p) +#define vmladavxq_p_s16(__a, __b, __p) __arm_vmladavxq_p_s16(__a, __b, __p) +#define vmladavq_p_s16(__a, __b, __p) __arm_vmladavq_p_s16(__a, __b, __p) +#define vminvq_p_s16(__a, __b, __p) __arm_vminvq_p_s16(__a, __b, __p) +#define vmaxvq_p_s16(__a, __b, __p) __arm_vmaxvq_p_s16(__a, __b, __p) +#define vdupq_m_n_s16(__inactive, __a, __p) __arm_vdupq_m_n_s16(__inactive, __a, __p) +#define vclzq_m_s16(__inactive, __a, __p) __arm_vclzq_m_s16(__inactive, __a, __p) +#define vclsq_m_s16(__inactive, __a, __p) __arm_vclsq_m_s16(__inactive, __a, __p) +#define vaddvaq_p_s16(__a, __b, __p) __arm_vaddvaq_p_s16(__a, __b, __p) +#define vabsq_m_s16(__inactive, __a, __p) __arm_vabsq_m_s16(__inactive, __a, __p) +#define vqrdmlsdhxq_s16(__inactive, __a, __b) __arm_vqrdmlsdhxq_s16(__inactive, __a, __b) +#define vqrdmlsdhq_s16(__inactive, __a, __b) __arm_vqrdmlsdhq_s16(__inactive, __a, __b) +#define vqrdmlashq_n_s16(__a, __b, __c) __arm_vqrdmlashq_n_s16(__a, __b, __c) +#define vqrdmlahq_n_s16(__a, __b, __c) __arm_vqrdmlahq_n_s16(__a, __b, __c) +#define vqrdmladhxq_s16(__inactive, __a, __b) __arm_vqrdmladhxq_s16(__inactive, __a, __b) +#define vqrdmladhq_s16(__inactive, __a, __b) __arm_vqrdmladhq_s16(__inactive, __a, __b) +#define vqdmlsdhxq_s16(__inactive, __a, __b) __arm_vqdmlsdhxq_s16(__inactive, __a, __b) +#define vqdmlsdhq_s16(__inactive, __a, __b) __arm_vqdmlsdhq_s16(__inactive, __a, __b) +#define vqdmlahq_n_s16(__a, __b, __c) __arm_vqdmlahq_n_s16(__a, __b, __c) +#define 
vqdmladhxq_s16(__inactive, __a, __b) __arm_vqdmladhxq_s16(__inactive, __a, __b) +#define vqdmladhq_s16(__inactive, __a, __b) __arm_vqdmladhq_s16(__inactive, __a, __b) +#define vmlsdavaxq_s16(__a, __b, __c) __arm_vmlsdavaxq_s16(__a, __b, __c) +#define vmlsdavaq_s16(__a, __b, __c) __arm_vmlsdavaq_s16(__a, __b, __c) +#define vmlasq_n_s16(__a, __b, __c) __arm_vmlasq_n_s16(__a, __b, __c) +#define vmlaq_n_s16(__a, __b, __c) __arm_vmlaq_n_s16(__a, __b, __c) +#define vmladavaxq_s16(__a, __b, __c) __arm_vmladavaxq_s16(__a, __b, __c) +#define vmladavaq_s16(__a, __b, __c) __arm_vmladavaq_s16(__a, __b, __c) +#define vsriq_n_s16(__a, __b, __imm) __arm_vsriq_n_s16(__a, __b, __imm) +#define vsliq_n_s16(__a, __b, __imm) __arm_vsliq_n_s16(__a, __b, __imm) +#define vpselq_u32(__a, __b, __p) __arm_vpselq_u32(__a, __b, __p) +#define vpselq_s32(__a, __b, __p) __arm_vpselq_s32(__a, __b, __p) +#define vrev64q_m_u32(__inactive, __a, __p) __arm_vrev64q_m_u32(__inactive, __a, __p) +#define vqrdmlashq_n_u32(__a, __b, __c) __arm_vqrdmlashq_n_u32(__a, __b, __c) +#define vqrdmlahq_n_u32(__a, __b, __c) __arm_vqrdmlahq_n_u32(__a, __b, __c) +#define vqdmlahq_n_u32(__a, __b, __c) __arm_vqdmlahq_n_u32(__a, __b, __c) +#define vmvnq_m_u32(__inactive, __a, __p) __arm_vmvnq_m_u32(__inactive, __a, __p) +#define vmlasq_n_u32(__a, __b, __c) __arm_vmlasq_n_u32(__a, __b, __c) +#define vmlaq_n_u32(__a, __b, __c) __arm_vmlaq_n_u32(__a, __b, __c) +#define vmladavq_p_u32(__a, __b, __p) __arm_vmladavq_p_u32(__a, __b, __p) +#define vmladavaq_u32(__a, __b, __c) __arm_vmladavaq_u32(__a, __b, __c) +#define vminvq_p_u32(__a, __b, __p) __arm_vminvq_p_u32(__a, __b, __p) +#define vmaxvq_p_u32(__a, __b, __p) __arm_vmaxvq_p_u32(__a, __b, __p) +#define vdupq_m_n_u32(__inactive, __a, __p) __arm_vdupq_m_n_u32(__inactive, __a, __p) +#define vcmpneq_m_u32(__a, __b, __p) __arm_vcmpneq_m_u32(__a, __b, __p) +#define vcmpneq_m_n_u32(__a, __b, __p) __arm_vcmpneq_m_n_u32(__a, __b, __p) +#define vcmphiq_m_u32(__a, __b, __p) __arm_vcmphiq_m_u32(__a, __b, __p) +#define vcmphiq_m_n_u32(__a, __b, __p) __arm_vcmphiq_m_n_u32(__a, __b, __p) +#define vcmpeqq_m_u32(__a, __b, __p) __arm_vcmpeqq_m_u32(__a, __b, __p) +#define vcmpeqq_m_n_u32(__a, __b, __p) __arm_vcmpeqq_m_n_u32(__a, __b, __p) +#define vcmpcsq_m_u32(__a, __b, __p) __arm_vcmpcsq_m_u32(__a, __b, __p) +#define vcmpcsq_m_n_u32(__a, __b, __p) __arm_vcmpcsq_m_n_u32(__a, __b, __p) +#define vclzq_m_u32(__inactive, __a, __p) __arm_vclzq_m_u32(__inactive, __a, __p) +#define vaddvaq_p_u32(__a, __b, __p) __arm_vaddvaq_p_u32(__a, __b, __p) +#define vsriq_n_u32(__a, __b, __imm) __arm_vsriq_n_u32(__a, __b, __imm) +#define vsliq_n_u32(__a, __b, __imm) __arm_vsliq_n_u32(__a, __b, __imm) +#define vshlq_m_r_u32(__a, __b, __p) __arm_vshlq_m_r_u32(__a, __b, __p) +#define vrshlq_m_n_u32(__a, __b, __p) __arm_vrshlq_m_n_u32(__a, __b, __p) +#define vqshlq_m_r_u32(__a, __b, __p) __arm_vqshlq_m_r_u32(__a, __b, __p) +#define vqrshlq_m_n_u32(__a, __b, __p) __arm_vqrshlq_m_n_u32(__a, __b, __p) +#define vminavq_p_s32(__a, __b, __p) __arm_vminavq_p_s32(__a, __b, __p) +#define vminaq_m_s32(__a, __b, __p) __arm_vminaq_m_s32(__a, __b, __p) +#define vmaxavq_p_s32(__a, __b, __p) __arm_vmaxavq_p_s32(__a, __b, __p) +#define vmaxaq_m_s32(__a, __b, __p) __arm_vmaxaq_m_s32(__a, __b, __p) +#define vcmpneq_m_s32(__a, __b, __p) __arm_vcmpneq_m_s32(__a, __b, __p) +#define vcmpneq_m_n_s32(__a, __b, __p) __arm_vcmpneq_m_n_s32(__a, __b, __p) +#define vcmpltq_m_s32(__a, __b, __p) __arm_vcmpltq_m_s32(__a, __b, __p) +#define vcmpltq_m_n_s32(__a, __b, __p) 
__arm_vcmpltq_m_n_s32(__a, __b, __p) +#define vcmpleq_m_s32(__a, __b, __p) __arm_vcmpleq_m_s32(__a, __b, __p) +#define vcmpleq_m_n_s32(__a, __b, __p) __arm_vcmpleq_m_n_s32(__a, __b, __p) +#define vcmpgtq_m_s32(__a, __b, __p) __arm_vcmpgtq_m_s32(__a, __b, __p) +#define vcmpgtq_m_n_s32(__a, __b, __p) __arm_vcmpgtq_m_n_s32(__a, __b, __p) +#define vcmpgeq_m_s32(__a, __b, __p) __arm_vcmpgeq_m_s32(__a, __b, __p) +#define vcmpgeq_m_n_s32(__a, __b, __p) __arm_vcmpgeq_m_n_s32(__a, __b, __p) +#define vcmpeqq_m_s32(__a, __b, __p) __arm_vcmpeqq_m_s32(__a, __b, __p) +#define vcmpeqq_m_n_s32(__a, __b, __p) __arm_vcmpeqq_m_n_s32(__a, __b, __p) +#define vshlq_m_r_s32(__a, __b, __p) __arm_vshlq_m_r_s32(__a, __b, __p) +#define vrshlq_m_n_s32(__a, __b, __p) __arm_vrshlq_m_n_s32(__a, __b, __p) +#define vrev64q_m_s32(__inactive, __a, __p) __arm_vrev64q_m_s32(__inactive, __a, __p) +#define vqshlq_m_r_s32(__a, __b, __p) __arm_vqshlq_m_r_s32(__a, __b, __p) +#define vqrshlq_m_n_s32(__a, __b, __p) __arm_vqrshlq_m_n_s32(__a, __b, __p) +#define vqnegq_m_s32(__inactive, __a, __p) __arm_vqnegq_m_s32(__inactive, __a, __p) +#define vqabsq_m_s32(__inactive, __a, __p) __arm_vqabsq_m_s32(__inactive, __a, __p) +#define vnegq_m_s32(__inactive, __a, __p) __arm_vnegq_m_s32(__inactive, __a, __p) +#define vmvnq_m_s32(__inactive, __a, __p) __arm_vmvnq_m_s32(__inactive, __a, __p) +#define vmlsdavxq_p_s32(__a, __b, __p) __arm_vmlsdavxq_p_s32(__a, __b, __p) +#define vmlsdavq_p_s32(__a, __b, __p) __arm_vmlsdavq_p_s32(__a, __b, __p) +#define vmladavxq_p_s32(__a, __b, __p) __arm_vmladavxq_p_s32(__a, __b, __p) +#define vmladavq_p_s32(__a, __b, __p) __arm_vmladavq_p_s32(__a, __b, __p) +#define vminvq_p_s32(__a, __b, __p) __arm_vminvq_p_s32(__a, __b, __p) +#define vmaxvq_p_s32(__a, __b, __p) __arm_vmaxvq_p_s32(__a, __b, __p) +#define vdupq_m_n_s32(__inactive, __a, __p) __arm_vdupq_m_n_s32(__inactive, __a, __p) +#define vclzq_m_s32(__inactive, __a, __p) __arm_vclzq_m_s32(__inactive, __a, __p) +#define vclsq_m_s32(__inactive, __a, __p) __arm_vclsq_m_s32(__inactive, __a, __p) +#define vaddvaq_p_s32(__a, __b, __p) __arm_vaddvaq_p_s32(__a, __b, __p) +#define vabsq_m_s32(__inactive, __a, __p) __arm_vabsq_m_s32(__inactive, __a, __p) +#define vqrdmlsdhxq_s32(__inactive, __a, __b) __arm_vqrdmlsdhxq_s32(__inactive, __a, __b) +#define vqrdmlsdhq_s32(__inactive, __a, __b) __arm_vqrdmlsdhq_s32(__inactive, __a, __b) +#define vqrdmlashq_n_s32(__a, __b, __c) __arm_vqrdmlashq_n_s32(__a, __b, __c) +#define vqrdmlahq_n_s32(__a, __b, __c) __arm_vqrdmlahq_n_s32(__a, __b, __c) +#define vqrdmladhxq_s32(__inactive, __a, __b) __arm_vqrdmladhxq_s32(__inactive, __a, __b) +#define vqrdmladhq_s32(__inactive, __a, __b) __arm_vqrdmladhq_s32(__inactive, __a, __b) +#define vqdmlsdhxq_s32(__inactive, __a, __b) __arm_vqdmlsdhxq_s32(__inactive, __a, __b) +#define vqdmlsdhq_s32(__inactive, __a, __b) __arm_vqdmlsdhq_s32(__inactive, __a, __b) +#define vqdmlahq_n_s32(__a, __b, __c) __arm_vqdmlahq_n_s32(__a, __b, __c) +#define vqdmladhxq_s32(__inactive, __a, __b) __arm_vqdmladhxq_s32(__inactive, __a, __b) +#define vqdmladhq_s32(__inactive, __a, __b) __arm_vqdmladhq_s32(__inactive, __a, __b) +#define vmlsdavaxq_s32(__a, __b, __c) __arm_vmlsdavaxq_s32(__a, __b, __c) +#define vmlsdavaq_s32(__a, __b, __c) __arm_vmlsdavaq_s32(__a, __b, __c) +#define vmlasq_n_s32(__a, __b, __c) __arm_vmlasq_n_s32(__a, __b, __c) +#define vmlaq_n_s32(__a, __b, __c) __arm_vmlaq_n_s32(__a, __b, __c) +#define vmladavaxq_s32(__a, __b, __c) __arm_vmladavaxq_s32(__a, __b, __c) +#define vmladavaq_s32(__a, __b, 
__c) __arm_vmladavaq_s32(__a, __b, __c) +#define vsriq_n_s32(__a, __b, __imm) __arm_vsriq_n_s32(__a, __b, __imm) +#define vsliq_n_s32(__a, __b, __imm) __arm_vsliq_n_s32(__a, __b, __imm) +#define vpselq_u64(__a, __b, __p) __arm_vpselq_u64(__a, __b, __p) +#define vpselq_s64(__a, __b, __p) __arm_vpselq_s64(__a, __b, __p) #endif __extension__ extern __inline void @@ -4699,6 +4956,1805 @@ __arm_vshlcq_u32 (uint32x4_t __a, uint32_t * __b, const int __imm) return __res; } +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vpselq_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vpselq_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vrev64q_m_uv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) +{ + return __builtin_mve_vqrdmlashq_n_uv16qi (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) +{ + return __builtin_mve_vqrdmlahq_n_uv16qi (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) +{ + return __builtin_mve_vqdmlahq_n_uv16qi (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vmvnq_m_uv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) +{ + return __builtin_mve_vmlasq_n_uv16qi (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c) +{ + return __builtin_mve_vmlaq_n_uv16qi (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmladavq_p_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c) +{ + return __builtin_mve_vmladavaq_uv16qi (__a, __b, __c); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminvq_p_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p_u8 (uint8_t __a, 
uint8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxvq_p_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m_n_u8 (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vdupq_m_n_uv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_n_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmphiq_m_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmphiq_m_n_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_n_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpcsq_m_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpcsq_m_n_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vclzq_m_uv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p_u8 (uint32_t __a, uint8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vaddvaq_p_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm) +{ + return __builtin_mve_vsriq_n_uv16qi (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm) +{ + return __builtin_mve_vsliq_n_uv16qi (__a, __b, __imm); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return 
__builtin_mve_vshlq_m_r_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vrshlq_m_n_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqshlq_m_r_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqrshlq_m_n_uv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminavq_p_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminaq_m_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxavq_p_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxaq_m_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_n_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpltq_m_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpltq_m_n_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpleq_m_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpleq_m_n_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgtq_m_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgtq_m_n_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgeq_m_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgeq_m_n_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_n_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vshlq_m_r_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vrshlq_m_n_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vrev64q_m_sv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqshlq_m_r_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqrshlq_m_n_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vqnegq_m_sv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vqabsq_m_sv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vnegq_m_sv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vmvnq_m_sv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmlsdavxq_p_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmlsdavq_p_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmladavxq_p_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmladavq_p_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminvq_p_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxvq_p_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m_n_s8 (int8x16_t __inactive, int8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vdupq_m_n_sv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vclzq_m_sv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vclsq_m_sv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p_s8 (int32_t __a, int8x16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vaddvaq_p_sv16qi (__a, __b, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vabsq_m_sv16qi (__inactive, __a, __p); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __builtin_mve_vqrdmlsdhxq_sv16qi (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __builtin_mve_vqrdmlsdhq_sv16qi (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) +{ + return __builtin_mve_vqrdmlashq_n_sv16qi (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) +{ + return 
__builtin_mve_vqrdmlahq_n_sv16qi (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __builtin_mve_vqrdmladhxq_sv16qi (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __builtin_mve_vqrdmladhq_sv16qi (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __builtin_mve_vqdmlsdhxq_sv16qi (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __builtin_mve_vqdmlsdhq_sv16qi (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) +{ + return __builtin_mve_vqdmlahq_n_sv16qi (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __builtin_mve_vqdmladhxq_sv16qi (__inactive, __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b) +{ + return __builtin_mve_vqdmladhq_sv16qi (__inactive, __a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) +{ + return __builtin_mve_vmlsdavaxq_sv16qi (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) +{ + return __builtin_mve_vmlsdavaq_sv16qi (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) +{ + return __builtin_mve_vmlasq_n_sv16qi (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c) +{ + return __builtin_mve_vmlaq_n_sv16qi (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) +{ + return __builtin_mve_vmladavaxq_sv16qi (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c) +{ + return __builtin_mve_vmladavaq_sv16qi (__a, __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm) +{ + return __builtin_mve_vsriq_n_sv16qi (__a, __b, __imm); +} + +__extension__ extern __inline int8x16_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm) +{ + return __builtin_mve_vsliq_n_sv16qi (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vpselq_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vpselq_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vrev64q_m_uv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return __builtin_mve_vqrdmlashq_n_uv8hi (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return __builtin_mve_vqrdmlahq_n_uv8hi (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return __builtin_mve_vqdmlahq_n_uv8hi (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vmvnq_m_uv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return __builtin_mve_vmlasq_n_uv8hi (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return __builtin_mve_vmlaq_n_uv8hi (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmladavq_p_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c) +{ + return __builtin_mve_vmladavaq_uv8hi (__a, __b, __c); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminvq_p_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxvq_p_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m_n_u16 (uint16x8_t __inactive, uint16_t __a, 
mve_pred16_t __p) +{ + return __builtin_mve_vdupq_m_n_uv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_n_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmphiq_m_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmphiq_m_n_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_n_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpcsq_m_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpcsq_m_n_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vclzq_m_uv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p_u16 (uint32_t __a, uint16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vaddvaq_p_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm) +{ + return __builtin_mve_vsriq_n_uv8hi (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm) +{ + return __builtin_mve_vsliq_n_uv8hi (__a, __b, __imm); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vshlq_m_r_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vrshlq_m_n_uv8hi (__a, 
__b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqshlq_m_r_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqrshlq_m_n_uv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminavq_p_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminaq_m_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxavq_p_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxaq_m_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_n_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpltq_m_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpltq_m_n_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpleq_m_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpleq_m_n_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgtq_m_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgtq_m_n_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vcmpgeq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgeq_m_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgeq_m_n_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_n_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vshlq_m_r_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vrshlq_m_n_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vrev64q_m_sv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqshlq_m_r_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqrshlq_m_n_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vqnegq_m_sv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vqabsq_m_sv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vnegq_m_sv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vmvnq_m_sv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmlsdavxq_p_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavq_p_s16 (int16x8_t 
__a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmlsdavq_p_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmladavxq_p_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmladavq_p_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminvq_p_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxvq_p_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m_n_s16 (int16x8_t __inactive, int16_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vdupq_m_n_sv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vclzq_m_sv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vclsq_m_sv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p_s16 (int32_t __a, int16x8_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vaddvaq_p_sv8hi (__a, __b, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vabsq_m_sv8hi (__inactive, __a, __p); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __builtin_mve_vqrdmlsdhxq_sv8hi (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __builtin_mve_vqrdmlsdhq_sv8hi (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return __builtin_mve_vqrdmlashq_n_sv8hi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return __builtin_mve_vqrdmlahq_n_sv8hi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return 
__builtin_mve_vqrdmladhxq_sv8hi (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __builtin_mve_vqrdmladhq_sv8hi (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __builtin_mve_vqdmlsdhxq_sv8hi (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __builtin_mve_vqdmlsdhq_sv8hi (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return __builtin_mve_vqdmlahq_n_sv8hi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __builtin_mve_vqdmladhxq_sv8hi (__inactive, __a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b) +{ + return __builtin_mve_vqdmladhq_sv8hi (__inactive, __a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c) +{ + return __builtin_mve_vmlsdavaxq_sv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c) +{ + return __builtin_mve_vmlsdavaq_sv8hi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return __builtin_mve_vmlasq_n_sv8hi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return __builtin_mve_vmlaq_n_sv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c) +{ + return __builtin_mve_vmladavaxq_sv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c) +{ + return __builtin_mve_vmladavaq_sv8hi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm) +{ + return __builtin_mve_vsriq_n_sv8hi (__a, __b, __imm); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm) +{ + return __builtin_mve_vsliq_n_sv8hi (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vpselq_uv4si (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vpselq_sv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vrev64q_m_uv4si (__inactive, __a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return __builtin_mve_vqrdmlashq_n_uv4si (__a, __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return __builtin_mve_vqrdmlahq_n_uv4si (__a, __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return __builtin_mve_vqdmlahq_n_uv4si (__a, __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vmvnq_m_uv4si (__inactive, __a, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return __builtin_mve_vmlasq_n_uv4si (__a, __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return __builtin_mve_vmlaq_n_uv4si (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmladavq_p_uv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c) +{ + return __builtin_mve_vmladavaq_uv4si (__a, __b, __c); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminvq_p_uv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxvq_p_uv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vdupq_m_n_uv4si (__inactive, __a, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_u32 (uint32x4_t __a, uint32x4_t 
__b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_uv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_n_uv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmphiq_m_uv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmphiq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmphiq_m_n_uv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_uv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_n_uv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpcsq_m_uv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpcsq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpcsq_m_n_uv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vclzq_m_uv4si (__inactive, __a, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vaddvaq_p_uv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm) +{ + return __builtin_mve_vsriq_n_uv4si (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm) +{ + return __builtin_mve_vsliq_n_uv4si (__a, __b, __imm); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vshlq_m_r_uv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vrshlq_m_n_uv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqshlq_m_r_uv4si (__a, __b, 
__p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqrshlq_m_n_uv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminavq_p_sv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminaq_m_sv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxavq_p_sv4si (__a, __b, __p); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxaq_m_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpneq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpneq_m_n_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpltq_m_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpltq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpltq_m_n_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpleq_m_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpleq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpleq_m_n_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgtq_m_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgtq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgtq_m_n_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpgeq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgeq_m_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vcmpgeq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpgeq_m_n_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_sv4si (__a, __b, __p); +} + +__extension__ extern __inline mve_pred16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vcmpeqq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vcmpeqq_m_n_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vshlq_m_r_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vrshlq_m_n_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vrev64q_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vrev64q_m_sv4si (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqshlq_m_r_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vqrshlq_m_n_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vqnegq_m_sv4si (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vqabsq_m_sv4si (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vnegq_m_sv4si (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmvnq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vmvnq_m_sv4si (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmlsdavxq_p_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmlsdavq_p_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavxq_p_s32 (int32x4_t 
__a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmladavxq_p_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmladavq_p_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vminvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vminvq_p_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmaxvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vmaxvq_p_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vdupq_m_n_s32 (int32x4_t __inactive, int32_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vdupq_m_n_sv4si (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclzq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vclzq_m_sv4si (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vclsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vclsq_m_sv4si (__inactive, __a, __p); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vaddvaq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vaddvaq_p_sv4si (__a, __b, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p) +{ + return __builtin_mve_vabsq_m_sv4si (__inactive, __a, __p); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __builtin_mve_vqrdmlsdhxq_sv4si (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __builtin_mve_vqrdmlsdhq_sv4si (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return __builtin_mve_vqrdmlashq_n_sv4si (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return __builtin_mve_vqrdmlahq_n_sv4si (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __builtin_mve_vqrdmladhxq_sv4si (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqrdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return 
__builtin_mve_vqrdmladhq_sv4si (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __builtin_mve_vqdmlsdhxq_sv4si (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __builtin_mve_vqdmlsdhq_sv4si (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return __builtin_mve_vqdmlahq_n_sv4si (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __builtin_mve_vqdmladhxq_sv4si (__inactive, __a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vqdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b) +{ + return __builtin_mve_vqdmladhq_sv4si (__inactive, __a, __b); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) +{ + return __builtin_mve_vmlsdavaxq_sv4si (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlsdavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) +{ + return __builtin_mve_vmlsdavaq_sv4si (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlasq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return __builtin_mve_vmlasq_n_sv4si (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return __builtin_mve_vmlaq_n_sv4si (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) +{ + return __builtin_mve_vmladavaxq_sv4si (__a, __b, __c); +} + +__extension__ extern __inline int32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vmladavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c) +{ + return __builtin_mve_vmladavaq_sv4si (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm) +{ + return __builtin_mve_vsriq_n_sv4si (__a, __b, __imm); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm) +{ + return __builtin_mve_vsliq_n_sv4si (__a, __b, __imm); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vpselq_u64 (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vpselq_uv2di (__a, __b, __p); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +__arm_vpselq_s64 (int64x2_t __a, int64x2_t __b, mve_pred16_t __p) +{ + return __builtin_mve_vpselq_sv2di (__a, __b, __p); +} #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ __extension__ extern __inline void @@ -6939,61 +8995,326 @@ extern void *__ARM_undef; #define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ - int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));}) + +#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1) +#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ + int 
(*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ + int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \ + int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ + int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));}) + +#define vbicq_m_n(p0,p1,p2) __arm_vbicq_m_n(p0,p1,p2) +#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) + +#define vqrshrnbq(p0,p1,p2) __arm_vqrshrnbq(p0,p1,p2) +#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vqrshrunbq(p0,p1,p2) __arm_vqrshrunbq(p0,p1,p2) +#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vshlcq(p0,p1,p2) __arm_vshlcq(p0,p1,p2) +#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) + +#define vclsq_m(p0,p1,p2) __arm_vclsq_m(p0,p1,p2) +#define __arm_vclsq_m(p0,p1,p2) ({ 
__typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vclzq_m(p0,p1,p2) __arm_vclzq_m(p0,p1,p2) +#define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vmaxaq_m(p0,p1,p2) __arm_vmaxaq_m(p0,p1,p2) +#define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vminaq_m(p0,p1,p2) __arm_vminaq_m(p0,p1,p2) +#define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vmlaq(p0,p1,p2) __arm_vmlaq(p0,p1,p2) +#define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int 
(*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) + +#define vsriq(p0,p1,p2) __arm_vsriq(p0,p1,p2) +#define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vsliq(p0,p1,p2) __arm_vsliq(p0,p1,p2) +#define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: 
__arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2) +#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) + +#define vrshlq_m_n(p0,p1,p2) __arm_vrshlq_m_n(p0,p1,p2) +#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));}) + +#define vqshlq_m_r(p0,p1,p2) __arm_vqshlq_m_r(p0,p1,p2) +#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) + +#define vqrshlq_m_n(p0,p1,p2) __arm_vqrshlq_m_n(p0,p1,p2) +#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) + +#define vqrdmlsdhxq(p0,p1,p2) __arm_vqrdmlsdhxq(p0,p1,p2) +#define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 
= (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqrdmlsdhq(p0,p1,p2) __arm_vqrdmlsdhq(p0,p1,p2) +#define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqrdmlashq(p0,p1,p2) __arm_vqrdmlashq(p0,p1,p2) +#define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlashq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlashq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlashq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) + +#define vqrdmlahq(p0,p1,p2) __arm_vqrdmlahq(p0,p1,p2) +#define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) + +#define vmlasq(p0,p1,p2) __arm_vmlasq(p0,p1,p2) +#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) + +#define vqdmlahq(p0,p1,p2) __arm_vqdmlahq(p0,p1,p2) +#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmlahq_n_s16 
(__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) + +#define vqrdmladhxq(p0,p1,p2) __arm_vqrdmladhxq(p0,p1,p2) +#define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqrdmladhq(p0,p1,p2) __arm_vqrdmladhq(p0,p1,p2) +#define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqnegq_m(p0,p1,p2) __arm_vqnegq_m(p0,p1,p2) +#define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, 
int32x4_t), p2));}) -#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1) -#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ +#define vqdmlsdhxq(p0,p1,p2) __arm_vqdmlsdhxq(p0,p1,p2) +#define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ - int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ - int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \ - int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \ - int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \ - int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));}) + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vbicq_m_n(p0,p1,p2) __arm_vbicq_m_n(p0,p1,p2) -#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ - int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ - int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ - int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ - int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) +#define vqdmlsdhq(p0,p1,p2) __arm_vqdmlsdhq(p0,p1,p2) +#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) -#define vqrshrnbq(p0,p1,p2) __arm_vqrshrnbq(p0,p1,p2) -#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ +#define vqdmladhxq(p0,p1,p2) __arm_vqdmladhxq(p0,p1,p2) +#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqdmladhq(p0,p1,p2) __arm_vqdmladhq(p0,p1,p2) +#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqabsq_m(p0,p1,p2) __arm_vqabsq_m(p0,p1,p2) +#define __arm_vqabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ - int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, 
uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) -#define vqrshrunbq(p0,p1,p2) __arm_vqrshrunbq(p0,p1,p2) -#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ +#define vmvnq_m(p0,p1,p2) __arm_vmvnq_m(p0,p1,p2) +#define __arm_vmvnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ - int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ - int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmvnq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmvnq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmvnq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmvnq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmvnq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmvnq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define vshlcq(p0,p1,p2) __arm_vshlcq(p0,p1,p2) -#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ - int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ - int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ - int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ - int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ - int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ - int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) +#define vqabsq_m(p0,p1,p2) __arm_vqabsq_m(p0,p1,p2) +#define __arm_vqabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: 
__arm_vqabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) #define vcvtaq_m(p0,p1,p2) __arm_vcvtaq_m(p0,p1,p2) #define __arm_vcvtaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ @@ -7951,6 +10272,40 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));}) +#define vcmpneq_m(p0,p1,p2) __arm_vcmpneq_m(p0,p1,p2) +#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1) +#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, 
uint8x16_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));}) + #define vaddlvaq(p0,p1) __arm_vaddlvaq(p0,p1) #define __arm_vaddlvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ @@ -8062,6 +10417,527 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) +#define vqrdmlsdhq(p0,p1,p2) __arm_vqrdmlsdhq(p0,p1,p2) +#define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqrdmlsdhxq(p0,p1,p2) __arm_vqrdmlsdhxq(p0,p1,p2) +#define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 
(__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqrshlq_m_n(p0,p1,p2) __arm_vqrshlq_m_n(p0,p1,p2) +#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) + +#define vqshlq_m_r(p0,p1,p2) __arm_vqshlq_m_r(p0,p1,p2) +#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) + +#define vrev64q_m(p0,p1,p2) __arm_vrev64q_m(p0,p1,p2) +#define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vrshlq_m_n(p0,p1,p2) __arm_vrshlq_m_n(p0,p1,p2) +#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, 
uint8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));}) + +#define vshlq_m_r(p0,p1,p2) __arm_vshlq_m_r(p0,p1,p2) +#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ + int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \ + int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \ + int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));}) + +#define vsliq(p0,p1,p2) __arm_vsliq(p0,p1,p2) +#define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vsriq(p0,p1,p2) __arm_vsriq(p0,p1,p2) +#define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vqrdmlashq(p0,p1,p2) __arm_vqrdmlashq(p0,p1,p2) +#define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = 
(p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlashq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlashq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlashq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) + +#define vqrdmlahq(p0,p1,p2) __arm_vqrdmlahq(p0,p1,p2) +#define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqrdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqrdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqrdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) + +#define vqrdmladhxq(p0,p1,p2) __arm_vqrdmladhxq(p0,p1,p2) +#define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int 
(*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqrdmladhq(p0,p1,p2) __arm_vqrdmladhq(p0,p1,p2) +#define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqnegq_m(p0,p1,p2) __arm_vqnegq_m(p0,p1,p2) +#define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vqdmlsdhxq(p0,p1,p2) __arm_vqdmlsdhxq(p0,p1,p2) +#define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vabsq_m(p0,p1,p2) __arm_vabsq_m(p0,p1,p2) +#define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), 
p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vclsq_m(p0,p1,p2) __arm_vclsq_m(p0,p1,p2) +#define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vclzq_m(p0,p1,p2) __arm_vclzq_m(p0,p1,p2) +#define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vcmpgeq_m(p0,p1,p2) __arm_vcmpgeq_m(p0,p1,p2) +#define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vcmpgeq_m_n(p0,p1,p2) __arm_vcmpgeq_m_n(p0,p1,p2) +#define __arm_vcmpgeq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));}) + +#define vdupq_m(p0,p1,p2) __arm_vdupq_m(p0,p1,p2) +#define __arm_vdupq_m(p0,p1,p2) 
({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));}) + +#define vmaxaq_m(p0,p1,p2) __arm_vmaxaq_m(p0,p1,p2) +#define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vmlaq(p0,p1,p2) __arm_vmlaq(p0,p1,p2) +#define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) + +#define vmlasq(p0,p1,p2) __arm_vmlasq(p0,p1,p2) +#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int 
(*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) + +#define vmvnq_m(p0,p1,p2) __arm_vmvnq_m(p0,p1,p2) +#define __arm_vmvnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmvnq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmvnq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmvnq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmvnq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmvnq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmvnq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vnegq_m(p0,p1,p2) __arm_vnegq_m(p0,p1,p2) +#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vpselq(p0,p1,p2) __arm_vpselq(p0,p1,p2) +#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 
(__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ + int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));}) + +#define vqdmlahq(p0,p1,p2) __arm_vqdmlahq(p0,p1,p2) +#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqdmlahq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqdmlahq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqdmlahq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));}) + +#define vqdmlsdhq(p0,p1,p2) __arm_vqdmlsdhq(p0,p1,p2) +#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int 
(*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqdmladhxq(p0,p1,p2) __arm_vqdmladhxq(p0,p1,p2) +#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqdmladhq(p0,p1,p2) __arm_vqdmladhq(p0,p1,p2) +#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vqabsq_m(p0,p1,p2) __arm_vqabsq_m(p0,p1,p2) +#define __arm_vqabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vminaq_m(p0,p1,p2) __arm_vminaq_m(p0,p1,p2) +#define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vrmlaldavhaq(p0,p1,p2) 
__arm_vrmlaldavhaq(p0,p1,p2) +#define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) + +#define vmlsdavxq_p(p0,p1,p2) __arm_vmlsdavxq_p(p0,p1,p2) +#define __arm_vmlsdavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vmlsdavq_p(p0,p1,p2) __arm_vmlsdavq_p(p0,p1,p2) +#define __arm_vmlsdavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vmlsdavaxq(p0,p1,p2) __arm_vmlsdavaxq(p0,p1,p2) +#define __arm_vmlsdavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vmlsdavaq(p0,p1,p2) __arm_vmlsdavaq(p0,p1,p2) +#define __arm_vmlsdavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_s32(__p0, 
__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));}) + +#define vaddvaq_p(p0,p1,p2) __arm_vaddvaq_p(p0,p1,p2) +#define __arm_vaddvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vcmpcsq_m_n(p0,p1,p2) __arm_vcmpcsq_m_n(p0,p1,p2) +#define __arm_vcmpcsq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpcsq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpcsq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpcsq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));}) + +#define vcmpcsq_m(p0,p1,p2) __arm_vcmpcsq_m(p0,p1,p2) +#define __arm_vcmpcsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vmladavxq_p(p0,p1,p2) __arm_vmladavxq_p(p0,p1,p2) +#define __arm_vmladavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), 
__ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vmladavq_p(p0,p1,p2) __arm_vmladavq_p(p0,p1,p2) +#define __arm_vmladavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vmladavaxq(p0,p1,p2) __arm_vmladavaxq(p0,p1,p2) +#define __arm_vmladavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaxq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaxq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaxq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) + +#define vmladavaq(p0,p1,p2) __arm_vmladavaq(p0,p1,p2) +#define __arm_vmladavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: 
__arm_vmladavaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) + +#define vminvq_p(p0,p1,p2) __arm_vminvq_p(p0,p1,p2) +#define __arm_vminvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vminvq_p_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vminvq_p_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vminvq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vminvq_p_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vminvq_p_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vminvq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vminavq_p(p0,p1,p2) __arm_vminavq_p(p0,p1,p2) +#define __arm_vminavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vminavq_p_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vminavq_p_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vminavq_p_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vmaxvq_p(p0,p1,p2) __arm_vmaxvq_p(p0,p1,p2) +#define __arm_vmaxvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_p_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_p_s16 
(__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_p_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_p_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vmaxavq_p(p0,p1,p2) __arm_vmaxavq_p(p0,p1,p2) +#define __arm_vmaxavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_p_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_p_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_p_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t), p2));}) + +#define vcmpltq_m(p0,p1,p2) __arm_vcmpltq_m(p0,p1,p2) +#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));}) + +#define vcmpleq_m(p0,p1,p2) __arm_vcmpleq_m(p0,p1,p2) +#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpleq_m_n_s16 
(__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));}) + +#define vcmphiq_m(p0,p1,p2) __arm_vcmphiq_m(p0,p1,p2) +#define __arm_vcmphiq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmphiq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmphiq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmphiq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \ + int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \ + int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \ + int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) + +#define vcmpgtq_m(p0,p1,p2) __arm_vcmpgtq_m(p0,p1,p2) +#define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \ + int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \ + int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \ + int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));}) + #endif /* MVE Floating point. 
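As an aside on how the _Generic tables above behave in practice, here is a minimal usage sketch (our own example, not taken from the patch; it assumes arm_mve.h with these definitions is included and MVE is enabled, e.g. -march=armv8.1-m.main+mve -mfloat-abi=hard):

#include <arm_mve.h>

uint8x16_t
select_scaled (uint8x16_t acc, uint8x16_t vals, uint8_t scale,
               int8x16_t a, int8x16_t b, mve_pred16_t p)
{
  /* (uint8x16_t, uint8x16_t, uint8_t) resolves to __arm_vmlaq_n_u8.  */
  uint8x16_t t = vmlaq (acc, vals, scale);
  /* (int8x16_t, int8x16_t) resolves to __arm_vcmpgtq_m_s8.  */
  mve_pred16_t q = vcmpgtq_m (a, b, p);
  /* (uint8x16_t, uint8x16_t) resolves to __arm_vpselq_u8.  */
  return vpselq (t, acc, q);
}

Each wrapper builds a pointer-to-array type from the __ARM_mve_typeid of its arguments, so the selection happens entirely at compile time and an unmatched argument combination produces a _Generic error rather than a silent conversion.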
*/ #ifdef __cplusplus diff --git a/gcc/config/arm/arm_mve_builtins.def b/gcc/config/arm/arm_mve_builtins.def index 3ac96305e38..25badfbca93 100644 --- a/gcc/config/arm/arm_mve_builtins.def +++ b/gcc/config/arm/arm_mve_builtins.def @@ -309,3 +309,88 @@ VAR2 (TERNOP_UNONE_UNONE_NONE_UNONE, vcvtaq_m_u, v8hi, v4si) VAR2 (TERNOP_NONE_NONE_NONE_UNONE, vcvtaq_m_s, v8hi, v4si) VAR3 (TERNOP_UNONE_UNONE_UNONE_IMM, vshlcq_vec_u, v16qi, v8hi, v4si) VAR3 (TERNOP_NONE_NONE_UNONE_IMM, vshlcq_vec_s, v16qi, v8hi, v4si) +VAR4 (TERNOP_UNONE_UNONE_UNONE_UNONE, vpselq_u, v16qi, v8hi, v4si, v2di) +VAR4 (TERNOP_NONE_NONE_NONE_UNONE, vpselq_s, v16qi, v8hi, v4si, v2di) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vrev64q_m_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vqrdmlashq_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vqrdmlahq_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vqdmlahq_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vmvnq_m_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vmlasq_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vmlaq_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vmladavq_p_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vmladavaq_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vminvq_p_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vmaxvq_p_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vdupq_m_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vcmpneq_m_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vcmpneq_m_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vcmphiq_m_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vcmphiq_m_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vcmpeqq_m_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vcmpeqq_m_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vcmpcsq_m_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vcmpcsq_m_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vclzq_m_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vaddvaq_p_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_IMM, vsriq_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_UNONE_IMM, vsliq_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_NONE_UNONE, vshlq_m_r_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_NONE_UNONE, vrshlq_m_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_NONE_UNONE, vqshlq_m_r_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_NONE_UNONE, vqrshlq_m_n_u, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_NONE_UNONE, vminavq_p_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_NONE_UNONE, vminaq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_NONE_UNONE, vmaxavq_p_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_UNONE_NONE_UNONE, vmaxaq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpneq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpneq_m_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpltq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpltq_m_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpleq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpleq_m_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpgtq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpgtq_m_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpgeq_m_s, v16qi, v8hi, v4si) +VAR3 
(TERNOP_UNONE_NONE_NONE_UNONE, vcmpgeq_m_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpeqq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpeqq_m_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vshlq_m_r_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vrshlq_m_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vrev64q_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vqshlq_m_r_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vqrshlq_m_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vqnegq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vqabsq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vnegq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vmvnq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vmlsdavxq_p_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vmlsdavq_p_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vmladavxq_p_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vmladavq_p_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vminvq_p_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vmaxvq_p_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vdupq_m_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vclzq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vclsq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vaddvaq_p_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_UNONE, vabsq_m_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqrdmlsdhxq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqrdmlsdhq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqrdmlashq_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqrdmlahq_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqrdmladhxq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqrdmladhq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqdmlsdhxq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqdmlsdhq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqdmlahq_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqdmladhxq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vqdmladhq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vmlsdavaxq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vmlsdavaq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vmlasq_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vmlaq_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vmladavaxq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_NONE, vmladavaq_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_IMM, vsriq_n_s, v16qi, v8hi, v4si) +VAR3 (TERNOP_NONE_NONE_NONE_IMM, vsliq_n_s, v16qi, v8hi, v4si) diff --git a/gcc/config/arm/constraints.md b/gcc/config/arm/constraints.md index cdf75ab3fc3..2641669d8e0 100644 --- a/gcc/config/arm/constraints.md +++ b/gcc/config/arm/constraints.md @@ -69,6 +69,16 @@ (and (match_code "const_int") (match_test "TARGET_HAVE_MVE && ival >= 1 && ival <= 8"))) +(define_constraint "Rc" + "@internal In Thumb-2 state a constant in range 0 to 15" + (and (match_code "const_int") + (match_test "TARGET_HAVE_MVE && ival >= 0 && ival <= 15"))) + +(define_constraint "Re" + "@internal In Thumb-2 state a constant in range 0 to 31" + (and (match_code "const_int") + (match_test "TARGET_HAVE_MVE && ival >= 0 && ival <= 31"))) + (define_constraint "Rf" "@internal In Thumb-2 state a 
constant in range 1 to 32" (and (match_code "const_int") diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md index 3cdb2e71cf0..b9985a031fa 100644 --- a/gcc/config/arm/mve.md +++ b/gcc/config/arm/mve.md @@ -89,7 +89,28 @@ VCVTAQ_M_S VCVTAQ_M_U VCVTQ_M_TO_F_S VCVTQ_M_TO_F_U VQRSHRNBQ_N_U VQRSHRNBQ_N_S VQRSHRUNBQ_N_S VRMLALDAVHAQ_S VABAVQ_S VABAVQ_U VSHLCQ_S VSHLCQ_U - VRMLALDAVHAQ_U]) + VRMLALDAVHAQ_U VABSQ_M_S VADDVAQ_P_S VADDVAQ_P_U + VCLSQ_M_S VCLZQ_M_S VCLZQ_M_U VCMPCSQ_M_N_U + VCMPCSQ_M_U VCMPEQQ_M_N_S VCMPEQQ_M_N_U VCMPEQQ_M_S + VCMPEQQ_M_U VCMPGEQ_M_N_S VCMPGEQ_M_S VCMPGTQ_M_N_S + VCMPGTQ_M_S VCMPHIQ_M_N_U VCMPHIQ_M_U VCMPLEQ_M_N_S + VCMPLEQ_M_S VCMPLTQ_M_N_S VCMPLTQ_M_S VCMPNEQ_M_N_S + VCMPNEQ_M_N_U VCMPNEQ_M_S VCMPNEQ_M_U VDUPQ_M_N_S + VDUPQ_M_N_U VDWDUPQ_N_U VDWDUPQ_WB_U VIWDUPQ_N_U + VIWDUPQ_WB_U VMAXAQ_M_S VMAXAVQ_P_S VMAXVQ_P_S + VMAXVQ_P_U VMINAQ_M_S VMINAVQ_P_S VMINVQ_P_S VMINVQ_P_U + VMLADAVAQ_S VMLADAVAQ_U VMLADAVQ_P_S VMLADAVQ_P_U + VMLADAVXQ_P_S VMLAQ_N_S VMLAQ_N_U VMLASQ_N_S VMLASQ_N_U + VMLSDAVQ_P_S VMLSDAVXQ_P_S VMVNQ_M_S VMVNQ_M_U + VNEGQ_M_S VPSELQ_S VPSELQ_U VQABSQ_M_S VQDMLAHQ_N_S + VQDMLAHQ_N_U VQNEGQ_M_S VQRDMLADHQ_S VQRDMLADHXQ_S + VQRDMLAHQ_N_S VQRDMLAHQ_N_U VQRDMLASHQ_N_S + VQRDMLASHQ_N_U VQRDMLSDHQ_S VQRDMLSDHXQ_S VQRSHLQ_M_N_S + VQRSHLQ_M_N_U VQSHLQ_M_R_S VQSHLQ_M_R_U VREV64Q_M_S + VREV64Q_M_U VRSHLQ_M_N_S VRSHLQ_M_N_U VSHLQ_M_R_S + VSHLQ_M_R_U VSLIQ_N_S VSLIQ_N_U VSRIQ_N_S VSRIQ_N_U + VQDMLSDHXQ_S VQDMLSDHQ_S VQDMLADHXQ_S VQDMLADHQ_S + VMLSDAVAXQ_S VMLSDAVAQ_S VMLADAVAXQ_S]) (define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF") (V8HF "V8HI") (V4SF "V4SI")]) @@ -155,7 +176,24 @@ (VCVTQ_M_TO_F_U "u") (VQRSHRNBQ_N_S "s") (VQRSHRNBQ_N_U "u") (VABAVQ_S "s") (VABAVQ_U "u") (VRMLALDAVHAQ_U "u") (VRMLALDAVHAQ_S "s") (VSHLCQ_S "s") - (VSHLCQ_U "u")]) + (VSHLCQ_U "u") (VADDVAQ_P_S "s") (VADDVAQ_P_U "u") + (VCLZQ_M_S "s") (VCLZQ_M_U "u") (VCMPEQQ_M_N_S "s") + (VCMPEQQ_M_N_U "u") (VCMPEQQ_M_S "s") (VCMPEQQ_M_U "u") + (VCMPNEQ_M_N_S "s") (VCMPNEQ_M_N_U "u") (VCMPNEQ_M_S "s") + (VCMPNEQ_M_U "u") (VDUPQ_M_N_S "s") (VDUPQ_M_N_U "u") + (VMAXVQ_P_S "s") (VMAXVQ_P_U "u") (VMINVQ_P_S "s") + (VMINVQ_P_U "u") (VMLADAVAQ_S "s") (VMLADAVAQ_U "u") + (VMLADAVQ_P_S "s") (VMLADAVQ_P_U "u") (VMLAQ_N_S "s") + (VMLAQ_N_U "u") (VMLASQ_N_S "s") (VMLASQ_N_U "u") + (VMVNQ_M_S "s") (VMVNQ_M_U "u") (VPSELQ_S "s") + (VPSELQ_U "u") (VQDMLAHQ_N_S "s") (VQDMLAHQ_N_U "u") + (VQRDMLAHQ_N_S "s") (VQRDMLAHQ_N_U "u") + (VQRDMLASHQ_N_S "s") (VQRDMLASHQ_N_U "u") + (VQRSHLQ_M_N_S "s") (VQRSHLQ_M_N_U "u") + (VQSHLQ_M_R_S "s") (VQSHLQ_M_R_U "u") (VSRIQ_N_S "s") + (VREV64Q_M_S "s") (VREV64Q_M_U "u") (VSRIQ_N_U "u") + (VRSHLQ_M_N_S "s") (VRSHLQ_M_N_U "u") (VSHLQ_M_R_S "s") + (VSHLQ_M_R_U "u") (VSLIQ_N_S "s") (VSLIQ_N_U "u")]) (define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32") (VCTP64Q "64") (VCTP8Q_M "8") (VCTP16Q_M "16") @@ -164,6 +202,9 @@ (V4SI "mve_imm_32")]) (define_mode_attr MVE_constraint2 [(V16QI "Rb") (V8HI "Rd") (V4SI "Rf")]) (define_mode_attr MVE_LANES [(V16QI "16") (V8HI "8") (V4SI "4")]) +(define_mode_attr MVE_constraint [ (V16QI "Ra") (V8HI "Rc") (V4SI "Re")]) +(define_mode_attr MVE_pred [ (V16QI "mve_imm_7") (V8HI "mve_imm_15") + (V4SI "mve_imm_31")]) (define_int_iterator VCVTQ_TO_F [VCVTQ_TO_F_S VCVTQ_TO_F_U]) (define_int_iterator VMVNQ_N [VMVNQ_N_U VMVNQ_N_S]) @@ -257,6 +298,31 @@ (define_int_iterator VABAVQ [VABAVQ_S VABAVQ_U]) (define_int_iterator VSHLCQ [VSHLCQ_S VSHLCQ_U]) (define_int_iterator VRMLALDAVHAQ [VRMLALDAVHAQ_S VRMLALDAVHAQ_U]) +(define_int_iterator 
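The new MVE_constraint and MVE_pred mode attributes pair each vector mode with an immediate range: V8HI uses the new Rc/mve_imm_15 (0 to 15) and V4SI the new Re/mve_imm_31 (0 to 31), with Ra/mve_imm_7 for V16QI. A hedged sketch of the user-visible effect, assuming these attributes guard shift-insert style immediates such as the vsliq_n shift count (which must be smaller than the element size):

#include <arm_mve.h>

uint16x8_t
sli_halfwords (uint16x8_t a, uint16x8_t b)
{
  /* 16-bit lanes: the immediate must lie in 0..15, the range the new
     "Rc" constraint accepts.  */
  return vsliq_n_u16 (a, b, 3);
}

uint32x4_t
sli_words (uint32x4_t a, uint32x4_t b)
{
  /* 32-bit lanes: the immediate range is 0..31 ("Re").  */
  return vsliq_n_u32 (a, b, 20);
}

Immediates outside those ranges do not satisfy the mve_imm_15/mve_imm_31 predicates.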
VADDVAQ_P [VADDVAQ_P_S VADDVAQ_P_U]) +(define_int_iterator VCLZQ_M [VCLZQ_M_S VCLZQ_M_U]) +(define_int_iterator VCMPEQQ_M_N [VCMPEQQ_M_N_S VCMPEQQ_M_N_U]) +(define_int_iterator VCMPEQQ_M [VCMPEQQ_M_S VCMPEQQ_M_U]) +(define_int_iterator VCMPNEQ_M_N [VCMPNEQ_M_N_S VCMPNEQ_M_N_U]) +(define_int_iterator VCMPNEQ_M [VCMPNEQ_M_S VCMPNEQ_M_U]) +(define_int_iterator VDUPQ_M_N [VDUPQ_M_N_S VDUPQ_M_N_U]) +(define_int_iterator VMAXVQ_P [VMAXVQ_P_S VMAXVQ_P_U]) +(define_int_iterator VMINVQ_P [VMINVQ_P_S VMINVQ_P_U]) +(define_int_iterator VMLADAVAQ [VMLADAVAQ_S VMLADAVAQ_U]) +(define_int_iterator VMLADAVQ_P [VMLADAVQ_P_S VMLADAVQ_P_U]) +(define_int_iterator VMLAQ_N [VMLAQ_N_S VMLAQ_N_U]) +(define_int_iterator VMLASQ_N [VMLASQ_N_S VMLASQ_N_U]) +(define_int_iterator VMVNQ_M [VMVNQ_M_S VMVNQ_M_U]) +(define_int_iterator VPSELQ [VPSELQ_S VPSELQ_U]) +(define_int_iterator VQDMLAHQ_N [VQDMLAHQ_N_S VQDMLAHQ_N_U]) +(define_int_iterator VQRDMLAHQ_N [VQRDMLAHQ_N_S VQRDMLAHQ_N_U]) +(define_int_iterator VQRDMLASHQ_N [VQRDMLASHQ_N_S VQRDMLASHQ_N_U]) +(define_int_iterator VQRSHLQ_M_N [VQRSHLQ_M_N_S VQRSHLQ_M_N_U]) +(define_int_iterator VQSHLQ_M_R [VQSHLQ_M_R_S VQSHLQ_M_R_U]) +(define_int_iterator VREV64Q_M [VREV64Q_M_S VREV64Q_M_U]) +(define_int_iterator VRSHLQ_M_N [VRSHLQ_M_N_S VRSHLQ_M_N_U]) +(define_int_iterator VSHLQ_M_R [VSHLQ_M_R_S VSHLQ_M_R_U]) +(define_int_iterator VSLIQ_N [VSLIQ_N_S VSLIQ_N_U]) +(define_int_iterator VSRIQ_N [VSRIQ_N_S VSRIQ_N_U]) (define_insn "*mve_mov" [(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w,w,r,w,Us") @@ -3209,7 +3275,7 @@ { rtx ignore_wb = gen_reg_rtx (SImode); emit_insn(gen_mve_vshlcq_(operands[0], ignore_wb, operands[1], - operands[2], operands[3])); + operands[2], operands[3])); DONE; }) @@ -3240,3 +3306,963 @@ VSHLCQ))] "TARGET_HAVE_MVE" "vshlc %q0, %1, %4") + +;; +;; [vabsq_m_s]) +;; +(define_insn "mve_vabsq_m_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VABSQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vabst.s%# %q0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vaddvaq_p_u, vaddvaq_p_s]) +;; +(define_insn "mve_vaddvaq_p_" + [ + (set (match_operand:SI 0 "s_register_operand" "=e") + (unspec:SI [(match_operand:SI 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VADDVAQ_P)) + ] + "TARGET_HAVE_MVE" + "vpst\;vaddvat.%# %0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vclsq_m_s]) +;; +(define_insn "mve_vclsq_m_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCLSQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vclst.s%# %q0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vclzq_m_s, vclzq_m_u]) +;; +(define_insn "mve_vclzq_m_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCLZQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vclzt.i%# %q0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpcsq_m_n_u]) +;; +(define_insn "mve_vcmpcsq_m_n_u" + [ 
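The predicated define_insns above share one shape: the output template is a vpst followed by the T-suffixed MVE instruction, and the length attribute of 8 covers the two 4-byte encodings. As a sketch of what ends up in one of these patterns (our example, assuming the polymorphic wrappers added to arm_mve.h earlier in this patch):

#include <arm_mve.h>

uint32_t
sum_selected (uint32_t acc, uint8x16_t v, mve_pred16_t p)
{
  /* Dispatches to __arm_vaddvaq_p_u8; the mve_vaddvaq_p pattern above
     then emits a vpst followed by vaddvat.u8.  */
  return vaddvaq_p (acc, v, p);
}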
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand: 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPCSQ_M_N_U)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.u%# cs, %q1, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpcsq_m_u]) +;; +(define_insn "mve_vcmpcsq_m_u" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPCSQ_M_U)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.u%# cs, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpeqq_m_n_u, vcmpeqq_m_n_s]) +;; +(define_insn "mve_vcmpeqq_m_n_" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand: 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPEQQ_M_N)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.i%# eq, %q1, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpeqq_m_u, vcmpeqq_m_s]) +;; +(define_insn "mve_vcmpeqq_m_" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPEQQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.i%# eq, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpgeq_m_n_s]) +;; +(define_insn "mve_vcmpgeq_m_n_s" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand: 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPGEQ_M_N_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.s%# ge, %q1, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpgeq_m_s]) +;; +(define_insn "mve_vcmpgeq_m_s" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPGEQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.s%# ge, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpgtq_m_n_s]) +;; +(define_insn "mve_vcmpgtq_m_n_s" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand: 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPGTQ_M_N_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.s%# gt, %q1, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpgtq_m_s]) +;; +(define_insn "mve_vcmpgtq_m_s" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPGTQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.s%# gt, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmphiq_m_n_u]) +;; +(define_insn "mve_vcmphiq_m_n_u" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand: 2 "s_register_operand" "r") + 
(match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPHIQ_M_N_U)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.u%# hi, %q1, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmphiq_m_u]) +;; +(define_insn "mve_vcmphiq_m_u" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPHIQ_M_U)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.u%# hi, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpleq_m_n_s]) +;; +(define_insn "mve_vcmpleq_m_n_s" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand: 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPLEQ_M_N_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.s%# le, %q1, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpleq_m_s]) +;; +(define_insn "mve_vcmpleq_m_s" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPLEQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.s%# le, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpltq_m_n_s]) +;; +(define_insn "mve_vcmpltq_m_n_s" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand: 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPLTQ_M_N_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.s%# lt, %q1, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpltq_m_s]) +;; +(define_insn "mve_vcmpltq_m_s" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPLTQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.s%# lt, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpneq_m_n_u, vcmpneq_m_n_s]) +;; +(define_insn "mve_vcmpneq_m_n_" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand: 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPNEQ_M_N)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.i%# ne, %q1, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vcmpneq_m_s, vcmpneq_m_u]) +;; +(define_insn "mve_vcmpneq_m_" + [ + (set (match_operand:HI 0 "vpr_register_operand" "=Up") + (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VCMPNEQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vcmpt.i%# ne, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vdupq_m_n_s, vdupq_m_n_u]) +;; +(define_insn "mve_vdupq_m_n_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand: 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VDUPQ_M_N)) + ] + "TARGET_HAVE_MVE" + "vpst\;vdupt.%# %q0, %2" + [(set_attr "type" "mve_move") + (set_attr 
"length""8")]) + +;; +;; [vmaxaq_m_s]) +;; +(define_insn "mve_vmaxaq_m_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VMAXAQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vmaxat.s%# %q0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vmaxavq_p_s]) +;; +(define_insn "mve_vmaxavq_p_s" + [ + (set (match_operand: 0 "s_register_operand" "=r") + (unspec: [(match_operand: 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VMAXAVQ_P_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vmaxavt.s%# %0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vmaxvq_p_u, vmaxvq_p_s]) +;; +(define_insn "mve_vmaxvq_p_" + [ + (set (match_operand: 0 "s_register_operand" "=r") + (unspec: [(match_operand: 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VMAXVQ_P)) + ] + "TARGET_HAVE_MVE" + "vpst\;vmaxvt.%# %0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vminaq_m_s]) +;; +(define_insn "mve_vminaq_m_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VMINAQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vminat.s%# %q0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vminavq_p_s]) +;; +(define_insn "mve_vminavq_p_s" + [ + (set (match_operand: 0 "s_register_operand" "=r") + (unspec: [(match_operand: 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VMINAVQ_P_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vminavt.s%# %0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vminvq_p_s, vminvq_p_u]) +;; +(define_insn "mve_vminvq_p_" + [ + (set (match_operand: 0 "s_register_operand" "=r") + (unspec: [(match_operand: 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VMINVQ_P)) + ] + "TARGET_HAVE_MVE" + "vpst\;vminvt.%#\t%0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vmladavaq_u, vmladavaq_s]) +;; +(define_insn "mve_vmladavaq_" + [ + (set (match_operand:SI 0 "s_register_operand" "=e") + (unspec:SI [(match_operand:SI 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VMLADAVAQ)) + ] + "TARGET_HAVE_MVE" + "vmladava.%# %0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vmladavq_p_u, vmladavq_p_s]) +;; +(define_insn "mve_vmladavq_p_" + [ + (set (match_operand:SI 0 "s_register_operand" "=e") + (unspec:SI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VMLADAVQ_P)) + ] + "TARGET_HAVE_MVE" + "vpst\;vmladavt.%#\t%0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vmladavxq_p_s]) +;; +(define_insn "mve_vmladavxq_p_s" + [ + (set (match_operand:SI 0 "s_register_operand" "=e") + (unspec:SI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + 
(match_operand:HI 3 "vpr_register_operand" "Up")] + VMLADAVXQ_P_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vmladavxt.s%#\t%0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vmlaq_n_u, vmlaq_n_s]) +;; +(define_insn "mve_vmlaq_n_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand: 3 "s_register_operand" "r")] + VMLAQ_N)) + ] + "TARGET_HAVE_MVE" + "vmla.%#\t%q0, %q2, %3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vmlasq_n_u, vmlasq_n_s]) +;; +(define_insn "mve_vmlasq_n_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand: 3 "s_register_operand" "r")] + VMLASQ_N)) + ] + "TARGET_HAVE_MVE" + "vmlas.%# %q0, %q2, %3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vmlsdavq_p_s]) +;; +(define_insn "mve_vmlsdavq_p_s" + [ + (set (match_operand:SI 0 "s_register_operand" "=e") + (unspec:SI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VMLSDAVQ_P_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vmlsdavt.s%# %0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vmlsdavxq_p_s]) +;; +(define_insn "mve_vmlsdavxq_p_s" + [ + (set (match_operand:SI 0 "s_register_operand" "=e") + (unspec:SI [(match_operand:MVE_2 1 "s_register_operand" "w") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VMLSDAVXQ_P_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vmlsdavxt.s%# %0, %q1, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vmvnq_m_s, vmvnq_m_u]) +;; +(define_insn "mve_vmvnq_m_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VMVNQ_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vmvnt %q0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vnegq_m_s]) +;; +(define_insn "mve_vnegq_m_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VNEGQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vnegt.s%#\t%q0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vpselq_u, vpselq_s]) +;; +(define_insn "mve_vpselq_" + [ + (set (match_operand:MVE_1 0 "s_register_operand" "=w") + (unspec:MVE_1 [(match_operand:MVE_1 1 "s_register_operand" "w") + (match_operand:MVE_1 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VPSELQ)) + ] + "TARGET_HAVE_MVE" + "vpsel %q0, %q1, %q2" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqabsq_m_s]) +;; +(define_insn "mve_vqabsq_m_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VQABSQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vqabst.s%#\t%q0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vqdmlahq_n_s, vqdmlahq_n_u]) +;; +(define_insn "mve_vqdmlahq_n_" + [ + (set 
(match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand: 3 "s_register_operand" "r")] + VQDMLAHQ_N)) + ] + "TARGET_HAVE_MVE" + "vqdmlah.s%#\t%q0, %q2, %3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqnegq_m_s]) +;; +(define_insn "mve_vqnegq_m_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VQNEGQ_M_S)) + ] + "TARGET_HAVE_MVE" + "vpst\;vqnegt.s%# %q0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vqrdmladhq_s]) +;; +(define_insn "mve_vqrdmladhq_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VQRDMLADHQ_S)) + ] + "TARGET_HAVE_MVE" + "vqrdmladh.s%#\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqrdmladhxq_s]) +;; +(define_insn "mve_vqrdmladhxq_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VQRDMLADHXQ_S)) + ] + "TARGET_HAVE_MVE" + "vqrdmladhx.s%#\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqrdmlahq_n_s, vqrdmlahq_n_u]) +;; +(define_insn "mve_vqrdmlahq_n_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand: 3 "s_register_operand" "r")] + VQRDMLAHQ_N)) + ] + "TARGET_HAVE_MVE" + "vqrdmlah.s%#\t%q0, %q2, %3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqrdmlashq_n_s, vqrdmlashq_n_u]) +;; +(define_insn "mve_vqrdmlashq_n_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand: 3 "s_register_operand" "r")] + VQRDMLASHQ_N)) + ] + "TARGET_HAVE_MVE" + "vqrdmlash.s%#\t%q0, %q2, %3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqrdmlsdhq_s]) +;; +(define_insn "mve_vqrdmlsdhq_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VQRDMLSDHQ_S)) + ] + "TARGET_HAVE_MVE" + "vqrdmlsdh.s%#\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqrdmlsdhxq_s]) +;; +(define_insn "mve_vqrdmlsdhxq_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VQRDMLSDHXQ_S)) + ] + "TARGET_HAVE_MVE" + "vqrdmlsdhx.s%#\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqrshlq_m_n_s, vqrshlq_m_n_u]) +;; +(define_insn "mve_vqrshlq_m_n_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VQRSHLQ_M_N)) + ] + "TARGET_HAVE_MVE" + "vpst\;vqrshlt.%# %q0, 
%2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vqshlq_m_r_u, vqshlq_m_r_s]) +;; +(define_insn "mve_vqshlq_m_r_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VQSHLQ_M_R)) + ] + "TARGET_HAVE_MVE" + "vpst\;vqshlt.%#\t%q0, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vrev64q_m_u, vrev64q_m_s]) +;; +(define_insn "mve_vrev64q_m_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VREV64Q_M)) + ] + "TARGET_HAVE_MVE" + "vpst\;vrev64t.%#\t%q0, %q2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vrshlq_m_n_s, vrshlq_m_n_u]) +;; +(define_insn "mve_vrshlq_m_n_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VRSHLQ_M_N)) + ] + "TARGET_HAVE_MVE" + "vpst\;vrshlt.%#\t%q0, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vshlq_m_r_u, vshlq_m_r_s]) +;; +(define_insn "mve_vshlq_m_r_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSHLQ_M_R)) + ] + "TARGET_HAVE_MVE" + "vpst\;vshlt.%#\t%q0, %2" + [(set_attr "type" "mve_move") + (set_attr "length""8")]) + +;; +;; [vsliq_n_u, vsliq_n_s]) +;; +(define_insn "mve_vsliq_n_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:SI 3 "" "")] + VSLIQ_N)) + ] + "TARGET_HAVE_MVE" + "vsli.%#\t%q0, %q2, %3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vsriq_n_u, vsriq_n_s]) +;; +(define_insn "mve_vsriq_n_" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:SI 3 "mve_imm_selective_upto_8" "Rg")] + VSRIQ_N)) + ] + "TARGET_HAVE_MVE" + "vsri.%#\t%q0, %q2, %3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqdmlsdhxq_s]) +;; +(define_insn "mve_vqdmlsdhxq_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VQDMLSDHXQ_S)) + ] + "TARGET_HAVE_MVE" + "vqdmlsdhx.s%#\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqdmlsdhq_s]) +;; +(define_insn "mve_vqdmlsdhq_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VQDMLSDHQ_S)) + ] + "TARGET_HAVE_MVE" + "vqdmlsdh.s%#\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqdmladhxq_s]) +;; +(define_insn "mve_vqdmladhxq_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + 
(match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VQDMLADHXQ_S)) + ] + "TARGET_HAVE_MVE" + "vqdmladhx.s%#\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vqdmladhq_s]) +;; +(define_insn "mve_vqdmladhq_s" + [ + (set (match_operand:MVE_2 0 "s_register_operand" "=w") + (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VQDMLADHQ_S)) + ] + "TARGET_HAVE_MVE" + "vqdmladh.s%#\t%q0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vmlsdavaxq_s]) +;; +(define_insn "mve_vmlsdavaxq_s" + [ + (set (match_operand:SI 0 "s_register_operand" "=e") + (unspec:SI [(match_operand:SI 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VMLSDAVAXQ_S)) + ] + "TARGET_HAVE_MVE" + "vmlsdavax.s%#\t%0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vmlsdavaq_s]) +;; +(define_insn "mve_vmlsdavaq_s" + [ + (set (match_operand:SI 0 "s_register_operand" "=e") + (unspec:SI [(match_operand:SI 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VMLSDAVAQ_S)) + ] + "TARGET_HAVE_MVE" + "vmlsdava.s%#\t%0, %q2, %q3" + [(set_attr "type" "mve_move") +]) + +;; +;; [vmladavaxq_s]) +;; +(define_insn "mve_vmladavaxq_s" + [ + (set (match_operand:SI 0 "s_register_operand" "=e") + (unspec:SI [(match_operand:SI 1 "s_register_operand" "0") + (match_operand:MVE_2 2 "s_register_operand" "w") + (match_operand:MVE_2 3 "s_register_operand" "w")] + VMLADAVAXQ_S)) + ] + "TARGET_HAVE_MVE" + "vmladavax.s%#\t%0, %q2, %q3" + [(set_attr "type" "mve_move") +]) diff --git a/gcc/config/arm/predicates.md b/gcc/config/arm/predicates.md index 9c9a84b0f65..2b65e646b05 100644 --- a/gcc/config/arm/predicates.md +++ b/gcc/config/arm/predicates.md @@ -43,6 +43,14 @@ (define_predicate "mve_imm_8" (match_test "satisfies_constraint_Rb (op)")) +;; True for immediates in the range of 0 to 15 for MVE. +(define_predicate "mve_imm_15" + (match_test "satisfies_constraint_Rc (op)")) + +;; True for immediates in the range of 0 to 31 for MVE. +(define_predicate "mve_imm_31" + (match_test "satisfies_constraint_Re (op)")) + ;; True for immediates in the range of 1 to 32 for MVE. (define_predicate "mve_imm_32" (match_test "satisfies_constraint_Rf (op)")) diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index a91d7377888..6cae249eee8 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,265 @@ +2020-03-18 Andre Vieira + Mihail Ionescu + Srinath Parvathaneni + + * gcc.target/arm/mve/intrinsics/vabsq_m_s16.c: New test. + * gcc.target/arm/mve/intrinsics/vabsq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vabsq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddvaq_p_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddvaq_p_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddvaq_p_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddvaq_p_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddvaq_p_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddvaq_p_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vclsq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vclsq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vclsq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vclzq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vclzq_m_s32.c: Likewise. 
+ * gcc.target/arm/mve/intrinsics/vclzq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vclzq_m_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vclzq_m_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vclzq_m_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpcsq_m_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpeqq_m_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgeq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpgtq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmphiq_m_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmphiq_m_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmphiq_m_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpleq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpleq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpleq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpltq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpltq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpltq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u32.c: Likewise. 
+ * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vcmpneq_m_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vdupq_m_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vdupq_m_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vdupq_m_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vdupq_m_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vdupq_m_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vdupq_m_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxaq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxaq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxaq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxavq_p_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxavq_p_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxavq_p_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxvq_p_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxvq_p_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxvq_p_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxvq_p_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxvq_p_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmaxvq_p_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminaq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminaq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminaq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminavq_p_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminavq_p_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminavq_p_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminvq_p_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminvq_p_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminvq_p_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminvq_p_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminvq_p_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vminvq_p_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavaq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavaq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavaq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavaq_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavaq_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavaq_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavaxq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavaxq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavaxq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavq_p_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavq_p_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavq_p_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavq_p_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavq_p_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavq_p_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavxq_p_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavxq_p_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmladavxq_p_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlaq_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlaq_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlaq_n_s8.c: Likewise. 
+ * gcc.target/arm/mve/intrinsics/vmlaq_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlaq_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlaq_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlasq_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlasq_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlasq_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlasq_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlasq_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlasq_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavaq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavaq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavaq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavaxq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavaxq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavaxq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavq_p_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavq_p_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavq_p_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmvnq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmvnq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmvnq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmvnq_m_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmvnq_m_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vmvnq_m_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vnegq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vnegq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vnegq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vpselq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vpselq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vpselq_s64.c: Likewise. + * gcc.target/arm/mve/intrinsics/vpselq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vpselq_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vpselq_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vpselq_u64.c: Likewise. + * gcc.target/arm/mve/intrinsics/vpselq_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqabsq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqabsq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqabsq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmladhq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmladhq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmladhq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmladhxq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmladhxq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmladhxq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlahq_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlahq_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlahq_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlahq_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlsdhq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlsdhq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlsdhq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s8.c: Likewise. 
+ * gcc.target/arm/mve/intrinsics/vqnegq_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqnegq_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqnegq_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmladhq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmladhq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmladhq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmladhxq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqshlq_m_r_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqshlq_m_r_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqshlq_m_r_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqshlq_m_r_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqshlq_m_r_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vqshlq_m_r_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrev64q_m_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrev64q_m_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrev64q_m_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrev64q_m_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrev64q_m_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrev64q_m_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrshlq_m_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vrshlq_m_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vshlq_m_r_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vshlq_m_r_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vshlq_m_r_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vshlq_m_r_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vshlq_m_r_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vshlq_m_r_u8.c: Likewise. 
+ * gcc.target/arm/mve/intrinsics/vsliq_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsliq_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsliq_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsliq_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsliq_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsliq_n_u8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsriq_n_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsriq_n_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsriq_n_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsriq_n_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsriq_n_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vsriq_n_u8.c: Likewise. + 2020-03-18 David Malcolm PR analyzer/94047 diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s16.c new file mode 100644 index 00000000000..4c07ad253f0 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vabsq_m_s16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vabst.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vabsq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s32.c new file mode 100644 index 00000000000..0566786a857 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vabsq_m_s32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vabst.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vabsq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s8.c new file mode 100644 index 00000000000..f16bf7e8e47 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vabsq_m_s8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vabst.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vabsq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s16.c new file mode 100644 index 00000000000..6799c80ecc3 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { 
dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int16x8_t b, mve_pred16_t p) +{ + return vaddvaq_p_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.s16" } } */ + +int32_t +foo1 (int32_t a, int16x8_t b, mve_pred16_t p) +{ + return vaddvaq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s32.c new file mode 100644 index 00000000000..fde294286a2 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int32x4_t b, mve_pred16_t p) +{ + return vaddvaq_p_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.s32" } } */ + +int32_t +foo1 (int32_t a, int32x4_t b, mve_pred16_t p) +{ + return vaddvaq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s8.c new file mode 100644 index 00000000000..53a6fe0c967 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int8x16_t b, mve_pred16_t p) +{ + return vaddvaq_p_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.s8" } } */ + +int32_t +foo1 (int32_t a, int8x16_t b, mve_pred16_t p) +{ + return vaddvaq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u16.c new file mode 100644 index 00000000000..9dc39da1cc6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32_t a, uint16x8_t b, mve_pred16_t p) +{ + return vaddvaq_p_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.u16" } } */ + +uint32_t +foo1 (uint32_t a, uint16x8_t b, mve_pred16_t p) +{ + return vaddvaq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.u16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u32.c new file mode 100644 index 00000000000..445accb5799 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32_t a, uint32x4_t b, mve_pred16_t p) +{ + return vaddvaq_p_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.u32" } } */ + +uint32_t +foo1 (uint32_t a, uint32x4_t b, mve_pred16_t p) +{ + return vaddvaq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.u32" } } */ diff --git 
a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u8.c new file mode 100644 index 00000000000..774ee9f5b90 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_p_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32_t a, uint8x16_t b, mve_pred16_t p) +{ + return vaddvaq_p_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.u8" } } */ + +uint32_t +foo1 (uint32_t a, uint8x16_t b, mve_pred16_t p) +{ + return vaddvaq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vaddvat.u8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s16.c new file mode 100644 index 00000000000..8277577971a --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vclsq_m_s16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vclst.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vclsq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s32.c new file mode 100644 index 00000000000..88b94db536c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vclsq_m_s32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vclst.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vclsq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s8.c new file mode 100644 index 00000000000..2fbc10c211e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vclsq_m_s8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vclst.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vclsq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s16.c new file mode 100644 index 00000000000..6f1acc4c2ad --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ 
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vclzq_m_s16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vclzt.i16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vclzq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s32.c new file mode 100644 index 00000000000..d5f6f19c28f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vclzq_m_s32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vclzt.i32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vclzq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s8.c new file mode 100644 index 00000000000..db7df73d271 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vclzq_m_s8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vclzt.i8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vclzq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u16.c new file mode 100644 index 00000000000..a59409a1aef --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p) +{ + return vclzq_m_u16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vclzt.i16" } } */ + +uint16x8_t +foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p) +{ + return vclzq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u32.c new file mode 100644 index 00000000000..02906421d39 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, 
mve_pred16_t p) +{ + return vclzq_m_u32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vclzt.i32" } } */ + +uint32x4_t +foo1 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p) +{ + return vclzq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u8.c new file mode 100644 index 00000000000..ee294336824 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_m_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p) +{ + return vclzq_m_u8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vclzt.i8" } } */ + +uint8x16_t +foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p) +{ + return vclzq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c new file mode 100644 index 00000000000..8682715473b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint16x8_t a, uint16_t b, mve_pred16_t p) +{ + return vcmpcsq_m_n_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u16" } } */ + +mve_pred16_t +foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p) +{ + return vcmpcsq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c new file mode 100644 index 00000000000..9909279dfd3 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint32x4_t a, uint32_t b, mve_pred16_t p) +{ + return vcmpcsq_m_n_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u32" } } */ + +mve_pred16_t +foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p) +{ + return vcmpcsq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c new file mode 100644 index 00000000000..0350934e65f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint8x16_t a, uint8_t b, mve_pred16_t p) +{ + return vcmpcsq_m_n_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u8" } } */ + +mve_pred16_t +foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p) +{ 
+ return vcmpcsq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u16.c new file mode 100644 index 00000000000..85accf7f311 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vcmpcsq_m_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u16" } } */ + +mve_pred16_t +foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vcmpcsq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u32.c new file mode 100644 index 00000000000..9018c7118d0 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vcmpcsq_m_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u32" } } */ + +mve_pred16_t +foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vcmpcsq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u8.c new file mode 100644 index 00000000000..9f73e6f24ad --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_m_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vcmpcsq_m_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u8" } } */ + +mve_pred16_t +foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vcmpcsq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s16.c new file mode 100644 index 00000000000..751b95e7379 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpeqq_m_n_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s32.c new file mode 100644 index 
00000000000..b26993b5d38 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpeqq_m_n_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s8.c new file mode 100644 index 00000000000..105002b89d8 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpeqq_m_n_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u16.c new file mode 100644 index 00000000000..97d5cfb2b41 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint16x8_t a, uint16_t b, mve_pred16_t p) +{ + return vcmpeqq_m_n_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i16" } } */ + +mve_pred16_t +foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u32.c new file mode 100644 index 00000000000..aa99be6a604 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint32x4_t a, uint32_t b, mve_pred16_t p) +{ + return vcmpeqq_m_n_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i32" } } */ + +mve_pred16_t +foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u8.c new file mode 100644 index 00000000000..f61acc69966 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_n_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { 
dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint8x16_t a, uint8_t b, mve_pred16_t p) +{ + return vcmpeqq_m_n_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i8" } } */ + +mve_pred16_t +foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s16.c new file mode 100644 index 00000000000..c0277445444 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpeqq_m_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s32.c new file mode 100644 index 00000000000..0eab804324c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpeqq_m_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s8.c new file mode 100644 index 00000000000..68300400e7c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpeqq_m_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u16.c new file mode 100644 index 00000000000..3484f3b48de --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vcmpeqq_m_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i16" } } */ + +mve_pred16_t 
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u32.c new file mode 100644 index 00000000000..efec6cf916d --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vcmpeqq_m_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i32" } } */ + +mve_pred16_t +foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u8.c new file mode 100644 index 00000000000..34942fc3e41 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vcmpeqq_m_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i8" } } */ + +mve_pred16_t +foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vcmpeqq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c new file mode 100644 index 00000000000..f755e770c8c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpgeq_m_n_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpgeq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c new file mode 100644 index 00000000000..a12de372f2f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpgeq_m_n_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpgeq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c 
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c new file mode 100644 index 00000000000..dacc7bb0cc6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpgeq_m_n_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpgeq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s16.c new file mode 100644 index 00000000000..9763eea7956 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpgeq_m_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpgeq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s32.c new file mode 100644 index 00000000000..0fd2c6105c0 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpgeq_m_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpgeq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s8.c new file mode 100644 index 00000000000..589d9c4e40e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpgeq_m_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpgeq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s16.c new file mode 100644 index 00000000000..49462ea68b6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target 
arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpgtq_m_n_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpgtq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s32.c new file mode 100644 index 00000000000..d60110017e8 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpgtq_m_n_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpgtq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s8.c new file mode 100644 index 00000000000..0bf9c59a092 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpgtq_m_n_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpgtq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s16.c new file mode 100644 index 00000000000..4f10cf6f930 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpgtq_m_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpgtq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s32.c new file mode 100644 index 00000000000..600dbc15192 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpgtq_m_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } 
*/ +/* { dg-final { scan-assembler "vcmpt.s32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpgtq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s8.c new file mode 100644 index 00000000000..8ff848d19a4 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpgtq_m_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpgtq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u16.c new file mode 100644 index 00000000000..428d3e54ef2 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint16x8_t a, uint16_t b, mve_pred16_t p) +{ + return vcmphiq_m_n_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u16" } } */ + +mve_pred16_t +foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p) +{ + return vcmphiq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u32.c new file mode 100644 index 00000000000..c5d5b2b0bd9 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint32x4_t a, uint32_t b, mve_pred16_t p) +{ + return vcmphiq_m_n_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u32" } } */ + +mve_pred16_t +foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p) +{ + return vcmphiq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u8.c new file mode 100644 index 00000000000..34dba3a33ba --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_n_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint8x16_t a, uint8_t b, mve_pred16_t p) +{ + return vcmphiq_m_n_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u8" } } */ + +mve_pred16_t +foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p) +{ + return vcmphiq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git 
a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u16.c new file mode 100644 index 00000000000..8bf9bfedb6e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vcmphiq_m_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u16" } } */ + +mve_pred16_t +foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vcmphiq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u32.c new file mode 100644 index 00000000000..3d1a35ad957 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vcmphiq_m_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u32" } } */ + +mve_pred16_t +foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vcmphiq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u8.c new file mode 100644 index 00000000000..0c66a82e120 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_m_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vcmphiq_m_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.u8" } } */ + +mve_pred16_t +foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vcmphiq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s16.c new file mode 100644 index 00000000000..e5e48fbbd1b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpleq_m_n_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpleq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s32.c new file mode 100644 index 00000000000..b18a1e6138e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s32.c 
@@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpleq_m_n_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpleq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s8.c new file mode 100644 index 00000000000..a846f43eff0 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_n_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpleq_m_n_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpleq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s16.c new file mode 100644 index 00000000000..2f2a8d4dec7 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpleq_m_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpleq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s32.c new file mode 100644 index 00000000000..67eb5b0a732 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpleq_m_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpleq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s8.c new file mode 100644 index 00000000000..c0e3c3f337d --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return 
vcmpleq_m_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpleq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s16.c new file mode 100644 index 00000000000..f90a4aba7b1 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpltq_m_n_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpltq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s32.c new file mode 100644 index 00000000000..e199f652b1f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpltq_m_n_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpltq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s8.c new file mode 100644 index 00000000000..833383052b4 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_n_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpltq_m_n_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpltq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s16.c new file mode 100644 index 00000000000..ea2696fc259 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpltq_m_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpltq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } 
} */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s32.c new file mode 100644 index 00000000000..a68225915f0 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpltq_m_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpltq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s8.c new file mode 100644 index 00000000000..3d4ab7702d1 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpltq_m_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.s8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpltq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f16.c index 0e51876955f..50cacad1899 100644 --- a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f16.c +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f16.c @@ -16,7 +16,7 @@ foo (float16x8_t a, float16_t b) mve_pred16_t foo1 (float16x8_t a, float16_t b) { - return vcmpltq_n (a, b); + return vcmpltq (a, b); } /* { dg-final { scan-assembler "vcmp.f16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f32.c index 5f7cf8a5204..dc6578742d5 100644 --- a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f32.c +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_f32.c @@ -16,7 +16,7 @@ foo (float32x4_t a, float32_t b) mve_pred16_t foo1 (float32x4_t a, float32_t b) { - return vcmpltq_n (a, b); + return vcmpltq (a, b); } /* { dg-final { scan-assembler "vcmp.f32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s16.c new file mode 100644 index 00000000000..832fec2510a --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpneq_m_n_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s32.c 
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s32.c new file mode 100644 index 00000000000..c351cb9f235 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpneq_m_n_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s8.c new file mode 100644 index 00000000000..40e38e8b036 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpneq_m_n_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u16.c new file mode 100644 index 00000000000..a287f33660f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint16x8_t a, uint16_t b, mve_pred16_t p) +{ + return vcmpneq_m_n_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i16" } } */ + +mve_pred16_t +foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u32.c new file mode 100644 index 00000000000..cb81ec4d1ca --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint32x4_t a, uint32_t b, mve_pred16_t p) +{ + return vcmpneq_m_n_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i32" } } */ + +mve_pred16_t +foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u8.c new file mode 100644 index 00000000000..21ed443a47d --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_n_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { 
dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint8x16_t a, uint8_t b, mve_pred16_t p) +{ + return vcmpneq_m_n_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i8" } } */ + +mve_pred16_t +foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s16.c new file mode 100644 index 00000000000..e98c854ab23 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpneq_m_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i16" } } */ + +mve_pred16_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s32.c new file mode 100644 index 00000000000..240dd6320b6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpneq_m_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i32" } } */ + +mve_pred16_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s8.c new file mode 100644 index 00000000000..a941bac878f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpneq_m_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i8" } } */ + +mve_pred16_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u16.c new file mode 100644 index 00000000000..1f4bdb814d0 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vcmpneq_m_u16 (a, b, p); +} + +/* { dg-final { 
scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i16" } } */ + +mve_pred16_t +foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u32.c new file mode 100644 index 00000000000..eaeb6a60591 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vcmpneq_m_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i32" } } */ + +mve_pred16_t +foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u8.c new file mode 100644 index 00000000000..4a31aa3bcd5 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_m_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +mve_pred16_t +foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vcmpneq_m_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vcmpt.i8" } } */ + +mve_pred16_t +foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vcmpneq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s16.c new file mode 100644 index 00000000000..d24d63676f5 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16_t a, mve_pred16_t p) +{ + return vdupq_m_n_s16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vdupt.16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16_t a, mve_pred16_t p) +{ + return vdupq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s32.c new file mode 100644 index 00000000000..d685fcf88a6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32_t a, mve_pred16_t p) +{ + return vdupq_m_n_s32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vdupt.32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32_t a, mve_pred16_t p) +{ + return vdupq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git 
a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s8.c new file mode 100644 index 00000000000..28fb04821ed --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8_t a, mve_pred16_t p) +{ + return vdupq_m_n_s8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vdupt.8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8_t a, mve_pred16_t p) +{ + return vdupq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u16.c new file mode 100644 index 00000000000..e0c101ab56f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t inactive, uint16_t a, mve_pred16_t p) +{ + return vdupq_m_n_u16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vdupt.16" } } */ + +uint16x8_t +foo1 (uint16x8_t inactive, uint16_t a, mve_pred16_t p) +{ + return vdupq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u32.c new file mode 100644 index 00000000000..b1f0ef51bcd --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32_t a, mve_pred16_t p) +{ + return vdupq_m_n_u32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vdupt.32" } } */ + +uint32x4_t +foo1 (uint32x4_t inactive, uint32_t a, mve_pred16_t p) +{ + return vdupq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u8.c new file mode 100644 index 00000000000..be23f15ac19 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_m_n_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t inactive, uint8_t a, mve_pred16_t p) +{ + return vdupq_m_n_u8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vdupt.8" } } */ + +uint8x16_t +foo1 (uint8x16_t inactive, uint8_t a, mve_pred16_t p) +{ + return vdupq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s16.c new file mode 100644 index 00000000000..6f967e012ce --- /dev/null +++ 
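
vdupq_m_n keeps the inactive argument in lanes whose predicate bit is clear and writes the scalar into the rest, so a compare result can drive a selective overwrite. A minimal sketch follows, illustrative only and not part of the patch; it assumes the unpredicated vcmpeqq_n_u8 from an earlier patch in this series is already available in arm_mve.h.

#include "arm_mve.h"

/* Replace every lane of V equal to OLDVAL with NEWVAL, leaving the other
   lanes untouched: an unpredicated compare builds the mask, then the
   predicated duplicate (VPST/VDUPT.8) merges the new value in.  */
uint8x16_t
replace_matching_u8 (uint8x16_t v, uint8_t oldval, uint8_t newval)
{
  mve_pred16_t hit = vcmpeqq_n_u8 (v, oldval);  /* assumed from an earlier patch */
  return vdupq_m_n_u8 (v, newval, hit);
}
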
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vmaxaq_m_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vmaxat.s16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vmaxaq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s32.c new file mode 100644 index 00000000000..ef2daa765d4 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vmaxaq_m_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vmaxat.s32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vmaxaq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s8.c new file mode 100644 index 00000000000..a6ef4f9d3d2 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vmaxaq_m_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vmaxat.s8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vmaxaq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s16.c new file mode 100644 index 00000000000..d4996c30ee6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16_t +foo (uint16_t a, int16x8_t b, mve_pred16_t p) +{ + return vmaxavq_p_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxavt.s16" } } */ + +uint16_t +foo1 (uint16_t a, int16x8_t b, mve_pred16_t p) +{ + return vmaxavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxavt.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s32.c new file mode 100644 index 00000000000..3d45852586d --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32_t a, int32x4_t b, mve_pred16_t p) +{ + return 
vmaxavq_p_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxavt.s32" } } */ + +uint32_t +foo1 (uint32_t a, int32x4_t b, mve_pred16_t p) +{ + return vmaxavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxavt.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s8.c new file mode 100644 index 00000000000..cc65137240d --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_p_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8_t +foo (uint8_t a, int8x16_t b, mve_pred16_t p) +{ + return vmaxavq_p_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxavt.s8" } } */ + +uint8_t +foo1 (uint8_t a, int8x16_t b, mve_pred16_t p) +{ + return vmaxavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxavt.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s16.c new file mode 100644 index 00000000000..4c1e2ceb6f9 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16_t +foo (int16_t a, int16x8_t b, mve_pred16_t p) +{ + return vmaxvq_p_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.s16" } } */ + +int16_t +foo1 (int16_t a, int16x8_t b, mve_pred16_t p) +{ + return vmaxvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s32.c new file mode 100644 index 00000000000..2ed9fcfab83 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int32x4_t b, mve_pred16_t p) +{ + return vmaxvq_p_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.s32" } } */ + +int32_t +foo1 (int32_t a, int32x4_t b, mve_pred16_t p) +{ + return vmaxvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s8.c new file mode 100644 index 00000000000..25c1ef09927 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8_t +foo (int8_t a, int8x16_t b, mve_pred16_t p) +{ + return vmaxvq_p_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.s8" } } */ + +int8_t +foo1 (int8_t a, int8x16_t b, mve_pred16_t p) +{ + return vmaxvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u16.c new file mode 100644 index 00000000000..35390363811 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u16.c @@ -0,0 +1,22 @@ 
+/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16_t +foo (uint16_t a, uint16x8_t b, mve_pred16_t p) +{ + return vmaxvq_p_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.u16" } } */ + +uint16_t +foo1 (uint16_t a, uint16x8_t b, mve_pred16_t p) +{ + return vmaxvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.u16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u32.c new file mode 100644 index 00000000000..464c05a5a28 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32_t a, uint32x4_t b, mve_pred16_t p) +{ + return vmaxvq_p_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.u32" } } */ + +uint32_t +foo1 (uint32_t a, uint32x4_t b, mve_pred16_t p) +{ + return vmaxvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.u32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u8.c new file mode 100644 index 00000000000..26c6d24a6c6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_p_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8_t +foo (uint8_t a, uint8x16_t b, mve_pred16_t p) +{ + return vmaxvq_p_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.u8" } } */ + +uint8_t +foo1 (uint8_t a, uint8x16_t b, mve_pred16_t p) +{ + return vmaxvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmaxvt.u8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s16.c new file mode 100644 index 00000000000..8342abc67ea --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vminaq_m_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vminat.s16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vminaq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s32.c new file mode 100644 index 00000000000..5527d17bcd3 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vminaq_m_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vminat.s32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, int32x4_t b, mve_pred16_t 
p) +{ + return vminaq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s8.c new file mode 100644 index 00000000000..48d10224ff6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vminaq_m_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vminat.s8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vminaq_m (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s16.c new file mode 100644 index 00000000000..e754069e8e0 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16_t +foo (uint16_t a, int16x8_t b, mve_pred16_t p) +{ + return vminavq_p_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vminavt.s16" } } */ + +uint16_t +foo1 (uint16_t a, int16x8_t b, mve_pred16_t p) +{ + return vminavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vminavt.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s32.c new file mode 100644 index 00000000000..758547b7495 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32_t a, int32x4_t b, mve_pred16_t p) +{ + return vminavq_p_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vminavt.s32" } } */ + +uint32_t +foo1 (uint32_t a, int32x4_t b, mve_pred16_t p) +{ + return vminavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vminavt.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s8.c new file mode 100644 index 00000000000..d59b8e93d6f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_p_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8_t +foo (uint8_t a, int8x16_t b, mve_pred16_t p) +{ + return vminavq_p_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vminavt.s8" } } */ + +uint8_t +foo1 (uint8_t a, int8x16_t b, mve_pred16_t p) +{ + return vminavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vminavt.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s16.c new file mode 100644 index 00000000000..872fc982a5c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target 
arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16_t +foo (int16_t a, int16x8_t b, mve_pred16_t p) +{ + return vminvq_p_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.s16" } } */ + +int16_t +foo1 (int16_t a, int16x8_t b, mve_pred16_t p) +{ + return vminvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s32.c new file mode 100644 index 00000000000..47c6a010583 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int32x4_t b, mve_pred16_t p) +{ + return vminvq_p_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.s32" } } */ + +int32_t +foo1 (int32_t a, int32x4_t b, mve_pred16_t p) +{ + return vminvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s8.c new file mode 100644 index 00000000000..a2e330718ff --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8_t +foo (int8_t a, int8x16_t b, mve_pred16_t p) +{ + return vminvq_p_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.s8" } } */ + +int8_t +foo1 (int8_t a, int8x16_t b, mve_pred16_t p) +{ + return vminvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u16.c new file mode 100644 index 00000000000..0100340affb --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16_t +foo (uint16_t a, uint16x8_t b, mve_pred16_t p) +{ + return vminvq_p_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.u16" } } */ + +uint16_t +foo1 (uint16_t a, uint16x8_t b, mve_pred16_t p) +{ + return vminvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.u16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u32.c new file mode 100644 index 00000000000..964455fc41e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32_t a, uint32x4_t b, mve_pred16_t p) +{ + return vminvq_p_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.u32" } } */ + +uint32_t +foo1 (uint32_t a, uint32x4_t b, mve_pred16_t p) +{ + return vminvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.u32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u8.c 
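
The _p reductions above (vmaxvq_p, vmaxavq_p, vminvq_p, vminavq_p) take the running scalar as their first argument and leave it unchanged for inactive lanes, which is what makes them usable in a tail-predicated loop. The sketch below is illustrative only and not part of the patch; it assumes vctp8q and vld1q_z_u8 from other patches in this series.

#include "arm_mve.h"

/* Maximum of an arbitrary-length uint8_t buffer.  vctp8q builds the tail
   predicate, the zeroing load and the predicated VMAXVT both ignore the
   inactive lanes, and ACC carries the running maximum between iterations.  */
uint8_t
max_u8 (const uint8_t *buf, unsigned n)
{
  uint8_t acc = 0;
  for (unsigned i = 0; i < n; i += 16)
    {
      mve_pred16_t p = vctp8q (n - i);           /* all 16 lanes until the tail */
      uint8x16_t v = vld1q_z_u8 (buf + i, p);    /* inactive lanes read as zero */
      acc = vmaxvq_p_u8 (acc, v, p);
    }
  return acc;
}
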
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u8.c new file mode 100644 index 00000000000..dfae14d8ed5 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_p_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8_t +foo (uint8_t a, uint8x16_t b, mve_pred16_t p) +{ + return vminvq_p_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.u8" } } */ + +uint8_t +foo1 (uint8_t a, uint8x16_t b, mve_pred16_t p) +{ + return vminvq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vminvt.u8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s16.c new file mode 100644 index 00000000000..6d7bbce6e90 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int16x8_t b, int16x8_t c) +{ + return vmladavaq_s16 (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.s16" } } */ + +int32_t +foo1 (int32_t a, int16x8_t b, int16x8_t c) +{ + return vmladavaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s32.c new file mode 100644 index 00000000000..5c0b9e2aad1 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int32x4_t b, int32x4_t c) +{ + return vmladavaq_s32 (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.s32" } } */ + +int32_t +foo1 (int32_t a, int32x4_t b, int32x4_t c) +{ + return vmladavaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s8.c new file mode 100644 index 00000000000..5620ba4fbd2 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int8x16_t b, int8x16_t c) +{ + return vmladavaq_s8 (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.s8" } } */ + +int32_t +foo1 (int32_t a, int8x16_t b, int8x16_t c) +{ + return vmladavaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u16.c new file mode 100644 index 00000000000..34e01291b9c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32_t a, uint16x8_t b, uint16x8_t c) +{ + return vmladavaq_u16 (a, b, c); +} + +/* { 
dg-final { scan-assembler "vmladava.u16" } } */ + +uint32_t +foo1 (uint32_t a, uint16x8_t b, uint16x8_t c) +{ + return vmladavaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.u16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u32.c new file mode 100644 index 00000000000..55185651a61 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32_t a, uint32x4_t b, uint32x4_t c) +{ + return vmladavaq_u32 (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.u32" } } */ + +uint32_t +foo1 (uint32_t a, uint32x4_t b, uint32x4_t c) +{ + return vmladavaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.u32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u8.c new file mode 100644 index 00000000000..1d5af5e9d46 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaq_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32_t a, uint8x16_t b, uint8x16_t c) +{ + return vmladavaq_u8 (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.u8" } } */ + +uint32_t +foo1 (uint32_t a, uint8x16_t b, uint8x16_t c) +{ + return vmladavaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmladava.u8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s16.c new file mode 100644 index 00000000000..e74b07e8302 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int16x8_t b, int16x8_t c) +{ + return vmladavaxq_s16 (a, b, c); +} + +/* { dg-final { scan-assembler "vmladavax.s16" } } */ + +int32_t +foo1 (int32_t a, int16x8_t b, int16x8_t c) +{ + return vmladavaxq (a, b, c); +} + +/* { dg-final { scan-assembler "vmladavax.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s32.c new file mode 100644 index 00000000000..5bf679da86b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int32x4_t b, int32x4_t c) +{ + return vmladavaxq_s32 (a, b, c); +} + +/* { dg-final { scan-assembler "vmladavax.s32" } } */ + +int32_t +foo1 (int32_t a, int32x4_t b, int32x4_t c) +{ + return vmladavaxq (a, b, c); +} + +/* { dg-final { scan-assembler "vmladavax.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s8.c new file mode 100644 index 00000000000..67dd10be027 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavaxq_s8.c @@ 
-0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int8x16_t b, int8x16_t c) +{ + return vmladavaxq_s8 (a, b, c); +} + +/* { dg-final { scan-assembler "vmladavax.s8" } } */ + +int32_t +foo1 (int32_t a, int8x16_t b, int8x16_t c) +{ + return vmladavaxq (a, b, c); +} + +/* { dg-final { scan-assembler "vmladavax.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s16.c new file mode 100644 index 00000000000..20638fef671 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vmladavq_p_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.s16" } } */ + +int32_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vmladavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s32.c new file mode 100644 index 00000000000..be640188e5b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vmladavq_p_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.s32" } } */ + +int32_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vmladavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s8.c new file mode 100644 index 00000000000..20cbcdfab32 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vmladavq_p_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.s8" } } */ + +int32_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vmladavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u16.c new file mode 100644 index 00000000000..9573d2705e6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vmladavq_p_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.u16" } } */ + +uint32_t +foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vmladavq_p (a, b, 
p); +} + +/* { dg-final { scan-assembler "vmladavt.u16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u32.c new file mode 100644 index 00000000000..52d5adb81cb --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vmladavq_p_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.u32" } } */ + +uint32_t +foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vmladavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.u32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u8.c new file mode 100644 index 00000000000..d51d02b4a8d --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_p_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32_t +foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vmladavq_p_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.u8" } } */ + +uint32_t +foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vmladavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavt.u8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s16.c new file mode 100644 index 00000000000..2a86612d2bf --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vmladavxq_p_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavxt.s16" } } */ + +int32_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vmladavxq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavxt.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s32.c new file mode 100644 index 00000000000..7f2811320d1 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vmladavxq_p_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavxt.s32" } } */ + +int32_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vmladavxq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavxt.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s8.c new file mode 100644 index 00000000000..2765be8a819 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_p_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target 
arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vmladavxq_p_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavxt.s8" } } */ + +int32_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vmladavxq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmladavxt.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s16.c new file mode 100644 index 00000000000..e14adfa94d6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int16x8_t b, int16_t c) +{ + return vmlaq_n_s16 (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.s16" } } */ + +int16x8_t +foo1 (int16x8_t a, int16x8_t b, int16_t c) +{ + return vmlaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s32.c new file mode 100644 index 00000000000..e68fecbef36 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, int32_t c) +{ + return vmlaq_n_s32 (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.s32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, int32_t c) +{ + return vmlaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s8.c new file mode 100644 index 00000000000..ca4446c5b14 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int8x16_t b, int8_t c) +{ + return vmlaq_n_s8 (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.s8" } } */ + +int8x16_t +foo1 (int8x16_t a, int8x16_t b, int8_t c) +{ + return vmlaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u16.c new file mode 100644 index 00000000000..81219f09b77 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, uint16x8_t b, uint16_t c) +{ + return vmlaq_n_u16 (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.u16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, uint16x8_t b, uint16_t c) +{ + return vmlaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.u16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u32.c 
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u32.c new file mode 100644 index 00000000000..871c7fc2564 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, uint32_t c) +{ + return vmlaq_n_u32 (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.u32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, uint32_t c) +{ + return vmlaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.u32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u8.c new file mode 100644 index 00000000000..bb0412f81d5 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlaq_n_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, uint8x16_t b, uint8_t c) +{ + return vmlaq_n_u8 (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.u8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, uint8x16_t b, uint8_t c) +{ + return vmlaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmla.u8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s16.c new file mode 100644 index 00000000000..abe9e1dfa61 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int16x8_t b, int16_t c) +{ + return vmlasq_n_s16 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.s16" } } */ + +int16x8_t +foo1 (int16x8_t a, int16x8_t b, int16_t c) +{ + return vmlasq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s32.c new file mode 100644 index 00000000000..d18178c7681 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, int32_t c) +{ + return vmlasq_n_s32 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.s32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, int32_t c) +{ + return vmlasq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s8.c new file mode 100644 index 00000000000..a67f6242eb3 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int8x16_t b, int8_t c) +{ + return vmlasq_n_s8 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.s8" } } */ 
+ +int8x16_t +foo1 (int8x16_t a, int8x16_t b, int8_t c) +{ + return vmlasq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u16.c new file mode 100644 index 00000000000..d86a35daa65 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, uint16x8_t b, uint16_t c) +{ + return vmlasq_n_u16 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.u16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, uint16x8_t b, uint16_t c) +{ + return vmlasq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.u16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u32.c new file mode 100644 index 00000000000..bd9280d41ce --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, uint32_t c) +{ + return vmlasq_n_u32 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.u32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, uint32_t c) +{ + return vmlasq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.u32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u8.c new file mode 100644 index 00000000000..22d6344f825 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlasq_n_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, uint8x16_t b, uint8_t c) +{ + return vmlasq_n_u8 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.u8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, uint8x16_t b, uint8_t c) +{ + return vmlasq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlas.u8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s16.c new file mode 100644 index 00000000000..2e1ba824c6f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int16x8_t b, int16x8_t c) +{ + return vmlsdavaq_s16 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdava.s16" } } */ + +int32_t +foo1 (int32_t a, int16x8_t b, int16x8_t c) +{ + return vmlsdavaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdava.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s32.c new file mode 100644 index 00000000000..0a4a4cdc0c8 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ 
+/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int32x4_t b, int32x4_t c) +{ + return vmlsdavaq_s32 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdava.s32" } } */ + +int32_t +foo1 (int32_t a, int32x4_t b, int32x4_t c) +{ + return vmlsdavaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdava.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s8.c new file mode 100644 index 00000000000..3151a59ba66 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int8x16_t b, int8x16_t c) +{ + return vmlsdavaq_s8 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdava.s8" } } */ + +int32_t +foo1 (int32_t a, int8x16_t b, int8x16_t c) +{ + return vmlsdavaq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdava.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s16.c new file mode 100644 index 00000000000..fcd0f39176d --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int16x8_t b, int16x8_t c) +{ + return vmlsdavaxq_s16 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdavax.s16" } } */ + +int32_t +foo1 (int32_t a, int16x8_t b, int16x8_t c) +{ + return vmlsdavaxq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdavax.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s32.c new file mode 100644 index 00000000000..36b5c549400 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int32x4_t b, int32x4_t c) +{ + return vmlsdavaxq_s32 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdavax.s32" } } */ + +int32_t +foo1 (int32_t a, int32x4_t b, int32x4_t c) +{ + return vmlsdavaxq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdavax.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s8.c new file mode 100644 index 00000000000..7170a57fe32 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavaxq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32_t a, int8x16_t b, int8x16_t c) +{ + return vmlsdavaxq_s8 (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdavax.s8" } } */ + +int32_t +foo1 (int32_t a, int8x16_t b, int8x16_t c) +{ + return vmlsdavaxq (a, b, c); +} + +/* { dg-final { scan-assembler "vmlsdavax.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s16.c 
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s16.c new file mode 100644 index 00000000000..41cc553612e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vmlsdavq_p_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavt.s16" } } */ + +int32_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vmlsdavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavt.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s32.c new file mode 100644 index 00000000000..b74d60282bc --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vmlsdavq_p_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavt.s32" } } */ + +int32_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vmlsdavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavt.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s8.c new file mode 100644 index 00000000000..5457e2cba39 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_p_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vmlsdavq_p_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavt.s8" } } */ + +int32_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vmlsdavq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavt.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s16.c new file mode 100644 index 00000000000..cb900bb161f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vmlsdavxq_p_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavxt.s16" } } */ + +int32_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vmlsdavxq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavxt.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s32.c new file mode 100644 index 00000000000..682d62ac104 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int32x4_t a, 
int32x4_t b, mve_pred16_t p) +{ + return vmlsdavxq_p_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavxt.s32" } } */ + +int32_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vmlsdavxq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavxt.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s8.c new file mode 100644 index 00000000000..96b81d79e70 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_p_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vmlsdavxq_p_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavxt.s8" } } */ + +int32_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vmlsdavxq_p (a, b, p); +} + +/* { dg-final { scan-assembler "vmlsdavxt.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s16.c new file mode 100644 index 00000000000..24317ba822e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vmvnq_m_s16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vmvnt" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vmvnq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s32.c new file mode 100644 index 00000000000..ac6caa1302f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vmvnq_m_s32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vmvnt" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vmvnq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s8.c new file mode 100644 index 00000000000..95be493ef75 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vmvnq_m_s8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vmvnt" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vmvnq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git 
a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u16.c new file mode 100644 index 00000000000..6d85ba9c3c9 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p) +{ + return vmvnq_m_u16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vmvnt" } } */ + +uint16x8_t +foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p) +{ + return vmvnq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u32.c new file mode 100644 index 00000000000..e08d1fcb203 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p) +{ + return vmvnq_m_u32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vmvnt" } } */ + +uint32x4_t +foo1 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p) +{ + return vmvnq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u8.c new file mode 100644 index 00000000000..73edc54e7c9 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_m_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p) +{ + return vmvnq_m_u8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vmvnt" } } */ + +uint8x16_t +foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p) +{ + return vmvnq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s16.c new file mode 100644 index 00000000000..32261610185 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vnegq_m_s16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vnegt.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vnegq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s32.c new file mode 100644 index 00000000000..1c54ff7fb6d --- /dev/null +++ 
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vnegq_m_s32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vnegt.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vnegq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s8.c new file mode 100644 index 00000000000..2e828b18cfe --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vnegq_m_s8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vnegt.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vnegq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s16.c new file mode 100644 index 00000000000..379972a2dec --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vpselq_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ + +int16x8_t +foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p) +{ + return vpselq (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s32.c new file mode 100644 index 00000000000..8b79e311826 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vpselq_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p) +{ + return vpselq (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s64.c new file mode 100644 index 00000000000..daf67f5bae1 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int64x2_t +foo (int64x2_t a, int64x2_t b, mve_pred16_t p) +{ + return vpselq_s64 (a, b, p); +} + +/* { dg-final { scan-assembler 
"vpsel" } } */ + +int64x2_t +foo1 (int64x2_t a, int64x2_t b, mve_pred16_t p) +{ + return vpselq (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s8.c new file mode 100644 index 00000000000..def4758fc75 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vpselq_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ + +int8x16_t +foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p) +{ + return vpselq (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u16.c new file mode 100644 index 00000000000..c559afd7a66 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vpselq_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ + +uint16x8_t +foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p) +{ + return vpselq (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u32.c new file mode 100644 index 00000000000..ac7c11a2081 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vpselq_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p) +{ + return vpselq (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u64.c new file mode 100644 index 00000000000..09303ac9da4 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint64x2_t +foo (uint64x2_t a, uint64x2_t b, mve_pred16_t p) +{ + return vpselq_u64 (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ + +uint64x2_t +foo1 (uint64x2_t a, uint64x2_t b, mve_pred16_t p) +{ + return vpselq (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u8.c new file mode 100644 index 00000000000..a592d15aa57 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vpselq_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options 
arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vpselq_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ + +uint8x16_t +foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p) +{ + return vpselq (a, b, p); +} + +/* { dg-final { scan-assembler "vpsel" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s16.c new file mode 100644 index 00000000000..2047818c086 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vqabsq_m_s16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqabst.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vqabsq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s32.c new file mode 100644 index 00000000000..95af4623d9e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vqabsq_m_s32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqabst.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vqabsq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s8.c new file mode 100644 index 00000000000..cde5328aaf2 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqabsq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vqabsq_m_s8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqabst.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vqabsq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s16.c new file mode 100644 index 00000000000..fa5bcc48ba2 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqdmladhq_s16 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladh.s16" } } */ + +int16x8_t +foo1 (int16x8_t 
inactive, int16x8_t a, int16x8_t b) +{ + return vqdmladhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladh.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s32.c new file mode 100644 index 00000000000..ba37ad53422 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqdmladhq_s32 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladh.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqdmladhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladh.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s8.c new file mode 100644 index 00000000000..32d2eb86044 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqdmladhq_s8 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladh.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqdmladhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladh.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s16.c new file mode 100644 index 00000000000..ff95d20c619 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqdmladhxq_s16 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladhx.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqdmladhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladhx.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s32.c new file mode 100644 index 00000000000..d8f08ee96ee --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqdmladhxq_s32 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladhx.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqdmladhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladhx.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s8.c new file mode 100644 index 00000000000..6ffbfb05346 --- /dev/null +++ 
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmladhxq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqdmladhxq_s8 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladhx.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqdmladhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmladhx.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s16.c new file mode 100644 index 00000000000..7b90c71cba8 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int16x8_t b, int16_t c) +{ + return vqdmlahq_n_s16 (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s16" } } */ + +int16x8_t +foo1 (int16x8_t a, int16x8_t b, int16_t c) +{ + return vqdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s32.c new file mode 100644 index 00000000000..6c2e487e1d6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, int32_t c) +{ + return vqdmlahq_n_s32 (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, int32_t c) +{ + return vqdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s8.c new file mode 100644 index 00000000000..7a62762e883 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int8x16_t b, int8_t c) +{ + return vqdmlahq_n_s8 (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s8" } } */ + +int8x16_t +foo1 (int8x16_t a, int8x16_t b, int8_t c) +{ + return vqdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u16.c new file mode 100644 index 00000000000..926a2fb1763 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, uint16x8_t b, uint16_t c) +{ + return vqdmlahq_n_u16 (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s16" } } */ + +uint16x8_t +foo1 
(uint16x8_t a, uint16x8_t b, uint16_t c) +{ + return vqdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u32.c new file mode 100644 index 00000000000..9f59e5487b8 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, uint32_t c) +{ + return vqdmlahq_n_u32 (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, uint32_t c) +{ + return vqdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u8.c new file mode 100644 index 00000000000..9a578fadb82 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlahq_n_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, uint8x16_t b, uint8_t c) +{ + return vqdmlahq_n_u8 (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, uint8x16_t b, uint8_t c) +{ + return vqdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqdmlah.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s16.c new file mode 100644 index 00000000000..3084c6ea699 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqdmlsdhq_s16 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdh.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqdmlsdhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdh.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s32.c new file mode 100644 index 00000000000..243b545c089 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqdmlsdhq_s32 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdh.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqdmlsdhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdh.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s8.c new file mode 100644 index 00000000000..6b31a593062 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhq_s8.c @@ -0,0 
+1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqdmlsdhq_s8 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdh.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqdmlsdhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdh.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s16.c new file mode 100644 index 00000000000..99efed1ed9b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqdmlsdhxq_s16 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdhx.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqdmlsdhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdhx.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s32.c new file mode 100644 index 00000000000..e654b64943a --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqdmlsdhxq_s32 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdhx.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqdmlsdhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdhx.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s8.c new file mode 100644 index 00000000000..653732cd05e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmlsdhxq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqdmlsdhxq_s8 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdhx.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqdmlsdhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqdmlsdhx.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s16.c new file mode 100644 index 00000000000..0faae49a1fc --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vqnegq_m_s16 (inactive, a, p); +} + +/* { dg-final { scan-assembler 
"vpst" } } */ +/* { dg-final { scan-assembler "vqnegt.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vqnegq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s32.c new file mode 100644 index 00000000000..a4b9b9ef061 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vqnegq_m_s32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqnegt.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vqnegq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s8.c new file mode 100644 index 00000000000..5b6b8d8af06 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqnegq_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vqnegq_m_s8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqnegt.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vqnegq_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s16.c new file mode 100644 index 00000000000..68814a0b2b2 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqrdmladhq_s16 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladh.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqrdmladhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladh.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s32.c new file mode 100644 index 00000000000..b76abef0d34 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqrdmladhq_s32 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladh.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqrdmladhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladh.s32" } } */ diff --git 
a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s8.c new file mode 100644 index 00000000000..af692a6402d --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqrdmladhq_s8 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladh.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqrdmladhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladh.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s16.c new file mode 100644 index 00000000000..9a265012d13 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqrdmladhxq_s16 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladhx.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqrdmladhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladhx.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s32.c new file mode 100644 index 00000000000..7b79fd199cb --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqrdmladhxq_s32 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladhx.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqrdmladhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladhx.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s8.c new file mode 100644 index 00000000000..0e6cc19fabc --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmladhxq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqrdmladhxq_s8 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladhx.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqrdmladhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmladhx.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c new file mode 100644 index 00000000000..bea00388f16 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { 
dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int16x8_t b, int16_t c) +{ + return vqrdmlahq_n_s16 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s16" } } */ + +int16x8_t +foo1 (int16x8_t a, int16x8_t b, int16_t c) +{ + return vqrdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c new file mode 100644 index 00000000000..50878ca9142 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, int32_t c) +{ + return vqrdmlahq_n_s32 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, int32_t c) +{ + return vqrdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c new file mode 100644 index 00000000000..8d75b1a8d8f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int8x16_t b, int8_t c) +{ + return vqrdmlahq_n_s8 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s8" } } */ + +int8x16_t +foo1 (int8x16_t a, int8x16_t b, int8_t c) +{ + return vqrdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u16.c new file mode 100644 index 00000000000..aecc0f54f8c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, uint16x8_t b, uint16_t c) +{ + return vqrdmlahq_n_u16 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, uint16x8_t b, uint16_t c) +{ + return vqrdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u32.c new file mode 100644 index 00000000000..bef2b3c707a --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, uint32_t c) +{ + return vqrdmlahq_n_u32 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, uint32_t c) +{ + return vqrdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler 
"vqrdmlah.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u8.c new file mode 100644 index 00000000000..69aab303f48 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlahq_n_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, uint8x16_t b, uint8_t c) +{ + return vqrdmlahq_n_u8 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, uint8x16_t b, uint8_t c) +{ + return vqrdmlahq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlah.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s16.c new file mode 100644 index 00000000000..b02cb583ce1 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int16x8_t b, int16_t c) +{ + return vqrdmlashq_n_s16 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s16" } } */ + +int16x8_t +foo1 (int16x8_t a, int16x8_t b, int16_t c) +{ + return vqrdmlashq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s32.c new file mode 100644 index 00000000000..0b2e2aab737 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b, int32_t c) +{ + return vqrdmlashq_n_s32 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b, int32_t c) +{ + return vqrdmlashq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s8.c new file mode 100644 index 00000000000..fe0c7b5f95e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int8x16_t b, int8_t c) +{ + return vqrdmlashq_n_s8 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s8" } } */ + +int8x16_t +foo1 (int8x16_t a, int8x16_t b, int8_t c) +{ + return vqrdmlashq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u16.c new file mode 100644 index 00000000000..5390e4d1acc --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } 
*/ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, uint16x8_t b, uint16_t c) +{ + return vqrdmlashq_n_u16 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, uint16x8_t b, uint16_t c) +{ + return vqrdmlashq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u32.c new file mode 100644 index 00000000000..716ead2303c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b, uint32_t c) +{ + return vqrdmlashq_n_u32 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b, uint32_t c) +{ + return vqrdmlashq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u8.c new file mode 100644 index 00000000000..5ec79f9e5fd --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlashq_n_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, uint8x16_t b, uint8_t c) +{ + return vqrdmlashq_n_u8 (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, uint8x16_t b, uint8_t c) +{ + return vqrdmlashq (a, b, c); +} + +/* { dg-final { scan-assembler "vqrdmlash.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s16.c new file mode 100644 index 00000000000..ca0a8d11080 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqrdmlsdhq_s16 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdh.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqrdmlsdhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdh.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s32.c new file mode 100644 index 00000000000..9c16270a674 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqrdmlsdhq_s32 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdh.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqrdmlsdhq (inactive, a, b); +} + +/* { dg-final { scan-assembler 
"vqrdmlsdh.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s8.c new file mode 100644 index 00000000000..2a0479340ee --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqrdmlsdhq_s8 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdh.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqrdmlsdhq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdh.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s16.c new file mode 100644 index 00000000000..c7fda4336bd --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqrdmlsdhxq_s16 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdhx.s16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, int16x8_t b) +{ + return vqrdmlsdhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdhx.s16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s32.c new file mode 100644 index 00000000000..eaa8ae8e8af --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqrdmlsdhxq_s32 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdhx.s32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, int32x4_t b) +{ + return vqrdmlsdhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdhx.s32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s8.c new file mode 100644 index 00000000000..faca85e3458 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmlsdhxq_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqrdmlsdhxq_s8 (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdhx.s8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, int8x16_t b) +{ + return vqrdmlsdhxq (inactive, a, b); +} + +/* { dg-final { scan-assembler "vqrdmlsdhx.s8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s16.c new file mode 100644 index 00000000000..399d2149a97 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s16.c @@ -0,0 +1,23 @@ 
+/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqrshlt.s16" } } */ + +int16x8_t +foo1 (int16x8_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s32.c new file mode 100644 index 00000000000..cdf08ddf8da --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqrshlt.s32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s8.c new file mode 100644 index 00000000000..82c221dd470 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqrshlt.s8" } } */ + +int8x16_t +foo1 (int8x16_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u16.c new file mode 100644 index 00000000000..d366ebe81be --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqrshlt.u16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u32.c new file mode 100644 index 00000000000..058492ae419 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, int32_t b, mve_pred16_t p) +{ + return 
vqrshlq_m_n_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqrshlt.u32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u8.c new file mode 100644 index 00000000000..9cb660be141 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_m_n_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqrshlt.u8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, int32_t b, mve_pred16_t p) +{ + return vqrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s16.c new file mode 100644 index 00000000000..677e6733fdc --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqshlt.s16" } } */ + +int16x8_t +foo1 (int16x8_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s32.c new file mode 100644 index 00000000000..6cc14a44149 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqshlt.s32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s8.c new file mode 100644 index 00000000000..0cbb8a09e56 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqshlt.s8" } } */ + +int8x16_t +foo1 (int8x16_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git 
a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u16.c new file mode 100644 index 00000000000..30d83028fb5 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqshlt.u16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u32.c new file mode 100644 index 00000000000..02ae638cdbf --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqshlt.u32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u8.c new file mode 100644 index 00000000000..db706812d65 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_m_r_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vqshlt.u8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, int32_t b, mve_pred16_t p) +{ + return vqshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s16.c new file mode 100644 index 00000000000..fc8a6ee4526 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vrev64q_m_s16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrev64t.16" } } */ + +int16x8_t +foo1 (int16x8_t inactive, int16x8_t a, mve_pred16_t p) +{ + return vrev64q_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s32.c new file mode 100644 index 00000000000..8ee35c66f7e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s32.c 
@@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vrev64q_m_s32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrev64t.32" } } */ + +int32x4_t +foo1 (int32x4_t inactive, int32x4_t a, mve_pred16_t p) +{ + return vrev64q_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s8.c new file mode 100644 index 00000000000..390652ca2df --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vrev64q_m_s8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrev64t.8" } } */ + +int8x16_t +foo1 (int8x16_t inactive, int8x16_t a, mve_pred16_t p) +{ + return vrev64q_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u16.c new file mode 100644 index 00000000000..51e7859f659 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p) +{ + return vrev64q_m_u16 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrev64t.16" } } */ + +uint16x8_t +foo1 (uint16x8_t inactive, uint16x8_t a, mve_pred16_t p) +{ + return vrev64q_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u32.c new file mode 100644 index 00000000000..7bcd7c805d0 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p) +{ + return vrev64q_m_u32 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrev64t.32" } } */ + +uint32x4_t +foo1 (uint32x4_t inactive, uint32x4_t a, mve_pred16_t p) +{ + return vrev64q_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u8.c new file mode 100644 index 00000000000..96a9d2be1d2 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_m_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + 
+#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p) +{ + return vrev64q_m_u8 (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrev64t.8" } } */ + +uint8x16_t +foo1 (uint8x16_t inactive, uint8x16_t a, mve_pred16_t p) +{ + return vrev64q_m (inactive, a, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s16.c new file mode 100644 index 00000000000..348a1f84546 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrshlt.s16" } } */ + +int16x8_t +foo1 (int16x8_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s32.c new file mode 100644 index 00000000000..c15c1baaeef --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrshlt.s32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s8.c new file mode 100644 index 00000000000..ac9b90d7d3e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrshlt.s8" } } */ + +int8x16_t +foo1 (int8x16_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u16.c new file mode 100644 index 00000000000..72fc614a3fe --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrshlt.u16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, 
int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u32.c new file mode 100644 index 00000000000..db3db7aec0e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrshlt.u32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u8.c new file mode 100644 index 00000000000..74ee42dfe14 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_m_n_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vrshlt.u8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, int32_t b, mve_pred16_t p) +{ + return vrshlq_m_n (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s16.c new file mode 100644 index 00000000000..c02823212c0 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r_s16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vshlt.s16" } } */ + +int16x8_t +foo1 (int16x8_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s32.c new file mode 100644 index 00000000000..2d48bceb061 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r_s32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vshlt.s32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s8.c new file mode 100644 index 00000000000..ba8a62a1a25 
--- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_s8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r_s8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vshlt.s8" } } */ + +int8x16_t +foo1 (int8x16_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u16.c new file mode 100644 index 00000000000..6d16399f66c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u16.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r_u16 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vshlt.u16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u32.c new file mode 100644 index 00000000000..e597dd420ee --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u32.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r_u32 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vshlt.u32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u8.c new file mode 100644 index 00000000000..d95a312ab16 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_m_r_u8.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r_u8 (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ +/* { dg-final { scan-assembler "vshlt.u8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, int32_t b, mve_pred16_t p) +{ + return vshlq_m_r (a, b, p); +} + +/* { dg-final { scan-assembler "vpst" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s16.c new file mode 100644 index 00000000000..64f6c980b01 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, 
int16x8_t b) +{ + return vsliq_n_s16 (a, b, 15); +} + +/* { dg-final { scan-assembler "vsli.16" } } */ + +int16x8_t +foo1 (int16x8_t a, int16x8_t b) +{ + return vsliq (a, b, 15); +} + +/* { dg-final { scan-assembler "vsli.16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s32.c new file mode 100644 index 00000000000..25bc97b4227 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b) +{ + return vsliq_n_s32 (a, b, 31); +} + +/* { dg-final { scan-assembler "vsli.32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b) +{ + return vsliq (a, b, 31); +} + +/* { dg-final { scan-assembler "vsli.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s8.c new file mode 100644 index 00000000000..6516b0ccc88 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int8x16_t b) +{ + return vsliq_n_s8 (a, b, 7); +} + +/* { dg-final { scan-assembler "vsli.8" } } */ + +int8x16_t +foo1 (int8x16_t a, int8x16_t b) +{ + return vsliq (a, b, 7); +} + +/* { dg-final { scan-assembler "vsli.8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u16.c new file mode 100644 index 00000000000..dc70a657023 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, uint16x8_t b) +{ + return vsliq_n_u16 (a, b, 15); +} + +/* { dg-final { scan-assembler "vsli.16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, uint16x8_t b) +{ + return vsliq (a, b, 15); +} + +/* { dg-final { scan-assembler "vsli.16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u32.c new file mode 100644 index 00000000000..cb0d191600c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b) +{ + return vsliq_n_u32 (a, b, 31); +} + +/* { dg-final { scan-assembler "vsli.32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b) +{ + return vsliq (a, b, 31); +} + +/* { dg-final { scan-assembler "vsli.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u8.c new file mode 100644 index 00000000000..765853a923f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsliq_n_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { 
dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, uint8x16_t b) +{ + return vsliq_n_u8 (a, b, 7); +} + +/* { dg-final { scan-assembler "vsli.8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, uint8x16_t b) +{ + return vsliq (a, b, 7); +} + +/* { dg-final { scan-assembler "vsli.8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s16.c new file mode 100644 index 00000000000..f1a80919e73 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int16x8_t +foo (int16x8_t a, int16x8_t b) +{ + return vsriq_n_s16 (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.16" } } */ + +int16x8_t +foo1 (int16x8_t a, int16x8_t b) +{ + return vsriq (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s32.c new file mode 100644 index 00000000000..f1288340d16 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int32x4_t +foo (int32x4_t a, int32x4_t b) +{ + return vsriq_n_s32 (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.32" } } */ + +int32x4_t +foo1 (int32x4_t a, int32x4_t b) +{ + return vsriq (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s8.c new file mode 100644 index 00000000000..5e51bd9663e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_s8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +int8x16_t +foo (int8x16_t a, int8x16_t b) +{ + return vsriq_n_s8 (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.8" } } */ + +int8x16_t +foo1 (int8x16_t a, int8x16_t b) +{ + return vsriq (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u16.c new file mode 100644 index 00000000000..5ce35ffedef --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, uint16x8_t b) +{ + return vsriq_n_u16 (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, uint16x8_t b) +{ + return vsriq (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u32.c new file mode 100644 index 00000000000..861a3b753b6 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { 
dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b) +{ + return vsriq_n_u32 (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b) +{ + return vsriq (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u8.c new file mode 100644 index 00000000000..bca07881a12 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsriq_n_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, uint8x16_t b) +{ + return vsriq_n_u8 (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, uint8x16_t b) +{ + return vsriq (a, b, 4); +} + +/* { dg-final { scan-assembler "vsri.8" } } */ -- 2.30.2