From ed225d0c7b74eb6fa125cf791b7d50e2000883bc Mon Sep 17 00:00:00 2001
From: Alan Lawrence
Date: Tue, 9 Dec 2014 20:23:36 +0000
Subject: [PATCH] [AArch64] Remove be_checked_get_lane, check bounds with
 __builtin_aarch64_im_lane_boundsi.

gcc/:

	PR target/63870
	* config/aarch64/aarch64-simd-builtins.def (be_checked_get_lane):
	Delete.
	* config/aarch64/aarch64-simd.md (aarch64_be_checked_get_lane):
	Delete.
	* config/aarch64/arm_neon.h (aarch64_vget_lane_any): Use GCC
	vector extensions, __aarch64_lane, __builtin_aarch64_im_lane_boundsi.
	(__aarch64_vget_lane_f32, __aarch64_vget_lane_f64,
	__aarch64_vget_lane_p8, __aarch64_vget_lane_p16,
	__aarch64_vget_lane_s8, __aarch64_vget_lane_s16,
	__aarch64_vget_lane_s32, __aarch64_vget_lane_s64,
	__aarch64_vget_lane_u8, __aarch64_vget_lane_u16,
	__aarch64_vget_lane_u32, __aarch64_vget_lane_u64,
	__aarch64_vgetq_lane_f32, __aarch64_vgetq_lane_f64,
	__aarch64_vgetq_lane_p8, __aarch64_vgetq_lane_p16,
	__aarch64_vgetq_lane_s8, __aarch64_vgetq_lane_s16,
	__aarch64_vgetq_lane_s32, __aarch64_vgetq_lane_s64,
	__aarch64_vgetq_lane_u8, __aarch64_vgetq_lane_u16,
	__aarch64_vgetq_lane_u32, __aarch64_vgetq_lane_u64): Delete.
	(__aarch64_vdup_lane_any): Use __aarch64_vget_lane_any, remove
	'q2' argument.
	(__aarch64_vdup_lane_f32, __aarch64_vdup_lane_f64,
	__aarch64_vdup_lane_p8, __aarch64_vdup_lane_p16,
	__aarch64_vdup_lane_s8, __aarch64_vdup_lane_s16,
	__aarch64_vdup_lane_s32, __aarch64_vdup_lane_s64,
	__aarch64_vdup_lane_u8, __aarch64_vdup_lane_u16,
	__aarch64_vdup_lane_u32, __aarch64_vdup_lane_u64,
	__aarch64_vdup_laneq_f32, __aarch64_vdup_laneq_f64,
	__aarch64_vdup_laneq_p8, __aarch64_vdup_laneq_p16,
	__aarch64_vdup_laneq_s8, __aarch64_vdup_laneq_s16,
	__aarch64_vdup_laneq_s32, __aarch64_vdup_laneq_s64,
	__aarch64_vdup_laneq_u8, __aarch64_vdup_laneq_u16,
	__aarch64_vdup_laneq_u32, __aarch64_vdup_laneq_u64): Remove argument
	to __aarch64_vdup_lane_any.
	(vget_lane_f32, vget_lane_f64, vget_lane_p8, vget_lane_p16,
	vget_lane_s8, vget_lane_s16, vget_lane_s32, vget_lane_s64,
	vget_lane_u8, vget_lane_u16, vget_lane_u32, vget_lane_u64,
	vgetq_lane_f32, vgetq_lane_f64, vgetq_lane_p8, vgetq_lane_p16,
	vgetq_lane_s8, vgetq_lane_s16, vgetq_lane_s32, vgetq_lane_s64,
	vgetq_lane_u8, vgetq_lane_u16, vgetq_lane_u32, vgetq_lane_u64,
	vdupb_lane_p8, vdupb_lane_s8, vdupb_lane_u8, vduph_lane_p16,
	vduph_lane_s16, vduph_lane_u16, vdups_lane_f32, vdups_lane_s32,
	vdups_lane_u32, vdupb_laneq_p8, vdupb_laneq_s8, vdupb_laneq_u8,
	vduph_laneq_p16, vduph_laneq_s16, vduph_laneq_u16, vdups_laneq_f32,
	vdups_laneq_s32, vdups_laneq_u32, vdupd_laneq_f64, vdupd_laneq_s64,
	vdupd_laneq_u64, vfmas_lane_f32, vfma_laneq_f64, vfmad_laneq_f64,
	vfmas_laneq_f32, vfmss_lane_f32, vfms_laneq_f64, vfmsd_laneq_f64,
	vfmss_laneq_f32, vmla_lane_f32, vmla_lane_s16, vmla_lane_s32,
	vmla_lane_u16, vmla_lane_u32, vmla_laneq_f32, vmla_laneq_s16,
	vmla_laneq_s32, vmla_laneq_u16, vmla_laneq_u32, vmlaq_lane_f32,
	vmlaq_lane_s16, vmlaq_lane_s32, vmlaq_lane_u16, vmlaq_lane_u32,
	vmlaq_laneq_f32, vmlaq_laneq_s16, vmlaq_laneq_s32, vmlaq_laneq_u16,
	vmlaq_laneq_u32, vmls_lane_f32, vmls_lane_s16, vmls_lane_s32,
	vmls_lane_u16, vmls_lane_u32, vmls_laneq_f32, vmls_laneq_s16,
	vmls_laneq_s32, vmls_laneq_u16, vmls_laneq_u32, vmlsq_lane_f32,
	vmlsq_lane_s16, vmlsq_lane_s32, vmlsq_lane_u16, vmlsq_lane_u32,
	vmlsq_laneq_f32, vmlsq_laneq_s16, vmlsq_laneq_s32, vmlsq_laneq_u16,
	vmlsq_laneq_u32, vmul_lane_f32, vmul_lane_s16, vmul_lane_s32,
	vmul_lane_u16, vmul_lane_u32, vmuld_lane_f64, vmuld_laneq_f64,
	vmuls_lane_f32, vmuls_laneq_f32, vmul_laneq_f32, vmul_laneq_f64,
	vmul_laneq_s16, vmul_laneq_s32, vmul_laneq_u16, vmul_laneq_u32,
	vmulq_lane_f32, vmulq_lane_s16, vmulq_lane_s32, vmulq_lane_u16,
	vmulq_lane_u32, vmulq_laneq_f32, vmulq_laneq_f64, vmulq_laneq_s16,
	vmulq_laneq_s32, vmulq_laneq_u16, vmulq_laneq_u32) : Use
	__aarch64_vget_lane_any.

gcc/testsuite/:

	* gcc.target/aarch64/simd/vget_lane_f32_indices_1.c: New test.
	* gcc.target/aarch64/simd/vget_lane_f64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_p16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_p8_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_s16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_s32_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_s64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_s8_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_u16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_u32_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_u64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_u8_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_f32_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_f64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_p16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_p8_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_s16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_s32_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_s64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_s8_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_u16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_u32_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_u64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_u8_indices_1.c: Likewise.
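A note on the mechanism (editorial, not part of the committed ChangeLog): the
replacement __aarch64_vget_lane_any in arm_neon.h is a single generic
statement-expression macro. It validates the lane number at compile time via
__AARCH64_LANE_CHECK (which wraps __builtin_aarch64_im_lane_boundsi and emits
the "lane N out of range" errors the new tests expect), flips the index
through __aarch64_lane on big-endian, where GCC's vector-extension numbering
runs opposite to the architectural lanes, and then subscripts the vector
directly. A minimal standalone sketch of that shape in GNU C follows; the
my_*/MY_* names are hypothetical stand-ins for illustration only, and the
crude sizeof-based check merely mimics a compile-time bounds failure rather
than reproducing the builtin's diagnostics:

    /* Standalone sketch, not part of the patch.  MY_LANE_CHECK stands in
       for __AARCH64_LANE_CHECK / __builtin_aarch64_im_lane_boundsi, and
       MY_LANE for __aarch64_lane.  */

    typedef int my_int32x2_t __attribute__ ((vector_size (8)));

    #define MY_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))

    /* For a constant lane index, a negative array size forces a
       compile-time error when the index is out of range.  */
    #define MY_LANE_CHECK(__vec, __idx) \
      ((void) sizeof (char[(__idx) >= 0 \
			   && (__idx) < (int) MY_NUM_LANES (__vec) \
			   ? 1 : -1]))

    #ifdef __AARCH64EB__
    /* Big-endian: GCC vector indices run opposite to architectural lanes.  */
    #define MY_LANE(__vec, __idx) (MY_NUM_LANES (__vec) - 1 - (__idx))
    #else
    #define MY_LANE(__vec, __idx) (__idx)
    #endif

    /* The shape of the new generic getter: check, flip, subscript.  */
    #define my_vget_lane_any(__vec, __idx) \
      __extension__ \
      ({ \
	 MY_LANE_CHECK (__vec, __idx); \
	 (__vec)[MY_LANE (__vec, __idx)]; \
      })

    int
    my_test (my_int32x2_t v)
    {
      return my_vget_lane_any (v, 1);	/* Lane 2 would fail to compile.  */
    }

Because one such macro serves every element type and width, the two dozen
per-type __aarch64_vget_lane_* / __aarch64_vgetq_lane_* wrappers, and the
be_checked_get_lane builtin behind them, can all be deleted, as the diff
below does.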
From-SVN: r218536
---
 gcc/ChangeLog                                 |  68 +++
 gcc/config/aarch64/aarch64-simd-builtins.def  |   3 -
 gcc/config/aarch64/aarch64-simd.md            |  15 +-
 gcc/config/aarch64/arm_neon.h                 | 443 +++++++-----------
 gcc/testsuite/ChangeLog                       |  28 ++
 .../aarch64/simd/vget_lane_f32_indices_1.c    |  17 +
 .../aarch64/simd/vget_lane_f64_indices_1.c    |  17 +
 .../aarch64/simd/vget_lane_p16_indices_1.c    |  17 +
 .../aarch64/simd/vget_lane_p8_indices_1.c     |  17 +
 .../aarch64/simd/vget_lane_s16_indices_1.c    |  17 +
 .../aarch64/simd/vget_lane_s32_indices_1.c    |  17 +
 .../aarch64/simd/vget_lane_s64_indices_1.c    |  17 +
 .../aarch64/simd/vget_lane_s8_indices_1.c     |  17 +
 .../aarch64/simd/vget_lane_u16_indices_1.c    |  17 +
 .../aarch64/simd/vget_lane_u32_indices_1.c    |  17 +
 .../aarch64/simd/vget_lane_u64_indices_1.c    |  17 +
 .../aarch64/simd/vget_lane_u8_indices_1.c     |  17 +
 .../aarch64/simd/vgetq_lane_f32_indices_1.c   |  17 +
 .../aarch64/simd/vgetq_lane_f64_indices_1.c   |  17 +
 .../aarch64/simd/vgetq_lane_p16_indices_1.c   |  17 +
 .../aarch64/simd/vgetq_lane_p8_indices_1.c    |  17 +
 .../aarch64/simd/vgetq_lane_s16_indices_1.c   |  17 +
 .../aarch64/simd/vgetq_lane_s32_indices_1.c   |  17 +
 .../aarch64/simd/vgetq_lane_s64_indices_1.c   |  17 +
 .../aarch64/simd/vgetq_lane_s8_indices_1.c    |  17 +
 .../aarch64/simd/vgetq_lane_u16_indices_1.c   |  17 +
 .../aarch64/simd/vgetq_lane_u32_indices_1.c   |  17 +
 .../aarch64/simd/vgetq_lane_u64_indices_1.c   |  17 +
 .../aarch64/simd/vgetq_lane_u8_indices_1.c    |  17 +
 29 files changed, 682 insertions(+), 283 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_f32_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_f64_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_p16_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_p8_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s16_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s32_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s64_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s8_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u16_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u32_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u64_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u8_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_f32_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_f64_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_p16_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_p8_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s16_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s32_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s64_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s8_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u16_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u32_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u64_indices_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u8_indices_1.c
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index af590f270c2..6612cbbdc9d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,71 @@
+2014-12-09  Alan Lawrence
+
+	PR target/63870
+	* config/aarch64/aarch64-simd-builtins.def (be_checked_get_lane):
+	Delete.
+	* config/aarch64/aarch64-simd.md (aarch64_be_checked_get_lane):
+	Delete.
+	* config/aarch64/arm_neon.h (aarch64_vget_lane_any): Use GCC
+	vector extensions, __aarch64_lane, __builtin_aarch64_im_lane_boundsi.
+	(__aarch64_vget_lane_f32, __aarch64_vget_lane_f64,
+	__aarch64_vget_lane_p8, __aarch64_vget_lane_p16,
+	__aarch64_vget_lane_s8, __aarch64_vget_lane_s16,
+	__aarch64_vget_lane_s32, __aarch64_vget_lane_s64,
+	__aarch64_vget_lane_u8, __aarch64_vget_lane_u16,
+	__aarch64_vget_lane_u32, __aarch64_vget_lane_u64,
+	__aarch64_vgetq_lane_f32, __aarch64_vgetq_lane_f64,
+	__aarch64_vgetq_lane_p8, __aarch64_vgetq_lane_p16,
+	__aarch64_vgetq_lane_s8, __aarch64_vgetq_lane_s16,
+	__aarch64_vgetq_lane_s32, __aarch64_vgetq_lane_s64,
+	__aarch64_vgetq_lane_u8, __aarch64_vgetq_lane_u16,
+	__aarch64_vgetq_lane_u32, __aarch64_vgetq_lane_u64): Delete.
+	(__aarch64_vdup_lane_any): Use __aarch64_vget_lane_any, remove
+	'q2' argument.
+	(__aarch64_vdup_lane_f32, __aarch64_vdup_lane_f64,
+	__aarch64_vdup_lane_p8, __aarch64_vdup_lane_p16,
+	__aarch64_vdup_lane_s8, __aarch64_vdup_lane_s16,
+	__aarch64_vdup_lane_s32, __aarch64_vdup_lane_s64,
+	__aarch64_vdup_lane_u8, __aarch64_vdup_lane_u16,
+	__aarch64_vdup_lane_u32, __aarch64_vdup_lane_u64,
+	__aarch64_vdup_laneq_f32, __aarch64_vdup_laneq_f64,
+	__aarch64_vdup_laneq_p8, __aarch64_vdup_laneq_p16,
+	__aarch64_vdup_laneq_s8, __aarch64_vdup_laneq_s16,
+	__aarch64_vdup_laneq_s32, __aarch64_vdup_laneq_s64,
+	__aarch64_vdup_laneq_u8, __aarch64_vdup_laneq_u16,
+	__aarch64_vdup_laneq_u32, __aarch64_vdup_laneq_u64): Remove argument
+	to __aarch64_vdup_lane_any.
+	(vget_lane_f32, vget_lane_f64, vget_lane_p8, vget_lane_p16,
+	vget_lane_s8, vget_lane_s16, vget_lane_s32, vget_lane_s64,
+	vget_lane_u8, vget_lane_u16, vget_lane_u32, vget_lane_u64,
+	vgetq_lane_f32, vgetq_lane_f64, vgetq_lane_p8, vgetq_lane_p16,
+	vgetq_lane_s8, vgetq_lane_s16, vgetq_lane_s32, vgetq_lane_s64,
+	vgetq_lane_u8, vgetq_lane_u16, vgetq_lane_u32, vgetq_lane_u64,
+	vdupb_lane_p8, vdupb_lane_s8, vdupb_lane_u8, vduph_lane_p16,
+	vduph_lane_s16, vduph_lane_u16, vdups_lane_f32, vdups_lane_s32,
+	vdups_lane_u32, vdupb_laneq_p8, vdupb_laneq_s8, vdupb_laneq_u8,
+	vduph_laneq_p16, vduph_laneq_s16, vduph_laneq_u16, vdups_laneq_f32,
+	vdups_laneq_s32, vdups_laneq_u32, vdupd_laneq_f64, vdupd_laneq_s64,
+	vdupd_laneq_u64, vfmas_lane_f32, vfma_laneq_f64, vfmad_laneq_f64,
+	vfmas_laneq_f32, vfmss_lane_f32, vfms_laneq_f64, vfmsd_laneq_f64,
+	vfmss_laneq_f32, vmla_lane_f32, vmla_lane_s16, vmla_lane_s32,
+	vmla_lane_u16, vmla_lane_u32, vmla_laneq_f32, vmla_laneq_s16,
+	vmla_laneq_s32, vmla_laneq_u16, vmla_laneq_u32, vmlaq_lane_f32,
+	vmlaq_lane_s16, vmlaq_lane_s32, vmlaq_lane_u16, vmlaq_lane_u32,
+	vmlaq_laneq_f32, vmlaq_laneq_s16, vmlaq_laneq_s32, vmlaq_laneq_u16,
+	vmlaq_laneq_u32, vmls_lane_f32, vmls_lane_s16, vmls_lane_s32,
+	vmls_lane_u16, vmls_lane_u32, vmls_laneq_f32, vmls_laneq_s16,
+	vmls_laneq_s32, vmls_laneq_u16, vmls_laneq_u32, vmlsq_lane_f32,
+	vmlsq_lane_s16, vmlsq_lane_s32, vmlsq_lane_u16, vmlsq_lane_u32,
+	vmlsq_laneq_f32, vmlsq_laneq_s16, vmlsq_laneq_s32, vmlsq_laneq_u16,
+	vmlsq_laneq_u32, vmul_lane_f32, vmul_lane_s16, vmul_lane_s32,
+	vmul_lane_u16, vmul_lane_u32, vmuld_lane_f64, vmuld_laneq_f64,
+	vmuls_lane_f32, vmuls_laneq_f32, vmul_laneq_f32, vmul_laneq_f64,
+	vmul_laneq_s16, vmul_laneq_s32, vmul_laneq_u16, vmul_laneq_u32,
+	vmulq_lane_f32, vmulq_lane_s16, vmulq_lane_s32, vmulq_lane_u16,
+	vmulq_lane_u32, vmulq_laneq_f32, vmulq_laneq_f64, vmulq_laneq_s16,
+	vmulq_laneq_s32, vmulq_laneq_u16, vmulq_laneq_u32) : Use
+	__aarch64_vget_lane_any.
+
 2014-12-09  Alan Lawrence
 
 	PR target/63870
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 9f479e8b07f..16fdb5a59e0 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -49,9 +49,6 @@
   BUILTIN_VS (UNOP, ctz, 2)
   BUILTIN_VB (UNOP, popcount, 2)
 
-  /* be_checked_get_lane does its own lane swapping, so not a lane index.  */
-  BUILTIN_VALL (GETREG, be_checked_get_lane, 0)
-
   /* Implemented by aarch64_qshl.  */
   BUILTIN_VSDQ_I (BINOP, sqshl, 0)
   BUILTIN_VSDQ_I (BINOP_UUS, uqshl, 0)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 49a2e888ae7..78c9df0d27c 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2438,22 +2438,9 @@
   [(set_attr "type" "neon_to_gp")]
 )
 
-(define_expand "aarch64_be_checked_get_lane<mode>"
-  [(match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand")
-   (match_operand:VALL 1 "register_operand")
-   (match_operand:SI 2 "immediate_operand")]
-  "TARGET_SIMD"
-  {
-    operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
-    emit_insn (gen_aarch64_get_lane<mode> (operands[0],
-					   operands[1],
-					   operands[2]));
-    DONE;
-  }
-)
-
 ;; Lane extraction of a value, neither sign nor zero extension
 ;; is guaranteed so upper bits should be considered undefined.
+;; RTL uses GCC vector extension indices throughout so flip only for assembly.
 (define_insn "aarch64_get_lane<mode>"
   [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r, w, Utv")
	(vec_select:<VEL>
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index d4d4ee973ef..319cd8c1a0a 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -426,183 +426,112 @@ typedef struct poly16x8x4_t
   poly16x8_t val[4];
 } poly16x8x4_t;
 
-/* vget_lane internal macros.  */
-
-#define __aarch64_vget_lane_any(__size, __cast_ret, __cast_a, __a, __b) \
-  (__cast_ret \
-     __builtin_aarch64_be_checked_get_lane##__size (__cast_a __a, __b))
-
-#define __aarch64_vget_lane_f32(__a, __b) \
-  __aarch64_vget_lane_any (v2sf, , , __a, __b)
-#define __aarch64_vget_lane_f64(__a, __b) __extension__ \
-  ({ \
-    __AARCH64_LANE_CHECK (__a, __b); \
-    __a[0]; \
-  })
-
-#define __aarch64_vget_lane_p8(__a, __b) \
-  __aarch64_vget_lane_any (v8qi, (poly8_t), (int8x8_t), __a, __b)
-#define __aarch64_vget_lane_p16(__a, __b) \
-  __aarch64_vget_lane_any (v4hi, (poly16_t), (int16x4_t), __a, __b)
-
-#define __aarch64_vget_lane_s8(__a, __b) \
-  __aarch64_vget_lane_any (v8qi, , ,__a, __b)
-#define __aarch64_vget_lane_s16(__a, __b) \
-  __aarch64_vget_lane_any (v4hi, , ,__a, __b)
-#define __aarch64_vget_lane_s32(__a, __b) \
-  __aarch64_vget_lane_any (v2si, , ,__a, __b)
-#define __aarch64_vget_lane_s64(__a, __b) __extension__ \
-  ({ \
-    __AARCH64_LANE_CHECK (__a, __b); \
-    __a[0]; \
-  })
-
-#define __aarch64_vget_lane_u8(__a, __b) \
-  __aarch64_vget_lane_any (v8qi, (uint8_t), (int8x8_t), __a, __b)
-#define __aarch64_vget_lane_u16(__a, __b) \
-  __aarch64_vget_lane_any (v4hi, (uint16_t), (int16x4_t), __a, __b)
-#define __aarch64_vget_lane_u32(__a, __b) \
-  __aarch64_vget_lane_any (v2si, (uint32_t), (int32x2_t), __a, __b)
-#define __aarch64_vget_lane_u64(__a, __b) __extension__ \
-  ({ \
-    __AARCH64_LANE_CHECK (__a, __b); \
-    __a[0]; \
-  })
-
-#define __aarch64_vgetq_lane_f32(__a, __b) \
-  __aarch64_vget_lane_any (v4sf, , , __a, __b)
-#define __aarch64_vgetq_lane_f64(__a, __b) \
-  __aarch64_vget_lane_any (v2df, , , __a, __b)
-
-#define __aarch64_vgetq_lane_p8(__a, __b) \
-  __aarch64_vget_lane_any (v16qi, (poly8_t), (int8x16_t), __a, __b)
-#define __aarch64_vgetq_lane_p16(__a, __b) \
-  __aarch64_vget_lane_any (v8hi, (poly16_t), (int16x8_t), __a, __b)
-
-#define __aarch64_vgetq_lane_s8(__a, __b) \
-  __aarch64_vget_lane_any (v16qi, , ,__a, __b)
-#define __aarch64_vgetq_lane_s16(__a, __b) \
-  __aarch64_vget_lane_any (v8hi, , ,__a, __b)
-#define __aarch64_vgetq_lane_s32(__a, __b) \
-  __aarch64_vget_lane_any (v4si, , ,__a, __b)
-#define __aarch64_vgetq_lane_s64(__a, __b) \
-  __aarch64_vget_lane_any (v2di, , ,__a, __b)
-
-#define __aarch64_vgetq_lane_u8(__a, __b) \
-  __aarch64_vget_lane_any (v16qi, (uint8_t), (int8x16_t), __a, __b)
-#define __aarch64_vgetq_lane_u16(__a, __b) \
-  __aarch64_vget_lane_any (v8hi, (uint16_t), (int16x8_t), __a, __b)
-#define __aarch64_vgetq_lane_u32(__a, __b) \
-  __aarch64_vget_lane_any (v4si, (uint32_t), (int32x4_t), __a, __b)
-#define __aarch64_vgetq_lane_u64(__a, __b) \
-  __aarch64_vget_lane_any (v2di, (uint64_t), (int64x2_t), __a, __b)
-
 /* __aarch64_vdup_lane internal macros.
*/ -#define __aarch64_vdup_lane_any(__size, __q1, __q2, __a, __b) \ - vdup##__q1##_n_##__size (__aarch64_vget##__q2##_lane_##__size (__a, __b)) +#define __aarch64_vdup_lane_any(__size, __q, __a, __b) \ + vdup##__q##_n_##__size (__aarch64_vget_lane_any (__a, __b)) #define __aarch64_vdup_lane_f32(__a, __b) \ - __aarch64_vdup_lane_any (f32, , , __a, __b) + __aarch64_vdup_lane_any (f32, , __a, __b) #define __aarch64_vdup_lane_f64(__a, __b) \ - __aarch64_vdup_lane_any (f64, , , __a, __b) + __aarch64_vdup_lane_any (f64, , __a, __b) #define __aarch64_vdup_lane_p8(__a, __b) \ - __aarch64_vdup_lane_any (p8, , , __a, __b) + __aarch64_vdup_lane_any (p8, , __a, __b) #define __aarch64_vdup_lane_p16(__a, __b) \ - __aarch64_vdup_lane_any (p16, , , __a, __b) + __aarch64_vdup_lane_any (p16, , __a, __b) #define __aarch64_vdup_lane_s8(__a, __b) \ - __aarch64_vdup_lane_any (s8, , , __a, __b) + __aarch64_vdup_lane_any (s8, , __a, __b) #define __aarch64_vdup_lane_s16(__a, __b) \ - __aarch64_vdup_lane_any (s16, , , __a, __b) + __aarch64_vdup_lane_any (s16, , __a, __b) #define __aarch64_vdup_lane_s32(__a, __b) \ - __aarch64_vdup_lane_any (s32, , , __a, __b) + __aarch64_vdup_lane_any (s32, , __a, __b) #define __aarch64_vdup_lane_s64(__a, __b) \ - __aarch64_vdup_lane_any (s64, , , __a, __b) + __aarch64_vdup_lane_any (s64, , __a, __b) #define __aarch64_vdup_lane_u8(__a, __b) \ - __aarch64_vdup_lane_any (u8, , , __a, __b) + __aarch64_vdup_lane_any (u8, , __a, __b) #define __aarch64_vdup_lane_u16(__a, __b) \ - __aarch64_vdup_lane_any (u16, , , __a, __b) + __aarch64_vdup_lane_any (u16, , __a, __b) #define __aarch64_vdup_lane_u32(__a, __b) \ - __aarch64_vdup_lane_any (u32, , , __a, __b) + __aarch64_vdup_lane_any (u32, , __a, __b) #define __aarch64_vdup_lane_u64(__a, __b) \ - __aarch64_vdup_lane_any (u64, , , __a, __b) + __aarch64_vdup_lane_any (u64, , __a, __b) /* __aarch64_vdup_laneq internal macros. 
*/ #define __aarch64_vdup_laneq_f32(__a, __b) \ - __aarch64_vdup_lane_any (f32, , q, __a, __b) + __aarch64_vdup_lane_any (f32, , __a, __b) #define __aarch64_vdup_laneq_f64(__a, __b) \ - __aarch64_vdup_lane_any (f64, , q, __a, __b) + __aarch64_vdup_lane_any (f64, , __a, __b) #define __aarch64_vdup_laneq_p8(__a, __b) \ - __aarch64_vdup_lane_any (p8, , q, __a, __b) + __aarch64_vdup_lane_any (p8, , __a, __b) #define __aarch64_vdup_laneq_p16(__a, __b) \ - __aarch64_vdup_lane_any (p16, , q, __a, __b) + __aarch64_vdup_lane_any (p16, , __a, __b) #define __aarch64_vdup_laneq_s8(__a, __b) \ - __aarch64_vdup_lane_any (s8, , q, __a, __b) + __aarch64_vdup_lane_any (s8, , __a, __b) #define __aarch64_vdup_laneq_s16(__a, __b) \ - __aarch64_vdup_lane_any (s16, , q, __a, __b) + __aarch64_vdup_lane_any (s16, , __a, __b) #define __aarch64_vdup_laneq_s32(__a, __b) \ - __aarch64_vdup_lane_any (s32, , q, __a, __b) + __aarch64_vdup_lane_any (s32, , __a, __b) #define __aarch64_vdup_laneq_s64(__a, __b) \ - __aarch64_vdup_lane_any (s64, , q, __a, __b) + __aarch64_vdup_lane_any (s64, , __a, __b) #define __aarch64_vdup_laneq_u8(__a, __b) \ - __aarch64_vdup_lane_any (u8, , q, __a, __b) + __aarch64_vdup_lane_any (u8, , __a, __b) #define __aarch64_vdup_laneq_u16(__a, __b) \ - __aarch64_vdup_lane_any (u16, , q, __a, __b) + __aarch64_vdup_lane_any (u16, , __a, __b) #define __aarch64_vdup_laneq_u32(__a, __b) \ - __aarch64_vdup_lane_any (u32, , q, __a, __b) + __aarch64_vdup_lane_any (u32, , __a, __b) #define __aarch64_vdup_laneq_u64(__a, __b) \ - __aarch64_vdup_lane_any (u64, , q, __a, __b) + __aarch64_vdup_lane_any (u64, , __a, __b) /* __aarch64_vdupq_lane internal macros. */ #define __aarch64_vdupq_lane_f32(__a, __b) \ - __aarch64_vdup_lane_any (f32, q, , __a, __b) + __aarch64_vdup_lane_any (f32, q, __a, __b) #define __aarch64_vdupq_lane_f64(__a, __b) \ - __aarch64_vdup_lane_any (f64, q, , __a, __b) + __aarch64_vdup_lane_any (f64, q, __a, __b) #define __aarch64_vdupq_lane_p8(__a, __b) \ - __aarch64_vdup_lane_any (p8, q, , __a, __b) + __aarch64_vdup_lane_any (p8, q, __a, __b) #define __aarch64_vdupq_lane_p16(__a, __b) \ - __aarch64_vdup_lane_any (p16, q, , __a, __b) + __aarch64_vdup_lane_any (p16, q, __a, __b) #define __aarch64_vdupq_lane_s8(__a, __b) \ - __aarch64_vdup_lane_any (s8, q, , __a, __b) + __aarch64_vdup_lane_any (s8, q, __a, __b) #define __aarch64_vdupq_lane_s16(__a, __b) \ - __aarch64_vdup_lane_any (s16, q, , __a, __b) + __aarch64_vdup_lane_any (s16, q, __a, __b) #define __aarch64_vdupq_lane_s32(__a, __b) \ - __aarch64_vdup_lane_any (s32, q, , __a, __b) + __aarch64_vdup_lane_any (s32, q, __a, __b) #define __aarch64_vdupq_lane_s64(__a, __b) \ - __aarch64_vdup_lane_any (s64, q, , __a, __b) + __aarch64_vdup_lane_any (s64, q, __a, __b) #define __aarch64_vdupq_lane_u8(__a, __b) \ - __aarch64_vdup_lane_any (u8, q, , __a, __b) + __aarch64_vdup_lane_any (u8, q, __a, __b) #define __aarch64_vdupq_lane_u16(__a, __b) \ - __aarch64_vdup_lane_any (u16, q, , __a, __b) + __aarch64_vdup_lane_any (u16, q, __a, __b) #define __aarch64_vdupq_lane_u32(__a, __b) \ - __aarch64_vdup_lane_any (u32, q, , __a, __b) + __aarch64_vdup_lane_any (u32, q, __a, __b) #define __aarch64_vdupq_lane_u64(__a, __b) \ - __aarch64_vdup_lane_any (u64, q, , __a, __b) + __aarch64_vdup_lane_any (u64, q, __a, __b) /* __aarch64_vdupq_laneq internal macros. 
*/ #define __aarch64_vdupq_laneq_f32(__a, __b) \ - __aarch64_vdup_lane_any (f32, q, q, __a, __b) + __aarch64_vdup_lane_any (f32, q, __a, __b) #define __aarch64_vdupq_laneq_f64(__a, __b) \ - __aarch64_vdup_lane_any (f64, q, q, __a, __b) + __aarch64_vdup_lane_any (f64, q, __a, __b) #define __aarch64_vdupq_laneq_p8(__a, __b) \ - __aarch64_vdup_lane_any (p8, q, q, __a, __b) + __aarch64_vdup_lane_any (p8, q, __a, __b) #define __aarch64_vdupq_laneq_p16(__a, __b) \ - __aarch64_vdup_lane_any (p16, q, q, __a, __b) + __aarch64_vdup_lane_any (p16, q, __a, __b) #define __aarch64_vdupq_laneq_s8(__a, __b) \ - __aarch64_vdup_lane_any (s8, q, q, __a, __b) + __aarch64_vdup_lane_any (s8, q, __a, __b) #define __aarch64_vdupq_laneq_s16(__a, __b) \ - __aarch64_vdup_lane_any (s16, q, q, __a, __b) + __aarch64_vdup_lane_any (s16, q, __a, __b) #define __aarch64_vdupq_laneq_s32(__a, __b) \ - __aarch64_vdup_lane_any (s32, q, q, __a, __b) + __aarch64_vdup_lane_any (s32, q, __a, __b) #define __aarch64_vdupq_laneq_s64(__a, __b) \ - __aarch64_vdup_lane_any (s64, q, q, __a, __b) + __aarch64_vdup_lane_any (s64, q, __a, __b) #define __aarch64_vdupq_laneq_u8(__a, __b) \ - __aarch64_vdup_lane_any (u8, q, q, __a, __b) + __aarch64_vdup_lane_any (u8, q, __a, __b) #define __aarch64_vdupq_laneq_u16(__a, __b) \ - __aarch64_vdup_lane_any (u16, q, q, __a, __b) + __aarch64_vdup_lane_any (u16, q, __a, __b) #define __aarch64_vdupq_laneq_u32(__a, __b) \ - __aarch64_vdup_lane_any (u32, q, q, __a, __b) + __aarch64_vdup_lane_any (u32, q, __a, __b) #define __aarch64_vdupq_laneq_u64(__a, __b) \ - __aarch64_vdup_lane_any (u64, q, q, __a, __b) + __aarch64_vdup_lane_any (u64, q, __a, __b) /* Internal macro for lane indices. */ @@ -618,8 +547,15 @@ typedef struct poly16x8x4_t #define __aarch64_lane(__vec, __idx) __idx #endif -/* vset_lane and vld1_lane internal macro. */ +/* vget_lane internal macro. */ +#define __aarch64_vget_lane_any(__vec, __index) \ + __extension__ \ + ({ \ + __AARCH64_LANE_CHECK (__vec, __index); \ + __vec[__aarch64_lane (__vec, __index)]; \ + }) +/* vset_lane and vld1_lane internal macro. 
*/ #define __aarch64_vset_lane_any(__elem, __vec, __index) \ __extension__ \ ({ \ @@ -2754,73 +2690,73 @@ vcreate_p16 (uint64_t __a) __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vget_lane_f32 (float32x2_t __a, const int __b) { - return __aarch64_vget_lane_f32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vget_lane_f64 (float64x1_t __a, const int __b) { - return __aarch64_vget_lane_f64 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline poly8_t __attribute__ ((__always_inline__)) vget_lane_p8 (poly8x8_t __a, const int __b) { - return __aarch64_vget_lane_p8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline poly16_t __attribute__ ((__always_inline__)) vget_lane_p16 (poly16x4_t __a, const int __b) { - return __aarch64_vget_lane_p16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vget_lane_s8 (int8x8_t __a, const int __b) { - return __aarch64_vget_lane_s8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vget_lane_s16 (int16x4_t __a, const int __b) { - return __aarch64_vget_lane_s16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vget_lane_s32 (int32x2_t __a, const int __b) { - return __aarch64_vget_lane_s32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vget_lane_s64 (int64x1_t __a, const int __b) { - return __aarch64_vget_lane_s64 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vget_lane_u8 (uint8x8_t __a, const int __b) { - return __aarch64_vget_lane_u8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vget_lane_u16 (uint16x4_t __a, const int __b) { - return __aarch64_vget_lane_u16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vget_lane_u32 (uint32x2_t __a, const int __b) { - return __aarch64_vget_lane_u32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vget_lane_u64 (uint64x1_t __a, const int __b) { - return __aarch64_vget_lane_u64 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } /* vgetq_lane */ @@ -2828,73 +2764,73 @@ vget_lane_u64 (uint64x1_t __a, const int __b) __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vgetq_lane_f32 (float32x4_t __a, const int __b) { - return __aarch64_vgetq_lane_f32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vgetq_lane_f64 (float64x2_t __a, const int __b) { - return __aarch64_vgetq_lane_f64 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline poly8_t __attribute__ ((__always_inline__)) vgetq_lane_p8 (poly8x16_t __a, const int __b) { - return __aarch64_vgetq_lane_p8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline poly16_t __attribute__ ((__always_inline__)) vgetq_lane_p16 (poly16x8_t __a, const int 
__b) { - return __aarch64_vgetq_lane_p16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vgetq_lane_s8 (int8x16_t __a, const int __b) { - return __aarch64_vgetq_lane_s8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vgetq_lane_s16 (int16x8_t __a, const int __b) { - return __aarch64_vgetq_lane_s16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vgetq_lane_s32 (int32x4_t __a, const int __b) { - return __aarch64_vgetq_lane_s32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vgetq_lane_s64 (int64x2_t __a, const int __b) { - return __aarch64_vgetq_lane_s64 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vgetq_lane_u8 (uint8x16_t __a, const int __b) { - return __aarch64_vgetq_lane_u8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vgetq_lane_u16 (uint16x8_t __a, const int __b) { - return __aarch64_vgetq_lane_u16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vgetq_lane_u32 (uint32x4_t __a, const int __b) { - return __aarch64_vgetq_lane_u32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vgetq_lane_u64 (uint64x2_t __a, const int __b) { - return __aarch64_vgetq_lane_u64 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } /* vreinterpret */ @@ -14708,57 +14644,57 @@ vdupq_laneq_u64 (uint64x2_t __a, const int __b) __extension__ static __inline poly8_t __attribute__ ((__always_inline__)) vdupb_lane_p8 (poly8x8_t __a, const int __b) { - return __aarch64_vget_lane_p8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vdupb_lane_s8 (int8x8_t __a, const int __b) { - return __aarch64_vget_lane_s8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vdupb_lane_u8 (uint8x8_t __a, const int __b) { - return __aarch64_vget_lane_u8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } /* vduph_lane */ __extension__ static __inline poly16_t __attribute__ ((__always_inline__)) vduph_lane_p16 (poly16x4_t __a, const int __b) { - return __aarch64_vget_lane_p16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vduph_lane_s16 (int16x4_t __a, const int __b) { - return __aarch64_vget_lane_s16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vduph_lane_u16 (uint16x4_t __a, const int __b) { - return __aarch64_vget_lane_u16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } /* vdups_lane */ __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vdups_lane_f32 (float32x2_t __a, const int __b) { - return __aarch64_vget_lane_f32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vdups_lane_s32 (int32x2_t __a, 
const int __b) { - return __aarch64_vget_lane_s32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vdups_lane_u32 (uint32x2_t __a, const int __b) { - return __aarch64_vget_lane_u32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } /* vdupd_lane */ @@ -14787,76 +14723,76 @@ vdupd_lane_u64 (uint64x1_t __a, const int __b) __extension__ static __inline poly8_t __attribute__ ((__always_inline__)) vdupb_laneq_p8 (poly8x16_t __a, const int __b) { - return __aarch64_vgetq_lane_p8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vdupb_laneq_s8 (int8x16_t __a, const int __attribute__ ((unused)) __b) { - return __aarch64_vgetq_lane_s8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vdupb_laneq_u8 (uint8x16_t __a, const int __b) { - return __aarch64_vgetq_lane_u8 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } /* vduph_laneq */ __extension__ static __inline poly16_t __attribute__ ((__always_inline__)) vduph_laneq_p16 (poly16x8_t __a, const int __b) { - return __aarch64_vgetq_lane_p16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vduph_laneq_s16 (int16x8_t __a, const int __b) { - return __aarch64_vgetq_lane_s16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vduph_laneq_u16 (uint16x8_t __a, const int __b) { - return __aarch64_vgetq_lane_u16 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } /* vdups_laneq */ __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vdups_laneq_f32 (float32x4_t __a, const int __b) { - return __aarch64_vgetq_lane_f32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vdups_laneq_s32 (int32x4_t __a, const int __b) { - return __aarch64_vgetq_lane_s32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vdups_laneq_u32 (uint32x4_t __a, const int __b) { - return __aarch64_vgetq_lane_u32 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } /* vdupd_laneq */ __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vdupd_laneq_f64 (float64x2_t __a, const int __b) { - return __aarch64_vgetq_lane_f64 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vdupd_laneq_s64 (int64x2_t __a, const int __b) { - return __aarch64_vgetq_lane_s64 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vdupd_laneq_u64 (uint64x2_t __a, const int __b) { - return __aarch64_vgetq_lane_u64 (__a, __b); + return __aarch64_vget_lane_any (__a, __b); } /* vext */ @@ -15218,7 +15154,7 @@ __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vfmas_lane_f32 (float32_t __a, float32_t __b, float32x2_t __c, const int __lane) { - return __builtin_fmaf (__b, __aarch64_vget_lane_f32 (__c, __lane), __a); + return __builtin_fmaf (__b, __aarch64_vget_lane_any (__c, __lane), __a); } /* vfma_laneq */ @@ -15236,7 +15172,7 @@ __extension__ static __inline float64x1_t __attribute__ 
((__always_inline__)) vfma_laneq_f64 (float64x1_t __a, float64x1_t __b, float64x2_t __c, const int __lane) { - float64_t __c0 = __aarch64_vgetq_lane_f64 (__c, __lane); + float64_t __c0 = __aarch64_vget_lane_any (__c, __lane); return (float64x1_t) {__builtin_fma (__b[0], __c0, __a[0])}; } @@ -15244,14 +15180,14 @@ __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vfmad_laneq_f64 (float64_t __a, float64_t __b, float64x2_t __c, const int __lane) { - return __builtin_fma (__b, __aarch64_vgetq_lane_f64 (__c, __lane), __a); + return __builtin_fma (__b, __aarch64_vget_lane_any (__c, __lane), __a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vfmas_laneq_f32 (float32_t __a, float32_t __b, float32x4_t __c, const int __lane) { - return __builtin_fmaf (__b, __aarch64_vgetq_lane_f32 (__c, __lane), __a); + return __builtin_fmaf (__b, __aarch64_vget_lane_any (__c, __lane), __a); } /* vfmaq_lane */ @@ -15348,7 +15284,7 @@ __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vfmss_lane_f32 (float32_t __a, float32_t __b, float32x2_t __c, const int __lane) { - return __builtin_fmaf (-__b, __aarch64_vget_lane_f32 (__c, __lane), __a); + return __builtin_fmaf (-__b, __aarch64_vget_lane_any (__c, __lane), __a); } /* vfms_laneq */ @@ -15366,7 +15302,7 @@ __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vfms_laneq_f64 (float64x1_t __a, float64x1_t __b, float64x2_t __c, const int __lane) { - float64_t __c0 = __aarch64_vgetq_lane_f64 (__c, __lane); + float64_t __c0 = __aarch64_vget_lane_any (__c, __lane); return (float64x1_t) {__builtin_fma (-__b[0], __c0, __a[0])}; } @@ -15374,14 +15310,14 @@ __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vfmsd_laneq_f64 (float64_t __a, float64_t __b, float64x2_t __c, const int __lane) { - return __builtin_fma (-__b, __aarch64_vgetq_lane_f64 (__c, __lane), __a); + return __builtin_fma (-__b, __aarch64_vget_lane_any (__c, __lane), __a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vfmss_laneq_f32 (float32_t __a, float32_t __b, float32x4_t __c, const int __lane) { - return __builtin_fmaf (-__b, __aarch64_vgetq_lane_f32 (__c, __lane), __a); + return __builtin_fmaf (-__b, __aarch64_vget_lane_any (__c, __lane), __a); } /* vfmsq_lane */ @@ -18382,35 +18318,35 @@ __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmla_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __lane) { - return (__a + (__b * __aarch64_vget_lane_f32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmla_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __lane) { - return (__a + (__b * __aarch64_vget_lane_s16 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmla_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __lane) { - return (__a + (__b * __aarch64_vget_lane_s32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __lane) { - return (__a + (__b * __aarch64_vget_lane_u16 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static 
__inline uint32x2_t __attribute__ ((__always_inline__)) vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __lane) { - return (__a + (__b * __aarch64_vget_lane_u32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } /* vmla_laneq */ @@ -18419,35 +18355,35 @@ __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmla_laneq_f32 (float32x2_t __a, float32x2_t __b, float32x4_t __c, const int __lane) { - return (__a + (__b * __aarch64_vgetq_lane_f32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmla_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __lane) { - return (__a + (__b * __aarch64_vgetq_lane_s16 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmla_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __lane) { - return (__a + (__b * __aarch64_vgetq_lane_s32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmla_laneq_u16 (uint16x4_t __a, uint16x4_t __b, uint16x8_t __c, const int __lane) { - return (__a + (__b * __aarch64_vgetq_lane_u16 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmla_laneq_u32 (uint32x2_t __a, uint32x2_t __b, uint32x4_t __c, const int __lane) { - return (__a + (__b * __aarch64_vgetq_lane_u32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } /* vmlaq_lane */ @@ -18456,35 +18392,35 @@ __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __lane) { - return (__a + (__b * __aarch64_vget_lane_f32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __lane) { - return (__a + (__b * __aarch64_vget_lane_s16 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __lane) { - return (__a + (__b * __aarch64_vget_lane_s32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __lane) { - return (__a + (__b * __aarch64_vget_lane_u16 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __lane) { - return (__a + (__b * __aarch64_vget_lane_u32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } /* vmlaq_laneq */ @@ -18493,35 +18429,35 @@ __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, const int __lane) { - return (__a + (__b * __aarch64_vgetq_lane_f32 (__c, __lane))); + return (__a + 
(__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlaq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __lane) { - return (__a + (__b * __aarch64_vgetq_lane_s16 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlaq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __lane) { - return (__a + (__b * __aarch64_vgetq_lane_s32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlaq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c, const int __lane) { - return (__a + (__b * __aarch64_vgetq_lane_u16 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlaq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c, const int __lane) { - return (__a + (__b * __aarch64_vgetq_lane_u32 (__c, __lane))); + return (__a + (__b * __aarch64_vget_lane_any (__c, __lane))); } /* vmls */ @@ -18556,35 +18492,35 @@ __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmls_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __lane) { - return (__a - (__b * __aarch64_vget_lane_f32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmls_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __lane) { - return (__a - (__b * __aarch64_vget_lane_s16 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmls_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __lane) { - return (__a - (__b * __aarch64_vget_lane_s32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __lane) { - return (__a - (__b * __aarch64_vget_lane_u16 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __lane) { - return (__a - (__b * __aarch64_vget_lane_u32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } /* vmls_laneq */ @@ -18593,35 +18529,35 @@ __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmls_laneq_f32 (float32x2_t __a, float32x2_t __b, float32x4_t __c, const int __lane) { - return (__a - (__b * __aarch64_vgetq_lane_f32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmls_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __lane) { - return (__a - (__b * __aarch64_vgetq_lane_s16 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmls_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __lane) { - return (__a - (__b * __aarch64_vgetq_lane_s32 (__c, __lane))); + return 
(__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmls_laneq_u16 (uint16x4_t __a, uint16x4_t __b, uint16x8_t __c, const int __lane) { - return (__a - (__b * __aarch64_vgetq_lane_u16 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmls_laneq_u32 (uint32x2_t __a, uint32x2_t __b, uint32x4_t __c, const int __lane) { - return (__a - (__b * __aarch64_vgetq_lane_u32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } /* vmlsq_lane */ @@ -18630,35 +18566,35 @@ __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __lane) { - return (__a - (__b * __aarch64_vget_lane_f32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __lane) { - return (__a - (__b * __aarch64_vget_lane_s16 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __lane) { - return (__a - (__b * __aarch64_vget_lane_s32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __lane) { - return (__a - (__b * __aarch64_vget_lane_u16 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __lane) { - return (__a - (__b * __aarch64_vget_lane_u32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } /* vmlsq_laneq */ @@ -18667,34 +18603,34 @@ __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, const int __lane) { - return (__a - (__b * __aarch64_vgetq_lane_f32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlsq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __lane) { - return (__a - (__b * __aarch64_vgetq_lane_s16 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlsq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __lane) { - return (__a - (__b * __aarch64_vgetq_lane_s32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlsq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c, const int __lane) { - return (__a - (__b * __aarch64_vgetq_lane_u16 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlsq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c, const int __lane) { - return (__a - (__b * 
__aarch64_vgetq_lane_u32 (__c, __lane))); + return (__a - (__b * __aarch64_vget_lane_any (__c, __lane))); } /* vmov_n_ */ @@ -18848,7 +18784,7 @@ vmovq_n_u64 (uint64_t __a) __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __lane) { - return __a * __aarch64_vget_lane_f32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) @@ -18860,25 +18796,25 @@ vmul_lane_f64 (float64x1_t __a, float64x1_t __b, const int __lane) __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __lane) { - return __a * __aarch64_vget_lane_s16 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __lane) { - return __a * __aarch64_vget_lane_s32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __lane) { - return __a * __aarch64_vget_lane_u16 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __lane) { - return __a * __aarch64_vget_lane_u32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } /* vmuld_lane */ @@ -18886,13 +18822,13 @@ vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __lane) __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vmuld_lane_f64 (float64_t __a, float64x1_t __b, const int __lane) { - return __a * __aarch64_vget_lane_f64 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vmuld_laneq_f64 (float64_t __a, float64x2_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_f64 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } /* vmuls_lane */ @@ -18900,13 +18836,13 @@ vmuld_laneq_f64 (float64_t __a, float64x2_t __b, const int __lane) __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vmuls_lane_f32 (float32_t __a, float32x2_t __b, const int __lane) { - return __a * __aarch64_vget_lane_f32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vmuls_laneq_f32 (float32_t __a, float32x4_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_f32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } /* vmul_laneq */ @@ -18914,37 +18850,37 @@ vmuls_laneq_f32 (float32_t __a, float32x4_t __b, const int __lane) __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmul_laneq_f32 (float32x2_t __a, float32x4_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_f32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vmul_laneq_f64 (float64x1_t __a, float64x2_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_f64 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) 
vmul_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_s16 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmul_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_s32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmul_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_u16 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmul_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_u32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } /* vmul_n */ @@ -18960,7 +18896,7 @@ vmul_n_f64 (float64x1_t __a, float64_t __b) __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __lane) { - return __a * __aarch64_vget_lane_f32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) @@ -18973,25 +18909,25 @@ vmulq_lane_f64 (float64x2_t __a, float64x1_t __b, const int __lane) __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __lane) { - return __a * __aarch64_vget_lane_s16 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __lane) { - return __a * __aarch64_vget_lane_s32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __lane) { - return __a * __aarch64_vget_lane_u16 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __lane) { - return __a * __aarch64_vget_lane_u32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } /* vmulq_laneq */ @@ -18999,37 +18935,37 @@ vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __lane) __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmulq_laneq_f32 (float32x4_t __a, float32x4_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_f32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmulq_laneq_f64 (float64x2_t __a, float64x2_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_f64 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmulq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_s16 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmulq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_s32 (__b, __lane); + 
return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmulq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_u16 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmulq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, const int __lane) { - return __a * __aarch64_vgetq_lane_u32 (__b, __lane); + return __a * __aarch64_vget_lane_any (__b, __lane); } /* vneg */ @@ -25221,31 +25157,6 @@ __INTERLEAVE_LIST (zip) /* End of optimal implementations in approved order. */ #undef __aarch64_vget_lane_any -#undef __aarch64_vget_lane_f32 -#undef __aarch64_vget_lane_f64 -#undef __aarch64_vget_lane_p8 -#undef __aarch64_vget_lane_p16 -#undef __aarch64_vget_lane_s8 -#undef __aarch64_vget_lane_s16 -#undef __aarch64_vget_lane_s32 -#undef __aarch64_vget_lane_s64 -#undef __aarch64_vget_lane_u8 -#undef __aarch64_vget_lane_u16 -#undef __aarch64_vget_lane_u32 -#undef __aarch64_vget_lane_u64 - -#undef __aarch64_vgetq_lane_f32 -#undef __aarch64_vgetq_lane_f64 -#undef __aarch64_vgetq_lane_p8 -#undef __aarch64_vgetq_lane_p16 -#undef __aarch64_vgetq_lane_s8 -#undef __aarch64_vgetq_lane_s16 -#undef __aarch64_vgetq_lane_s32 -#undef __aarch64_vgetq_lane_s64 -#undef __aarch64_vgetq_lane_u8 -#undef __aarch64_vgetq_lane_u16 -#undef __aarch64_vgetq_lane_u32 -#undef __aarch64_vgetq_lane_u64 #undef __aarch64_vdup_lane_any #undef __aarch64_vdup_lane_f32 diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index c7e9dec7de9..9d03fb8e852 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,31 @@ +2014-12-09 Alan Lawrence + + PR target/63870 + * gcc.target/aarch64/simd/vget_lane_f32_indices_1.c: New test. + * gcc.target/aarch64/simd/vget_lane_f64_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vget_lane_p16_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vget_lane_p8_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vget_lane_s16_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vget_lane_s32_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vget_lane_s64_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vget_lane_s8_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vget_lane_u16_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vget_lane_u32_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vget_lane_u64_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vget_lane_u8_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_f32_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_f64_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_p16_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_p8_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_s16_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_s32_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_s64_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_s8_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_u16_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_u32_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_u64_indices_1.c: Likewise. + * gcc.target/aarch64/simd/vgetq_lane_u8_indices_1.c: Likewise. 
+
 2014-12-09  Alan Lawrence
 
 	PR target/63870
 
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_f32_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_f32_indices_1.c
new file mode 100644
index 00000000000..d16a3e882d5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_f32_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+float32_t
+test_vget_lane_f32_before (float32x2_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vget_lane_f32 (in, -1);
+}
+
+float32_t
+test_vget_lane_f32_beyond (float32x2_t in)
+{
+  /* { dg-error "lane 2 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vget_lane_f32 (in, 2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_f64_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_f64_indices_1.c
new file mode 100644
index 00000000000..0e90429ae96
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_f64_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+float64_t
+test_vget_lane_f64_before (float64x1_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 0" "" {target *-*-*} 0 } */
+  return vget_lane_f64 (in, -1);
+}
+
+float64_t
+test_vget_lane_f64_beyond (float64x1_t in)
+{
+  /* { dg-error "lane 1 out of range 0 - 0" "" {target *-*-*} 0 } */
+  return vget_lane_f64 (in, 1);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_p16_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_p16_indices_1.c
new file mode 100644
index 00000000000..bcf25394519
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_p16_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+poly16_t
+test_vget_lane_p16_before (poly16x4_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vget_lane_p16 (in, -1);
+}
+
+poly16_t
+test_vget_lane_p16_beyond (poly16x4_t in)
+{
+  /* { dg-error "lane 4 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vget_lane_p16 (in, 4);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_p8_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_p8_indices_1.c
new file mode 100644
index 00000000000..5dc8dc47afe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_p8_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+poly8_t
+test_vget_lane_p8_before (poly8x8_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vget_lane_p8 (in, -1);
+}
+
+poly8_t
+test_vget_lane_p8_beyond (poly8x8_t in)
+{
+  /* { dg-error "lane 8 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vget_lane_p8 (in, 8);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s16_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s16_indices_1.c
new file mode 100644
index 00000000000..c65fb40721a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s16_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+int16_t
+test_vget_lane_s16_before (int16x4_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vget_lane_s16 (in, -1);
+}
+
+int16_t
+test_vget_lane_s16_beyond (int16x4_t in)
+{
+  /* { dg-error "lane 4 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vget_lane_s16 (in, 4);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s32_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s32_indices_1.c
new file mode 100644
index 00000000000..1f95137832d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s32_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+int32_t
+test_vget_lane_s32_before (int32x2_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vget_lane_s32 (in, -1);
+}
+
+int32_t
+test_vget_lane_s32_beyond (int32x2_t in)
+{
+  /* { dg-error "lane 2 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vget_lane_s32 (in, 2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s64_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s64_indices_1.c
new file mode 100644
index 00000000000..e449797fe54
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s64_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+int64_t
+test_vget_lane_s64_before (int64x1_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 0" "" {target *-*-*} 0 } */
+  return vget_lane_s64 (in, -1);
+}
+
+int64_t
+test_vget_lane_s64_beyond (int64x1_t in)
+{
+  /* { dg-error "lane 1 out of range 0 - 0" "" {target *-*-*} 0 } */
+  return vget_lane_s64 (in, 1);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s8_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s8_indices_1.c
new file mode 100644
index 00000000000..77e94860f1d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_s8_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+int8_t
+test_vget_lane_s8_before (int8x8_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vget_lane_s8 (in, -1);
+}
+
+int8_t
+test_vget_lane_s8_beyond (int8x8_t in)
+{
+  /* { dg-error "lane 8 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vget_lane_s8 (in, 8);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u16_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u16_indices_1.c
new file mode 100644
index 00000000000..77fb3c8057c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u16_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+uint16_t
+test_vget_lane_u16_before (uint16x4_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vget_lane_u16 (in, -1);
+}
+
+uint16_t
+test_vget_lane_u16_beyond (uint16x4_t in)
+{
+  /* { dg-error "lane 4 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vget_lane_u16 (in, 4);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u32_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u32_indices_1.c
new file mode 100644
index 00000000000..e670626a060
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u32_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+uint32_t
+test_vget_lane_u32_before (uint32x2_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vget_lane_u32 (in, -1);
+}
+
+uint32_t
+test_vget_lane_u32_beyond (uint32x2_t in)
+{
+  /* { dg-error "lane 2 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vget_lane_u32 (in, 2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u64_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u64_indices_1.c
new file mode 100644
index 00000000000..44d5a4d9e01
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u64_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+uint64_t
+test_vget_lane_u64_before (uint64x1_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 0" "" {target *-*-*} 0 } */
+  return vget_lane_u64 (in, -1);
+}
+
+uint64_t
+test_vget_lane_u64_beyond (uint64x1_t in)
+{
+  /* { dg-error "lane 1 out of range 0 - 0" "" {target *-*-*} 0 } */
+  return vget_lane_u64 (in, 1);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u8_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u8_indices_1.c
new file mode 100644
index 00000000000..b452d56c9c1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vget_lane_u8_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+uint8_t
+test_vget_lane_u8_before (uint8x8_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vget_lane_u8 (in, -1);
+}
+
+uint8_t
+test_vget_lane_u8_beyond (uint8x8_t in)
+{
+  /* { dg-error "lane 8 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vget_lane_u8 (in, 8);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_f32_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_f32_indices_1.c
new file mode 100644
index 00000000000..8a50ed2ac11
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_f32_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+float32_t
+test_vgetq_lane_f32_before (float32x4_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vgetq_lane_f32 (in, -1);
+}
+
+float32_t
+test_vgetq_lane_f32_beyond (float32x4_t in)
+{
+  /* { dg-error "lane 4 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vgetq_lane_f32 (in, 4);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_f64_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_f64_indices_1.c
new file mode 100644
index 00000000000..492b1ae66b9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_f64_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+float64_t
+test_vgetq_lane_f64_before (float64x2_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vgetq_lane_f64 (in, -1);
+}
+
+float64_t
+test_vgetq_lane_f64_beyond (float64x2_t in)
+{
+  /* { dg-error "lane 2 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vgetq_lane_f64 (in, 2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_p16_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_p16_indices_1.c
new file mode 100644
index 00000000000..caa41b269c3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_p16_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+poly16_t
+test_vgetq_lane_p16_before (poly16x8_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vgetq_lane_p16 (in, -1);
+}
+
+poly16_t
+test_vgetq_lane_p16_beyond (poly16x8_t in)
+{
+  /* { dg-error "lane 8 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vgetq_lane_p16 (in, 8);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_p8_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_p8_indices_1.c
new file mode 100644
index 00000000000..38caa27e108
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_p8_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+poly8_t
+test_vgetq_lane_p8_before (poly8x16_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 15" "" {target *-*-*} 0 } */
+  return vgetq_lane_p8 (in, -1);
+}
+
+poly8_t
+test_vgetq_lane_p8_beyond (poly8x16_t in)
+{
+  /* { dg-error "lane 16 out of range 0 - 15" "" {target *-*-*} 0 } */
+  return vgetq_lane_p8 (in, 16);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s16_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s16_indices_1.c
new file mode 100644
index 00000000000..0f4e4f58253
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s16_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+int16_t
+test_vgetq_lane_s16_before (int16x8_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vgetq_lane_s16 (in, -1);
+}
+
+int16_t
+test_vgetq_lane_s16_beyond (int16x8_t in)
+{
+  /* { dg-error "lane 8 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vgetq_lane_s16 (in, 8);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s32_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s32_indices_1.c
new file mode 100644
index 00000000000..68133b4bdca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s32_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+int32_t
+test_vgetq_lane_s32_before (int32x4_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vgetq_lane_s32 (in, -1);
+}
+
+int32_t
+test_vgetq_lane_s32_beyond (int32x4_t in)
+{
+  /* { dg-error "lane 4 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vgetq_lane_s32 (in, 4);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s64_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s64_indices_1.c
new file mode 100644
index 00000000000..4ac607fe2cc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s64_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+int64_t
+test_vgetq_lane_s64_before (int64x2_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vgetq_lane_s64 (in, -1);
+}
+
+int64_t
+test_vgetq_lane_s64_beyond (int64x2_t in)
+{
+  /* { dg-error "lane 2 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vgetq_lane_s64 (in, 2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s8_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s8_indices_1.c
new file mode 100644
index 00000000000..0e44dbc1851
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_s8_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+int8_t
+test_vgetq_lane_s8_before (int8x16_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 15" "" {target *-*-*} 0 } */
+  return vgetq_lane_s8 (in, -1);
+}
+
+int8_t
+test_vgetq_lane_s8_beyond (int8x16_t in)
+{
+  /* { dg-error "lane 16 out of range 0 - 15" "" {target *-*-*} 0 } */
+  return vgetq_lane_s8 (in, 16);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u16_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u16_indices_1.c
new file mode 100644
index 00000000000..5ccea06f9ee
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u16_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+uint16_t
+test_vgetq_lane_u16_before (uint16x8_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vgetq_lane_u16 (in, -1);
+}
+
+uint16_t
+test_vgetq_lane_u16_beyond (uint16x8_t in)
+{
+  /* { dg-error "lane 8 out of range 0 - 7" "" {target *-*-*} 0 } */
+  return vgetq_lane_u16 (in, 8);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u32_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u32_indices_1.c
new file mode 100644
index 00000000000..bfbf081cbe2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u32_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+uint32_t
+test_vgetq_lane_u32_before (uint32x4_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vgetq_lane_u32 (in, -1);
+}
+
+uint32_t
+test_vgetq_lane_u32_beyond (uint32x4_t in)
+{
+  /* { dg-error "lane 4 out of range 0 - 3" "" {target *-*-*} 0 } */
+  return vgetq_lane_u32 (in, 4);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u64_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u64_indices_1.c
new file mode 100644
index 00000000000..a0d426e84ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u64_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+uint64_t
+test_vgetq_lane_u64_before (uint64x2_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vgetq_lane_u64 (in, -1);
+}
+
+uint64_t
+test_vgetq_lane_u64_beyond (uint64x2_t in)
+{
+  /* { dg-error "lane 2 out of range 0 - 1" "" {target *-*-*} 0 } */
+  return vgetq_lane_u64 (in, 2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u8_indices_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u8_indices_1.c
new file mode 100644
index 00000000000..c9ad6634d7b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vgetq_lane_u8_indices_1.c
@@ -0,0 +1,17 @@
+/* { dg-do assemble } */
+
+#include <arm_neon.h>
+
+uint8_t
+test_vgetq_lane_u8_before (uint8x16_t in)
+{
+  /* { dg-error "lane -1 out of range 0 - 15" "" {target *-*-*} 0 } */
+  return vgetq_lane_u8 (in, -1);
+}
+
+uint8_t
+test_vgetq_lane_u8_beyond (uint8x16_t in)
+{
+  /* { dg-error "lane 16 out of range 0 - 15" "" {target *-*-*} 0 } */
+  return vgetq_lane_u8 (in, 16);
+}
-- 
2.30.2