From 3eff57aacfef6e05f55e9dd6ecae3ef8568aaac4 Mon Sep 17 00:00:00 2001
From: Srinath Parvathaneni
Date: Fri, 20 Mar 2020 11:44:08 +0000
Subject: [PATCH] [ARM][GCC][6x]: MVE ACLE vaddq intrinsics using arithmetic
 plus operator.

This patch supports the following MVE ACLE vaddq intrinsics. The RTL
patterns for these intrinsics are added using the arithmetic "plus"
operator.

vaddq_s8, vaddq_s16, vaddq_s32, vaddq_u8, vaddq_u16, vaddq_u32,
vaddq_f16, vaddq_f32.

Please refer to the M-profile Vector Extension (MVE) intrinsics [1] for
more details.

[1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics

gcc/ChangeLog:

2020-03-20  Srinath Parvathaneni
	    Andre Vieira
	    Mihail Ionescu

	* config/arm/arm_mve.h (vaddq_s8): Define macro.
	(vaddq_s16): Likewise.
	(vaddq_s32): Likewise.
	(vaddq_u8): Likewise.
	(vaddq_u16): Likewise.
	(vaddq_u32): Likewise.
	(vaddq_f16): Likewise.
	(vaddq_f32): Likewise.
	(__arm_vaddq_s8): Define intrinsic.
	(__arm_vaddq_s16): Likewise.
	(__arm_vaddq_s32): Likewise.
	(__arm_vaddq_u8): Likewise.
	(__arm_vaddq_u16): Likewise.
	(__arm_vaddq_u32): Likewise.
	(__arm_vaddq_f16): Likewise.
	(__arm_vaddq_f32): Likewise.
	(vaddq): Define polymorphic variant.
	* config/arm/iterators.md (VNIM): Define mode iterator for modes
	common to Neon, IWMMXT and MVE.
	(VNINOTM): Likewise.
	* config/arm/mve.md (mve_vaddq<mode>): Define RTL pattern.
	(mve_vaddq_f<mode>): Define RTL pattern.
	* config/arm/neon.md (add<mode>3): Rename to addv4hf3 RTL pattern.
	(addv8hf3_neon): Define RTL pattern.
	* config/arm/vec-common.md (add<mode>3): Modify standard add RTL
	pattern to support MVE.
	(addv8hf3): Define standard RTL pattern for MVE and Neon.
	(add<mode>3): Modify existing standard add RTL pattern for Neon and IWMMXT.

gcc/testsuite/ChangeLog:

2020-03-20  Srinath Parvathaneni
	    Andre Vieira
	    Mihail Ionescu

	* gcc.target/arm/mve/intrinsics/vaddq_f16.c: New test.
	* gcc.target/arm/mve/intrinsics/vaddq_f32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_s8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vaddq_u8.c: Likewise.
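A minimal usage sketch for reference (not part of the patch; the
function names below are illustrative only).  With MVE enabled, each
call compiles to a single vadd instruction, which is what the new
scan-assembler tests check for:

    #include "arm_mve.h"

    /* Explicitly typed variant: adds four 32-bit lanes.  */
    uint32x4_t
    add_u32 (uint32x4_t a, uint32x4_t b)
    {
      return vaddq_u32 (a, b);	/* vadd.i32  */
    }

    /* Polymorphic variant: vaddq resolves to __arm_vaddq_f32
       via the _Generic-style selector in arm_mve.h.  */
    float32x4_t
    add_f32 (float32x4_t a, float32x4_t b)
    {
      return vaddq (a, b);	/* vadd.f32  */
    }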
---
 gcc/ChangeLog                                 | 33 ++++
 gcc/config/arm/arm_mve.h                      | 66 +++++++++++++++++++
 gcc/config/arm/iterators.md                   |  8 +++
 gcc/config/arm/mve.md                         | 28 ++++++++
 gcc/config/arm/neon.md                        | 32 ++++++---
 gcc/config/arm/vec-common.md                  | 42 ++++++++++--
 gcc/testsuite/ChangeLog                       | 13 ++++
 .../gcc.target/arm/mve/intrinsics/vaddq_f16.c | 22 +++++++
 .../gcc.target/arm/mve/intrinsics/vaddq_f32.c | 22 +++++++
 .../gcc.target/arm/mve/intrinsics/vaddq_s16.c | 22 +++++++
 .../gcc.target/arm/mve/intrinsics/vaddq_s32.c | 22 +++++++
 .../gcc.target/arm/mve/intrinsics/vaddq_s8.c  | 22 +++++++
 .../gcc.target/arm/mve/intrinsics/vaddq_u16.c | 22 +++++++
 .../gcc.target/arm/mve/intrinsics/vaddq_u32.c | 22 +++++++
 .../gcc.target/arm/mve/intrinsics/vaddq_u8.c  | 22 +++++++
 15 files changed, 383 insertions(+), 15 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c
 create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c
 create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c
 create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c
 create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c
 create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c
 create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c
 create mode 100644 gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index c7b3325876d..b98f573e711 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,36 @@
+2020-03-20  Srinath Parvathaneni
+	    Andre Vieira
+	    Mihail Ionescu
+
+	* config/arm/arm_mve.h (vaddq_s8): Define macro.
+	(vaddq_s16): Likewise.
+	(vaddq_s32): Likewise.
+	(vaddq_u8): Likewise.
+	(vaddq_u16): Likewise.
+	(vaddq_u32): Likewise.
+	(vaddq_f16): Likewise.
+	(vaddq_f32): Likewise.
+	(__arm_vaddq_s8): Define intrinsic.
+	(__arm_vaddq_s16): Likewise.
+	(__arm_vaddq_s32): Likewise.
+	(__arm_vaddq_u8): Likewise.
+	(__arm_vaddq_u16): Likewise.
+	(__arm_vaddq_u32): Likewise.
+	(__arm_vaddq_f16): Likewise.
+	(__arm_vaddq_f32): Likewise.
+	(vaddq): Define polymorphic variant.
+	* config/arm/iterators.md (VNIM): Define mode iterator for modes
+	common to Neon, IWMMXT and MVE.
+	(VNINOTM): Likewise.
+	* config/arm/mve.md (mve_vaddq<mode>): Define RTL pattern.
+	(mve_vaddq_f<mode>): Define RTL pattern.
+	* config/arm/neon.md (add<mode>3): Rename to addv4hf3 RTL pattern.
+	(addv8hf3_neon): Define RTL pattern.
+	* config/arm/vec-common.md (add<mode>3): Modify standard add RTL
+	pattern to support MVE.
+	(addv8hf3): Define standard RTL pattern for MVE and Neon.
+	(add<mode>3): Modify existing standard add RTL pattern for Neon and IWMMXT.
+
 2020-03-20  Martin Liska
 
 	PR ipa/94232
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index 5ea42bd6a5b..55c256910bb 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -1898,6 +1898,14 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
 #define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
 #define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
 #define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
+#define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b)
+#define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b)
+#define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b)
+#define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b)
+#define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b)
+#define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
+#define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
+#define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
 #endif
 
 __extension__ extern __inline void
@@ -12341,6 +12349,48 @@ __arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset,
   __builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
 }
 
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+  return __a + __b;
+}
+
 #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
 
 __extension__ extern __inline void
@@ -14707,6 +14757,20 @@ __arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offset,
   __builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf (__base, __offset, __value, __p);
 }
 
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_f16 (float16x8_t __a, float16x8_t __b)
+{
+  return __a + __b;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __a + __b;
+}
+
 #endif
 
 enum {
@@ -15186,6 +15250,8 @@ extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index 5c1a11bf7de..f3cbc0d0356 100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -66,6 +66,14 @@
 ;; Integer and float modes supported by Neon and IWMMXT.
 (define_mode_iterator VALL [V2DI V2SI V4HI V8QI V2SF V4SI V8HI V16QI V4SF])
 
+;; Integer and float modes supported by Neon, IWMMXT and MVE, used by
+;; arithmetic expand patterns.
+(define_mode_iterator VNIM [V16QI V8HI V4SI V4SF])
+
+;; Integer and float modes supported by Neon and IWMMXT but not MVE, used by
+;; arithmetic expand patterns.
+(define_mode_iterator VNINOTM [V2SI V4HI V8QI V2SF V2DI])
+
 ;; Integer and float modes supported by Neon, IWMMXT and MVE.
 (define_mode_iterator VNIM1 [V16QI V8HI V4SI V4SF V2DI])

diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index b80a2a66044..77b36a7a9a7 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -9643,3 +9643,31 @@
   return "";
 }
   [(set_attr "length" "4")])
+
+;;
+;; [vaddq_s, vaddq_u])
+;;
+(define_insn "mve_vaddq<mode>"
+  [
+   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+	(plus:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
+		    (match_operand:MVE_2 2 "s_register_operand" "w")))
+  ]
+  "TARGET_HAVE_MVE"
+  "vadd.i%#<V_sz_elem> %q0, %q1, %q2"
+  [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vaddq_f])
+;;
+(define_insn "mve_vaddq_f<mode>"
+  [
+   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
+	(plus:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
+		    (match_operand:MVE_0 2 "s_register_operand" "w")))
+  ]
+  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+  "vadd.f%#<V_sz_elem> %q0, %q1, %q2"
+  [(set_attr "type" "mve_move")
+])
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index fbfeef233f3..272e6c1e7cf 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -519,18 +519,30 @@
 ;; As with SFmode, full support for HFmode vector arithmetic is only available
 ;; when flag-unsafe-math-optimizations is enabled.
 
-(define_insn "add<mode>3"
+;; The add pattern with modes V8HF and V4HF is split into separate patterns
+;; to add support for the standard pattern addv8hf3 in MVE.  The following
+;; pattern is called from the "addv8hf3" standard pattern inside
+;; vec-common.md.
+
+(define_insn "addv8hf3_neon"
   [(set
-    (match_operand:VH 0 "s_register_operand" "=w")
-    (plus:VH
-     (match_operand:VH 1 "s_register_operand" "w")
-     (match_operand:VH 2 "s_register_operand" "w")))]
+    (match_operand:V8HF 0 "s_register_operand" "=w")
+    (plus:V8HF
+     (match_operand:V8HF 1 "s_register_operand" "w")
+     (match_operand:V8HF 2 "s_register_operand" "w")))]
   "TARGET_NEON_FP16INST && flag_unsafe_math_optimizations"
-  "vadd.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
-  [(set (attr "type")
-    (if_then_else (match_test "<Is_float_mode>")
-     (const_string "neon_fp_addsub_s<q>")
-     (const_string "neon_add<q>")))]
+  "vadd.f16\t%q0, %q1, %q2"
+  [(set_attr "type" "neon_fp_addsub_s_q")]
+)
+
+(define_insn "addv4hf3"
+  [(set
+    (match_operand:V4HF 0 "s_register_operand" "=w")
+    (plus:V4HF
+     (match_operand:V4HF 1 "s_register_operand" "w")
+     (match_operand:V4HF 2 "s_register_operand" "w")))]
+  "TARGET_NEON_FP16INST && flag_unsafe_math_optimizations"
+  "vadd.f16\t%P0, %P1, %P2"
+  [(set_attr "type" "neon_fp_addsub_s_q")]
 )
 
 (define_insn "add<mode>3_fp16"
diff --git a/gcc/config/arm/vec-common.md b/gcc/config/arm/vec-common.md
index 916e4914a62..786daa62851 100644
--- a/gcc/config/arm/vec-common.md
+++ b/gcc/config/arm/vec-common.md
@@ -77,19 +77,51 @@
     }
 })
 
-;; Vector arithmetic.  Expanders are blank, then unnamed insns implement
-;; patterns separately for IWMMXT and Neon.
+;; Vector arithmetic.  Expanders are blank, then unnamed insns implement
+;; patterns separately for Neon, IWMMXT and MVE.
(define_expand "add3" - [(set (match_operand:VALL 0 "s_register_operand") - (plus:VALL (match_operand:VALL 1 "s_register_operand") - (match_operand:VALL 2 "s_register_operand")))] + [(set (match_operand:VNIM 0 "s_register_operand") + (plus:VNIM (match_operand:VNIM 1 "s_register_operand") + (match_operand:VNIM 2 "s_register_operand")))] + "(TARGET_NEON && ((mode != V2SFmode && mode != V4SFmode) + || flag_unsafe_math_optimizations)) + || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode)) + || (TARGET_HAVE_MVE && VALID_MVE_SI_MODE(mode)) + || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE(mode))" +{ +}) + +;; Vector arithmetic. Expanders are blank, then unnamed insns implement +;; patterns separately for Neon and MVE. + +(define_expand "addv8hf3" + [(set (match_operand:V8HF 0 "s_register_operand") + (plus:V8HF (match_operand:V8HF 1 "s_register_operand") + (match_operand:V8HF 2 "s_register_operand")))] + "(TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE(V8HFmode)) + || (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)" +{ + if (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations) + emit_insn (gen_addv8hf3_neon (operands[0], operands[1], operands[2])); +}) + +;; Vector arithmetic. Expanders are blank, then unnamed insns implement +;; patterns separately for Neon and IWMMXT. + +(define_expand "add3" + [(set (match_operand:VNINOTM 0 "s_register_operand") + (plus:VNINOTM (match_operand:VNINOTM 1 "s_register_operand") + (match_operand:VNINOTM 2 "s_register_operand")))] "(TARGET_NEON && ((mode != V2SFmode && mode != V4SFmode) || flag_unsafe_math_optimizations)) || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))" { }) +;; Vector arithmetic. Expanders are blank, then unnamed insns implement +;; patterns separately for IWMMXT and Neon. + (define_expand "sub3" [(set (match_operand:VALL 0 "s_register_operand") (minus:VALL (match_operand:VALL 1 "s_register_operand") diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index 8f8b08868be..e4aeb834b2e 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,16 @@ +2020-03-20 Srinath Parvathaneni + Andre Vieira + Mihail Ionescu + + * gcc.target/arm/mve/intrinsics/vaddq_f16.c: New test. + * gcc.target/arm/mve/intrinsics/vaddq_f32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddq_s16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddq_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddq_s8.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddq_u16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddq_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vaddq_u8.c: Likewise. + 2020-03-20 Andre Vieira * gcc.target/arm/mve/intrinsics/mve_fp_fpu1.c: Fix testisms. 
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c
new file mode 100644
index 00000000000..53b84d59f85
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b)
+{
+  return vaddq_f16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c
new file mode 100644
index 00000000000..9bb7d1c0eca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b)
+{
+  return vaddq_f32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c
new file mode 100644
index 00000000000..885473c9dfe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+  return vaddq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c
new file mode 100644
index 00000000000..90ea5019817
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+  return vaddq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c
new file mode 100644
index 00000000000..dbde92affe5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+  return vaddq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+  return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8" } } */
dg-final { scan-assembler "vadd.i8" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c new file mode 100644 index 00000000000..bc966732cdd --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint16x8_t +foo (uint16x8_t a, uint16x8_t b) +{ + return vaddq_u16 (a, b); +} + +/* { dg-final { scan-assembler "vadd.i16" } } */ + +uint16x8_t +foo1 (uint16x8_t a, uint16x8_t b) +{ + return vaddq (a, b); +} + +/* { dg-final { scan-assembler "vadd.i16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c new file mode 100644 index 00000000000..ed262c29406 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint32x4_t +foo (uint32x4_t a, uint32x4_t b) +{ + return vaddq_u32 (a, b); +} + +/* { dg-final { scan-assembler "vadd.i32" } } */ + +uint32x4_t +foo1 (uint32x4_t a, uint32x4_t b) +{ + return vaddq (a, b); +} + +/* { dg-final { scan-assembler "vadd.i32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c new file mode 100644 index 00000000000..b12e657b7af --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +uint8x16_t +foo (uint8x16_t a, uint8x16_t b) +{ + return vaddq_u8 (a, b); +} + +/* { dg-final { scan-assembler "vadd.i8" } } */ + +uint8x16_t +foo1 (uint8x16_t a, uint8x16_t b) +{ + return vaddq (a, b); +} + +/* { dg-final { scan-assembler "vadd.i8" } } */ -- 2.30.2