From: Vlad Lazar
Date: Fri, 31 Aug 2018 15:00:54 +0000 (+0000)
Subject: [AArch64] Implement new intrinsics vabsd_s64 and vnegd_s64.
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=66da5b53107962a1c115a9686f2220de27f276f7;p=gcc.git

[AArch64] Implement new intrinsics vabsd_s64 and vnegd_s64.

gcc/
2018-08-31  Vlad Lazar

	* config/aarch64/arm_neon.h (vabsd_s64): New.
	(vnegd_s64): Likewise.

gcc/testsuite/
2018-08-31  Vlad Lazar

	* gcc.target/aarch64/scalar_intrinsics.c (test_vnegd_s64): New.
	* gcc.target/aarch64/vneg_s.c (RUN_TEST_SCALAR): New.
	(test_vnegd_s64): Likewise.
	* gcc.target/aarch64/vnegd_s64.c: New.
	* gcc.target/aarch64/vabsd_s64.c: New.
	* gcc.target/aarch64/vabs_intrinsic_3.c: New.

From-SVN: r264019
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index d1a081066bc..1e5d1ad9c62 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,8 @@
+2018-08-31  Vlad Lazar
+
+	* config/aarch64/arm_neon.h (vabsd_s64): New.
+	(vnegd_s64): Likewise.
+
 2018-08-31  Martin Jambor
 
 	* ipa-cp.c (estimate_local_effects): Replace wrong MAX with MIN.
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 2d18400040f..fc734e1aa9e 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -11822,6 +11822,18 @@ vabsq_s64 (int64x2_t __a)
   return __builtin_aarch64_absv2di (__a);
 }
 
+/* Try to avoid moving between integer and vector registers.
+   For an explanation of why the cast to unsigned is needed, see the
+   comment on the vnegd_s64 intrinsic below.  A related testcase is
+   gcc.target/aarch64/vabsd_s64.c.  */
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabsd_s64 (int64_t __a)
+{
+  return __a < 0 ? - (uint64_t) __a : __a;
+}
+
 /* vadd */
 
 __extension__ extern __inline int64_t
@@ -22907,6 +22919,25 @@ vneg_s64 (int64x1_t __a)
   return -__a;
 }
 
+/* According to the ACLE, the negative of the minimum (signed)
+   value is itself.  This leads to a semantics mismatch, because
+   negating the minimum value is undefined behaviour in C.  The value
+   range predictor is not aware that the negation of a negative number
+   can still be negative, and it may try to fold the expression.  See
+   the test in gcc.target/aarch64/vnegd_s64.c for an example.
+
+   The cast below tricks the value range predictor into including
+   INT64_MIN in the range it computes.  So for x in the range
+   [INT64_MIN, y] the range prediction after vnegd_s64 (x) will
+   be ~[INT64_MIN + 1, y].  */
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vnegd_s64 (int64_t __a)
+{
+  return - (uint64_t) __a;
+}
+
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vnegq_f32 (float32x4_t __a)
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index a588e18871b..c10f1d299c5 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,12 @@
+2018-08-31  Vlad Lazar
+
+	* gcc.target/aarch64/scalar_intrinsics.c (test_vnegd_s64): New.
+	* gcc.target/aarch64/vneg_s.c (RUN_TEST_SCALAR): New.
+	(test_vnegd_s64): Likewise.
+	* gcc.target/aarch64/vnegd_s64.c: New.
+	* gcc.target/aarch64/vabsd_s64.c: New.
+	* gcc.target/aarch64/vabs_intrinsic_3.c: New.
+
 2018-08-31  Nathan Sidwell
 
 	PR c++/87155
diff --git a/gcc/testsuite/gcc.target/aarch64/scalar_intrinsics.c b/gcc/testsuite/gcc.target/aarch64/scalar_intrinsics.c
index ea29066e369..d943989768d 100644
--- a/gcc/testsuite/gcc.target/aarch64/scalar_intrinsics.c
+++ b/gcc/testsuite/gcc.target/aarch64/scalar_intrinsics.c
@@ -627,6 +627,14 @@ test_vqabss_s32 (int32_t a)
   return vqabss_s32 (a);
 }
 
+/* { dg-final { scan-assembler-times "\\tneg\\tx\[0-9\]+" 1 } } */
+
+int64_t
+test_vnegd_s64 (int64_t a)
+{
+  return vnegd_s64 (a);
+}
+
 /* { dg-final { scan-assembler-times "\\tsqneg\\tb\[0-9\]+" 1 } } */
 
 int8_t
diff --git a/gcc/testsuite/gcc.target/aarch64/vabs_intrinsic_3.c b/gcc/testsuite/gcc.target/aarch64/vabs_intrinsic_3.c
new file mode 100644
index 00000000000..cf4e7ae4679
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vabs_intrinsic_3.c
@@ -0,0 +1,39 @@
+/* Test the vabsd_s64 intrinsic.  */
+
+/* { dg-do run } */
+/* { dg-options "--save-temps -O2" } */
+
+#include <arm_neon.h>
+#include <stdint.h>
+
+extern void abort (void);
+
+#define force_simd(V1)   asm volatile ("mov %d0, %1.d[0]"	\
+	   : "=w"(V1)						\
+	   : "w"(V1)						\
+	   : /* No clobbers */);
+
+#define RUN_TEST(test, answ)			\
+{						\
+  force_simd (test);				\
+  force_simd (answ);				\
+  int64_t res = vabsd_s64 (test);		\
+  force_simd (res);				\
+  if (res != answ)				\
+    abort ();					\
+}
+
+int64_t input[] = {INT64_MAX, 10, 0, -10, INT64_MIN + 1, INT64_MIN};
+int64_t expected[] = {INT64_MAX, 10, 0, 10, INT64_MAX, INT64_MIN};
+
+int main (void)
+{
+  RUN_TEST (input[0], expected[0]);
+  RUN_TEST (input[1], expected[1]);
+  RUN_TEST (input[2], expected[2]);
+  RUN_TEST (input[3], expected[3]);
+  RUN_TEST (input[4], expected[4]);
+  RUN_TEST (input[5], expected[5]);
+
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vabsd_s64.c b/gcc/testsuite/gcc.target/aarch64/vabsd_s64.c
new file mode 100644
index 00000000000..a0f88ee12c3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vabsd_s64.c
@@ -0,0 +1,34 @@
+/* Check that the compiler does not optimise the vabsd_s64 call out.
+   We need to check for this because there is a mismatch in semantics
+   between the ACLE, which states that the absolute value of the minimum
+   (signed) value is itself, and C, where this is undefined behaviour.  */
+
+/* { dg-do run } */
+/* { dg-options "--save-temps -fno-inline -O2" } */
+
+#include <arm_neon.h>
+#include <stdint.h>
+
+extern void abort (void);
+
+int
+bar (int64_t x)
+{
+  if (x < (int64_t) 0)
+    return vabsd_s64 (x) < (int64_t) 0;
+  else
+    return -1;
+}
+
+int
+main (void)
+{
+  int ans = 1;
+  int res_abs = bar (INT64_MIN);
+
+  if (res_abs != ans)
+    abort ();
+
+  return 0;
+}
+
diff --git a/gcc/testsuite/gcc.target/aarch64/vneg_s.c b/gcc/testsuite/gcc.target/aarch64/vneg_s.c
index 911054053ea..e7f20f2831f 100644
--- a/gcc/testsuite/gcc.target/aarch64/vneg_s.c
+++ b/gcc/testsuite/gcc.target/aarch64/vneg_s.c
@@ -75,6 +75,20 @@ extern void abort (void);
     }									\
   }
 
+#define RUN_TEST_SCALAR(test_val, answ_val, a, b)	\
+  {							\
+    int64_t res;					\
+    INHIB_OPTIMIZATION;					\
+    a = test_val;					\
+    b = answ_val;					\
+    force_simd (b);					\
+    force_simd (a);					\
+    res = vnegd_s64 (a);				\
+    force_simd (res);					\
+    if (res != b)					\
+      return 1;						\
+  }
+
 int
 test_vneg_s8 ()
 {
@@ -177,7 +191,24 @@ test_vneg_s64 ()
   return 0;
 }
 
-/* { dg-final { scan-assembler-times "neg\\td\[0-9\]+, d\[0-9\]+" 8 } } */
+int
+test_vnegd_s64 ()
+{
+  int64_t a, b;
+
+  RUN_TEST_SCALAR (TEST0, ANSW0, a, b);
+  RUN_TEST_SCALAR (TEST1, ANSW1, a, b);
+  RUN_TEST_SCALAR (TEST2, ANSW2, a, b);
+  RUN_TEST_SCALAR (TEST3, ANSW3, a, b);
+  RUN_TEST_SCALAR (TEST4, ANSW4, a, b);
+  RUN_TEST_SCALAR (TEST5, ANSW5, a, b);
+  RUN_TEST_SCALAR (LLONG_MAX, LLONG_MIN + 1, a, b);
+  RUN_TEST_SCALAR (LLONG_MIN, LLONG_MIN, a, b);
+
+  return 0;
+}
+
+/* { dg-final { scan-assembler-times "neg\\td\[0-9\]+, d\[0-9\]+" 16 } } */
 
 int
 test_vnegq_s8 ()
@@ -283,6 +314,9 @@ main (int argc, char **argv)
   if (test_vneg_s64 ())
     abort ();
 
+  if (test_vnegd_s64 ())
+    abort ();
+
   if (test_vnegq_s8 ())
     abort ();
 
diff --git a/gcc/testsuite/gcc.target/aarch64/vnegd_s64.c b/gcc/testsuite/gcc.target/aarch64/vnegd_s64.c
new file mode 100644
index 00000000000..73d478ff49d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vnegd_s64.c
@@ -0,0 +1,36 @@
+/* Check that the compiler does not optimise the negation out.
+   We need to check for this because there is a mismatch in semantics
+   between the ACLE, which states that the negative of the minimum
+   (signed) value is itself, and C, where this is undefined behaviour.  */
+
+/* { dg-do run } */
+/* { dg-options "--save-temps -O2" } */
+
+#include <arm_neon.h>
+#include <stdint.h>
+
+extern void abort (void);
+
+int
+foo (int64_t x)
+{
+  if (x < (int64_t) 0)
+    return vnegd_s64 (x) < (int64_t) 0;
+  else
+    return -1;
+}
+
+/* { dg-final { scan-assembler-times {neg\tx[0-9]+, x[0-9]+} 1 } } */
+
+int
+main (void)
+{
+  int ans = 1;
+  int res = foo (INT64_MIN);
+
+  if (res != ans)
+    abort ();
+
+  return 0;
+}
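
For illustration only (not part of the committed patch): a minimal standalone
sketch of the wrap-around semantics the new intrinsics provide at the edge of
the value range, assuming an AArch64 GCC that already contains this change.
The file name demo.c and the cross-compiler command in the comment are
hypothetical.

/* demo.c -- exercise the ACLE semantics of the new scalar intrinsics.
   Build (illustrative): aarch64-linux-gnu-gcc -O2 demo.c  */
#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* Plain -x or a hand-written abs would be undefined behaviour for
     INT64_MIN in C; per the ACLE these intrinsics wrap, so both the
     negation and the absolute value of INT64_MIN are INT64_MIN again
     (matching the expected[] table in vabs_intrinsic_3.c above).  */
  printf ("%lld\n", (long long) vnegd_s64 (INT64_MIN));  /* INT64_MIN */
  printf ("%lld\n", (long long) vabsd_s64 (INT64_MIN));  /* INT64_MIN */
  printf ("%lld\n", (long long) vabsd_s64 (-10));        /* 10 */
  printf ("%lld\n", (long long) vnegd_s64 (10));         /* -10 */
  return 0;
}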