From: Alan Lawrence
Date: Tue, 23 Sep 2014 18:48:50 +0000 (+0000)
Subject: Relax check against commuting XOR and ASHIFTRT in combine.c
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=ed052e94aa6ed5dba5e0cdbcc6598e26c01a4155;p=gcc.git

Relax check against commuting XOR and ASHIFTRT in combine.c

gcc/:

	* combine.c (simplify_shift_const_1): Allow commuting (ashiftrt (xor))
	when result_mode == shift_mode.

gcc/testsuite/:

	* gcc.dg/combine_ashiftrt_1.c: New test.
	* gcc.dg/combine_ashiftrt_2.c: Likewise.
	* gcc.target/aarch64/singleton_intrinsics_1.c: Remove scan-assembler
	workarounds for cmge.
	* gcc.target/aarch64/simd/int_comparisons_1.c: Likewise; also check for
	absence of mvn.

From-SVN: r215531
---
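Note (not part of the commit): a minimal standalone sketch of the identity
that makes the commuting safe whenever result_mode == shift_mode.  Within a
single fixed-width mode, arithmetic right shift distributes over XOR, so
(ashiftrt (xor x c) n) always equals (xor (ashiftrt x n) (ashiftrt c n));
the retained sign-bit check only matters when the shift has been widened
beyond result_mode.  The sketch assumes '>>' on a negative int64_t is an
arithmetic shift, which is implementation-defined in C but is how GCC
implements it on the targets the new tests cover.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int64_t xs[] = { 0, 1, -1, 42, -42, INT64_MAX, INT64_MIN };
  int64_t cs[] = { -1, 0x7f, -0x80, INT64_MIN };

  /* (ashiftrt (xor x c) n) == (xor (ashiftrt x n) (ashiftrt c n))
     holds for every x, c and n when both sides are computed in the
     same 64-bit mode.  */
  for (unsigned i = 0; i < sizeof xs / sizeof xs[0]; i++)
    for (unsigned j = 0; j < sizeof cs / sizeof cs[0]; j++)
      for (int n = 1; n < 64; n++)
        assert (((xs[i] ^ cs[j]) >> n) == ((xs[i] >> n) ^ (cs[j] >> n)));

  /* The motivating case: ~a is (xor a -1), so (~a) >> 63 commutes to
     ~(a >> 63), which combine then folds to (neg (ge a 0)).  */
  for (unsigned i = 0; i < sizeof xs / sizeof xs[0]; i++)
    assert (((~xs[i]) >> 63) == -(int64_t) (xs[i] >= 0));

  return 0;
}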
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 5f50f383272..651497d3959 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,8 @@
+2014-09-23  Alan Lawrence
+
+	* combine.c (simplify_shift_const_1): Allow commuting (ashiftrt (xor))
+	when result_mode == shift_mode.
+
 2014-09-23  Kostya Serebryany
 
 	Update to match the changed asan API.
diff --git a/gcc/combine.c b/gcc/combine.c
index 13284865a49..1457eabadf9 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -10255,8 +10255,10 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 
 	  if (CONST_INT_P (XEXP (varop, 1))
 	      /* We can't do this if we have (ashiftrt (xor)) and the
-		 constant has its sign bit set in shift_mode.  */
+		 constant has its sign bit set in shift_mode with shift_mode
+		 wider than result_mode.  */
 	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
+		   && result_mode != shift_mode
 		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
 					      shift_mode))
 	      && (new_rtx = simplify_const_binary_operation
@@ -10273,10 +10275,12 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 
 	  /* If we can't do that, try to simplify the shift in each arm of
 	     the logical expression, make a new logical expression, and apply
-	     the inverse distributive law.  This also can't be done
-	     for some (ashiftrt (xor)).  */
+	     the inverse distributive law.  This also can't be done for
+	     (ashiftrt (xor)) where we've widened the shift and the constant
+	     changes the sign bit.  */
 	  if (CONST_INT_P (XEXP (varop, 1))
 	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
+		   && result_mode != shift_mode
 		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
 					      shift_mode)))
 	    {
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 206860a0f24..14d65db75aa 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,12 @@
+2014-09-23  Alan Lawrence
+
+	* gcc.dg/combine_ashiftrt_1.c: New test.
+	* gcc.dg/combine_ashiftrt_2.c: Likewise.
+	* gcc.target/aarch64/singleton_intrinsics_1.c: Remove scan-assembler
+	workarounds for cmge.
+	* gcc.target/aarch64/simd/int_comparisons_1.c: Likewise; also check for
+	absence of mvn.
+
 2014-09-23  Paolo Carlini
 
 	PR c++/61857
diff --git a/gcc/testsuite/gcc.dg/combine_ashiftrt_1.c b/gcc/testsuite/gcc.dg/combine_ashiftrt_1.c
new file mode 100644
index 00000000000..90e64fd10dc
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/combine_ashiftrt_1.c
@@ -0,0 +1,18 @@
+/* { dg-do compile {target sparc64*-*-* aarch64*-*-* x86_64-*-* powerpc64*-*-*} } */
+/* { dg-options "-O2 -fdump-rtl-combine-all" } */
+
+typedef long long int int64_t;
+
+int64_t
+foo (int64_t a)
+{
+  return (~a) >> 63;
+}
+
+/* The combine phase will try to combine not & ashiftrt, and
+   combine_simplify_rtx should transform (ashiftrt (not x) 63)
+   to (not (ashiftrt x 63)) and then to (neg (ge x 0)).  We look for
+   the *attempt* to match this RTL pattern, regardless of whether an
+   actual insn may be found on the platform.  */
+/* { dg-final { scan-rtl-dump "\\(neg:DI \\(ge:DI" "combine" } } */
+/* { dg-final { cleanup-rtl-dump "combine" } } */
diff --git a/gcc/testsuite/gcc.dg/combine_ashiftrt_2.c b/gcc/testsuite/gcc.dg/combine_ashiftrt_2.c
new file mode 100644
index 00000000000..fd6827caed2
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/combine_ashiftrt_2.c
@@ -0,0 +1,18 @@
+/* { dg-do compile {target arm*-*-* i?86-*-* powerpc-*-* sparc-*-*} } */
+/* { dg-options "-O2 -fdump-rtl-combine-all" } */
+
+typedef long int32_t;
+
+int32_t
+foo (int32_t a)
+{
+  return (~a) >> 31;
+}
+
+/* The combine phase will try to combine not & ashiftrt, and
+   combine_simplify_rtx should transform (ashiftrt (not x) 31)
+   to (not (ashiftrt x 31)) and then to (neg (ge x 0)).  We look for
+   the *attempt* to match this RTL pattern, regardless of whether an
+   actual insn may be found on the platform.  */
+/* { dg-final { scan-rtl-dump "\\(neg:SI \\(ge:SI" "combine" } } */
+/* { dg-final { cleanup-rtl-dump "combine" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/int_comparisons_1.c b/gcc/testsuite/gcc.target/aarch64/simd/int_comparisons_1.c
index cb0f4a04c0f..f2c55922f18 100644
--- a/gcc/testsuite/gcc.target/aarch64/simd/int_comparisons_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/simd/int_comparisons_1.c
@@ -30,18 +30,16 @@
 
 /* Comparisons against immediate zero, on the 8 signed integer types only. */
 
 /* { dg-final { scan-assembler-times "\[ \t\]cmge\[ \t\]+v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*#?0" 7 } } */
-/* For int64_t and int64x1_t, combine_simplify_rtx failure of
-   https://gcc.gnu.org/ml/gcc/2014-06/msg00253.html
-   prevents generation of cmge....#0, instead producing mvn + sshr. */
-/* { #dg-final { scan-assembler-times "\[ \t\]cmge\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?0" 2 } } */
+/* { dg-final { scan-assembler-times "\[ \t\]cmge\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?0" 2 } } */
 /* { dg-final { scan-assembler-times "\[ \t\]cmgt\[ \t\]+v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*#?0" 7 } } */
 /* { dg-final { scan-assembler-times "\[ \t\]cmgt\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?0" 2 } } */
 /* { dg-final { scan-assembler-times "\[ \t\]cmle\[ \t\]+v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*#?0" 7 } } */
 /* { dg-final { scan-assembler-times "\[ \t\]cmle\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?0" 2 } } */
 /* { dg-final { scan-assembler-times "\[ \t\]cmlt\[ \t\]+v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*v\[0-9\]+\.\[0-9\]+\[bshd\],\[ \t\]*#?0" 7 } } */
 /* For int64_t and int64x1_t, cmlt ... #0 and sshr ... #63 are equivalent,
-   so allow either. cmgez issue above results in extra 2 * sshr....63. */
-/* { dg-final { scan-assembler-times "\[ \t\](?:cmlt|sshr)\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?(?:0|63)" 4 } } */
+   so allow either. */
+/* { dg-final { scan-assembler-times "\[ \t\](?:cmlt|sshr)\[ \t\]+d\[0-9\]+,\[ \t\]*d\[0-9\]+,\[ \t\]*#?(?:0|63)" 2 } } */
 // All should have been compiled into single insns without inverting result:
 /* { dg-final { scan-assembler-not "\[ \t\]not\[ \t\]" } } */
+/* { dg-final { scan-assembler-not "\[ \t\]mvn\[ \t\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/singleton_intrinsics_1.c b/gcc/testsuite/gcc.target/aarch64/singleton_intrinsics_1.c
index 8a8272ba48e..4a0934b01f9 100644
--- a/gcc/testsuite/gcc.target/aarch64/singleton_intrinsics_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/singleton_intrinsics_1.c
@@ -57,8 +57,7 @@ test_vcle_s64 (int64x1_t a, int64x1_t b)
   return vcle_s64 (a, b);
 }
 
-/* Idiom recognition will cause this testcase not to generate
-   the expected cmge instruction, so do not check for it. */
+/* { dg-final { scan-assembler-times "\\tcmge\\td\[0-9\]+, d\[0-9\]+, #?0" 1 } } */
 
 uint64x1_t
 test_vcgez_s64 (int64x1_t a)
@@ -236,8 +235,8 @@ test_vrshl_u64 (uint64x1_t a, int64x1_t b)
   return vrshl_u64 (a, b);
 }
 
-/* { dg-final { scan-assembler-times "\\tsshr\\td\[0-9\]+" 3 } } */
-/* Idiom recognition compiles vcltz and vcgez to sshr rather than cmlt/cmge. */
+/* For int64x1_t, sshr...#63 is output instead of the equivalent cmlt...#0. */
+/* { dg-final { scan-assembler-times "\\tsshr\\td\[0-9\]+" 2 } } */
 
 int64x1_t
 test_vshr_n_s64 (int64x1_t a)