From: Richard Sandiford <richard.sandiford@arm.com>
Date: Thu, 15 Aug 2019 08:29:11 +0000 (+0000)
Subject: [AArch64] Use SVE reversed shifts in preference to MOVPRFX
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=7d1f24018b04c13134bc47619fb8aaa390b01754;p=gcc.git

[AArch64] Use SVE reversed shifts in preference to MOVPRFX

This patch makes us use reversed SVE shifts when the first operand
can't be tied to the output but the second can.  This is tested
more thoroughly by the ACLE patches but is really an independent
improvement.
2019-08-15  Richard Sandiford  <richard.sandiford@arm.com>
	    Prathamesh Kulkarni  <prathamesh.kulkarni@linaro.org>

gcc/
	* config/aarch64/aarch64-sve.md (*v<optab><mode>3):
	Add an alternative that uses reversed shifts.

gcc/testsuite/
	* gcc.target/aarch64/sve/shift_1.c: Accept reversed shifts.

Co-Authored-By: Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org>

From-SVN: r274512
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index c4e68eeb222..7a68a4554e2 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2019-08-15  Richard Sandiford  <richard.sandiford@arm.com>
+	    Prathamesh Kulkarni  <prathamesh.kulkarni@linaro.org>
+
+	* config/aarch64/aarch64-sve.md (*v<optab><mode>3):
+	Add an alternative that uses reversed shifts.
+
 2019-08-15  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
 
 	* config/aarch64/aarch64-cores.def (cortex-a76): Use neoversen1 tuning

diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index cf9073167f4..af68c17c989 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -2455,23 +2455,24 @@
 ;; likely to gain much and would make the instruction seem less uniform
 ;; to the register allocator.
 (define_insn_and_split "*v<optab><mode>3"
-  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?&w")
 	(unspec:SVE_I
-	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
 	   (ASHIFT:SVE_I
-	     (match_operand:SVE_I 2 "register_operand" "w, 0, w")
-	     (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, w"))]
+	     (match_operand:SVE_I 2 "register_operand" "w, 0, w, w")
+	     (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, w"))]
 	  UNSPEC_PRED_X))]
   "TARGET_SVE"
   "@
    #
    <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+   <shift>r\t%0.<Vetype>, %1/m, %3.<Vetype>, %2.<Vetype>
    movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
   "&& reload_completed
    && !register_operand (operands[3], <MODE>mode)"
   [(set (match_dup 0) (ASHIFT:SVE_I (match_dup 2) (match_dup 3)))]
   ""
-  [(set_attr "movprfx" "*,*,yes")]
+  [(set_attr "movprfx" "*,*,*,yes")]
 )
 
 ;; Unpredicated shift operations by a constant (post-RA only).

diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 63596d8497e..67d356986d8 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,8 @@
+2019-08-15  Richard Sandiford  <richard.sandiford@arm.com>
+	    Prathamesh Kulkarni  <prathamesh.kulkarni@linaro.org>
+
+	* gcc.target/aarch64/sve/shift_1.c: Accept reversed shifts.
+
 2019-08-15  Richard Sandiford  <richard.sandiford@arm.com>
 	    Kugan Vivekanandarajah  <kugan.vivekanandarajah@linaro.org>

diff --git a/gcc/testsuite/gcc.target/aarch64/sve/shift_1.c b/gcc/testsuite/gcc.target/aarch64/sve/shift_1.c
index f4c5ebd46af..5ee66da15ca 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/shift_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/shift_1.c
@@ -75,9 +75,9 @@ DO_IMMEDIATE_OPS (63, int64_t, 63);
 /* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s, z[0-9]+\.s\n} 2 } } */
 /* { dg-final { scan-assembler-times {\tlsl\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s, z[0-9]+\.s\n} 2 } } */
 
-/* { dg-final { scan-assembler-times {\tasr\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tlsl\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tasrr?\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tlsrr?\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tlslr?\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d, z[0-9]+\.d\n} 2 } } */
 
 /* { dg-final { scan-assembler-times {\tasr\tz[0-9]+\.b, z[0-9]+\.b, #5\n} 1 } } */
 /* { dg-final { scan-assembler-times {\tlsr\tz[0-9]+\.b, z[0-9]+\.b, #5\n} 1 } } */