From: Kyrylo Tkachov
Date: Tue, 9 Sep 2014 11:20:02 +0000 (+0000)
Subject: [ARM][2/7] Convert FP mnemonics to UAL | add/sub/div/abs patterns.
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=436c249d4410d7a0fe6be16d7cedf0826856ded8;p=gcc.git

[ARM][2/7] Convert FP mnemonics to UAL | add/sub/div/abs patterns.

	* config/arm/vfp.md (*abssf2_vfp): Use UAL assembly syntax.
	(*absdf2_vfp): Likewise.
	(*negsf2_vfp): Likewise.
	(*negdf2_vfp): Likewise.
	(*addsf3_vfp): Likewise.
	(*adddf3_vfp): Likewise.
	(*subsf3_vfp): Likewise.
	(*subdf3_vfp): Likewise.
	(*divsf3_vfp): Likewise.
	(*divdf3_vfp): Likewise.

	* gcc.target/arm/vfp-1.c: Updated expected assembly.

From-SVN: r215051
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index a4855233e93..0a2c1d6d6df 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,16 @@
+2014-09-09  Kyrylo Tkachov
+
+	* config/arm/vfp.md (*abssf2_vfp): Use UAL assembly syntax.
+	(*absdf2_vfp): Likewise.
+	(*negsf2_vfp): Likewise.
+	(*negdf2_vfp): Likewise.
+	(*addsf3_vfp): Likewise.
+	(*adddf3_vfp): Likewise.
+	(*subsf3_vfp): Likewise.
+	(*subdf3_vfp): Likewise.
+	(*divsf3_vfp): Likewise.
+	(*divdf3_vfp): Likewise.
+
 2014-09-09  Kyrylo Tkachov
 
 	* config/arm/arm.c (output_move_vfp): Use UAL syntax for load/store
diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index c14f8e4c246..755229cc480 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -588,7 +588,7 @@
   [(set (match_operand:SF	  0 "s_register_operand" "=t")
 	(abs:SF (match_operand:SF 1 "s_register_operand" "t")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fabss%?\\t%0, %1"
+  "vabs%?.f32\\t%0, %1"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "ffariths")]
@@ -598,7 +598,7 @@
   [(set (match_operand:DF 0 "s_register_operand" "=w")
 	(abs:DF (match_operand:DF 1 "s_register_operand" "w")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fabsd%?\\t%P0, %P1"
+  "vabs%?.f64\\t%P0, %P1"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "ffarithd")]
@@ -609,7 +609,7 @@
 	(neg:SF (match_operand:SF 1 "s_register_operand" "t,r")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
   "@
-   fnegs%?\\t%0, %1
+   vneg%?.f32\\t%0, %1
    eor%?\\t%0, %1, #-2147483648"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
@@ -621,7 +621,7 @@
 	(neg:DF (match_operand:DF 1 "s_register_operand" "w,0,r")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
   "@
-   fnegd%?\\t%P0, %P1
+   vneg%?.f64\\t%P0, %P1
    #
    #"
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE && reload_completed
@@ -671,7 +671,7 @@
 	(plus:SF (match_operand:SF 1 "s_register_operand" "t")
 		 (match_operand:SF 2 "s_register_operand" "t")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fadds%?\\t%0, %1, %2"
+  "vadd%?.f32\\t%0, %1, %2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fadds")]
@@ -682,7 +682,7 @@
 	(plus:DF (match_operand:DF 1 "s_register_operand" "w")
 		 (match_operand:DF 2 "s_register_operand" "w")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "faddd%?\\t%P0, %P1, %P2"
+  "vadd%?.f64\\t%P0, %P1, %P2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "faddd")]
@@ -694,7 +694,7 @@
 	(minus:SF (match_operand:SF 1 "s_register_operand" "t")
 		  (match_operand:SF 2 "s_register_operand" "t")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fsubs%?\\t%0, %1, %2"
+  "vsub%?.f32\\t%0, %1, %2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fadds")]
@@ -705,7 +705,7 @@
 	(minus:DF (match_operand:DF 1 "s_register_operand" "w")
 		  (match_operand:DF 2 "s_register_operand" "w")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fsubd%?\\t%P0, %P1, %P2"
+  "vsub%?.f64\\t%P0, %P1, %P2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "faddd")]
@@ -722,7 +722,7 @@
 	(div:SF (match_operand:SF 1 "s_register_operand" "t,t")
 		(match_operand:SF 2 "s_register_operand" "t,t")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fdivs%?\\t%0, %1, %2"
+  "vdiv%?.f32\\t%0, %1, %2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "arch" "*,armv6_or_vfpv3")
@@ -734,7 +734,7 @@
 	(div:DF (match_operand:DF 1 "s_register_operand" "w,w")
 		(match_operand:DF 2 "s_register_operand" "w,w")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fdivd%?\\t%P0, %P1, %P2"
+  "vdiv%?.f64\\t%P0, %P1, %P2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "arch" "*,armv6_or_vfpv3")
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index c5358606c4d..a8fbe024105 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2014-09-09  Kyrylo Tkachov
+
+	* gcc.target/arm/vfp-1.c: Updated expected assembly.
+
 2014-09-09  Kyrylo Tkachov
 
 	* gcc.target/arm/pr51835.c: Update expected assembly.
diff --git a/gcc/testsuite/gcc.target/arm/vfp-1.c b/gcc/testsuite/gcc.target/arm/vfp-1.c
index 2355b4de37a..3027f1057b6 100644
--- a/gcc/testsuite/gcc.target/arm/vfp-1.c
+++ b/gcc/testsuite/gcc.target/arm/vfp-1.c
@@ -11,19 +11,19 @@
 volatile float f1, f2, f3;
 
 void test_sf() {
   /* abssf2_vfp */
-  /* { dg-final { scan-assembler "fabss" } } */
+  /* { dg-final { scan-assembler "vabs.f32" } } */
   f1 = fabsf (f1);
   /* negsf2_vfp */
-  /* { dg-final { scan-assembler "fnegs" } } */
+  /* { dg-final { scan-assembler "vneg.f32" } } */
   f1 = -f1;
   /* addsf3_vfp */
-  /* { dg-final { scan-assembler "fadds" } } */
+  /* { dg-final { scan-assembler "vadd.f32" } } */
   f1 = f2 + f3;
   /* subsf3_vfp */
-  /* { dg-final { scan-assembler "fsubs" } } */
+  /* { dg-final { scan-assembler "vsub.f32" } } */
   f1 = f2 - f3;
   /* divsf3_vfp */
-  /* { dg-final { scan-assembler "fdivs" } } */
+  /* { dg-final { scan-assembler "vdiv.f32" } } */
   f1 = f2 / f3;
   /* mulsf3_vfp */
   /* { dg-final { scan-assembler "fmuls" } } */
@@ -52,19 +52,19 @@
 volatile double d1, d2, d3;
 
 void test_df() {
   /* absdf2_vfp */
-  /* { dg-final { scan-assembler "fabsd" } } */
+  /* { dg-final { scan-assembler "vabs.f64" } } */
   d1 = fabs (d1);
   /* negdf2_vfp */
-  /* { dg-final { scan-assembler "fnegd" } } */
+  /* { dg-final { scan-assembler "vneg.f64" } } */
   d1 = -d1;
   /* adddf3_vfp */
-  /* { dg-final { scan-assembler "faddd" } } */
+  /* { dg-final { scan-assembler "vadd.f64" } } */
   d1 = d2 + d3;
   /* subdf3_vfp */
-  /* { dg-final { scan-assembler "fsubd" } } */
+  /* { dg-final { scan-assembler "vsub.f64" } } */
   d1 = d2 - d3;
   /* divdf3_vfp */
-  /* { dg-final { scan-assembler "fdivd" } } */
+  /* { dg-final { scan-assembler "vdiv.f64" } } */
   d1 = d2 / d3;
   /* muldf3_vfp */
   /* { dg-final { scan-assembler "fmuld" } } */