Account for the cost of generating loop masks
author: Richard Sandiford <richard.sandiford@arm.com>
Wed, 13 Nov 2019 09:12:17 +0000 (09:12 +0000)
committer: Richard Sandiford <rsandifo@gcc.gnu.org>
Wed, 13 Nov 2019 09:12:17 +0000 (09:12 +0000)
We didn't take the cost of generating loop masks into account, and so
tended to underestimate the cost of loops that need multiple masks.

2019-11-13  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
* tree-vect-loop.c (vect_estimate_min_profitable_iters): Include
the cost of generating loop masks.

gcc/testsuite/
* gcc.target/aarch64/sve/mask_struct_store_3.c: Add
-fno-vect-cost-model.
* gcc.target/aarch64/sve/mask_struct_store_3_run.c: Likewise.
* gcc.target/aarch64/sve/peel_ind_2.c: Likewise.
* gcc.target/aarch64/sve/peel_ind_2_run.c: Likewise.
* gcc.target/aarch64/sve/peel_ind_3.c: Likewise.
* gcc.target/aarch64/sve/peel_ind_3_run.c: Likewise.

From-SVN: r278125

gcc/ChangeLog
gcc/testsuite/ChangeLog
gcc/testsuite/gcc.target/aarch64/sve/mask_struct_store_3.c
gcc/testsuite/gcc.target/aarch64/sve/mask_struct_store_3_run.c
gcc/testsuite/gcc.target/aarch64/sve/peel_ind_2.c
gcc/testsuite/gcc.target/aarch64/sve/peel_ind_2_run.c
gcc/testsuite/gcc.target/aarch64/sve/peel_ind_3.c
gcc/testsuite/gcc.target/aarch64/sve/peel_ind_3_run.c
gcc/tree-vect-loop.c

index e7b04334fb5142462ca00806a03cbdeaf811e9a4..047052835f0ea1d06a77e3d5350a0f767302b180 100644 (file)
@@ -1,3 +1,8 @@
+2019-11-13  Richard Sandiford  <richard.sandiford@arm.com>
+
+       * tree-vect-loop.c (vect_estimate_min_profitable_iters): Include
+       the cost of generating loop masks.
+
 2019-11-13  Richard Sandiford  <richard.sandiford@arm.com>
 
        * tree-vectorizer.h (vect_apply_runtime_profitability_check_p):
index a253a5397cc006a34c041dd80b4d8db2db45b8ba..834c17a6d7f75d660149e6703c80f1914b27946e 100644 (file)
@@ -1,3 +1,13 @@
+2019-11-13  Richard Sandiford  <richard.sandiford@arm.com>
+
+       * gcc.target/aarch64/sve/mask_struct_store_3.c: Add
+       -fno-vect-cost-model.
+       * gcc.target/aarch64/sve/mask_struct_store_3_run.c: Likewise.
+       * gcc.target/aarch64/sve/peel_ind_2.c: Likewise.
+       * gcc.target/aarch64/sve/peel_ind_2_run.c: Likewise.
+       * gcc.target/aarch64/sve/peel_ind_3.c: Likewise.
+       * gcc.target/aarch64/sve/peel_ind_3_run.c: Likewise.
+
 2019-11-13  Richard Sandiford  <richard.sandiford@arm.com>
 
        PR c++/92206
index 001f5be8ff58bfcc75eccc4c050bef1e53faffeb..1765d54a483e76984af52b9f6f779b693965c035 100644 (file)
@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -fno-vect-cost-model" } */
 
 #include <stdint.h>
 
index 31d661b65945de826bc5b56995b3c3d097728739..4dbe0335c72fa6f3d655159ae80cd29b3bb44d36 100644 (file)
@@ -1,5 +1,5 @@
 /* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math -fno-vect-cost-model" } */
 
 #include "mask_struct_store_3.c"
 
index e792cdf2cad297e7044fdecd576343c9ac212078..df82d58ea770d45b91aea806e832c9d2f6f9da03 100644 (file)
@@ -1,7 +1,7 @@
 /* { dg-do compile } */
 /* Pick an arbitrary target for which unaligned accesses are more
    expensive.  */
-/* { dg-options "-O3 -msve-vector-bits=256 -mtune=thunderx" } */
+/* { dg-options "-O3 -msve-vector-bits=256 -mtune=thunderx -fno-vect-cost-model" } */
 
 #define N 512
 #define START 7
index 9c5ae1bd06867ce074b185711bb61234c57c0faa..b9785356d1826effe192f849509ec2c47c6bdae8 100644 (file)
@@ -1,6 +1,6 @@
 /* { dg-do run { target aarch64_sve_hw } } */
 /* { dg-options "-O3 -mtune=thunderx" } */
-/* { dg-options "-O3 -mtune=thunderx -msve-vector-bits=256" { target aarch64_sve256_hw } } */
+/* { dg-options "-O3 -mtune=thunderx -msve-vector-bits=256 -fno-vect-cost-model" { target aarch64_sve256_hw } } */
 
 #include "peel_ind_2.c"
 
index 441589eef600df0d1b264780774a9bdc4deb975e..1707f02fe92a719cfcf1016c30a0e7fa72eb08fc 100644 (file)
@@ -1,7 +1,7 @@
 /* { dg-do compile } */
 /* Pick an arbitrary target for which unaligned accesses are more
    expensive.  */
-/* { dg-options "-O3 -msve-vector-bits=256 -mtune=thunderx" } */
+/* { dg-options "-O3 -msve-vector-bits=256 -mtune=thunderx -fno-vect-cost-model" } */
 
 #define N 32
 #define MAX_START 8
index 384a38eb8ec55f19f06268aaf2f70aaf76d807dc..98389675d79ddbffc14c4e0ef2320a937c9bf1cc 100644 (file)
@@ -1,6 +1,6 @@
 /* { dg-do run { target aarch64_sve_hw } } */
-/* { dg-options "-O3 -mtune=thunderx" } */
-/* { dg-options "-O3 -mtune=thunderx -msve-vector-bits=256" { target aarch64_sve256_hw } } */
+/* { dg-options "-O3 -mtune=thunderx -fno-vect-cost-model" } */
+/* { dg-options "-O3 -mtune=thunderx -msve-vector-bits=256 -fno-vect-cost-model" { target aarch64_sve256_hw } } */
 
 #include "peel_ind_3.c"
 
index 83fb8486640d6dda236b98d1b6dad08021b8c7f7..005fa308911bd3a2eb6a188e6fdf94f985ecb3ab 100644 (file)
@@ -3291,6 +3291,32 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
                                  si->kind, si->stmt_info, si->misalign,
                                  vect_epilogue);
        }
+
+      /* Calculate how many masks we need to generate.  */
+      unsigned int num_masks = 0;
+      rgroup_masks *rgm;
+      unsigned int num_vectors_m1;
+      FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), num_vectors_m1, rgm)
+       if (rgm->mask_type)
+         num_masks += num_vectors_m1 + 1;
+      gcc_assert (num_masks > 0);
+
+      /* In the worst case, we need to generate each mask in the prologue
+        and in the loop body.  One of the loop body mask instructions
+        replaces the comparison in the scalar loop, and since we don't
+        count the scalar comparison against the scalar body, we shouldn't
+        count that vector instruction against the vector body either.
+
+        Sometimes we can use unpacks instead of generating prologue
+        masks and sometimes the prologue mask will fold to a constant,
+        so the actual prologue cost might be smaller.  However, it's
+        simpler and safer to use the worst-case cost; if this ends up
+        being the tie-breaker between vectorizing or not, then it's
+        probably better not to vectorize.  */
+      (void) add_stmt_cost (target_cost_data, num_masks, vector_stmt,
+                           NULL, 0, vect_prologue);
+      (void) add_stmt_cost (target_cost_data, num_masks - 1, vector_stmt,
+                           NULL, 0, vect_body);
     }
   else if (npeel < 0)
     {