re PR tree-optimization/91207 (Wrong code with -O3)
author Richard Biener <rguenther@suse.de>
Fri, 19 Jul 2019 08:47:41 +0000 (08:47 +0000)
committer Richard Biener <rguenth@gcc.gnu.org>
Fri, 19 Jul 2019 08:47:41 +0000 (08:47 +0000)
2019-07-19  Richard Biener  <rguenther@suse.de>

PR tree-optimization/91207
Revert
2019-07-17  Richard Biener  <rguenther@suse.de>

PR tree-optimization/91178
* tree-vect-stmts.c (get_group_load_store_type): For SLP
loads with a gap larger than the vector size always use
VMAT_STRIDED_SLP.
(vectorizable_load): For VMAT_STRIDED_SLP with a permutation
avoid loading vectors that are only contained in the gap
and thus are not needed.

* gcc.dg/torture/pr91207.c: New testcase.

From-SVN: r273593

gcc/ChangeLog
gcc/testsuite/ChangeLog
gcc/testsuite/gcc.dg/torture/pr91207.c [new file with mode: 0644]
gcc/tree-vect-stmts.c

index 35e4f37caa654fc1ac4c4d839e95914058c9944e..95868abb6d454bf1662a1d5c1159c2a4bffcecdf 100644 (file)
@@ -1,3 +1,17 @@
+2019-07-19  Richard Biener  <rguenther@suse.de>
+
+       PR tree-optimization/91207
+       Revert
+       2019-07-17  Richard Biener  <rguenther@suse.de>
+
+       PR tree-optimization/91178
+       * tree-vect-stmts.c (get_group_load_store_type): For SLP
+       loads with a gap larger than the vector size always use
+       VMAT_STRIDED_SLP.
+       (vectorizable_load): For VMAT_STRIDED_SLP with a permutation
+       avoid loading vectors that are only contained in the gap
+       and thus are not needed.
+
 2019-07-18  Uroš Bizjak  <ubizjak@gmail.com>
 
        * config/i386/i386.md (*addqi_2_slp): Remove.
index 7eb95f7127d65c6c143e8271008c59fb3794a755..b4fca286108b58b41d339669933fb5239acfbcf3 100644 (file)
@@ -1,3 +1,8 @@
+2019-07-19  Richard Biener  <rguenther@suse.de>
+
+       PR tree-optimization/91207
+       * gcc.dg/torture/pr91207.c: New testcase.
+
 2019-07-18  Uroš Bizjak  <ubizjak@gmail.com>
 
        PR target/91188
diff --git a/gcc/testsuite/gcc.dg/torture/pr91207.c b/gcc/testsuite/gcc.dg/torture/pr91207.c
new file mode 100644 (file)
index 0000000..36d71d3
--- /dev/null
@@ -0,0 +1,25 @@
+/* { dg-do run } */
+
+long long a;
+int b[92][32];
+unsigned int c, d;
+
+void e(long long *f, int p2) { *f = p2; }
+
+int main()
+{
+  for (int i = 6; i <= 20; d = i++)
+    for (int j = 6; j <= 91; j++) {
+       for (int k = 16; k <= 31;k++)
+         b[j][k] ^= 7;
+       c *= d;
+    }
+
+  for (int i = 0; i < 21; ++i)
+    for (int j = 0; j < 32; ++j)
+      e(&a, b[i][j]);
+
+  if (a != 7)
+    __builtin_abort ();
+  return 0;
+}
index 5d05e108ede61fc3558f94ec92beabc3858ee328..601a6f55fbff388c89f88d994e790aebf2bf960e 100644 (file)
@@ -2267,14 +2267,6 @@ get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
                        / vect_get_scalar_dr_size (first_dr_info)))
            overrun_p = false;
 
-         /* If the gap at the end of the group exceeds a whole vector
-            in size use the strided SLP code which can skip code-generation
-            for the gap.  */
-         if (vls_type == VLS_LOAD && known_gt (gap, nunits))
-           *memory_access_type = VMAT_STRIDED_SLP;
-         else
-           *memory_access_type = VMAT_CONTIGUOUS;
-
          /* If the gap splits the vector in half and the target
             can do half-vector operations avoid the epilogue peeling
             by simply loading half of the vector only.  Usually
@@ -2282,8 +2274,7 @@ get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
          dr_alignment_support alignment_support_scheme;
          scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
          machine_mode vmode;
-         if (*memory_access_type == VMAT_CONTIGUOUS
-             && overrun_p
+         if (overrun_p
              && !masked_p
              && (((alignment_support_scheme
                      = vect_supportable_dr_alignment (first_dr_info, false)))
@@ -2306,6 +2297,7 @@ get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
                                 "Peeling for outer loop is not supported\n");
              return false;
            }
+         *memory_access_type = VMAT_CONTIGUOUS;
        }
     }
   else
@@ -8740,7 +8732,6 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
       /* Checked by get_load_store_type.  */
       unsigned int const_nunits = nunits.to_constant ();
       unsigned HOST_WIDE_INT cst_offset = 0;
-      unsigned int group_gap = 0;
 
       gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
       gcc_assert (!nested_in_vect_loop);
@@ -8758,7 +8749,6 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
       if (slp && grouped_load)
        {
          group_size = DR_GROUP_SIZE (first_stmt_info);
-         group_gap = DR_GROUP_GAP (first_stmt_info);
          ref_type = get_group_alias_ptr_type (first_stmt_info);
        }
       else
@@ -8902,14 +8892,6 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
          if (nloads > 1)
            vec_alloc (v, nloads);
          stmt_vec_info new_stmt_info = NULL;
-         if (slp && slp_perm
-             && (group_el % group_size) > group_size - group_gap
-             && (group_el % group_size) + nloads * lnel < group_size)
-           {
-             dr_chain.quick_push (NULL_TREE);
-             group_el += nloads * lnel;
-             continue;
-           }
          for (i = 0; i < nloads; i++)
            {
              tree this_off = build_int_cst (TREE_TYPE (alias_off),