re PR tree-optimization/68306 (ICE: in vectorizable_store, at tree-vect-stmts.c:5651)
authorRichard Biener <rguenther@suse.de>
Fri, 13 Nov 2015 12:14:57 +0000 (12:14 +0000)
committerRichard Biener <rguenth@gcc.gnu.org>
Fri, 13 Nov 2015 12:14:57 +0000 (12:14 +0000)
2015-11-13  Richard Biener  <rguenther@suse.de>

PR tree-optimization/68306
* tree-vect-data-refs.c (verify_data_ref_alignment): Move
loop related checks ...
(vect_verify_datarefs_alignment): ... here.
(vect_slp_analyze_and_verify_node_alignment): Compute and
verify alignment of the single DR whose alignment matters.
* tree-vect-stmts.c (vectorizable_store): Add an assert.
(vectorizable_load): Add a comment.
* tree-vect-slp.c (vect_analyze_slp_cost_1): Fix DR used
for determining load cost.

* gcc.dg/pr68306.c: Adjust.
* gcc.dg/pr68306-2.c: New testcase.
* gcc.dg/pr68306-3.c: Likewise.

From-SVN: r230310

gcc/ChangeLog
gcc/testsuite/ChangeLog
gcc/testsuite/gcc.dg/pr68306-2.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/pr68306-3.c [new file with mode: 0644]
gcc/testsuite/gcc.dg/pr68306.c
gcc/tree-vect-data-refs.c
gcc/tree-vect-slp.c
gcc/tree-vect-stmts.c

index a7517f9bcaaabbaf118d688f3458b9bd2ea7c2d7..1b16e12fe8a153e82ec63b45f3b992f0d1a2ba46 100644 (file)
@@ -1,3 +1,16 @@
+2015-11-13  Richard Biener  <rguenther@suse.de>
+
+       PR tree-optimization/68306
+       * tree-vect-data-refs.c (verify_data_ref_alignment): Move
+       loop related checks ...
+       (vect_verify_datarefs_alignment): ... here.
+       (vect_slp_analyze_and_verify_node_alignment): Compute and
+       verify alignment of the single DR whose alignment matters.
+       * tree-vect-stmts.c (vectorizable_store): Add an assert.
+       (vectorizable_load): Add a comment.
+       * tree-vect-slp.c (vect_analyze_slp_cost_1): Fix DR used
+       for determining load cost.
+
 2015-11-13  Ilya Enkovich  <enkovich.gnu@gmail.com>
 
        * tree-vect-loop.c (vect_determine_vectorization_factor): Check
index ca626846f16c5eac65489891a01038b0c3f7d7e6..d0bce7f63357243a7d2bdf9863789d4cef07af26 100644 (file)
@@ -1,3 +1,10 @@
+2015-11-13  Richard Biener  <rguenther@suse.de>
+
+       PR tree-optimization/68306
+       * gcc.dg/pr68306.c: Adjust.
+       * gcc.dg/pr68306-2.c: New testcase.
+       * gcc.dg/pr68306-3.c: Likewise.
+
 2015-11-13  Ilya Enkovich  <enkovich.gnu@gmail.com>
 
        * g++.dg/vect/simd-bool-comparison-1.cc: New test.
diff --git a/gcc/testsuite/gcc.dg/pr68306-2.c b/gcc/testsuite/gcc.dg/pr68306-2.c
new file mode 100644 (file)
index 0000000..1d84e96
--- /dev/null
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-additional-options "-mno-sse -mno-mmx" { target x86_64-*-* } } */
+
+struct {
+    int tz_minuteswest;
+    int tz_dsttime;
+} a, b;
+void fn1() {
+    b.tz_minuteswest = a.tz_minuteswest;
+    b.tz_dsttime = a.tz_dsttime;
+}
diff --git a/gcc/testsuite/gcc.dg/pr68306-3.c b/gcc/testsuite/gcc.dg/pr68306-3.c
new file mode 100644 (file)
index 0000000..d03ce53
--- /dev/null
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-additional-options "-mno-sse -mno-mmx" { target x86_64-*-* } } */
+/* { dg-additional-options "-mno-altivec -mno-vsx" { target powerpc*-*-* } } */
+
+extern void fn2();
+struct {
+    unsigned qp_num;
+    unsigned starting_psn;
+    void *private_data;
+} a;
+struct {
+    unsigned id;
+    unsigned qpn;
+    unsigned psn;
+} b;
+void fn1() {
+    a.qp_num = b.qpn;
+    a.starting_psn = b.psn;
+    fn2(b.id);
+}
index b36fb3471176696a6d384129863885861b81ce58..e1805a79a32b89e2ed4be48b3abc668129856dd0 100644 (file)
@@ -1,5 +1,6 @@
 /* { dg-do compile } */
 /* { dg-options "-O3" } */
+/* { dg-additional-options "-mno-sse -mno-mmx" { target x86_64-*-* } } */
 
 enum powerpc_pmc_type { PPC_PMC_IBM };
 struct {
index 590d363b731af65ddf9a7bdcd8b9f443d13ba429..f9327d7d89901ee4be8827be3a0368acbc70795e 100644 (file)
@@ -916,22 +916,8 @@ vect_update_misalignment_for_peel (struct data_reference *dr,
 static bool
 verify_data_ref_alignment (data_reference_p dr)
 {
-  enum dr_alignment_support supportable_dr_alignment;
-  gimple *stmt = DR_STMT (dr);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-
-  /* For interleaving, only the alignment of the first access matters.   */
-  if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-      && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
-    return true;
-
-  /* Strided accesses perform only component accesses, alignment is
-     irrelevant for them.  */
-  if (STMT_VINFO_STRIDED_P (stmt_info)
-      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
-    return true;
-
-  supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
+  enum dr_alignment_support supportable_dr_alignment
+    = vect_supportable_dr_alignment (dr, false);
   if (!supportable_dr_alignment)
     {
       if (dump_enabled_p ())
@@ -977,6 +963,18 @@ vect_verify_datarefs_alignment (loop_vec_info vinfo)
 
       if (!STMT_VINFO_RELEVANT_P (stmt_info))
        continue;
+
+      /* For interleaving, only the alignment of the first access matters.   */
+      if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
+         && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+       return true;
+
+      /* Strided accesses perform only component accesses, alignment is
+        irrelevant for them.  */
+      if (STMT_VINFO_STRIDED_P (stmt_info)
+         && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
+       return true;
+
       if (! verify_data_ref_alignment (dr))
        return false;
     }
@@ -2100,28 +2098,22 @@ vect_analyze_data_refs_alignment (loop_vec_info vinfo)
 static bool
 vect_slp_analyze_and_verify_node_alignment (slp_tree node)
 {
-  unsigned i;
-  gimple *stmt;
-  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
+  /* We vectorize from the first scalar stmt in the node unless
+     the node is permuted in which case we start from the first
+     element in the group.  */
+  gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0];
+  if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
+    first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
+
+  data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+  if (! vect_compute_data_ref_alignment (dr)
+      || ! verify_data_ref_alignment (dr))
     {
-      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-
-      /* Strided accesses perform only component accesses, misalignment
-        information is irrelevant for them.  */
-      if (STMT_VINFO_STRIDED_P (stmt_info)
-         && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
-       continue;
-
-      data_reference_p dr = STMT_VINFO_DATA_REF (stmt_info);
-      if (! vect_compute_data_ref_alignment (dr)
-         || ! verify_data_ref_alignment (dr))
-       {
-         if (dump_enabled_p ())
-           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                            "not vectorized: bad data alignment in basic "
-                            "block.\n");
-         return false;
-       }
+      if (dump_enabled_p ())
+       dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+                        "not vectorized: bad data alignment in basic "
+                        "block.\n");
+      return false;
     }
 
   return true;
index bf6d1d8011ff51df9d828d83f568e4528a529396..f65837df4fcba2e8edfe2a60ff4d028cbad376fa 100644 (file)
@@ -1429,6 +1429,13 @@ vect_analyze_slp_cost_1 (slp_instance instance, slp_tree node,
        {
          int i;
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
+         /* If the load is permuted then the alignment is determined by
+            the first group element not by the first scalar stmt DR.  */
+         if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
+           {
+             stmt = GROUP_FIRST_ELEMENT (stmt_info);
+             stmt_info = vinfo_for_stmt (stmt);
+           }
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
          /* If the load is permuted record the cost for the permutation.
index f7eee9116d0982ac11f1a5eadd7d2ccb95708941..0f64aaf19795880c63d910d71cc094a8d74b976f 100644 (file)
@@ -5464,6 +5464,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
              group.  */
           vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
           first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0]; 
+         gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
           first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
          op = gimple_assign_rhs1 (first_stmt);
         } 
@@ -6658,9 +6659,9 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   if (grouped_load)
     {
       first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
-      if (slp
-          && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
-         && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
+      /* For BB vectorization we directly vectorize a subchain
+         without permutation.  */
+      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
         first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
 
       /* Check if the chain of loads is already vectorized.  */