re PR rtl-optimization/89768 (ICE in compare_and_jump_seq at loop-unroll.c:838)
author: Jakub Jelinek <jakub@redhat.com>
Tue, 19 Mar 2019 19:04:14 +0000 (20:04 +0100)
committer: Jakub Jelinek <jakub@gcc.gnu.org>
Tue, 19 Mar 2019 19:04:14 +0000 (20:04 +0100)
PR rtl-optimization/89768
* loop-unroll.c (unroll_loop_constant_iterations): Use gen_int_mode
instead of GEN_INT.
(unroll_loop_runtime_iterations): Likewise.

From-SVN: r269812

gcc/ChangeLog
gcc/loop-unroll.c

index 991f019bae4bd0b55f1f17a47100a5e7f99218eb..f4b59d7d9ebfe08094816c83436dcc2dc86b1dd3 100644 (file)
@@ -1,3 +1,10 @@
+2019-03-19  Jakub Jelinek  <jakub@redhat.com>
+
+       PR rtl-optimization/89768
+       * loop-unroll.c (unroll_loop_constant_iterations): Use gen_int_mode
+       instead of GEN_INT.
+       (unroll_loop_runtime_iterations): Likewise.
+
 2019-03-19  Martin Sebor  <msebor@redhat.com>
 
        PR tree-optimization/89644
index bc7840e4bfabb0af3564c49a48601aea4e2255ad..1b4c73b61c85855f2f62c3f6a42a283c9a3531f4 100644 (file)
@@ -652,7 +652,7 @@ unroll_loop_constant_iterations (struct loop *loop)
   if (loop->any_likely_upper_bound)
     loop->nb_iterations_likely_upper_bound
       = wi::udiv_trunc (loop->nb_iterations_likely_upper_bound, max_unroll + 1);
-  desc->niter_expr = GEN_INT (desc->niter);
+  desc->niter_expr = gen_int_mode (desc->niter, desc->mode);
 
   /* Remove the edges.  */
   FOR_EACH_VEC_ELT (remove_edges, i, e)
@@ -1020,9 +1020,9 @@ unroll_loop_runtime_iterations (struct loop *loop)
       preheader = split_edge (loop_preheader_edge (loop));
       /* Add in count of edge from switch block.  */
       preheader->count += iter_count;
-      branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
-                                         block_label (preheader), p,
-                                         NULL);
+      branch_code = compare_and_jump_seq (copy_rtx (niter),
+                                         gen_int_mode (j, desc->mode), EQ,
+                                         block_label (preheader), p, NULL);
 
       /* We rely on the fact that the compare and jump cannot be optimized out,
         and hence the cfg we create is correct.  */