/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "calls.h"
#include "expr.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "intl.h"
#include "hash-set.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "dumpfile.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-data-ref.h"
#include "params.h"
#include "flags.h"
#include "diagnostic-core.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "wide-int-print.h"


#define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)

/* The maximum number of dominator BBs we search when looking for
   conditions from loop header copies that we can use to simplify a
   conditional expression. */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP. */

typedef struct
{
  mpz_t below, up;
} bounds;


/* Splits expression EXPR to a variable part VAR and constant OFFSET. */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru. */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
        break;

      *var = op0;
      /* Always sign extend the offset. */
      wi::to_mpz (op1, offset, SIGNED);
      if (negate)
        mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      wi::to_mpz (expr, offset, TYPE_SIGN (type));
      break;

    default:
      break;
    }
}
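
/* For instance, split_to_var_and_offset turns n_1 + 7 into *VAR = n_1,
   OFFSET = 7; n_1 - 2 into *VAR = n_1, OFFSET = -2; and a plain
   constant 5 into *VAR = 0, OFFSET = 5 (n_1 standing for an arbitrary
   SSA name). */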

/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX. */

static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
                       mpz_t min, mpz_t max)
{
  wide_int minv, maxv;
  enum value_range_type rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly. */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP. */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gimple_stmt_iterator gsi;

      /* Either for VAR itself... */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
         PHI argument from the loop preheader edge. */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          wide_int minc, maxc;
          if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
              && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
                  == VR_RANGE))
            {
              if (rtype != VR_RANGE)
                {
                  rtype = VR_RANGE;
                  minv = minc;
                  maxv = maxc;
                }
              else
                {
                  minv = wi::max (minv, minc, sgn);
                  maxv = wi::min (maxv, maxc, sgn);
                  /* If the PHI result range is inconsistent with
                     the VAR range, give up on looking at the PHI
                     results. This can happen if VR_UNDEFINED is
                     involved. */
                  if (wi::gt_p (minv, maxv, sgn))
                    {
                      rtype = get_range_info (var, &minv, &maxv);
                      break;
                    }
                }
            }
        }
      if (rtype == VR_RANGE)
        {
          mpz_t minm, maxm;
          gcc_assert (wi::le_p (minv, maxv, sgn));
          mpz_init (minm);
          mpz_init (maxm);
          wi::to_mpz (minv, minm, sgn);
          wi::to_mpz (maxv, maxm, sgn);
          mpz_add (minm, minm, off);
          mpz_add (maxm, maxm, off);
          /* If the computation may not wrap or off is zero, then this
             is always fine. If off is negative and minv + off isn't
             smaller than type's minimum, or off is positive and
             maxv + off isn't bigger than type's maximum, use the more
             precise range too. */
          if (nowrap_type_p (type)
              || mpz_sgn (off) == 0
              || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
              || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
            {
              mpz_set (min, minm);
              mpz_set (max, maxm);
              mpz_clear (minm);
              mpz_clear (maxm);
              return;
            }
          mpz_clear (minm);
          mpz_clear (maxm);
        }
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type. */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX. */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}
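
/* As an illustration, if VRP knows that i_1 has the range [0, 100] and
   OFF is 5, determine_value_range yields [5, 105] for i_1 + 5, provided
   the addition cannot wrap or 105 still fits in TYPE; otherwise it
   falls back to shifting one end of TYPE's static range by OFF. */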

/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS. */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
                                    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
          them do. Then their difference is X - Y.
       b) var + X overflows, and var + Y does not. Then the values of the
          expressions are var + X - M and var + Y, where M is the range of
          the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not. Then the computed values
          are var + X and var + Y + M, and their difference again is
          X - Y - M.
     Therefore, if the arithmetic in the type does not overflow, then the
     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M). */

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
        mpz_sub (bnds->below, bnds->below, m);
      else
        mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
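
/* A concrete instance: in an 8-bit unsigned type (M = 256), with X = 5
   and Y = 1 the difference (var + 5) - (var + 1) is 4 if nothing
   wraps and 4 - 256 = -252 if var + 5 wraps, so the bounds computed
   above are [-252, 4]. */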

/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS. */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
                           tree vary, mpz_t offy,
                           tree c0, enum tree_code cmp, tree c1,
                           bounds *bnds)
{
  tree varc0, varc1, tmp, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR; however,
         such a guard is unlikely to appear, so we do not bother with
         handling it. */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information,
         except for the special case of comparing with the bounds of
         the type. */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same
         type as X and Y. */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
          && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
        {
          cmp = GT_EXPR;
          break;
        }
      if (TYPE_MAX_VALUE (type)
          && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
        {
          cmp = LT_EXPR;
          break;
        }

      return;
    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY. TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables. */

  if (operand_equal_p (varx, varc1, 0))
    {
      tmp = varc0; varc0 = varc1; varc1 = tmp;
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      tmp = varx; varx = vary; vary = tmp;
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

       (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M. The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
        VARX + OFFC0 overflows, but VARX + OFFX does not.
        This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
        VARY + OFFC1 underflows and VARY + OFFY does not.
        This may only happen if OFFY > OFFC1. */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
              || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
              || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
        mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
        {
          mpz_neg (bnd, bnd);
          if (mpz_cmp (bnds->below, bnd) < 0)
            mpz_set (bnds->below, bnd);
        }
      else
        {
          if (mpz_cmp (bnd, bnds->up) < 0)
            mpz_set (bnds->up, bnd);
        }
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}
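
/* For example, a loop guard i_1 < n_2 (C0 = i_1, CMP = LT_EXPR,
   C1 = n_2) applied to VARX = i_1, OFFX = 0, VARY = n_2, OFFY = 0
   gives BND = 0 - 1 = -1, so refine_bounds_using_guard lowers
   BNDS->up to -1: the guard proves i_1 - n_2 <= -1. */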

/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   integer. However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying). */

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions. */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
         offsets. Matters are a bit more complicated when the
         addition of the offsets may wrap. */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
         estimates on below and up. */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise. */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates. */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
        cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
                                 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA. X can be obtained as a
   difference of two values in TYPE. */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}
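
/* E.g. in an 8-bit type, where a difference of two values always lies
   in [-255, 255], bounds_add applied to [0, 250] with DELTA = 10
   gives [10, 260], which is clamped to [10, 255]. */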

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X. */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}

/* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
        {
          irslt *= ix;
          ix *= ix;
        }
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
        {
          rslt = int_const_binop (MULT_EXPR, rslt, x);
          x = int_const_binop (MULT_EXPR, x, x);
        }
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
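
/* The loop above computes X**(2^0) * X**(2^1) * ... * X**(2^(s-2)),
   i.e. X**(2^(s-1) - 1), which is the inverse of odd X modulo 2^s.
   As a quick check, inverse (3, 0xff) is 171, since
   3 * 171 = 513 = 2 * 256 + 1. */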

/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C. If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow. EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable
   eventually reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop. BNDS
   bounds the value of this difference when computed in a signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0. */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
                             bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
                       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
          && TREE_CODE (s) == INTEGER_CST
          && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
          && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
         the induction variable overflows (unless the loop is exited in some
         other way before). Note that the actual induction variable in the
         loop (which ranges from base to final instead of from 0 to C) may
         overflow, in which case BNDS.up will not be giving a correct upper
         bound on C; thus, BNDS_U_VALID had to be computed in advance. */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail). */
  if (!no_overflow)
    {
      max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times. */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ... */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
         the upper bound on C given by BNDS. */
      if (TREE_CODE (c) == INTEGER_CST)
        wi::to_mpz (c, bnd, UNSIGNED);
      else if (bnds_u_valid)
        mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  wi::to_mpz (s, d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}
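
/* For instance, for an 8-bit IV with S = 2 that may overflow, the
   period bound is 2^(8 - 1) - 1 = 127 iterations; if the IV cannot
   overflow and C = 10 is known to be reached, the bound is
   strengthened to 10 / 2 = 5. */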

/* Determines number of iterations of a loop whose ending condition
   is IV <> FINAL. TYPE is the type of the iv. The number of
   iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   eventually reaches the value FINAL (we derived this earlier, and possibly
   set NITER->assumptions to make sure this is the case). BNDS contains the
   bounds on the difference FINAL - IV->base. */

static bool
number_of_iterations_ne (tree type, affine_iv *iv, tree final,
                         struct tree_niter_desc *niter, bool exit_must_be_taken,
                         bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive. Also cast everything to the unsigned type. If IV does
     not overflow, BNDS bounds the value of C. Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative. */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
                        fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv->base),
                       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, final),
                       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
                               exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* First the trivial cases -- when the step is 1. */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }

  /* Let gcd (step, size of mode) = d. If d does not divide c, the loop
     is infinite. Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d). */
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
                               (TYPE_PRECISION (niter_type)
                                - tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
                               build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
         assumptions for divisibility of c. */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
                                assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}
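
/* A worked example in an 8-bit unsigned type: for the exit test
   i != 12 with i = 0, 6, 12, ... we get s = 6 and c = 12, hence
   bits = 1, d = 2, bound = 127, s/d = 3 and c/d = 6. Since
   inverse (3, 127) = 43 (3 * 43 = 129 = 128 + 1), the number of
   iterations is (6 * 43) & 127 = 2, as expected. */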

/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step. The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER. If we
   find the final value, we adjust DELTA and return TRUE. Otherwise
   we return false. BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA. EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually. */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
                               struct tree_niter_desc *niter,
                               tree *delta, tree step,
                               bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  wi::to_mpz (mod, mmod, UNSIGNED);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow. This is
     also obviously the case if the new final value is equal to the
     current one. Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to which the pointer points
     being placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers). */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
      (iv0->no_overflow && integer_nonzerop (iv0->step))
      || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
         computation does not overflow, and that
         iv0->base <= iv1->base + MOD. */
      if (!fv_comp_no_overflow)
        {
          bound = fold_build2 (MINUS_EXPR, type1,
                               TYPE_MAX_VALUE (type1), tmod);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
          if (integer_zerop (assumption))
            goto end;
        }
      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              iv0->base,
                              fold_build_pointer_plus (iv1->base, tmod));
      else
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              iv0->base,
                              fold_build2 (PLUS_EXPR, type1,
                                           iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
         computation does not overflow, and that
         iv0->base - MOD <= iv1->base. */
      if (!fv_comp_no_overflow)
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type1), tmod);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
          if (integer_zerop (assumption))
            goto end;
        }
      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build_pointer_plus (iv0->base,
                                                       fold_build1 (NEGATE_EXPR,
                                                                    type1, tmod)),
                              iv1->base);
      else
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build2 (MINUS_EXPR, type1,
                                           iv0->base, tmod),
                              iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions,
                                      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                      niter->may_be_zero,
                                      noloop);
  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}
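
/* To illustrate: for i = 0; i < 10; i += 3 we have DELTA = 10 and
   STEP = 3, so MOD becomes 3 - (10 mod 3) = 2 and DELTA is adjusted
   to 12; the exit is then treated as i != 12, which is taken after
   the expected 4 iterations (i = 0, 3, 6, 9). */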

/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
   are TYPE. Returns false if we can prove that there is an overflow, true
   otherwise. STEP is the absolute value of the step. */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
        return true;

      /* If iv0->base is a constant, we can determine the last value before
         overflow precisely; otherwise we conservatively assume
         MAX - STEP + 1. */

      if (TREE_CODE (iv0->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, TYPE_MAX_VALUE (type)),
                           fold_convert (niter_type, iv0->base));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
                           TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
        return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, iv1->base),
                           fold_convert (niter_type, TYPE_MIN_VALUE (type)));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
                           TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}

/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
   bounds the value of IV1->base - IV0->base. */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  widest_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE. This formula only works if

       -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the
     loop, and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables
     us to prove the lower bound.

     The upper bound is more complicated. Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context. */

  /* First check whether the answer does not follow from the bounds we gathered
     before. */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
  else
    {
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
                   /* For pointers, only values lying inside a single object
                      can be compared or manipulated by pointer arithmetic.
                      GCC in general does not allow or handle objects larger
                      than half of the address space, hence the upper bound
                      is satisfied for pointers. */
                   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow. */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
                          iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
         the address 0 never belongs to any object, we can assume this for
         pointers. */
      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type), diff);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
        }

      /* And then we can compute iv0->base - diff, and compare it with
         iv1->base. */
      mbzl = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
                          iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MAX_VALUE (type), diff);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
        }

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);
  if (!rolls_p)
    {
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                        niter->may_be_zero, mbz);
    }
}

/* Determines number of iterations of a loop whose ending condition
   is IV0 < IV1. TYPE is the type of the iv. The number of
   iterations is stored to NITER. BNDS bounds the difference
   IV1->base - IV0->base. EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually. */

static bool
number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                         struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv1->base),
                       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1. */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

         or

         for (i = iv1->base; i > iv0->base; i--).

         In both cases # of iterations is iv1->base - iv0->base, assuming that
         iv1->base >= iv0->base.

         First try to derive a lower bound on the value of
         iv1->base - iv0->base, computed in full precision. If the difference
         is nonnegative, we are done, otherwise we must record the
         condition. */

      if (mpz_sgn (bnds->below) < 0)
        niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
                                          iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
                                     TYPE_SIGN (niter_type));
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
                         fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition into a != comparison. In particular, this will
     be the case if DELTA is constant. */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
                                     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
         zps does not overflow. */
      zps.no_overflow = true;

      return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow. */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step. For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll. */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
                   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  wi::to_mpz (step, mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}
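
/* E.g. for i = iv0->base; i < iv1->base; i += 3 with a nonconstant
   difference of the bases, the fallback formula above would give
   (delta + 2) / 3 iterations, e.g. (10 + 2) / 3 = 4 for bases 0
   and 10. */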

/* Determines number of iterations of a loop whose ending condition
   is IV0 <= IV1. TYPE is the type of the iv. The number of
   iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case). BNDS bounds the difference IV1->base - IV0->base. */

static bool
number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
                         struct tree_niter_desc *niter, bool exit_must_be_taken,
                         bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type. This we must know anyway, since if it is
     equal to this value, the loop rolls forever. We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to which the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers). */

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv1->base, TYPE_MAX_VALUE (type));
      else
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
        return false;
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
        iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
        iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
                                 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
                             iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, 1, type1);

  return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
                                  bnds);
}
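
/* For instance, for i = 0; i <= 9; i++ number_of_iterations_le bumps
   IV1->base from 9 to 10 and analyzes the loop as i < 10, giving 10
   iterations. */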

/* Dumps description of affine induction variable IV to FILE. */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}

/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE. The induction variable on left side of the comparison
   is IV0, the right-hand side is IV1. Both induction variables must have
   type TYPE, which must be an integer or pointer type. The steps of the
   ivs must be constants (or NULL_TREE, which is interpreted as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   If EVERY_ITERATION is true, we know the test is executed on every iteration.

   The results (number of iterations and assumptions as described in
   comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions). */

static bool
number_of_iterations_cond (struct loop *loop,
                           tree type, affine_iv *iv0, enum tree_code code,
                           affine_iv *iv1, struct tree_niter_desc *niter,
                           bool only_exit, bool every_iteration)
{
  bool exit_must_be_taken = false, ret;
  bounds bnds;

  /* If the test is not executed every iteration, wrapping may make the test
     pass again.
     TODO: the overflow case can still be used as an unreliable estimate of
     the upper bound. But we have no API to pass it down to number of
     iterations code and, at present, it will not use it anyway. */
  if (!every_iteration
      && (!iv0->no_overflow || !iv1->no_overflow
          || code == NE_EXPR || code == EQ_EXPR))
    return false;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0. */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->max = 0;
  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs. */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && integer_zerop (iv0->step)))
    {
      SWAP (iv0, iv1);
      code = swap_tree_comparison (code);
    }

  if (POINTER_TYPE_P (type))
    {
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
         to the same object. If they do, the control variable cannot wrap
         (as wrapping around the bounds of memory will never return a pointer
         that would be guaranteed to point to the same object, even if we
         avoid undefined behavior by casting to size_t and back). */
      iv0->no_overflow = true;
      iv1->no_overflow = true;
    }

  /* If the control induction variable does not overflow and the only exit
     from the loop is the one that we analyze, we know it must be taken
     eventually. */
  if (only_exit)
    {
      if (!integer_zerop (iv0->step) && iv0->no_overflow)
        exit_must_be_taken = true;
      else if (!integer_zerop (iv1->step) && iv1->no_overflow)
        exit_must_be_taken = true;
    }

  /* We can handle the case when neither of the sides of the comparison is
     invariant, provided that the test is NE_EXPR. This rarely occurs in
     practice, but it is simple enough to manage. */
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
    {
      tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
      if (code != NE_EXPR)
        return false;

      iv0->step = fold_binary_to_constant (MINUS_EXPR, step_type,
                                           iv0->step, iv1->step);
      iv0->no_overflow = false;
      iv1->step = build_int_cst (step_type, 0);
      iv1->no_overflow = true;
    }

  /* If the result of the comparison is a constant, the loop is weird. More
     precise handling would be possible, but the situation is not common enough
     to waste time on it. */
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
    return false;

  /* Ignore loops of while (i-- < 10) type. */
  if (code != NE_EXPR)
    {
      if (iv0->step && tree_int_cst_sign_bit (iv0->step))
        return false;

      if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
        return false;
    }

  /* If the loop exits immediately, there is nothing to do. */
  tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
  if (tem && integer_zerop (tem))
    {
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
      niter->max = 0;
      return true;
    }

  /* OK, now we know we have a sensible loop. Handle several cases, depending
     on what comparison operator is used. */
  bound_difference (loop, iv1->base, iv0->base, &bnds);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
               "Analyzing # of iterations of loop %d\n", loop->num);

      fprintf (dump_file, "  exit condition ");
      dump_affine_iv (dump_file, iv0);
      fprintf (dump_file, " %s ",
               code == NE_EXPR ? "!="
               : code == LT_EXPR ? "<"
               : "<=");
      dump_affine_iv (dump_file, iv1);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "  bounds on difference of bases: ");
      mpz_out_str (dump_file, 10, bnds.below);
      fprintf (dump_file, " ... ");
      mpz_out_str (dump_file, 10, bnds.up);
      fprintf (dump_file, "\n");
    }

  switch (code)
    {
    case NE_EXPR:
      gcc_assert (integer_zerop (iv1->step));
      ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
                                     exit_must_be_taken, &bnds);
      break;

    case LT_EXPR:
      ret = number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
                                     &bnds);
      break;

    case LE_EXPR:
      ret = number_of_iterations_le (type, iv0, iv1, niter, exit_must_be_taken,
                                     &bnds);
      break;

    default:
      gcc_unreachable ();
    }

  mpz_clear (bnds.up);
  mpz_clear (bnds.below);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (ret)
        {
          fprintf (dump_file, "  result:\n");
          if (!integer_nonzerop (niter->assumptions))
            {
              fprintf (dump_file, "    under assumptions ");
              print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          if (!integer_zerop (niter->may_be_zero))
            {
              fprintf (dump_file, "    zero if ");
              print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          fprintf (dump_file, "    # of iterations ");
          print_generic_expr (dump_file, niter->niter, TDF_SLIM);
          fprintf (dump_file, ", bounded by ");
          print_decu (niter->max, dump_file);
          fprintf (dump_file, "\n");
        }
      else
        fprintf (dump_file, "  failed\n\n");
    }
  return ret;
}

/* Substitute NEW for OLD in EXPR and fold the result. */

static tree
simplify_replace_tree (tree expr, tree old, tree new_tree)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, se;

  if (!expr)
    return NULL_TREE;

  /* Do not bother to replace constants. */
  if (CONSTANT_CLASS_P (old))
    return expr;

  if (expr == old
      || operand_equal_p (expr, old, 0))
    return unshare_expr (new_tree);

  if (!EXPR_P (expr))
    return expr;

  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    {
      e = TREE_OPERAND (expr, i);
      se = simplify_replace_tree (e, old, new_tree);
      if (e == se)
        continue;

      if (!ret)
        ret = copy_node (expr);

      TREE_OPERAND (ret, i) = se;
    }

  return (ret ? fold (ret) : expr);
}
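
/* For example, simplify_replace_tree (n_1 + 2, n_1, 10) returns a
   folded copy, 12; the original expression is never modified in
   place. */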

/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression. */

tree
expand_simple_operations (tree expr)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, ee, e1;
  enum tree_code code;
  gimple stmt;

  if (expr == NULL_TREE)
    return expr;

  if (is_gimple_min_invariant (expr))
    return expr;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
        {
          e = TREE_OPERAND (expr, i);
          ee = expand_simple_operations (e);
          if (e == ee)
            continue;

          if (!ret)
            ret = copy_node (expr);

          TREE_OPERAND (ret, i) = ee;
        }

      if (!ret)
        return expr;

      fold_defer_overflow_warnings ();
      ret = fold (ret);
      fold_undefer_and_ignore_overflow_warnings ();
      return ret;
    }

  if (TREE_CODE (expr) != SSA_NAME)
    return expr;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      basic_block src, dest;

      if (gimple_phi_num_args (stmt) != 1)
        return expr;
      e = PHI_ARG_DEF (stmt, 0);

      /* Avoid propagating through loop exit phi nodes, which
         could break loop-closed SSA form restrictions. */
      dest = gimple_bb (stmt);
      src = single_pred (dest);
      if (TREE_CODE (e) == SSA_NAME
          && src->loop_father != dest->loop_father)
        return expr;

      return expand_simple_operations (e);
    }
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return expr;

  /* Avoid expanding to expressions that contain SSA names that need
     to take part in abnormal coalescing. */
  ssa_op_iter iter;
  FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
    if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
      return expr;

  e = gimple_assign_rhs1 (stmt);
  code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    {
      if (is_gimple_min_invariant (e))
        return e;

      if (code == SSA_NAME)
        return expand_simple_operations (e);

      return expr;
    }

  switch (code)
    {
    CASE_CONVERT:
      /* Casts are simple. */
      ee = expand_simple_operations (e);
      return fold_build1 (code, TREE_TYPE (expr), ee);

    case PLUS_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      /* And increments and decrements by a constant are simple. */
      e1 = gimple_assign_rhs2 (stmt);
      if (!is_gimple_min_invariant (e1))
        return expr;

      ee = expand_simple_operations (e);
      return fold_build2 (code, TREE_TYPE (expr), ee, e1);

    default:
      return expr;
    }
}
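
/* As an example, given the definitions

     a_2 = b_1 + 4;
     c_3 = a_2 + 1;

   expand_simple_operations (c_3) follows the chain of simple
   increments and returns the folded expression b_1 + 5. */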

/* Tries to simplify EXPR using the condition COND. Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible). */

static tree
tree_simplify_using_condition_1 (tree cond, tree expr)
{
  bool changed;
  tree e, te, e0, e1, e2, notcond;
  enum tree_code code = TREE_CODE (expr);

  if (code == INTEGER_CST)
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
        changed = true;

      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
        changed = true;

      if (code == COND_EXPR)
        {
          e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
          if (TREE_OPERAND (expr, 2) != e2)
            changed = true;
        }
      else
        e2 = NULL_TREE;

      if (changed)
        {
          if (code == COND_EXPR)
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
          else
            expr = fold_build2 (code, boolean_type_node, e0, e1);
        }

      return expr;
    }

  /* In case COND is an equality, we may be able to simplify EXPR by
     copy/constant propagation, and vice versa. Fold does not handle this,
     since it is considered too expensive. */
  if (TREE_CODE (cond) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (cond, 0);
      e1 = TREE_OPERAND (cond, 1);

      /* We know that e0 == e1. Check whether we can simplify EXPR
         using this fact. */
      e = simplify_replace_tree (expr, e0, e1);
      if (integer_zerop (e) || integer_nonzerop (e))
        return e;

      e = simplify_replace_tree (expr, e1, e0);
      if (integer_zerop (e) || integer_nonzerop (e))
        return e;
    }
  if (TREE_CODE (expr) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
        return e;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
        return e;
    }
  if (TREE_CODE (expr) == NE_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
        return boolean_true_node;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
        return boolean_true_node;
    }

  te = expand_simple_operations (expr);

  /* Check whether COND ==> EXPR. */
  notcond = invert_truthvalue (cond);
  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
  if (e && integer_nonzerop (e))
    return e;

  /* Check whether COND ==> not EXPR. */
  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
  if (e && integer_zerop (e))
    return e;

  return expr;
}
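
/* E.g. with COND n_1 == 16 and EXPR n_1 != 0, substituting 16 for n_1
   in EXPR yields 16 != 0, which folds to a nonzero constant, so the
   whole expression simplifies to true. */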

/* Tries to simplify EXPR using the condition COND. Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).
   Wrapper around tree_simplify_using_condition_1 that ensures that chains
   of simple operations in definitions of ssa names in COND are expanded,
   so that things like casts or incrementing the value of the bound before
   the loop do not cause us to fail. */

static tree
tree_simplify_using_condition (tree cond, tree expr)
{
  cond = expand_simple_operations (cond);

  return tree_simplify_using_condition_1 (cond, expr);
}

/* Tries to simplify EXPR using the conditions on entry to LOOP.
   Returns the simplified expression (or EXPR unchanged, if no
   simplification was possible). */

static tree
simplify_using_initial_conditions (struct loop *loop, tree expr)
{
  edge e;
  basic_block bb;
  gimple stmt;
  tree cond;
  int cnt = 0;

  if (TREE_CODE (expr) == INTEGER_CST)
    return expr;

  /* Limit walking the dominators to avoid quadratic behavior in
     the number of BBs times the number of loops in degenerate
     cases. */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      stmt = last_stmt (e->src);
      cond = fold_build2 (gimple_cond_code (stmt),
                          boolean_type_node,
                          gimple_cond_lhs (stmt),
                          gimple_cond_rhs (stmt));
      if (e->flags & EDGE_FALSE_VALUE)
        cond = invert_truthvalue (cond);
      expr = tree_simplify_using_condition (cond, expr);
      ++cnt;
    }

  return expr;
}

/* Tries to simplify EXPR using the evolutions of the loop invariants
   in the superloops of LOOP. Returns the simplified expression
   (or EXPR unchanged, if no simplification was possible). */

static tree
simplify_using_outer_evolutions (struct loop *loop, tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  bool changed;
  tree e, e0, e1, e2;

  if (is_gimple_min_invariant (expr))
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
        changed = true;

      e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
        changed = true;

      if (code == COND_EXPR)
        {
          e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
          if (TREE_OPERAND (expr, 2) != e2)
            changed = true;
        }
      else
        e2 = NULL_TREE;

      if (changed)
        {
          if (code == COND_EXPR)
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
          else
            expr = fold_build2 (code, boolean_type_node, e0, e1);
        }

      return expr;
    }

  e = instantiate_parameters (loop, expr);
  if (is_gimple_min_invariant (e))
    return e;

  return expr;
}

/* Returns true if EXIT is the only possible exit from LOOP. */

bool
loop_only_exit_p (const struct loop *loop, const_edge exit)
{
  basic_block *body;
  gimple_stmt_iterator bsi;
  unsigned i;
  gimple call;

  if (exit != single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          call = gsi_stmt (bsi);
          if (gimple_code (call) != GIMPLE_CALL)
            continue;

          if (gimple_has_side_effects (call))
            {
              free (body);
              return false;
            }
        }
    }

  free (body);
  return true;
}
1905
1906 /* Stores a description of the number of iterations of LOOP derived from
1907    EXIT (an exit edge of the LOOP) in NITER.  Returns true if some
1908    useful information could be derived (the fields of NITER then have
1909    the meaning described in the comments at the struct tree_niter_desc
1910    declaration), false otherwise.  If WARN is true and
1911    -Wunsafe-loop-optimizations was given, warn if the optimizer is going
1912    to use potentially unsafe assumptions.
1913    When EVERY_ITERATION is true, only tests that are known to be executed
1914    every iteration are considered (i.e. only tests that alone bound the loop).
1915  */
1916
1917 bool
1918 number_of_iterations_exit (struct loop *loop, edge exit,
1919 struct tree_niter_desc *niter,
1920 bool warn, bool every_iteration)
1921 {
1922 gimple stmt;
1923 tree type;
1924 tree op0, op1;
1925 enum tree_code code;
1926 affine_iv iv0, iv1;
1927 bool safe;
1928
1929 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
1930
1931 if (every_iteration && !safe)
1932 return false;
1933
1934 niter->assumptions = boolean_false_node;
1935 stmt = last_stmt (exit->src);
1936 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1937 return false;
1938
1939   /* We want the condition for staying inside the loop.  */
1940 code = gimple_cond_code (stmt);
1941 if (exit->flags & EDGE_TRUE_VALUE)
1942 code = invert_tree_comparison (code, false);
1943
1944 switch (code)
1945 {
1946 case GT_EXPR:
1947 case GE_EXPR:
1948 case LT_EXPR:
1949 case LE_EXPR:
1950 case NE_EXPR:
1951 break;
1952
1953 default:
1954 return false;
1955 }
1956
1957 op0 = gimple_cond_lhs (stmt);
1958 op1 = gimple_cond_rhs (stmt);
1959 type = TREE_TYPE (op0);
1960
1961 if (TREE_CODE (type) != INTEGER_TYPE
1962 && !POINTER_TYPE_P (type))
1963 return false;
1964
1965 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
1966 return false;
1967 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
1968 return false;
1969
1970 /* We don't want to see undefined signed overflow warnings while
1971 computing the number of iterations. */
1972 fold_defer_overflow_warnings ();
1973
1974 iv0.base = expand_simple_operations (iv0.base);
1975 iv1.base = expand_simple_operations (iv1.base);
1976 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
1977 loop_only_exit_p (loop, exit), safe))
1978 {
1979 fold_undefer_and_ignore_overflow_warnings ();
1980 return false;
1981 }
1982
1983 if (optimize >= 3)
1984 {
1985 niter->assumptions = simplify_using_outer_evolutions (loop,
1986 niter->assumptions);
1987 niter->may_be_zero = simplify_using_outer_evolutions (loop,
1988 niter->may_be_zero);
1989 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
1990 }
1991
1992 niter->assumptions
1993 = simplify_using_initial_conditions (loop,
1994 niter->assumptions);
1995 niter->may_be_zero
1996 = simplify_using_initial_conditions (loop,
1997 niter->may_be_zero);
1998
1999 fold_undefer_and_ignore_overflow_warnings ();
2000
2001 /* If NITER has simplified into a constant, update MAX. */
2002 if (TREE_CODE (niter->niter) == INTEGER_CST)
2003 niter->max = wi::to_widest (niter->niter);
2004
2005 if (integer_onep (niter->assumptions))
2006 return true;
2007
2008 /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
2009 But if we can prove that there is overflow or some other source of weird
2010 behavior, ignore the loop even with -funsafe-loop-optimizations. */
2011 if (integer_zerop (niter->assumptions) || !single_exit (loop))
2012 return false;
2013
2014 if (flag_unsafe_loop_optimizations)
2015 niter->assumptions = boolean_true_node;
2016
2017 if (warn)
2018 {
2019 const char *wording;
2020 location_t loc = gimple_location (stmt);
2021
2022       /* We can provide a more specific warning if one of the operands is
2023 	 constant and the other advances by +1 or -1.  */
2024 if (!integer_zerop (iv1.step)
2025 ? (integer_zerop (iv0.step)
2026 && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
2027 : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
2028 wording =
2029 flag_unsafe_loop_optimizations
2030 ? N_("assuming that the loop is not infinite")
2031 : N_("cannot optimize possibly infinite loops");
2032 else
2033 wording =
2034 flag_unsafe_loop_optimizations
2035 ? N_("assuming that the loop counter does not overflow")
2036 : N_("cannot optimize loop, the loop counter may overflow");
2037
2038 warning_at ((LOCATION_LINE (loc) > 0) ? loc : input_location,
2039 OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
2040 }
2041
2042 return flag_unsafe_loop_optimizations;
2043 }
2044
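/* For instance (a sketch), for the do-while loop

     i = 0;
     do
       body ();
     while (++i < n);

   with unsigned i and n, the exit test yields roughly niter = n - 1
   (the number of latch executions) with may_be_zero = (n == 0); the
   exact trees depend on the types involved.  */
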
2045 /* Try to determine the number of iterations of LOOP.  If we succeed, an
2046    expression giving the number of iterations is returned and *EXIT is
2047    set to the edge from which the information is obtained.  Otherwise
2048    chrec_dont_know is returned.  */
2049
2050 tree
2051 find_loop_niter (struct loop *loop, edge *exit)
2052 {
2053 unsigned i;
2054 vec<edge> exits = get_loop_exit_edges (loop);
2055 edge ex;
2056 tree niter = NULL_TREE, aniter;
2057 struct tree_niter_desc desc;
2058
2059 *exit = NULL;
2060 FOR_EACH_VEC_ELT (exits, i, ex)
2061 {
2062 if (!number_of_iterations_exit (loop, ex, &desc, false))
2063 continue;
2064
2065 if (integer_nonzerop (desc.may_be_zero))
2066 {
2067 /* We exit in the first iteration through this exit.
2068 We won't find anything better. */
2069 niter = build_int_cst (unsigned_type_node, 0);
2070 *exit = ex;
2071 break;
2072 }
2073
2074 if (!integer_zerop (desc.may_be_zero))
2075 continue;
2076
2077 aniter = desc.niter;
2078
2079 if (!niter)
2080 {
2081 /* Nothing recorded yet. */
2082 niter = aniter;
2083 *exit = ex;
2084 continue;
2085 }
2086
2087 /* Prefer constants, the lower the better. */
2088 if (TREE_CODE (aniter) != INTEGER_CST)
2089 continue;
2090
2091 if (TREE_CODE (niter) != INTEGER_CST)
2092 {
2093 niter = aniter;
2094 *exit = ex;
2095 continue;
2096 }
2097
2098 if (tree_int_cst_lt (aniter, niter))
2099 {
2100 niter = aniter;
2101 *exit = ex;
2102 continue;
2103 }
2104 }
2105 exits.release ();
2106
2107 return niter ? niter : chrec_dont_know;
2108 }
2109
2110 /* Return true if loop is known to have a bounded number of iterations.  */
2111
2112 bool
2113 finite_loop_p (struct loop *loop)
2114 {
2115 widest_int nit;
2116 int flags;
2117
2118 if (flag_unsafe_loop_optimizations)
2119 return true;
2120 flags = flags_from_decl_or_type (current_function_decl);
2121 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2122 {
2123 if (dump_file && (dump_flags & TDF_DETAILS))
2124 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2125 loop->num);
2126 return true;
2127 }
2128
2129 if (loop->any_upper_bound
2130 || max_loop_iterations (loop, &nit))
2131 {
2132 if (dump_file && (dump_flags & TDF_DETAILS))
2133 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2134 loop->num);
2135 return true;
2136 }
2137 return false;
2138 }
2139
2140 /*
2141
2142 Analysis of a number of iterations of a loop by a brute-force evaluation.
2143
2144 */
2145
2146 /* Bound on the number of iterations we try to evaluate. */
2147
2148 #define MAX_ITERATIONS_TO_TRACK \
2149 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2150
2151 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
2152    result by a chain of operations in which each operation has exactly one
2153    non-constant operand.  */
2154
2155 static gimple
2156 chain_of_csts_start (struct loop *loop, tree x)
2157 {
2158 gimple stmt = SSA_NAME_DEF_STMT (x);
2159 tree use;
2160 basic_block bb = gimple_bb (stmt);
2161 enum tree_code code;
2162
2163 if (!bb
2164 || !flow_bb_inside_loop_p (loop, bb))
2165 return NULL;
2166
2167 if (gimple_code (stmt) == GIMPLE_PHI)
2168 {
2169 if (bb == loop->header)
2170 return stmt;
2171
2172 return NULL;
2173 }
2174
2175 if (gimple_code (stmt) != GIMPLE_ASSIGN
2176 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2177 return NULL;
2178
2179 code = gimple_assign_rhs_code (stmt);
2180 if (gimple_references_memory_p (stmt)
2181 || TREE_CODE_CLASS (code) == tcc_reference
2182 || (code == ADDR_EXPR
2183 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2184 return NULL;
2185
2186 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2187 if (use == NULL_TREE)
2188 return NULL;
2189
2190 return chain_of_csts_start (loop, use);
2191 }
2192
2193 /* Determines whether the expression X is derived from the result of a phi node
2194    in the header of LOOP such that
2195
2196    * the derivation of X consists only of operations with constants,
2197    * the initial value of the phi node is constant, and
2198    * the value of the phi node in the next iteration can be derived from the
2199      value in the current iteration by a chain of operations with constants.
2200
2201    If such a phi node exists, it is returned, otherwise NULL is returned.  */
2202
2203 static gimple
2204 get_base_for (struct loop *loop, tree x)
2205 {
2206 gimple phi;
2207 tree init, next;
2208
2209 if (is_gimple_min_invariant (x))
2210 return NULL;
2211
2212 phi = chain_of_csts_start (loop, x);
2213 if (!phi)
2214 return NULL;
2215
2216 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2217 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2218
2219 if (TREE_CODE (next) != SSA_NAME)
2220 return NULL;
2221
2222 if (!is_gimple_min_invariant (init))
2223 return NULL;
2224
2225 if (chain_of_csts_start (loop, next) != phi)
2226 return NULL;
2227
2228 return phi;
2229 }
2230
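/* As an example, for the chain

     i_1 = PHI <0(preheader), i_3(latch)>;
     i_2 = i_1 * 3;
     i_3 = i_2 + 1;

   get_base_for (loop, i_2) returns the phi node defining i_1: the
   initial value 0 is constant, and the latch value i_3 is derived
   from i_1 by the operations * 3 and + 1, whose other operands are
   all constants.  */
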
2231 /* Given an expression X, then
2232
2233    * if X is NULL_TREE, we return the constant BASE.
2234    * otherwise X is an SSA name, whose value in the considered loop is derived
2235      by a chain of operations with constants from the result of a phi node in
2236      the header of the loop.  Then we return the value of X for the case in
2237      which the result of this phi node is given by the constant BASE.  */
2238
2239 static tree
2240 get_val_for (tree x, tree base)
2241 {
2242 gimple stmt;
2243
2244 gcc_checking_assert (is_gimple_min_invariant (base));
2245
2246 if (!x)
2247 return base;
2248
2249 stmt = SSA_NAME_DEF_STMT (x);
2250 if (gimple_code (stmt) == GIMPLE_PHI)
2251 return base;
2252
2253 gcc_checking_assert (is_gimple_assign (stmt));
2254
2255 /* STMT must be either an assignment of a single SSA name or an
2256 expression involving an SSA name and a constant. Try to fold that
2257 expression using the value for the SSA name. */
2258 if (gimple_assign_ssa_name_copy_p (stmt))
2259 return get_val_for (gimple_assign_rhs1 (stmt), base);
2260 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2261 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2262 {
2263 return fold_build1 (gimple_assign_rhs_code (stmt),
2264 gimple_expr_type (stmt),
2265 get_val_for (gimple_assign_rhs1 (stmt), base));
2266 }
2267 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2268 {
2269 tree rhs1 = gimple_assign_rhs1 (stmt);
2270 tree rhs2 = gimple_assign_rhs2 (stmt);
2271 if (TREE_CODE (rhs1) == SSA_NAME)
2272 rhs1 = get_val_for (rhs1, base);
2273 else if (TREE_CODE (rhs2) == SSA_NAME)
2274 rhs2 = get_val_for (rhs2, base);
2275 else
2276 gcc_unreachable ();
2277 return fold_build2 (gimple_assign_rhs_code (stmt),
2278 gimple_expr_type (stmt), rhs1, rhs2);
2279 }
2280 else
2281 gcc_unreachable ();
2282 }
2283
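/* Continuing the example above, get_val_for (i_3, 5) -- with 5 standing
   for an INTEGER_CST tree -- folds the chain bottom-up and yields
   (5 * 3) + 1 = 16, the value of i_3 in an iteration in which the phi
   result i_1 is 5.  */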
2284
2285 /* Tries to count the number of iterations of LOOP until it exits by EXIT
2286    by brute force -- i.e. by determining the values of the operands of the
2287    condition at EXIT in the first few iterations of the loop (assuming that
2288    these values are constant) and finding the first iteration in which the
2289    condition is not satisfied.  Returns the constant giving the number
2290    of iterations of LOOP if successful, chrec_dont_know otherwise.  */
2291
2292 tree
2293 loop_niter_by_eval (struct loop *loop, edge exit)
2294 {
2295 tree acnd;
2296 tree op[2], val[2], next[2], aval[2];
2297 gimple phi, cond;
2298 unsigned i, j;
2299 enum tree_code cmp;
2300
2301 cond = last_stmt (exit->src);
2302 if (!cond || gimple_code (cond) != GIMPLE_COND)
2303 return chrec_dont_know;
2304
2305 cmp = gimple_cond_code (cond);
2306 if (exit->flags & EDGE_TRUE_VALUE)
2307 cmp = invert_tree_comparison (cmp, false);
2308
2309 switch (cmp)
2310 {
2311 case EQ_EXPR:
2312 case NE_EXPR:
2313 case GT_EXPR:
2314 case GE_EXPR:
2315 case LT_EXPR:
2316 case LE_EXPR:
2317 op[0] = gimple_cond_lhs (cond);
2318 op[1] = gimple_cond_rhs (cond);
2319 break;
2320
2321 default:
2322 return chrec_dont_know;
2323 }
2324
2325 for (j = 0; j < 2; j++)
2326 {
2327 if (is_gimple_min_invariant (op[j]))
2328 {
2329 val[j] = op[j];
2330 next[j] = NULL_TREE;
2331 op[j] = NULL_TREE;
2332 }
2333 else
2334 {
2335 phi = get_base_for (loop, op[j]);
2336 if (!phi)
2337 return chrec_dont_know;
2338 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2339 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2340 }
2341 }
2342
2343 /* Don't issue signed overflow warnings. */
2344 fold_defer_overflow_warnings ();
2345
2346 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2347 {
2348 for (j = 0; j < 2; j++)
2349 aval[j] = get_val_for (op[j], val[j]);
2350
2351 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2352 if (acnd && integer_zerop (acnd))
2353 {
2354 fold_undefer_and_ignore_overflow_warnings ();
2355 if (dump_file && (dump_flags & TDF_DETAILS))
2356 fprintf (dump_file,
2357 "Proved that loop %d iterates %d times using brute force.\n",
2358 loop->num, i);
2359 return build_int_cst (unsigned_type_node, i);
2360 }
2361
2362 for (j = 0; j < 2; j++)
2363 {
2364 val[j] = get_val_for (next[j], val[j]);
2365 if (!is_gimple_min_invariant (val[j]))
2366 {
2367 fold_undefer_and_ignore_overflow_warnings ();
2368 return chrec_dont_know;
2369 }
2370 }
2371 }
2372
2373 fold_undefer_and_ignore_overflow_warnings ();
2374
2375 return chrec_dont_know;
2376 }
2377
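/* This succeeds for loops whose control variable is not affine, e.g.

     for (i = 1; i != 64; i <<= 1)
       body ();

   The evaluation above simulates i = 1, 2, 4, 8, 16, 32, finds the exit
   condition satisfied in the seventh test, and proves that the loop
   iterates 6 times -- something the affine analysis cannot derive.  */
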
2378 /* Finds the exit of LOOP through which the loop exits after a constant
2379    number of iterations and stores the exit edge to *EXIT.  The constant
2380    giving the number of iterations of LOOP is returned.  The number of
2381    iterations is determined using loop_niter_by_eval (i.e. by brute force
2382    evaluation).  If we are unable to find an exit for which loop_niter_by_eval
2383    determines the number of iterations, chrec_dont_know is returned.  */
2384
2385 tree
2386 find_loop_niter_by_eval (struct loop *loop, edge *exit)
2387 {
2388 unsigned i;
2389 vec<edge> exits = get_loop_exit_edges (loop);
2390 edge ex;
2391 tree niter = NULL_TREE, aniter;
2392
2393 *exit = NULL;
2394
2395 /* Loops with multiple exits are expensive to handle and less important. */
2396 if (!flag_expensive_optimizations
2397 && exits.length () > 1)
2398 {
2399 exits.release ();
2400 return chrec_dont_know;
2401 }
2402
2403 FOR_EACH_VEC_ELT (exits, i, ex)
2404 {
2405 if (!just_once_each_iteration_p (loop, ex->src))
2406 continue;
2407
2408 aniter = loop_niter_by_eval (loop, ex);
2409 if (chrec_contains_undetermined (aniter))
2410 continue;
2411
2412 if (niter
2413 && !tree_int_cst_lt (aniter, niter))
2414 continue;
2415
2416 niter = aniter;
2417 *exit = ex;
2418 }
2419 exits.release ();
2420
2421 return niter ? niter : chrec_dont_know;
2422 }
2423
2424 /*
2425
2426 Analysis of upper bounds on number of iterations of a loop.
2427
2428 */
2429
2430 static widest_int derive_constant_upper_bound_ops (tree, tree,
2431 enum tree_code, tree);
2432
2433 /* Returns a constant upper bound on the value of the right-hand side of
2434 an assignment statement STMT. */
2435
2436 static widest_int
2437 derive_constant_upper_bound_assign (gimple stmt)
2438 {
2439 enum tree_code code = gimple_assign_rhs_code (stmt);
2440 tree op0 = gimple_assign_rhs1 (stmt);
2441 tree op1 = gimple_assign_rhs2 (stmt);
2442
2443 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
2444 op0, code, op1);
2445 }
2446
2447 /* Returns a constant upper bound on the value of expression VAL. VAL
2448 is considered to be unsigned. If its type is signed, its value must
2449 be nonnegative. */
2450
2451 static widest_int
2452 derive_constant_upper_bound (tree val)
2453 {
2454 enum tree_code code;
2455 tree op0, op1;
2456
2457 extract_ops_from_tree (val, &code, &op0, &op1);
2458 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
2459 }
2460
2461 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
2462 whose type is TYPE. The expression is considered to be unsigned. If
2463 its type is signed, its value must be nonnegative. */
2464
2465 static widest_int
2466 derive_constant_upper_bound_ops (tree type, tree op0,
2467 enum tree_code code, tree op1)
2468 {
2469 tree subtype, maxt;
2470 widest_int bnd, max, mmax, cst;
2471 gimple stmt;
2472
2473 if (INTEGRAL_TYPE_P (type))
2474 maxt = TYPE_MAX_VALUE (type);
2475 else
2476 maxt = upper_bound_in_type (type, type);
2477
2478 max = wi::to_widest (maxt);
2479
2480 switch (code)
2481 {
2482 case INTEGER_CST:
2483 return wi::to_widest (op0);
2484
2485 CASE_CONVERT:
2486 subtype = TREE_TYPE (op0);
2487 if (!TYPE_UNSIGNED (subtype)
2488 /* If TYPE is also signed, the fact that VAL is nonnegative implies
2489 that OP0 is nonnegative. */
2490 && TYPE_UNSIGNED (type)
2491 && !tree_expr_nonnegative_p (op0))
2492 {
2493 /* If we cannot prove that the casted expression is nonnegative,
2494 we cannot establish more useful upper bound than the precision
2495 of the type gives us. */
2496 return max;
2497 }
2498
2499       /* We now know that op0 is a nonnegative value.  Try deriving an upper
2500 bound for it. */
2501 bnd = derive_constant_upper_bound (op0);
2502
2503 /* If the bound does not fit in TYPE, max. value of TYPE could be
2504 attained. */
2505 if (wi::ltu_p (max, bnd))
2506 return max;
2507
2508 return bnd;
2509
2510 case PLUS_EXPR:
2511 case POINTER_PLUS_EXPR:
2512 case MINUS_EXPR:
2513 if (TREE_CODE (op1) != INTEGER_CST
2514 || !tree_expr_nonnegative_p (op0))
2515 return max;
2516
2517 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
2518 	 choose the most logical way to treat this constant regardless
2519 of the signedness of the type. */
2520 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
2521 if (code != MINUS_EXPR)
2522 cst = -cst;
2523
2524 bnd = derive_constant_upper_bound (op0);
2525
2526 if (wi::neg_p (cst))
2527 {
2528 cst = -cst;
2529 /* Avoid CST == 0x80000... */
2530 if (wi::neg_p (cst))
2531 	    return max;
2532
2533 /* OP0 + CST. We need to check that
2534 BND <= MAX (type) - CST. */
2535
2536 	  mmax = max - cst;
2537 	  if (wi::ltu_p (mmax, bnd))
2538 return max;
2539
2540 return bnd + cst;
2541 }
2542 else
2543 {
2544 /* OP0 - CST, where CST >= 0.
2545
2546 If TYPE is signed, we have already verified that OP0 >= 0, and we
2547 know that the result is nonnegative. This implies that
2548 VAL <= BND - CST.
2549
2550 If TYPE is unsigned, we must additionally know that OP0 >= CST,
2551 otherwise the operation underflows.
2552 */
2553
2554 /* This should only happen if the type is unsigned; however, for
2555 	     buggy programs that use overflowing signed arithmetic even with
2556 -fno-wrapv, this condition may also be true for signed values. */
2557 if (wi::ltu_p (bnd, cst))
2558 return max;
2559
2560 if (TYPE_UNSIGNED (type))
2561 {
2562 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2563 wide_int_to_tree (type, cst));
2564 if (!tem || integer_nonzerop (tem))
2565 return max;
2566 }
2567
2568 bnd -= cst;
2569 }
2570
2571 return bnd;
2572
2573 case FLOOR_DIV_EXPR:
2574 case EXACT_DIV_EXPR:
2575 if (TREE_CODE (op1) != INTEGER_CST
2576 || tree_int_cst_sign_bit (op1))
2577 return max;
2578
2579 bnd = derive_constant_upper_bound (op0);
2580 return wi::udiv_floor (bnd, wi::to_widest (op1));
2581
2582 case BIT_AND_EXPR:
2583 if (TREE_CODE (op1) != INTEGER_CST
2584 || tree_int_cst_sign_bit (op1))
2585 return max;
2586 return wi::to_widest (op1);
2587
2588 case SSA_NAME:
2589 stmt = SSA_NAME_DEF_STMT (op0);
2590 if (gimple_code (stmt) != GIMPLE_ASSIGN
2591 || gimple_assign_lhs (stmt) != op0)
2592 return max;
2593 return derive_constant_upper_bound_assign (stmt);
2594
2595 default:
2596 return max;
2597 }
2598 }
2599
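/* For instance, for an unsigned expression (x & 0xff) + 3 the recursion
   above derives the bound 0xff for the BIT_AND_EXPR and then
   0xff + 3 = 0x102 for the PLUS_EXPR, since BND <= MAX (type) - CST
   clearly holds here.  */
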
2600 /* Emit a -Waggressive-loop-optimizations warning if needed. */
2601
2602 static void
2603 do_warn_aggressive_loop_optimizations (struct loop *loop,
2604 widest_int i_bound, gimple stmt)
2605 {
2606   /* Don't warn if the loop doesn't have a known constant bound.  */
2607 if (!loop->nb_iterations
2608 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
2609 || !warn_aggressive_loop_optimizations
2610 /* To avoid warning multiple times for the same loop,
2611 only start warning when we preserve loops. */
2612 || (cfun->curr_properties & PROP_loops) == 0
2613 /* Only warn once per loop. */
2614 || loop->warned_aggressive_loop_optimizations
2615 /* Only warn if undefined behavior gives us lower estimate than the
2616 known constant bound. */
2617 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
2618 /* And undefined behavior happens unconditionally. */
2619 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
2620 return;
2621
2622 edge e = single_exit (loop);
2623 if (e == NULL)
2624 return;
2625
2626 gimple estmt = last_stmt (e->src);
2627 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
2628 "iteration %E invokes undefined behavior",
2629 wide_int_to_tree (TREE_TYPE (loop->nb_iterations),
2630 i_bound)))
2631 inform (gimple_location (estmt), "containing loop");
2632 loop->warned_aggressive_loop_optimizations = true;
2633 }
2634
2635 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
2636    is true if the loop is exited immediately after AT_STMT, and this exit
2637    is taken at the latest when AT_STMT has been executed BOUND + 1 times.
2638 REALISTIC is true if BOUND is expected to be close to the real number
2639 of iterations. UPPER is true if we are sure the loop iterates at most
2640 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
2641
2642 static void
2643 record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
2644 gimple at_stmt, bool is_exit, bool realistic, bool upper)
2645 {
2646 widest_int delta;
2647
2648 if (dump_file && (dump_flags & TDF_DETAILS))
2649 {
2650 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
2651 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
2652 fprintf (dump_file, " is %sexecuted at most ",
2653 upper ? "" : "probably ");
2654 print_generic_expr (dump_file, bound, TDF_SLIM);
2655 fprintf (dump_file, " (bounded by ");
2656 print_decu (i_bound, dump_file);
2657 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
2658 }
2659
2660 /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
2661 real number of iterations. */
2662 if (TREE_CODE (bound) != INTEGER_CST)
2663 realistic = false;
2664 else
2665 gcc_checking_assert (i_bound == wi::to_widest (bound));
2666 if (!upper && !realistic)
2667 return;
2668
2669 /* If we have a guaranteed upper bound, record it in the appropriate
2670 list, unless this is an !is_exit bound (i.e. undefined behavior in
2671 at_stmt) in a loop with known constant number of iterations. */
2672 if (upper
2673 && (is_exit
2674 || loop->nb_iterations == NULL_TREE
2675 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
2676 {
2677 struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
2678
2679 elt->bound = i_bound;
2680 elt->stmt = at_stmt;
2681 elt->is_exit = is_exit;
2682 elt->next = loop->bounds;
2683 loop->bounds = elt;
2684 }
2685
2686 /* If statement is executed on every path to the loop latch, we can directly
2687 infer the upper bound on the # of iterations of the loop. */
2688 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
2689 return;
2690
2691 /* Update the number of iteration estimates according to the bound.
2692 If at_stmt is an exit then the loop latch is executed at most BOUND times,
2693      otherwise it can be executed BOUND + 1 times.  We will lower the estimate
2694      later if such a statement must be executed on the last iteration.  */
2695 if (is_exit)
2696 delta = 0;
2697 else
2698 delta = 1;
2699 widest_int new_i_bound = i_bound + delta;
2700
2701 /* If an overflow occurred, ignore the result. */
2702 if (wi::ltu_p (new_i_bound, delta))
2703 return;
2704
2705 if (upper && !is_exit)
2706 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
2707 record_niter_bound (loop, new_i_bound, realistic, upper);
2708 }
2709
2710 /* Record the estimate on number of iterations of LOOP based on the fact that
2711 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
2712 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
2713 estimated number of iterations is expected to be close to the real one.
2714 UPPER is true if we are sure the induction variable does not wrap. */
2715
2716 static void
2717 record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
2718 tree low, tree high, bool realistic, bool upper)
2719 {
2720 tree niter_bound, extreme, delta;
2721 tree type = TREE_TYPE (base), unsigned_type;
2722
2723 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
2724 return;
2725
2726 if (dump_file && (dump_flags & TDF_DETAILS))
2727 {
2728 fprintf (dump_file, "Induction variable (");
2729 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
2730 fprintf (dump_file, ") ");
2731 print_generic_expr (dump_file, base, TDF_SLIM);
2732 fprintf (dump_file, " + ");
2733 print_generic_expr (dump_file, step, TDF_SLIM);
2734 fprintf (dump_file, " * iteration does not wrap in statement ");
2735 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2736 fprintf (dump_file, " in loop %d.\n", loop->num);
2737 }
2738
2739 unsigned_type = unsigned_type_for (type);
2740 base = fold_convert (unsigned_type, base);
2741 step = fold_convert (unsigned_type, step);
2742
2743 if (tree_int_cst_sign_bit (step))
2744 {
2745 extreme = fold_convert (unsigned_type, low);
2746 if (TREE_CODE (base) != INTEGER_CST)
2747 base = fold_convert (unsigned_type, high);
2748 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
2749 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
2750 }
2751 else
2752 {
2753 extreme = fold_convert (unsigned_type, high);
2754 if (TREE_CODE (base) != INTEGER_CST)
2755 base = fold_convert (unsigned_type, low);
2756 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
2757 }
2758
2759 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
2760 would get out of the range. */
2761 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
2762 widest_int max = derive_constant_upper_bound (niter_bound);
2763 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
2764 }
2765
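/* For example, for an iv {0, +, 2} in a statement known to keep its
   value within <LOW, HIGH> = <0, 99>, DELTA is 99, NITER_BOUND is
   99 / 2 = 49, and the statement is recorded as executing at most
   49 + 1 = 50 times before the iv would leave the range.  */
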
2766 /* Determine information about the number of iterations of LOOP from the
2767    index IDX of a data reference accessed in STMT.  RELIABLE is true if STMT
2768    is guaranteed to be executed in every iteration of LOOP.  Callback for
2769    for_each_index.  */
2770
2771 struct ilb_data
2772 {
2773 struct loop *loop;
2774 gimple stmt;
2775 };
2776
2777 static bool
2778 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
2779 {
2780 struct ilb_data *data = (struct ilb_data *) dta;
2781 tree ev, init, step;
2782 tree low, high, type, next;
2783 bool sign, upper = true, at_end = false;
2784 struct loop *loop = data->loop;
2785 bool reliable = true;
2786
2787 if (TREE_CODE (base) != ARRAY_REF)
2788 return true;
2789
2790 /* For arrays at the end of the structure, we are not guaranteed that they
2791 do not really extend over their declared size. However, for arrays of
2792 size greater than one, this is unlikely to be intended. */
2793 if (array_at_struct_end_p (base))
2794 {
2795 at_end = true;
2796 upper = false;
2797 }
2798
2799 struct loop *dloop = loop_containing_stmt (data->stmt);
2800 if (!dloop)
2801 return true;
2802
2803 ev = analyze_scalar_evolution (dloop, *idx);
2804 ev = instantiate_parameters (loop, ev);
2805 init = initial_condition (ev);
2806 step = evolution_part_in_loop_num (ev, loop->num);
2807
2808 if (!init
2809 || !step
2810 || TREE_CODE (step) != INTEGER_CST
2811 || integer_zerop (step)
2812 || tree_contains_chrecs (init, NULL)
2813 || chrec_contains_symbols_defined_in_loop (init, loop->num))
2814 return true;
2815
2816 low = array_ref_low_bound (base);
2817 high = array_ref_up_bound (base);
2818
2819 /* The case of nonconstant bounds could be handled, but it would be
2820 complicated. */
2821 if (TREE_CODE (low) != INTEGER_CST
2822 || !high
2823 || TREE_CODE (high) != INTEGER_CST)
2824 return true;
2825 sign = tree_int_cst_sign_bit (step);
2826 type = TREE_TYPE (step);
2827
2828 /* The array of length 1 at the end of a structure most likely extends
2829 beyond its bounds. */
2830 if (at_end
2831 && operand_equal_p (low, high, 0))
2832 return true;
2833
2834 /* In case the relevant bound of the array does not fit in type, or
2835      it does, but bound + step (in type) still falls within the range of the
2836 array, the index may wrap and still stay within the range of the array
2837 (consider e.g. if the array is indexed by the full range of
2838 unsigned char).
2839
2840 To make things simpler, we require both bounds to fit into type, although
2841 there are cases where this would not be strictly necessary. */
2842 if (!int_fits_type_p (high, type)
2843 || !int_fits_type_p (low, type))
2844 return true;
2845 low = fold_convert (type, low);
2846 high = fold_convert (type, high);
2847
2848 if (sign)
2849 next = fold_binary (PLUS_EXPR, type, low, step);
2850 else
2851 next = fold_binary (PLUS_EXPR, type, high, step);
2852
2853 if (tree_int_cst_compare (low, next) <= 0
2854 && tree_int_cst_compare (next, high) <= 0)
2855 return true;
2856
2857   /* If the access is not executed on every iteration, we must ensure that
2858      overflow of the index cannot make the access valid later.  */
2859 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
2860 && scev_probably_wraps_p (initial_condition_in_loop_num (ev, loop->num),
2861 step, data->stmt, loop, true))
2862 reliable = false;
2863
2864 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, reliable, upper);
2865 return true;
2866 }
2867
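/* The classical source of such bounds is an out-of-bounds-free loop like

     int a[100];
     for (i = 0; ; i++)
       a[i] = 0;

   The access a[i] gives LOW 0 and HIGH 99, so the iv {0, +, 1} must stay
   within <0, 99> and the store executes at most 100 times, bounding the
   seemingly infinite loop.  */
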
2868 /* Determine information about the number of iterations of LOOP from the
2869    bounds of arrays in the data reference REF accessed in STMT.  RELIABLE is
2870    true if STMT is guaranteed to be executed in every iteration of LOOP.  */
2871
2872 static void
2873 infer_loop_bounds_from_ref (struct loop *loop, gimple stmt, tree ref)
2874 {
2875 struct ilb_data data;
2876
2877 data.loop = loop;
2878 data.stmt = stmt;
2879 for_each_index (&ref, idx_infer_loop_bounds, &data);
2880 }
2881
2882 /* Determine information about the number of iterations of LOOP from the
2883    way arrays are used in STMT.  RELIABLE is true if STMT is guaranteed to
2884    be executed in every iteration of LOOP.  */
2885
2886 static void
2887 infer_loop_bounds_from_array (struct loop *loop, gimple stmt)
2888 {
2889 if (is_gimple_assign (stmt))
2890 {
2891 tree op0 = gimple_assign_lhs (stmt);
2892 tree op1 = gimple_assign_rhs1 (stmt);
2893
2894 /* For each memory access, analyze its access function
2895 and record a bound on the loop iteration domain. */
2896 if (REFERENCE_CLASS_P (op0))
2897 infer_loop_bounds_from_ref (loop, stmt, op0);
2898
2899 if (REFERENCE_CLASS_P (op1))
2900 infer_loop_bounds_from_ref (loop, stmt, op1);
2901 }
2902 else if (is_gimple_call (stmt))
2903 {
2904 tree arg, lhs;
2905 unsigned i, n = gimple_call_num_args (stmt);
2906
2907 lhs = gimple_call_lhs (stmt);
2908 if (lhs && REFERENCE_CLASS_P (lhs))
2909 infer_loop_bounds_from_ref (loop, stmt, lhs);
2910
2911 for (i = 0; i < n; i++)
2912 {
2913 arg = gimple_call_arg (stmt, i);
2914 if (REFERENCE_CLASS_P (arg))
2915 infer_loop_bounds_from_ref (loop, stmt, arg);
2916 }
2917 }
2918 }
2919
2920 /* Determine information about the number of iterations of LOOP from the
2921    fact that pointer arithmetic in STMT does not overflow.  */
2922
2923 static void
2924 infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple stmt)
2925 {
2926 tree def, base, step, scev, type, low, high;
2927 tree var, ptr;
2928
2929 if (!is_gimple_assign (stmt)
2930 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
2931 return;
2932
2933 def = gimple_assign_lhs (stmt);
2934 if (TREE_CODE (def) != SSA_NAME)
2935 return;
2936
2937 type = TREE_TYPE (def);
2938 if (!nowrap_type_p (type))
2939 return;
2940
2941 ptr = gimple_assign_rhs1 (stmt);
2942 if (!expr_invariant_in_loop_p (loop, ptr))
2943 return;
2944
2945 var = gimple_assign_rhs2 (stmt);
2946 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
2947 return;
2948
2949 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
2950 if (chrec_contains_undetermined (scev))
2951 return;
2952
2953 base = initial_condition_in_loop_num (scev, loop->num);
2954 step = evolution_part_in_loop_num (scev, loop->num);
2955
2956 if (!base || !step
2957 || TREE_CODE (step) != INTEGER_CST
2958 || tree_contains_chrecs (base, NULL)
2959 || chrec_contains_symbols_defined_in_loop (base, loop->num))
2960 return;
2961
2962 low = lower_bound_in_type (type, type);
2963 high = upper_bound_in_type (type, type);
2964
2965 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
2966 produce a NULL pointer. The contrary would mean NULL points to an object,
2967 while NULL is supposed to compare unequal with the address of all objects.
2968 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
2969 NULL pointer since that would mean wrapping, which we assume here not to
2970 happen. So, we can exclude NULL from the valid range of pointer
2971 arithmetic. */
2972 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
2973 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
2974
2975 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
2976 }
2977
2978 /* Determine information about the number of iterations of LOOP from the
2979    fact that signed arithmetic in STMT does not overflow.  */
2980
2981 static void
2982 infer_loop_bounds_from_signedness (struct loop *loop, gimple stmt)
2983 {
2984 tree def, base, step, scev, type, low, high;
2985
2986 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2987 return;
2988
2989 def = gimple_assign_lhs (stmt);
2990
2991 if (TREE_CODE (def) != SSA_NAME)
2992 return;
2993
2994 type = TREE_TYPE (def);
2995 if (!INTEGRAL_TYPE_P (type)
2996 || !TYPE_OVERFLOW_UNDEFINED (type))
2997 return;
2998
2999 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3000 if (chrec_contains_undetermined (scev))
3001 return;
3002
3003 base = initial_condition_in_loop_num (scev, loop->num);
3004 step = evolution_part_in_loop_num (scev, loop->num);
3005
3006 if (!base || !step
3007 || TREE_CODE (step) != INTEGER_CST
3008 || tree_contains_chrecs (base, NULL)
3009 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3010 return;
3011
3012 low = lower_bound_in_type (type, type);
3013 high = upper_bound_in_type (type, type);
3014
3015 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3016 }
3017
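/* E.g. if STMT computes s_2 = s_1 + 1 for a signed char iv
   s_1 = {0, +, 1} and overflow is undefined, LOW and HIGH are -128 and
   127, so record_nonwrapping_iv derives that STMT executes at most
   (127 - 0) / 1 + 1 = 128 times; combined with the exit test this can
   bound the whole loop.  */
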
3018 /* The following analyzers extract information about the bounds of LOOP
3019    from the following undefined behaviors:
3020
3021    - data references should not access elements beyond the statically
3022      allocated size,
3023
3024 - signed variables should not overflow when flag_wrapv is not set.
3025 */
3026
3027 static void
3028 infer_loop_bounds_from_undefined (struct loop *loop)
3029 {
3030 unsigned i;
3031 basic_block *bbs;
3032 gimple_stmt_iterator bsi;
3033 basic_block bb;
3034 bool reliable;
3035
3036 bbs = get_loop_body (loop);
3037
3038 for (i = 0; i < loop->num_nodes; i++)
3039 {
3040 bb = bbs[i];
3041
3042 /* If BB is not executed in each iteration of the loop, we cannot
3043 	 use the operations in it to infer a reliable upper bound on the
3044 # of iterations of the loop. However, we can use it as a guess.
3045 Reliable guesses come only from array bounds. */
3046 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3047
3048 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3049 {
3050 gimple stmt = gsi_stmt (bsi);
3051
3052 infer_loop_bounds_from_array (loop, stmt);
3053
3054 if (reliable)
3055 {
3056 infer_loop_bounds_from_signedness (loop, stmt);
3057 infer_loop_bounds_from_pointer_arith (loop, stmt);
3058 }
3059 }
3060
3061 }
3062
3063 free (bbs);
3064 }
3065
3066 /* Compare wide ints, callback for qsort. */
3067
3068 static int
3069 wide_int_cmp (const void *p1, const void *p2)
3070 {
3071 const widest_int *d1 = (const widest_int *) p1;
3072 const widest_int *d2 = (const widest_int *) p2;
3073 return wi::cmpu (*d1, *d2);
3074 }
3075
3076 /* Return index of BOUND in BOUNDS array sorted in increasing order.
3077 Lookup by binary search. */
3078
3079 static int
3080 bound_index (vec<widest_int> bounds, const widest_int &bound)
3081 {
3082 unsigned int end = bounds.length ();
3083 unsigned int begin = 0;
3084
3085 /* Find a matching index by means of a binary search. */
3086 while (begin != end)
3087 {
3088 unsigned int middle = (begin + end) / 2;
3089 widest_int index = bounds[middle];
3090
3091 if (index == bound)
3092 return middle;
3093 else if (wi::ltu_p (index, bound))
3094 begin = middle + 1;
3095 else
3096 end = middle;
3097 }
3098 gcc_unreachable ();
3099 }
3100
3101 /* We recorded loop bounds only for statements dominating loop latch (and thus
3102 executed each loop iteration). If there are any bounds on statements not
3103 dominating the loop latch we can improve the estimate by walking the loop
3104 body and seeing if every path from loop header to loop latch contains
3105 some bounded statement. */
3106
3107 static void
3108 discover_iteration_bound_by_body_walk (struct loop *loop)
3109 {
3110 struct nb_iter_bound *elt;
3111 vec<widest_int> bounds = vNULL;
3112 vec<vec<basic_block> > queues = vNULL;
3113 vec<basic_block> queue = vNULL;
3114 ptrdiff_t queue_index;
3115 ptrdiff_t latch_index = 0;
3116
3117 /* Discover what bounds may interest us. */
3118 for (elt = loop->bounds; elt; elt = elt->next)
3119 {
3120 widest_int bound = elt->bound;
3121
3122       /* An exit terminates the loop at the given iteration, while a non-exit
3123 	 statement produces undefined behavior in the next iteration.  */
3124 if (!elt->is_exit)
3125 {
3126 bound += 1;
3127 /* If an overflow occurred, ignore the result. */
3128 if (bound == 0)
3129 continue;
3130 }
3131
3132 if (!loop->any_upper_bound
3133 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3134 bounds.safe_push (bound);
3135 }
3136
3137 /* Exit early if there is nothing to do. */
3138 if (!bounds.exists ())
3139 return;
3140
3141 if (dump_file && (dump_flags & TDF_DETAILS))
3142 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3143
3144   /* Sort the bounds in increasing order, as required by bound_index.  */
3145 bounds.qsort (wide_int_cmp);
3146
3147 /* For every basic block record the lowest bound that is guaranteed to
3148 terminate the loop. */
3149
3150 hash_map<basic_block, ptrdiff_t> bb_bounds;
3151 for (elt = loop->bounds; elt; elt = elt->next)
3152 {
3153 widest_int bound = elt->bound;
3154 if (!elt->is_exit)
3155 {
3156 bound += 1;
3157 /* If an overflow occurred, ignore the result. */
3158 if (bound == 0)
3159 continue;
3160 }
3161
3162 if (!loop->any_upper_bound
3163 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3164 {
3165 ptrdiff_t index = bound_index (bounds, bound);
3166 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
3167 if (!entry)
3168 bb_bounds.put (gimple_bb (elt->stmt), index);
3169 else if ((ptrdiff_t)*entry > index)
3170 *entry = index;
3171 }
3172 }
3173
3174 hash_map<basic_block, ptrdiff_t> block_priority;
3175
3176 /* Perform shortest path discovery loop->header ... loop->latch.
3177
3178      The "distance" is given by the smallest loop bound of a basic block
3179      present on the path, and we look for the path with the largest smallest
3180      bound on it.
3181
3182      To avoid the need for a Fibonacci heap on widest_ints we simply compress
3183      the bounds into indexes into the BOUNDS array and then represent the
3184      queue as an array of queues, one for every index.
3185      An index of BOUNDS.length () means that the execution of the given BB
3186      has no bound determined.
3187
3188      BLOCK_PRIORITY is a map translating a basic block into the smallest
3189      index it was inserted into the priority queue with.  */
3190 latch_index = -1;
3191
3192 /* Start walk in loop header with index set to infinite bound. */
3193 queue_index = bounds.length ();
3194 queues.safe_grow_cleared (queue_index + 1);
3195 queue.safe_push (loop->header);
3196 queues[queue_index] = queue;
3197 block_priority.put (loop->header, queue_index);
3198
3199 for (; queue_index >= 0; queue_index--)
3200 {
3201 if (latch_index < queue_index)
3202 {
3203 while (queues[queue_index].length ())
3204 {
3205 basic_block bb;
3206 ptrdiff_t bound_index = queue_index;
3207 edge e;
3208 edge_iterator ei;
3209
3210 queue = queues[queue_index];
3211 bb = queue.pop ();
3212
3213 	      /* We later inserted the BB with a higher priority; this stale
		 copy can be skipped.  */
3214 if (*block_priority.get (bb) > queue_index)
3215 continue;
3216
3217 /* See if we can improve the bound. */
3218 ptrdiff_t *entry = bb_bounds.get (bb);
3219 if (entry && *entry < bound_index)
3220 bound_index = *entry;
3221
3222 	      /* Insert successors into the queue, watch for the latch edge
3223 		 and record the greatest index we saw.  */
3224 FOR_EACH_EDGE (e, ei, bb->succs)
3225 {
3226 bool insert = false;
3227
3228 if (loop_exit_edge_p (loop, e))
3229 continue;
3230
3231 if (e == loop_latch_edge (loop)
3232 && latch_index < bound_index)
3233 latch_index = bound_index;
3234 else if (!(entry = block_priority.get (e->dest)))
3235 {
3236 insert = true;
3237 block_priority.put (e->dest, bound_index);
3238 }
3239 else if (*entry < bound_index)
3240 {
3241 insert = true;
3242 *entry = bound_index;
3243 }
3244
3245 if (insert)
3246 queues[bound_index].safe_push (e->dest);
3247 }
3248 }
3249 }
3250 queues[queue_index].release ();
3251 }
3252
3253 gcc_assert (latch_index >= 0);
3254 if ((unsigned)latch_index < bounds.length ())
3255 {
3256 if (dump_file && (dump_flags & TDF_DETAILS))
3257 {
3258 fprintf (dump_file, "Found better loop bound ");
3259 print_decu (bounds[latch_index], dump_file);
3260 fprintf (dump_file, "\n");
3261 }
3262 record_niter_bound (loop, bounds[latch_index], false, true);
3263 }
3264
3265 queues.release ();
3266 bounds.release ();
3267 }
3268
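/* As an illustration, consider a loop of the shape

     for (i = 0; i < 1000; i++)
       if (p)
	 a[i] = 0;	(where a has 10 elements)
       else
	 b[i] = 0;	(where b has 20 elements)

   Neither store dominates the latch, but every path from the header to
   the latch passes through one of them, so the walk above can lower the
   upper bound from (roughly) 1000 down to 20, the largest smallest
   bound found on a path.  */
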
3269 /* See if every path across the loop goes through a statement that is known
3270    not to execute in the last iteration.  In that case we can decrease the
3271    iteration count by 1.  */
3272
3273 static void
3274 maybe_lower_iteration_bound (struct loop *loop)
3275 {
3276 hash_set<gimple> *not_executed_last_iteration = NULL;
3277 struct nb_iter_bound *elt;
3278 bool found_exit = false;
3279 vec<basic_block> queue = vNULL;
3280 bitmap visited;
3281
3282 /* Collect all statements with interesting (i.e. lower than
3283 nb_iterations_upper_bound) bound on them.
3284
3285      TODO: Due to the way record_estimate chooses estimates to store, the bounds
3286      will always be nb_iterations_upper_bound-1.  We can change this to also
3287      record statements not dominating the loop latch and update the walk below
3288      to the shortest path algorithm.  */
3289 for (elt = loop->bounds; elt; elt = elt->next)
3290 {
3291 if (!elt->is_exit
3292 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
3293 {
3294 if (!not_executed_last_iteration)
3295 not_executed_last_iteration = new hash_set<gimple>;
3296 not_executed_last_iteration->add (elt->stmt);
3297 }
3298 }
3299 if (!not_executed_last_iteration)
3300 return;
3301
3302 /* Start DFS walk in the loop header and see if we can reach the
3303 loop latch or any of the exits (including statements with side
3304 effects that may terminate the loop otherwise) without visiting
3305 any of the statements known to have undefined effect on the last
3306 iteration. */
3307 queue.safe_push (loop->header);
3308 visited = BITMAP_ALLOC (NULL);
3309 bitmap_set_bit (visited, loop->header->index);
3310 found_exit = false;
3311
3312 do
3313 {
3314 basic_block bb = queue.pop ();
3315 gimple_stmt_iterator gsi;
3316 bool stmt_found = false;
3317
3318       /* Look for possible exits and statements bounding the execution.  */
3319 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3320 {
3321 gimple stmt = gsi_stmt (gsi);
3322 if (not_executed_last_iteration->contains (stmt))
3323 {
3324 stmt_found = true;
3325 break;
3326 }
3327 if (gimple_has_side_effects (stmt))
3328 {
3329 found_exit = true;
3330 break;
3331 }
3332 }
3333 if (found_exit)
3334 break;
3335
3336 /* If no bounding statement is found, continue the walk. */
3337 if (!stmt_found)
3338 {
3339 edge e;
3340 edge_iterator ei;
3341
3342 FOR_EACH_EDGE (e, ei, bb->succs)
3343 {
3344 if (loop_exit_edge_p (loop, e)
3345 || e == loop_latch_edge (loop))
3346 {
3347 found_exit = true;
3348 break;
3349 }
3350 if (bitmap_set_bit (visited, e->dest->index))
3351 queue.safe_push (e->dest);
3352 }
3353 }
3354 }
3355 while (queue.length () && !found_exit);
3356
3357   /* If every path through the loop reaches a bounding statement before an
3358      exit, then we know the last iteration of the loop would invoke undefined
3359      behavior and we can decrease the number of iterations.  */
3360
3361 if (!found_exit)
3362 {
3363 if (dump_file && (dump_flags & TDF_DETAILS))
3364 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
3365 "undefined statement must be executed at the last iteration.\n");
3366 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
3367 false, true);
3368 }
3369 BITMAP_FREE (visited);
3370 queue.release ();
3371 delete not_executed_last_iteration;
3372 }
3373
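/* For instance, if a loop has the upper bound N and on every path
   through its body an array store whose recorded bound is N - 1 (the
   store would be out of range in the last iteration), the DFS above
   never reaches the latch or an exit without meeting such a statement,
   and the upper bound is lowered to N - 1.  */
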
3374 /* Records estimates on the number of iterations of LOOP.  Estimates derived
3375    from undefined behavior are used only with -faggressive-loop-optimizations.  */
3376
3377 static void
3378 estimate_numbers_of_iterations_loop (struct loop *loop)
3379 {
3380 vec<edge> exits;
3381 tree niter, type;
3382 unsigned i;
3383 struct tree_niter_desc niter_desc;
3384 edge ex;
3385 widest_int bound;
3386 edge likely_exit;
3387
3388 /* Give up if we already have tried to compute an estimation. */
3389 if (loop->estimate_state != EST_NOT_COMPUTED)
3390 return;
3391
3392 loop->estimate_state = EST_AVAILABLE;
3393   /* Force estimate computation but leave any existing upper bound in place.  */
3394 loop->any_estimate = false;
3395
3396 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
3397      to be constant, we avoid bounds implied by undefined behavior and instead
3398 diagnose those loops with -Waggressive-loop-optimizations. */
3399 number_of_latch_executions (loop);
3400
3401 exits = get_loop_exit_edges (loop);
3402 likely_exit = single_likely_exit (loop);
3403 FOR_EACH_VEC_ELT (exits, i, ex)
3404 {
3405 if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
3406 continue;
3407
3408 niter = niter_desc.niter;
3409 type = TREE_TYPE (niter);
3410 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
3411 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
3412 build_int_cst (type, 0),
3413 niter);
3414 record_estimate (loop, niter, niter_desc.max,
3415 last_stmt (ex->src),
3416 true, ex == likely_exit, true);
3417 }
3418 exits.release ();
3419
3420 if (flag_aggressive_loop_optimizations)
3421 infer_loop_bounds_from_undefined (loop);
3422
3423 discover_iteration_bound_by_body_walk (loop);
3424
3425 maybe_lower_iteration_bound (loop);
3426
3427 /* If we have a measured profile, use it to estimate the number of
3428 iterations. */
3429 if (loop->header->count != 0)
3430 {
3431 gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
3432 bound = gcov_type_to_wide_int (nit);
3433 record_niter_bound (loop, bound, true, false);
3434 }
3435
3436 /* If we know the exact number of iterations of this loop, try to
3437      not break code with undefined behavior by not recording a smaller
3438      maximum number of iterations.  */
3439 if (loop->nb_iterations
3440 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
3441 {
3442 loop->any_upper_bound = true;
3443 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
3444 }
3445 }
3446
3447 /* Sets NIT to the estimated number of executions of the latch of the
3448    LOOP.  The estimate is only realistic and need not be an upper bound
3449    on the actual number of iterations.  If we have no reliable estimate,
3450    the function returns false, otherwise returns true.  */
3451
3452 bool
3453 estimated_loop_iterations (struct loop *loop, widest_int *nit)
3454 {
3455 /* When SCEV information is available, try to update loop iterations
3456 estimate. Otherwise just return whatever we recorded earlier. */
3457 if (scev_initialized_p ())
3458 estimate_numbers_of_iterations_loop (loop);
3459
3460 return (get_estimated_loop_iterations (loop, nit));
3461 }
3462
3463 /* Similar to estimated_loop_iterations, but returns the estimate only
3464    if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
3465 on the number of iterations of LOOP could not be derived, returns -1. */
3466
3467 HOST_WIDE_INT
3468 estimated_loop_iterations_int (struct loop *loop)
3469 {
3470 widest_int nit;
3471 HOST_WIDE_INT hwi_nit;
3472
3473 if (!estimated_loop_iterations (loop, &nit))
3474 return -1;
3475
3476 if (!wi::fits_shwi_p (nit))
3477 return -1;
3478 hwi_nit = nit.to_shwi ();
3479
3480 return hwi_nit < 0 ? -1 : hwi_nit;
3481 }
3482
3483
3484 /* Sets NIT to an upper bound for the maximum number of executions of the
3485 latch of the LOOP. If we have no reliable estimate, the function returns
3486 false, otherwise returns true. */
3487
3488 bool
3489 max_loop_iterations (struct loop *loop, widest_int *nit)
3490 {
3491 /* When SCEV information is available, try to update loop iterations
3492 estimate. Otherwise just return whatever we recorded earlier. */
3493 if (scev_initialized_p ())
3494 estimate_numbers_of_iterations_loop (loop);
3495
3496 return get_max_loop_iterations (loop, nit);
3497 }
3498
3499 /* Similar to max_loop_iterations, but returns the estimate only
3500    if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
3501 on the number of iterations of LOOP could not be derived, returns -1. */
3502
3503 HOST_WIDE_INT
3504 max_loop_iterations_int (struct loop *loop)
3505 {
3506 widest_int nit;
3507 HOST_WIDE_INT hwi_nit;
3508
3509 if (!max_loop_iterations (loop, &nit))
3510 return -1;
3511
3512 if (!wi::fits_shwi_p (nit))
3513 return -1;
3514 hwi_nit = nit.to_shwi ();
3515
3516 return hwi_nit < 0 ? -1 : hwi_nit;
3517 }
3518
3519 /* Returns an estimate for the number of executions of statements
3520 in the LOOP. For statements before the loop exit, this exceeds
3521    the number of executions of the latch by one.  */
3522
3523 HOST_WIDE_INT
3524 estimated_stmt_executions_int (struct loop *loop)
3525 {
3526 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
3527 HOST_WIDE_INT snit;
3528
3529 if (nit == -1)
3530 return -1;
3531
3532 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
3533
3534 /* If the computation overflows, return -1. */
3535 return snit < 0 ? -1 : snit;
3536 }
3537
3538 /* Sets NIT to the estimated maximum number of executions of the latch of the
3539 LOOP, plus one. If we have no reliable estimate, the function returns
3540 false, otherwise returns true. */
3541
3542 bool
3543 max_stmt_executions (struct loop *loop, widest_int *nit)
3544 {
3545 widest_int nit_minus_one;
3546
3547 if (!max_loop_iterations (loop, nit))
3548 return false;
3549
3550 nit_minus_one = *nit;
3551
3552 *nit += 1;
3553
3554 return wi::gtu_p (*nit, nit_minus_one);
3555 }
3556
3557 /* Sets NIT to the estimated number of executions of the latch of the
3558 LOOP, plus one. If we have no reliable estimate, the function returns
3559 false, otherwise returns true. */
3560
3561 bool
3562 estimated_stmt_executions (struct loop *loop, widest_int *nit)
3563 {
3564 widest_int nit_minus_one;
3565
3566 if (!estimated_loop_iterations (loop, nit))
3567 return false;
3568
3569 nit_minus_one = *nit;
3570
3571 *nit += 1;
3572
3573 return wi::gtu_p (*nit, nit_minus_one);
3574 }
3575
3576 /* Records estimates on numbers of iterations of loops. */
3577
3578 void
3579 estimate_numbers_of_iterations (void)
3580 {
3581 struct loop *loop;
3582
3583 /* We don't want to issue signed overflow warnings while getting
3584 loop iteration estimates. */
3585 fold_defer_overflow_warnings ();
3586
3587 FOR_EACH_LOOP (loop, 0)
3588 {
3589 estimate_numbers_of_iterations_loop (loop);
3590 }
3591
3592 fold_undefer_and_ignore_overflow_warnings ();
3593 }
3594
3595 /* Returns true if statement S1 dominates statement S2. */
3596
3597 bool
3598 stmt_dominates_stmt_p (gimple s1, gimple s2)
3599 {
3600 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
3601
3602 if (!bb1
3603 || s1 == s2)
3604 return true;
3605
3606 if (bb1 == bb2)
3607 {
3608 gimple_stmt_iterator bsi;
3609
3610 if (gimple_code (s2) == GIMPLE_PHI)
3611 return false;
3612
3613 if (gimple_code (s1) == GIMPLE_PHI)
3614 return true;
3615
3616 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
3617 if (gsi_stmt (bsi) == s1)
3618 return true;
3619
3620 return false;
3621 }
3622
3623 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
3624 }
3625
3626 /* Returns true when we can prove that the number of executions of
3627 STMT in the loop is at most NITER, according to the bound on
3628 the number of executions of the statement NITER_BOUND->stmt recorded in
3629    NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
3630
3631 ??? This code can become quite a CPU hog - we can have many bounds,
3632    and large basic blocks forcing stmt_dominates_stmt_p to be queried
3633    many times, so the whole thing is O(n^2)
3634    per scev_probably_wraps_p invocation (which can be done n times).
3635
3636 It would make more sense (and give better answers) to remember BB
3637 bounds computed by discover_iteration_bound_by_body_walk. */
3638
3639 static bool
3640 n_of_executions_at_most (gimple stmt,
3641 struct nb_iter_bound *niter_bound,
3642 tree niter)
3643 {
3644 widest_int bound = niter_bound->bound;
3645 tree nit_type = TREE_TYPE (niter), e;
3646 enum tree_code cmp;
3647
3648 gcc_assert (TYPE_UNSIGNED (nit_type));
3649
3650 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
3651 the number of iterations is small. */
3652 if (!wi::fits_to_tree_p (bound, nit_type))
3653 return false;
3654
3655 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
3656 times. This means that:
3657
3658 -- if NITER_BOUND->is_exit is true, then everything after
3659 it is executed at most NITER_BOUND->bound times.
3660
3661 -- if NITER_BOUND->is_exit is false, and we can prove that whenever STMT
3662 is executed, NITER_BOUND->stmt is executed as well in the same
3663 iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
3664
3665 If instead we can only show that NITER_BOUND->stmt is always executed
3666 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
3667 We conclude the latter if both statements belong to the same
3668 basic block, STMT appears before NITER_BOUND->stmt, and there are no
3669 statements with side effects in between. */
3670
3671 if (niter_bound->is_exit)
3672 {
3673 if (stmt == niter_bound->stmt
3674 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
3675 return false;
3676 cmp = GE_EXPR;
3677 }
3678 else
3679 {
3680 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
3681 {
3682 gimple_stmt_iterator bsi;
3683 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
3684 || gimple_code (stmt) == GIMPLE_PHI
3685 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
3686 return false;
3687
3688 /* The failed stmt_dominates_stmt_p check above tells us that STMT
3689 appears before NITER_BOUND->STMT in their shared block. We still need
3690 to test that the loop cannot be terminated by a side effect in between. */
3691 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
3692 gsi_next (&bsi))
3693 if (gimple_has_side_effects (gsi_stmt (bsi)))
3694 return false;
3695 bound += 1;
3696 if (bound == 0
3697 || !wi::fits_to_tree_p (bound, nit_type))
3698 return false;
3699 }
3700 cmp = GT_EXPR;
3701 }
3702
3703 e = fold_binary (cmp, boolean_type_node,
3704 niter, wide_int_to_tree (nit_type, bound));
3705 return e && integer_nonzerop (e);
3706 }
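/* A worked instance of the comparison built above: suppose
   NITER_BOUND->bound is 9, i.e. NITER_BOUND->stmt executes at most 10
   times.  If it is an exit test, statements it dominates run at most
   9 times and we require NITER >= 9 (GE_EXPR).  If it is not an exit
   but dominates STMT, STMT may run once more, so we require NITER > 9
   (GT_EXPR), i.e. NITER >= 10; in the same-block fallback the bound
   is first incremented, effectively requiring NITER >= 11.  */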
3707
3708 /* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
3709
3710 bool
3711 nowrap_type_p (tree type)
3712 {
3713 if (INTEGRAL_TYPE_P (type)
3714 && TYPE_OVERFLOW_UNDEFINED (type))
3715 return true;
3716
3717 if (POINTER_TYPE_P (type))
3718 return true;
3719
3720 return false;
3721 }
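/* For instance, unless -fwrapv is in effect a signed "int" induction
   variable has TYPE_OVERFLOW_UNDEFINED set and is treated as
   non-wrapping here, as is any pointer type, while an "unsigned int"
   variable wraps modulo 2^32 and makes this predicate return false.  */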
3722
3723 /* Return false only when the induction variable BASE + STEP * I is
3724 known not to overflow: i.e. when the number of iterations is small
3725 enough with respect to the step and initial condition in order to
3726 keep the evolution confined in TYPE's bounds. Return true when the
3727 iv is known to overflow or when the property is not computable.
3728
3729 USE_OVERFLOW_SEMANTICS is true if this function should assume that
3730 the rules for overflow of the given language apply (e.g., that signed
3731 arithmetic in C does not overflow). */
3732
3733 bool
3734 scev_probably_wraps_p (tree base, tree step,
3735 gimple at_stmt, struct loop *loop,
3736 bool use_overflow_semantics)
3737 {
3738 tree delta, step_abs;
3739 tree unsigned_type, valid_niter;
3740 tree type = TREE_TYPE (step);
3741 tree e;
3742 widest_int niter;
3743 struct nb_iter_bound *bound;
3744
3745 /* FIXME: We really need something like
3746 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
3747
3748 We used to test for the following situation that frequently appears
3749 during address arithmetic:
3750
3751 D.1621_13 = (long unsigned intD.4) D.1620_12;
3752 D.1622_14 = D.1621_13 * 8;
3753 D.1623_15 = (doubleD.29 *) D.1622_14;
3754
3755 And we derived that the sequence corresponding to D_14
3756 can be proved not to wrap because it is used for computing a
3757 memory access; however, this is not really the case -- for example,
3758 if D_12 = (unsigned char) [254,+,1], then D_14 has values
3759 2032, 2040, 0, 8, ..., but the code is still legal. */
3760
3761 if (chrec_contains_undetermined (base)
3762 || chrec_contains_undetermined (step))
3763 return true;
3764
3765 if (integer_zerop (step))
3766 return false;
3767
3768 /* If we can use the fact that signed and pointer arithmetic does not
3769 wrap, we are done. */
3770 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
3771 return false;
3772
3773 /* To be able to use estimates on number of iterations of the loop,
3774 we must have an upper bound on the absolute value of the step. */
3775 if (TREE_CODE (step) != INTEGER_CST)
3776 return true;
3777
3778 /* Don't issue signed overflow warnings. */
3779 fold_defer_overflow_warnings ();
3780
3781 /* Otherwise, compute the number of iterations before we reach the
3782 bound of the type, and verify that the loop is exited before this
3783 occurs. */
3784 unsigned_type = unsigned_type_for (type);
3785 base = fold_convert (unsigned_type, base);
3786
3787 if (tree_int_cst_sign_bit (step))
3788 {
3789 tree extreme = fold_convert (unsigned_type,
3790 lower_bound_in_type (type, type));
3791 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3792 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
3793 fold_convert (unsigned_type, step));
3794 }
3795 else
3796 {
3797 tree extreme = fold_convert (unsigned_type,
3798 upper_bound_in_type (type, type));
3799 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3800 step_abs = fold_convert (unsigned_type, step);
3801 }
3802
3803 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
3804
3805 estimate_numbers_of_iterations_loop (loop);
3806
3807 if (max_loop_iterations (loop, &niter)
3808 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
3809 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
3810 wide_int_to_tree (TREE_TYPE (valid_niter),
3811 niter))) != NULL
3812 && integer_nonzerop (e))
3813 {
3814 fold_undefer_and_ignore_overflow_warnings ();
3815 return false;
3816 }
3817 if (at_stmt)
3818 for (bound = loop->bounds; bound; bound = bound->next)
3819 {
3820 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
3821 {
3822 fold_undefer_and_ignore_overflow_warnings ();
3823 return false;
3824 }
3825 }
3826
3827 fold_undefer_and_ignore_overflow_warnings ();
3828
3829 /* At this point we still don't have a proof that the iv does not
3830 overflow: give up. */
3831 return true;
3832 }
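/* A worked numeric instance of the computation above: for a signed
   char IV with BASE = 100 and STEP = 2, the type's upper bound is
   127, so DELTA = 127 - 100 = 27, STEP_ABS = 2, and VALID_NITER =
   floor (27 / 2) = 13; thirteen steps keep the value in range
   (100 + 13 * 2 = 126 <= 127).  If the recorded maximum number of
   latch executions is at most 12, the VALID_NITER > NITER test above
   succeeds and we know the IV does not wrap.  */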
3833
3834 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
3835
3836 void
3837 free_numbers_of_iterations_estimates_loop (struct loop *loop)
3838 {
3839 struct nb_iter_bound *bound, *next;
3840
3841 loop->nb_iterations = NULL;
3842 loop->estimate_state = EST_NOT_COMPUTED;
3843 for (bound = loop->bounds; bound; bound = next)
3844 {
3845 next = bound->next;
3846 ggc_free (bound);
3847 }
3848
3849 loop->bounds = NULL;
3850 }
3851
3852 /* Frees the information on upper bounds on numbers of iterations of loops. */
3853
3854 void
3855 free_numbers_of_iterations_estimates (void)
3856 {
3857 struct loop *loop;
3858
3859 FOR_EACH_LOOP (loop, 0)
3860 {
3861 free_numbers_of_iterations_estimates_loop (loop);
3862 }
3863 }
3864
3865 /* Substitute value VAL for ssa name NAME inside expressions held
3866 in LOOP. */
3867
3868 void
3869 substitute_in_loop_info (struct loop *loop, tree name, tree val)
3870 {
3871 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
3872 }
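/* For example (with hypothetical values), if LOOP->nb_iterations is
   the expression n_5 + 1 and constant propagation discovers that the
   SSA name n_5 is 99, a caller can do

     substitute_in_loop_info (loop, n_5, build_int_cst
			      (integer_type_node, 99));

   and simplify_replace_tree will fold the cached expression to 100.  */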